Column schema (dtype, observed min to max or number of classes):

hexsha: stringlengths, 40 to 40
size: int64, 6 to 14.9M
ext: stringclasses, 1 value
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 6 to 260
max_stars_repo_name: stringlengths, 6 to 119
max_stars_repo_head_hexsha: stringlengths, 40 to 41
max_stars_repo_licenses: list
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 6 to 260
max_issues_repo_name: stringlengths, 6 to 119
max_issues_repo_head_hexsha: stringlengths, 40 to 41
max_issues_repo_licenses: list
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 6 to 260
max_forks_repo_name: stringlengths, 6 to 119
max_forks_repo_head_hexsha: stringlengths, 40 to 41
max_forks_repo_licenses: list
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
avg_line_length: float64, 2 to 1.04M
max_line_length: int64, 2 to 11.2M
alphanum_fraction: float64, 0 to 1
cells: list
cell_types: list
cell_type_groups: list
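The sample rows below list these columns in order, one value per line, with the notebook payload (cells, cell_types, cell_type_groups) last. As a rough sketch of how the fields fit together, the snippet below reads such rows back and prints the per-notebook metadata; it assumes the rows have been exported as JSON Lines keyed by the column names above, and the file name samples.jsonl is hypothetical.

```python
import json

# Assumption: one JSON object per line, keyed by the column names in the
# schema above. "samples.jsonl" is a hypothetical file name for illustration.
with open("samples.jsonl", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

for row in rows:
    # Repository, path, and size of the notebook this row was taken from.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print("  size (bytes):", row["size"])
    # Popularity counters may be null (None) when no event was recorded.
    print("  stars / issues / forks:",
          row["max_stars_count"], row["max_issues_count"], row["max_forks_count"])
    # Simple text statistics computed over the notebook source.
    print("  avg_line_length:", row["avg_line_length"])
    print("  max_line_length:", row["max_line_length"])
    print("  alphanum_fraction:", row["alphanum_fraction"])
    # cells / cell_types / cell_type_groups hold the notebook content itself.
    print("  cell groups:", len(row["cell_type_groups"]))
```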

hexsha: c51e6f68639722693aa72c317b21d1a1a9991fef
size: 52,990
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: AMAN_NLP_VIMP-CODE/Project-5_Sentiment_Analysis_Amn/Hotel review sentiment analysis.ipynb
max_stars_repo_name: vitthalkcontact/NLP
max_stars_repo_head_hexsha: 97ecb4fe31bb43b97010e7ff7c503d833d816ce5
max_stars_repo_licenses: [ "Unlicense" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-10-14T14:18:55.000Z
max_stars_repo_stars_event_max_datetime: 2020-10-14T14:18:55.000Z
max_issues_repo_path: AMAN_NLP_VIMP-CODE/Project-5_Sentiment_Analysis_Amn/Hotel review sentiment analysis.ipynb
max_issues_repo_name: vitthalkcontact/NLP
max_issues_repo_head_hexsha: 97ecb4fe31bb43b97010e7ff7c503d833d816ce5
max_issues_repo_licenses: [ "Unlicense" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: AMAN_NLP_VIMP-CODE/Project-5_Sentiment_Analysis_Amn/Hotel review sentiment analysis.ipynb
max_forks_repo_name: vitthalkcontact/NLP
max_forks_repo_head_hexsha: 97ecb4fe31bb43b97010e7ff7c503d833d816ce5
max_forks_repo_licenses: [ "Unlicense" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 46.31993
max_line_length: 9,236
alphanum_fraction: 0.616022
[ [ [ "# NLP - Hotel review sentiment analysis in python", "_____no_output_____" ] ], [ [ "#warnings :)\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport os\ndir_Path = 'D:\\\\01_DATA_SCIENCE_FINAL\\\\D-00000-NLP\\\\NLP-CODES\\\\AMAN-NLP-CODES\\\\AMAN_NLP_VIMP-CODE\\\\Project-6_Sentiment_Analysis_Amn\\\\'\nos.chdir(dir_Path)", "_____no_output_____" ] ], [ [ "## Data Facts and Import ", "_____no_output_____" ] ], [ [ "import pandas as pd \n# Local directory\nReviewdata = pd.read_csv('train.csv')\n#Data Credit - https://www.kaggle.com/anu0012/hotel-review/data", "_____no_output_____" ], [ "Reviewdata.head()", "_____no_output_____" ], [ "Reviewdata.shape", "_____no_output_____" ], [ "Reviewdata.head()", "_____no_output_____" ], [ "Reviewdata.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 38932 entries, 0 to 38931\nData columns (total 5 columns):\nUser_ID 38932 non-null object\nDescription 38932 non-null object\nBrowser_Used 38932 non-null object\nDevice_Used 38932 non-null object\nIs_Response 38932 non-null object\ndtypes: object(5)\nmemory usage: 1.5+ MB\n" ], [ "Reviewdata.describe().transpose()", "_____no_output_____" ] ], [ [ "## Data Cleaning / EDA", "_____no_output_____" ] ], [ [ "### Checking Missing values in the Data Set and printing the Percentage for Missing Values for Each Columns ###\n\ncount = Reviewdata.isnull().sum().sort_values(ascending=False)\npercentage = ((Reviewdata.isnull().sum()/len(Reviewdata)*100)).sort_values(ascending=False)\nmissing_data = pd.concat([count, percentage], axis=1,\nkeys=['Count','Percentage'])\n\nprint('Count and percentage of missing values for the columns:')\n\nmissing_data", "Count and percentage of missing values for the columns:\n" ], [ "print(\"Missing values count:\")\nprint(Reviewdata.Is_Response.value_counts())\n\nprint(\"*\"*12)\n\nprint(\"Missing values %ge:\")\nprint(round(Reviewdata.Is_Response.value_counts(normalize=True)*100),2)\n\nprint(\"*\"*12)\n \nimport seaborn as sns\nsns.countplot(Reviewdata.Is_Response)\nplt.show()", "Missing values count:\nhappy 26521\nnot happy 12411\nName: Is_Response, dtype: int64\n************\nMissing values %ge:\nhappy 68.0\nnot happy 32.0\nName: Is_Response, dtype: float64 2\n************\n" ], [ "### Checking for the Distribution of Default ###\nimport matplotlib.pyplot as plt\n%matplotlib inline\nprint('Percentage for default\\n')\nprint(round(Reviewdata.Is_Response.value_counts(normalize=True)*100,2))\nround(Reviewdata.Is_Response.value_counts(normalize=True)*100,2).plot(kind='bar')\nplt.title('Percentage Distributions by review type')\nplt.show()", "Percentage for default\n\nhappy 68.12\nnot happy 31.88\nName: Is_Response, dtype: float64\n" ], [ "#Removing columns\nReviewdata.drop(columns = ['User_ID', 'Browser_Used', 'Device_Used'], inplace = True)", "_____no_output_____" ], [ "# Apply first level cleaning\nimport re\nimport string\n\n#This function converts to lower-case, removes square bracket, removes numbers and punctuation\ndef text_clean_1(text):\n text = text.lower()\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n text = re.sub('\\w*\\d\\w*', '', text)\n return text\n\ncleaned1 = lambda x: text_clean_1(x)", "_____no_output_____" ], [ "# Let's take a look at the updated text\nReviewdata['cleaned_description'] = pd.DataFrame(Reviewdata.Description.apply(cleaned1))\nReviewdata.head(10)", "_____no_output_____" ], [ "# Apply a second round of cleaning\ndef text_clean_2(text):\n text = re.sub('[‘’“”…]', '', 
text)\n text = re.sub('\\n', '', text)\n return text\n\ncleaned2 = lambda x: text_clean_2(x)", "_____no_output_____" ], [ "# Let's take a look at the updated text\nReviewdata['cleaned_description_new'] = pd.DataFrame(Reviewdata['cleaned_description'].apply(cleaned2))\nReviewdata.head(10)", "_____no_output_____" ] ], [ [ "## Model training ", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nIndependent_var = Reviewdata.cleaned_description_new\nDependent_var = Reviewdata.Is_Response\n\nIV_train, IV_test, DV_train, DV_test = train_test_split(Independent_var, Dependent_var, test_size = 0.1, random_state = 225)\n\nprint('IV_train :', len(IV_train))\nprint('IV_test  :', len(IV_test))\nprint('DV_train :', len(DV_train))\nprint('DV_test  :', len(DV_test))\n", "IV_train : 35038\nIV_test  : 3894\nDV_train : 35038\nDV_test  : 3894\n" ], [ "from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\ntvec = TfidfVectorizer()\nclf2 = LogisticRegression(solver = \"lbfgs\")\n\n\nfrom sklearn.pipeline import Pipeline", "_____no_output_____" ], [ "model = Pipeline([('vectorizer',tvec),('classifier',clf2)])\n\nmodel.fit(IV_train, DV_train)\n\n\nfrom sklearn.metrics import confusion_matrix\n\npredictions = model.predict(IV_test)\n\nconfusion_matrix(predictions, DV_test)", "_____no_output_____" ] ], [ [ "## Model prediciton ", "_____no_output_____" ] ], [ [ "from sklearn.metrics import accuracy_score, precision_score, recall_score\n\nprint(\"Accuracy : \", accuracy_score(predictions, DV_test))\nprint(\"Precision : \", precision_score(predictions, DV_test, average = 'weighted'))\nprint(\"Recall : \", recall_score(predictions, DV_test, average = 'weighted'))", "Accuracy : 0.8823831535695943\nPrecision : 0.8890590818181386\nRecall : 0.8823831535695943\n" ] ], [ [ "## Trying on new reviews ", "_____no_output_____" ] ], [ [ "example = [\"I'm happy\"]\nresult = model.predict(example)\n\nprint(result)", "['happy']\n" ], [ "example = [\"I'm frustrated\"]\nresult = model.predict(example)\n\nprint(result)", "['not happy']\n" ], [ "# Drawback???\nexample = [\"I'm not happy\"]\nresult = model.predict(example)\n\nprint(result)", "['happy']\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]

hexsha: c51e71e490ab71aea9950255e3ffc36af309e708
size: 35,191
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: docs_src/callbacks.general_sched.ipynb
max_stars_repo_name: ivaylojelev/fastai1
max_stars_repo_head_hexsha: e201aa26c7064c6b5fff29e677a3f66950bc95ff
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 59
max_stars_repo_stars_event_min_datetime: 2020-08-18T03:41:35.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-23T03:51:55.000Z
max_issues_repo_path: docs_src/callbacks.general_sched.ipynb
max_issues_repo_name: ivaylojelev/fastai1
max_issues_repo_head_hexsha: e201aa26c7064c6b5fff29e677a3f66950bc95ff
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 17
max_issues_repo_issues_event_min_datetime: 2020-08-25T14:15:32.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-27T02:12:19.000Z
max_forks_repo_path: docs_src/callbacks.general_sched.ipynb
max_forks_repo_name: ivaylojelev/fastai1
max_forks_repo_head_hexsha: e201aa26c7064c6b5fff29e677a3f66950bc95ff
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 89
max_forks_repo_forks_event_min_datetime: 2020-08-17T23:45:42.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-27T20:53:43.000Z
avg_line_length: 82.607981
max_line_length: 19,804
alphanum_fraction: 0.789179
[ [ [ "## TrainingPhase and General scheduler", "_____no_output_____" ], [ "Creates a scheduler that lets you train a model with following different [`TrainingPhase`](/callbacks.general_sched.html#TrainingPhase).", "_____no_output_____" ] ], [ [ "from fastai.gen_doc.nbdoc import *\nfrom fastai.callbacks.general_sched import * \nfrom fastai.vision import *", "_____no_output_____" ], [ "show_doc(TrainingPhase)", "_____no_output_____" ] ], [ [ "You can then schedule any hyper-parameter you want by using the following method.", "_____no_output_____" ] ], [ [ "show_doc(TrainingPhase.schedule_hp)", "_____no_output_____" ] ], [ [ "The phase will make the hyper-parameter vary from the first value in `vals` to the second, following `anneal`. If an annealing function is specified but `vals` is a float, it will decay to 0. If no annealing function is specified, the default is a linear annealing for a tuple, a constant parameter if it's a float. ", "_____no_output_____" ] ], [ [ "jekyll_note(\"\"\"If you want to use discriminative values, you can pass an numpy array in `vals` (or a tuple\nof them for start and stop).\"\"\")", "_____no_output_____" ] ], [ [ "The basic hyper-parameters are named:\n- 'lr' for learning rate\n- 'mom' for momentum (or beta1 in Adam)\n- 'beta' for the beta2 in Adam or the alpha in RMSprop\n- 'wd' for weight decay\n\nYou can also add any hyper-parameter that is in your optimizer (even if it's custom or a [`GeneralOptimizer`](/general_optimizer.html#GeneralOptimizer)), like 'eps' if you're using Adam. ", "_____no_output_____" ], [ "Let's make an example by using this to code [SGD with warm restarts](https://arxiv.org/abs/1608.03983).", "_____no_output_____" ] ], [ [ "def fit_sgd_warm(learn, n_cycles, lr, mom, cycle_len, cycle_mult):\n n = len(learn.data.train_dl)\n phases = [(TrainingPhase(n * (cycle_len * cycle_mult**i))\n .schedule_hp('lr', lr, anneal=annealing_cos)\n .schedule_hp('mom', mom)) for i in range(n_cycles)]\n sched = GeneralScheduler(learn, phases)\n learn.callbacks.append(sched)\n if cycle_mult != 1:\n total_epochs = int(cycle_len * (1 - (cycle_mult)**n_cycles)/(1-cycle_mult)) \n else: total_epochs = n_cycles * cycle_len\n learn.fit(total_epochs)", "_____no_output_____" ], [ "path = untar_data(URLs.MNIST_SAMPLE)\ndata = ImageDataBunch.from_folder(path)\nlearn = Learner(data, simple_cnn((3,16,16,2)), metrics=accuracy)\nfit_sgd_warm(learn, 3, 1e-3, 0.9, 1, 2)", "_____no_output_____" ], [ "learn.recorder.plot_lr()", "_____no_output_____" ], [ "show_doc(GeneralScheduler)", "_____no_output_____" ] ], [ [ "### Callback methods", "_____no_output_____" ], [ "You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality.", "_____no_output_____" ] ], [ [ "show_doc(GeneralScheduler.on_batch_end, doc_string=False)", "_____no_output_____" ] ], [ [ "Takes a step in the current phase and prepare the hyperparameters for the next batch.", "_____no_output_____" ] ], [ [ "show_doc(GeneralScheduler.on_train_begin, doc_string=False)", "_____no_output_____" ] ], [ [ "Initiates the hyperparameters to the start values of the first phase. ", "_____no_output_____" ], [ "## Undocumented Methods - Methods moved below this line will intentionally be hidden", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]

hexsha: c51ea92316cd73bc2d7cf9dc0a24e17bc20d57fa
size: 7,466
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: doc/source/problems/custom.ipynb
max_stars_repo_name: Alaya-in-Matrix/pymoo
max_stars_repo_head_hexsha: 02d6e7085f5fe88dbd56b2a9f5173abe20c54caf
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2021-03-28T03:06:35.000Z
max_stars_repo_stars_event_max_datetime: 2021-03-28T03:40:08.000Z
max_issues_repo_path: doc/source/problems/custom.ipynb
max_issues_repo_name: Alaya-in-Matrix/pymoo
max_issues_repo_head_hexsha: 02d6e7085f5fe88dbd56b2a9f5173abe20c54caf
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: doc/source/problems/custom.ipynb
max_forks_repo_name: Alaya-in-Matrix/pymoo
max_forks_repo_head_hexsha: 02d6e7085f5fe88dbd56b2a9f5173abe20c54caf
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-03-31T08:19:13.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-31T08:19:13.000Z
avg_line_length: 28.826255
max_line_length: 372
alphanum_fraction: 0.536298
[ [ [ ".. _nb_custom:", "_____no_output_____" ] ], [ [ "## Problem Definition", "_____no_output_____" ], [ "In the following different ways of loading or implementing an optimization problem in our framework are discussed.", "_____no_output_____" ], [ "### By Class\n\nA very detailed description of defining a problem through a class is already provided in the [Getting Started Guide](../getting_started.ipynb).\nThe following definition of a simple optimization problem with **one** objective and **two** constraints is considered. The problem has two constants, *const_1* and *const_2*, which can be modified by initiating the problem with different parameters. By default, it consists of 10 variables, and the lower and upper bounds are within $[-5, 5]$ for all variables. \n\n**Note**: The example below uses the `autograd` library, which calculates the gradients through automatic differentiation.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport autograd.numpy as anp\n\nfrom pymoo.model.problem import Problem\n\nclass MyProblem(Problem):\n\n def __init__(self, const_1=5, const_2=0.1):\n\n # define lower and upper bounds - 1d array with length equal to number of variable\n xl = -5 * anp.ones(10)\n xu = 5 * anp.ones(10)\n\n super().__init__(n_var=10, n_obj=1, n_constr=2, xl=xl, xu=xu, evaluation_of=\"auto\")\n\n # store custom variables needed for evaluation\n self.const_1 = const_1\n self.const_2 = const_2\n\n def _evaluate(self, x, out, *args, **kwargs):\n f = anp.sum(anp.power(x, 2) - self.const_1 * anp.cos(2 * anp.pi * x), axis=1)\n g1 = (x[:, 0] + x[:, 1]) - self.const_2\n g2 = self.const_2 - (x[:, 2] + x[:, 3])\n\n out[\"F\"] = f\n out[\"G\"] = anp.column_stack([g1, g2])\n\n", "_____no_output_____" ] ], [ [ "After creating a problem object, the evaluation function can be called. The `return_values_of` parameter can be overwritten to modify the list of returned parameters. The gradients for the objectives `dF` and constraints `dG` can be obtained as follows:", "_____no_output_____" ] ], [ [ "problem = MyProblem()\nF, G, CV, feasible, dF, dG = problem.evaluate(np.random.rand(100, 10),\n return_values_of=[\"F\", \"G\", \"CV\", \"feasible\", \"dF\", \"dG\"])\n", "_____no_output_____" ] ], [ [ ".. _nb_problem_elementwise:", "_____no_output_____" ] ], [ [ "**Elementwise Evaluation**\n\nIf the problem can not be executed using matrix operations, a serialized evaluation can be indicated using the `elementwise_evaluation=True` flag. If the flag is set, then an outer loop is already implemented, an `x` is only a **one**-dimensional array.", "_____no_output_____" ] ], [ [ "class MyProblem(Problem):\n\n def __init__(self, **kwargs):\n super().__init__(n_var=2, n_obj=1, elementwise_evaluation=True, **kwargs)\n\n def _evaluate(self, x, out, *args, **kwargs):\n out[\"F\"] = x.sum()", "_____no_output_____" ] ], [ [ "### By Function\n\nAnother way of defining a problem is through functions. 
One the one hand, many function calls need to be performed to evaluate a set of solutions, but on the other hand, it is a very intuitive way of defining a problem.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom pymoo.model.problem import FunctionalProblem\n\n\nobjs = [\n lambda x: np.sum((x - 2) ** 2),\n lambda x: np.sum((x + 2) ** 2)\n]\n\nconstr_ieq = [\n lambda x: np.sum((x - 1) ** 2)\n]\n\n\nproblem = FunctionalProblem(10,\n objs,\n constr_ieq=constr_ieq,\n xl=np.array([-10, -5, -10]),\n xu=np.array([10, 5, 10])\n )\n\nF, CV = problem.evaluate(np.random.rand(3, 10))\n\nprint(f\"F: {F}\\n\")\nprint(f\"CV: {CV}\")\n\n# END from_string", "F: [[22.84784614 64.22214206]\n [23.3410745 62.37470214]\n [24.74016798 60.31023594]]\n\nCV: [[3.19142012]\n [3.09948141]\n [3.63268497]]\n" ] ], [ [ "### By String\n\nIn our framework, various test problems are already implemented and available by providing the corresponding problem name we have assigned to it. A couple of problems can be further parameterized by providing the number of variables, constraints, or other problem-dependent constants.", "_____no_output_____" ] ], [ [ "from pymoo.factory import get_problem\n\np = get_problem(\"dtlz1_-1\", n_var=20, n_obj=5)\n\n# create a simple test problem from string\np = get_problem(\"Ackley\")\n\n# the input name is not case sensitive\np = get_problem(\"ackley\")\n\n# also input parameter can be provided directly\np = get_problem(\"dtlz1_-1\", n_var=20, n_obj=5)", "_____no_output_____" ] ], [ [ "## API", "_____no_output_____" ] ], [ [ ".. class:: pymoo.model.problem.Problem\n\n .. automethod:: evaluate(X)\n .. automethod:: pareto_front(X)\n", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw" ]
[ [ "raw" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "raw" ] ]

hexsha: c51eb34bf772ee9fdee38144520ed40b0a665feb
size: 766
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Hello.ipynb
max_stars_repo_name: ewelina291988/ewd_matrix
max_stars_repo_head_hexsha: cbd0d07b029d94685be50104f243f83c5649dc7c
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Hello.ipynb
max_issues_repo_name: ewelina291988/ewd_matrix
max_issues_repo_head_hexsha: cbd0d07b029d94685be50104f243f83c5649dc7c
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Hello.ipynb
max_forks_repo_name: ewelina291988/ewd_matrix
max_forks_repo_head_hexsha: cbd0d07b029d94685be50104f243f83c5649dc7c
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 766
max_line_length: 766
alphanum_fraction: 0.71671
[ [ [ "print('Hello')", "Hello\n" ] ] ]
[ "code" ]
[ [ "code" ] ]

hexsha: c51ec3616dd1ebb21922239f22c7d9fbdd9abba9
size: 21,601
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: experiments_not_in_study/testing_ml_classifiers_benign_vs_all.ipynb
max_stars_repo_name: jehalladay/DDoS_Research
max_stars_repo_head_hexsha: 924bd94ae9f255009d19460521d76a12aec85212
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2021-07-26T00:27:56.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-07T03:33:28.000Z
max_issues_repo_path: experiments_not_in_study/testing_ml_classifiers_benign_vs_all.ipynb
max_issues_repo_name: jehalladay/DDoS_Research
max_issues_repo_head_hexsha: 924bd94ae9f255009d19460521d76a12aec85212
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: experiments_not_in_study/testing_ml_classifiers_benign_vs_all.ipynb
max_forks_repo_name: jehalladay/DDoS_Research
max_forks_repo_head_hexsha: 924bd94ae9f255009d19460521d76a12aec85212
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-12-16T00:41:31.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-16T00:41:31.000Z
avg_line_length: 43.115768
max_line_length: 1,913
alphanum_fraction: 0.570529
[ [ [ "import os, platform, pprint, sys\r\nimport fastai\r\nimport keras\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sn\r\nimport sklearn\r\n\r\n# from fastai.tabular.data import TabularDataLoaders\r\n# from fastai.tabular.all import FillMissing, Categorify, Normalize, tabular_learner, accuracy, ClassificationInterpretation, ShowGraphCallback\r\n\r\nfrom itertools import cycle\r\n\r\nfrom keras.layers import Dense\r\nfrom keras.metrics import CategoricalAccuracy, Recall, Precision, AUC\r\nfrom keras.models import Sequential\r\nfrom keras.utils import to_categorical, normalize\r\n\r\nfrom math import sqrt\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n\r\nseed: int = 14\r\n\r\n\r\n# set up pretty printer for easier data evaluation\r\npretty = pprint.PrettyPrinter(indent=4, width=30).pprint\r\n\r\n\r\n# declare file paths for the data we will be working on\r\nfile_path_1: str = '../data/prepared/baseline/Benign_vs_DDoS.csv'\r\nfile_path_2: str = '../data/prepared/timebased/Benign_vs_DDoS.csv'\r\nmodelPath : str = './models'\r\n\r\n\r\n# print library and python versions for reproducibility\r\nprint(\r\n f'''\r\n python:\\t{platform.python_version()}\r\n\r\n \\tfastai:\\t\\t{fastai.__version__}\r\n \\tkeras:\\t\\t{keras.__version__}\r\n \\tmatplotlib:\\t{mpl.__version__}\r\n \\tnumpy:\\t\\t{np.__version__}\r\n \\tpandas:\\t\\t{pd.__version__}\r\n \\tseaborn:\\t{sn.__version__}\r\n \\tsklearn:\\t{sklearn.__version__}\r\n '''\r\n)", "\n python:\t3.7.10\n\n \tfastai:\t\t2.4.1\n \tkeras:\t\t2.3.1\n \tmatplotlib:\t3.3.4\n \tnumpy:\t\t1.20.3\n \tpandas:\t\t1.2.5\n \tseaborn:\t0.11.1\n \tsklearn:\t0.24.2\n \n" ], [ "def load_data(filePath: str) -> pd.DataFrame:\r\n '''\r\n Loads the Dataset from the given filepath and caches it for quick access in the future\r\n Function will only work when filepath is a .csv file\r\n '''\r\n\r\n # slice off the ./CSV/ from the filePath\r\n if filePath[0] == '.' 
and filePath[1] == '.':\r\n filePathClean: str = filePath[17::]\r\n pickleDump: str = f'../data/cache/{filePathClean}.pickle'\r\n else:\r\n pickleDump: str = f'../data/cache/{filePath}.pickle'\r\n \r\n print(f'Loading Dataset: {filePath}')\r\n print(f'\\tTo Dataset Cache: {pickleDump}\\n')\r\n \r\n # check if data already exists within cache\r\n if os.path.exists(pickleDump):\r\n df = pd.read_pickle(pickleDump)\r\n \r\n # if not, load data and cache it\r\n else:\r\n df = pd.read_csv(filePath, low_memory=True)\r\n df.to_pickle(pickleDump)\r\n\r\n \r\n return df", "_____no_output_____" ], [ "def show_conf_matrix(model=None, X_test=None, y_test=None, classes=[], file=''):\r\n # Techniques from https://stackoverflow.com/questions/29647749/seaborn-showing-scientific-notation-in-heatmap-for-3-digit-numbers\r\n # and https://stackoverflow.com/questions/35572000/how-can-i-plot-a-confusion-matrix#51163585\r\n \r\n predictions = model.predict(X_test)\r\n matrix = [ [ 0 for j in range(len(predictions[0])) ] for i in range(len(predictions[0])) ]\r\n for i in range(len(predictions)):\r\n pred = predictions[i]\r\n test = y_test[i]\r\n\r\n guess = np.argmax(pred)\r\n actual = np.argmax(test)\r\n\r\n matrix[actual][guess] += 1\r\n \r\n df_cm = pd.DataFrame(matrix, range(len(matrix)), range(len(matrix)))\r\n int_cols = df_cm.columns\r\n df_cm.columns = classes\r\n df_cm.index = classes\r\n\r\n fig = plt.figure(figsize=(10,7))\r\n sn.set(font_scale=1.5) # for label size\r\n ax = sn.heatmap(df_cm, annot=True, annot_kws={\"size\": 16}, fmt='g', cmap=sn.color_palette(\"Blues\")) # font size\r\n ax.set_ylabel('Actual')\r\n ax.set_xlabel('Predicted')\r\n plt.tight_layout()\r\n \r\n fig.savefig('conf_matrix_{}.png'.format(file))\r\n\r\n plt.show()\r\n \r\ndef show_roc_curve(model=None, X_test=None, y_test=None, classes=[], file=''):\r\n y_score = model.predict(X_test)\r\n \r\n n_classes = len(classes)\r\n \r\n # Produce ROC curve from https://hackernoon.com/simple-guide-on-how-to-generate-roc-plot-for-keras-classifier-2ecc6c73115a\r\n # Note that I am working through this code and I'm going to clean it up as I learn more about how it works\r\n import numpy as np\r\n from numpy import interp\r\n import matplotlib.pyplot as plt\r\n from itertools import cycle\r\n from sklearn.metrics import roc_curve, auc\r\n\r\n # Plot linewidth.\r\n lw = 2\r\n\r\n # Compute ROC curve and ROC area for each class\r\n fpr = dict()\r\n tpr = dict()\r\n roc_auc = dict()\r\n for i in range(n_classes):\r\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\r\n roc_auc[i] = auc(fpr[i], tpr[i])\r\n\r\n # Compute micro-average ROC curve and ROC area\r\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\r\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\r\n\r\n # Compute macro-average ROC curve and ROC area\r\n\r\n # First aggregate all false positive rates\r\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\r\n\r\n # Then interpolate all ROC curves at this points\r\n mean_tpr = np.zeros_like(all_fpr)\r\n for i in range(n_classes):\r\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\r\n\r\n # Finally average it and compute AUC\r\n mean_tpr /= n_classes\r\n\r\n fpr[\"macro\"] = all_fpr\r\n tpr[\"macro\"] = mean_tpr\r\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\r\n\r\n # Plot all ROC curves of all the classes\r\n fig = plt.figure(figsize=(12,12))\r\n\r\n colors = cycle(['red', 'blue', 'orange', 'green', 'violet', 'teal', 'turquoise', 'pink'])\r\n for i, color in 
zip(range(n_classes), colors):\r\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\r\n label='ROC curve of {0} (area = {1:0.2f})'.format(classes[i], roc_auc[i]))\r\n\r\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.ylabel('True Positive Rate (Sensativity)')\r\n plt.xlabel('False Positive Rate (1-Specificity)')\r\n plt.title('Receiver Operating Characteristic of the Classes')\r\n plt.legend(loc=\"lower right\")\r\n \r\n fig.savefig('roc_curve_classes_{}.png'.format(file))\r\n \r\n plt.show()\r\n \r\n # Plot all ROC curves with micro and macro averages\r\n fig = plt.figure(figsize=(12,12))\r\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\r\n label='micro-average ROC curve (area = {0:0.2f})'\r\n ''.format(roc_auc[\"micro\"]),\r\n color='deeppink', linestyle=':', linewidth=4)\r\n\r\n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\r\n label='macro-average ROC curve (area = {0:0.2f})'\r\n ''.format(roc_auc[\"macro\"]),\r\n color='navy', linestyle=':', linewidth=4)\r\n\r\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.ylabel('True Positive Rate (Sensativity)')\r\n plt.xlabel('False Positive Rate (1-Specificity)')\r\n plt.title('Receiver Operating Characteristic of the Micro and Macro Averages')\r\n plt.legend(loc=\"lower right\")\r\n \r\n fig.savefig('roc_curve_micromacro_{}.png'.format(file))\r\n \r\n plt.show()", "_____no_output_____" ], [ "def get_std(x=[], xbar=0):\r\n o2=0\r\n for xi in x:\r\n o2 += (xi - xbar)**2\r\n o2 /= len(x)-1\r\n return sqrt(o2)", "_____no_output_____" ], [ "baseline_df : pd.DataFrame = load_data(file_path_1)\r\ntimebased_df: pd.DataFrame = load_data(file_path_2)", "Loading Dataset: ../data/prepared/baseline/Benign_vs_DDoS.csv\n\tTo Dataset Cache: ../data/cache/baseline/Benign_vs_DDoS.csv.pickle\n\nLoading Dataset: ../data/prepared/timebased/Benign_vs_DDoS.csv\n\tTo Dataset Cache: ../data/cache/timebased/Benign_vs_DDoS.csv.pickle\n\n" ], [ "dep_var = 'Label'\r\n\r\nind_vars_baseline = (baseline_df.columns.difference([dep_var])).tolist()\r\nind_vars_timebased = (timebased_df.columns.difference([dep_var])).tolist()\r\n\r\nbaseline_Xy = (baseline_df[ind_vars_baseline], baseline_df[dep_var])\r\ntimebased_Xy = (timebased_df[ind_vars_timebased], timebased_df[dep_var])", "_____no_output_____" ], [ "names: list = ['Benign', 'DDoS']", "_____no_output_____" ], [ "X = baseline_Xy[0]\r\nx = baseline_Xy[0]\r\nY = baseline_Xy[1]\r\n\r\nnum_classes = Y.nunique()\r\n\r\nencoder = LabelEncoder()\r\ny = encoder.fit_transform(Y)", "_____no_output_____" ], [ "# Lists for accuracies collected from models\r\nlist_rf = []\r\nlist_dt = []\r\nlist_knn = []\r\nlist_dnn = []\r\n\r\nstd_rf = []\r\nstd_dt = [] \r\nstd_knn = []\r\nstd_dnn = []\r\n\r\n\r\n# Mean accuracies for each model\r\nmean_rf = 0\r\nmean_dt = 0\r\nmean_knn = 0\r\nmean_dnn = 0\r\n\r\n# Keep to calculate std\r\nresults_rf = []\r\nresults_dt = []\r\nresults_knn = [] \r\nresults_dnn = []\r\n\r\n# 10-fold Stratified Cross-Validation\r\nn_splits = 10\r\nskf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)\r\nfor train_idxs, test_idxs in skf.split(X, y):\r\n # Define the training and testing sets\r\n X_train, X_test = X.iloc[train_idxs], X.iloc[test_idxs]\r\n y_train, y_test = y[train_idxs], y[test_idxs]\r\n \r\n # Create a different version of the y_train and y_test for the Deep Neural Network\r\n # y_train_dnn = to_categorical(y_train, num_classes=num_classes)\r\n # y_test_dnn = to_categorical(y_test, 
num_classes=num_classes)\r\n \r\n # Initialize the sklearn models\r\n rf = RandomForestClassifier(random_state=seed)\r\n dt = DecisionTreeClassifier(random_state=seed)\r\n knn = KNeighborsClassifier()\r\n \r\n # # Deep Neural Network\r\n # dnn = Sequential([\r\n # Dense(256, input_shape=(69,)),\r\n # Dense(128, activation='relu'),\r\n # Dense(64, activation='relu'),\r\n # Dense(32, activation='relu'),\r\n # Dense(2, activation='softmax')\r\n # ])\r\n # dnn.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n \r\n \r\n # Train the models\r\n rf.fit(X_train, y_train)\r\n dt.fit(X_train, y_train)\r\n knn.fit(X_train, y_train)\r\n # dnn.fit(x=X_train, y=y_train_dnn, batch_size=25, epochs=100, verbose=0, validation_data=(X_test, y_test_dnn))\r\n \r\n # Evaluate the models\r\n results_rf.append(rf.score(X_test, y_test))\r\n results_dt.append(dt.score(X_test, y_test))\r\n results_knn.append(knn.score(X_test, y_test)) \r\n # results_dnn.append( (dnn.evaluate(X_test, y_test_dnn, verbose=0) )[1] )\r\n \r\n # print('Random Forest')\r\n # show_roc_curve(model=rf, X_test=X_test, y_test=y_test, classes=names)\r\n # print('Decision Tree')\r\n # show_roc_curve(model=dt, X_test=X_test, y_test=y_test, classes=names)\r\n # print('k-Nearest Neighbor')\r\n # show_roc_curve(model=knn, X_test=X_test, y_test=y_test, classes=names)\r\n # # print('Deep Learning')\r\n # show_roc_curve(model=dnn, X_test=X_test, y_test=y_test_dnn, classes=names)\r\n\r\n print('Random Forest')\r\n show_conf_matrix(model=rf, X_test=X_test, y_test=y_test, classes=names)\r\n print('Decision Tree')\r\n show_conf_matrix(model=dt, X_test=X_test, y_test=y_test, classes=names)\r\n print('k-Nearest Neighbor')\r\n show_conf_matrix(model=knn, X_test=X_test, y_test=y_test, classes=names)\r\n # print('Deep Learning')\r\n # show_conf_matrix(model=dnn, X_test=X_test, y_test=y_test_dnn, classes=names) \r\n \r\n #print('Results from DNN: {}'.format(results_dnn))\r\n \r\n # Add the results to the running mean\r\n mean_rf += results_rf[-1] / (n_splits * 1.0)\r\n mean_dt += results_dt[-1] / (n_splits * 1.0)\r\n mean_knn += results_knn[-1] / (n_splits * 1.0)\r\n # mean_dnn += results_dnn[-1] / (n_splits * 1.0)\r\n \r\n# Push the mean results from all of the splits to the lists\r\nlist_rf.append(mean_rf)\r\nlist_dt.append(mean_dt)\r\nlist_knn.append(mean_knn)\r\n# list_dnn.append(mean_dnn)\r\n\r\nstd_rf.append(get_std(results_rf, mean_rf))\r\nstd_dt.append(get_std(results_dt, mean_dt))\r\nstd_knn.append(get_std(results_knn, mean_knn))\r\n# std_dnn.append(get_std(results_dnn, mean_dnn))\r\n\r\nprint('done')\r\n\r\nprint('All trainings complete!')", "Random Forest\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: c51ec736a1ff924e103443374e8391920f8b845e
size: 16,933
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Lab-4-Landsat-NDVI/Landsat_NDVI_Timeseries.ipynb
max_stars_repo_name: awslabs/serverless-chatbots-workshop
max_stars_repo_head_hexsha: 23f2db3110ba779bf8962470489bcdb0ffc183ff
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 80
max_stars_repo_stars_event_min_datetime: 2016-11-30T07:44:52.000Z
max_stars_repo_stars_event_max_datetime: 2017-11-20T15:01:33.000Z
max_issues_repo_path: Lab-4-Landsat-NDVI/Landsat_NDVI_Timeseries.ipynb
max_issues_repo_name: doppiomacchiatto/pywren-workshops
max_issues_repo_head_hexsha: 9a99fc02e3673fca055dc342c5bbabdb4f851849
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 6
max_issues_repo_issues_event_min_datetime: 2016-12-28T09:57:34.000Z
max_issues_repo_issues_event_max_datetime: 2017-10-18T14:43:02.000Z
max_forks_repo_path: Lab-4-Landsat-NDVI/Landsat_NDVI_Timeseries.ipynb
max_forks_repo_name: doppiomacchiatto/pywren-workshops
max_forks_repo_head_hexsha: 9a99fc02e3673fca055dc342c5bbabdb4f851849
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 41
max_forks_repo_forks_event_min_datetime: 2016-11-30T19:15:27.000Z
max_forks_repo_forks_event_max_datetime: 2017-11-07T13:46:00.000Z
avg_line_length: 37.297357
max_line_length: 723
alphanum_fraction: 0.602197
[ [ [ "# Using AWS Lambda and PyWren for Landsat 8 Time Series\nThis notebook is a simple demonstration of drilling a timeseries of NDVI values from the [Landsat 8 scenes held on AWS](https://landsatonaws.com/)\n\n### Credits\n- NDVI PyWren - [Peter Scarth](mailto:[email protected]?subject=AWS%20Lambda%20and%20PyWren) (Joint Remote Sensing Research Program)\n- [RemotePixel](https://github.com/RemotePixel/remotepixel-api) - Landsat 8 NDVI GeoTIFF parsing function\n- [PyWren](https://github.com/pywren/pywren) - Project by BCCI and riselab. Makes it easy to executive massive parallel map queries across [AWS Lambda](https://aws.amazon.com/lambda/)\n\n#### Additional notes\nThe below remotely executed function will deliver results usually in under a minute for the full timeseries of more than 100 images, and we can simply plot the resulting timeseries or do further analysis. BUT, the points may well be cloud or cloud shadow contaminated. We haven’t done any cloud masking to the imagery, but we do have the scene metadata on the probable amount of cloud across the entire scene. We use this to weight a [smoothing spline](https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.interpolate.UnivariateSpline.html), such that an observation with no reported cloud over the scene has full weight, and an observation with a reported 100% of the scene with cloud has zero weight. ", "_____no_output_____" ], [ "# Step by Step instructions", "_____no_output_____" ], [ "### Setup Logging (optional)\nOnly activate the below lines if you want to see all debug messages from PyWren. _Note: The output will be rather chatty and lengthy._", "_____no_output_____" ] ], [ [ "import logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n%env PYWREN_LOGLEVEL=INFO", "_____no_output_____" ] ], [ [ "### Setup all the necessary libraries\nThis will setup all the necessary libraries to properly display our results and it also imports the library that allows us to query Landsat 8 data from the [AWS Public Dataset](https://aws.amazon.com/public-datasets/landsat/):", "_____no_output_____" ] ], [ [ "import requests, json, numpy, datetime, os, boto3\nfrom IPython.display import HTML, display, Image\nimport matplotlib.pyplot as plt\nimport l8_ndvi\nfrom scipy.interpolate import UnivariateSpline\nimport pywren\n\n# Function to return a Landsat 8 scene list given a Longitude,Latitude string\n# This uses the amazing developmentseed Satellite API\n# https://github.com/sat-utils/sat-api\ndef getSceneList(lonLat):\n scenes=[]\n url = \"https://api.developmentseed.org/satellites/landsat\"\n params = dict(\n contains=lonLat,\n satellite_name=\"landsat-8\",\n limit=\"1000\") \n # Call the API to grab the scene metadata\n sceneMetaData = json.loads(requests.get(url=url, params=params).content)\n # Parse the metadata\n for record in sceneMetaData[\"results\"]:\n scene = str(record['aws_index'].split('/')[-2]) \n # This is a bit of a hack to get around some versioning problem on the API :(\n # Related to this issue https://github.com/sat-utils/sat-api/issues/18 \n if scene[-2:] == '01':\n scene = scene[:-2] + '00'\n if scene[-2:] == '02':\n scene = scene[:-2] + '00'\n if scene[-2:] == '03':\n scene = scene[:-2] + '02'\n scenes.append(scene) \n return scenes\n\n\n# Function to call a AWS Lambda function to drill a single pixel and compute the NDVI\ndef getNDVI(scene):\n return l8_ndvi.point(scene, eval(lonLat))\n", "_____no_output_____" ] ], [ [ "### Run the code locally over a point of interest\nLet's have a look at Hong 
Kong, an urban area with some country parks surrounding the city: [114.1095,22.3964](https://goo.gl/maps/PhDLAdLbiQT2)\n\nFirst we need to retrieve the available Landsat 8 scenes from the point of interest:", "_____no_output_____" ] ], [ [ "lonLat = '114.1095,22.3964'\nscenesHK = getSceneList('114.1095,22.3964')\n#print(scenesHK)\ndisplay(HTML('Total scenes: <b>' + str(len(scenesHK)) + '</b>'))", "_____no_output_____" ] ], [ [ "Now let's find out the NDVI and the amount of clouds on a specific scene locally on our machine:", "_____no_output_____" ] ], [ [ "lonLat = '114.1095,22.3964'\nthumbnail = l8_ndvi.thumb('LC08_L1TP_121045_20170829_20170914_01_T1', eval(lonLat))\ndisplay(Image(url=thumbnail, format='jpg'))\nresult = getNDVI('LC08_L1TP_121045_20170829_20170914_01_T1')\n#display(result)\ndisplay(HTML('<b>Date:</b> '+result['date']))\ndisplay(HTML('<b>Amount of clouds:</b> '+str(result['cloud'])+'%'))\ndisplay(HTML('<b>NDVI:</b> '+str(result['ndvi'])))", "_____no_output_____" ] ], [ [ "Great, time to try this with an observation on a cloudier day. Please note that the NDVI drops too, as we are not able to actually receive much data fom the land surface:", "_____no_output_____" ] ], [ [ "lonLat = '114.1095,22.3964'\nthumbnail = l8_ndvi.thumb('LC08_L1GT_122044_20171108_20171108_01_RT', eval(lonLat))\ndisplay(Image(url=thumbnail, format='jpg'))\nresult = getNDVI('LC08_L1GT_122044_20171108_20171108_01_RT')\n#display(result)\ndisplay(HTML('<b>Date:</b> '+result['date']))\ndisplay(HTML('<b>Amount of clouds:</b> '+str(result['cloud'])+'%'))\ndisplay(HTML('<b>NDVI:</b> '+str(result['ndvi'])))", "_____no_output_____" ] ], [ [ "### Massively Parallel calculation with PyWren\n\nNow let's try this with multiple scenes and send it to PyWren, however to accomplish this we need to change our PyWren AWS Lambda function to include the necessary libraries such as rasterio and GDAL. Since those libraries are compiled C code, PyWren will not be able to pickle it up and send it to the Lambda function. Hence we will update the entire PyWren function to include the necessary binaries that have been compiled on an Amazon EC2 instance with Amazon Linux. We pre-packaged this and made it available via https://s3-us-west-2.amazonaws.com/pywren-workshop/lambda_function.zip\n\nYou can simple push this code to your PyWren AWS Lambda function with below command, assuming you named the function with the default name pywren_1 and region us-west-2:", "_____no_output_____" ] ], [ [ "lambdaclient = boto3.client('lambda', 'us-west-2')\n\nresponse = lambdaclient.update_function_code(\n FunctionName='pywren_1',\n Publish=True,\n S3Bucket='pywren-workshop',\n S3Key='lambda_function.zip'\n)\n\nresponse = lambdaclient.update_function_configuration(\n FunctionName='pywren_1',\n Environment={\n 'Variables': {\n 'GDAL_DATA': '/var/task/lib/gdal'\n }\n }\n)", "_____no_output_____" ] ], [ [ "If you look at the list of available scenes, we have a rather large amount. This is a good use-case for PyWren as it will allows us to have AWS Lambda perform the calculation of NDVI and clouds for us - furthermore it will have a faster connectivity to read and write from Amazon S3. 
If you want to know more details about the calculation, have a look at [l8_ndvi.py](/edit/Lab-4-Landsat-NDVI/l8_ndvi.py).\n\nOk let's try this on the latest 200 collected Landsat 8 images GeoTIFFs of Hong Kong:", "_____no_output_____" ] ], [ [ "lonLat = '114.1095,22.3964'\npwex = pywren.default_executor()\nresultsHK = pywren.get_all_results(pwex.map(getNDVI, scenesHK[:200]))\ndisplay(resultsHK)", "_____no_output_____" ] ], [ [ "### Display results\nLet's try to render our results in a nice HTML table first:", "_____no_output_____" ] ], [ [ "#Remove results where we couldn't retrieve data from the scene \nresults = filter(None, resultsHK)\n\n#Render a nice HTML table to display result\nhtml = '<table><tr><td><b>Date</b></td><td><b>Clouds</b></td><td><b>NDVI</b></td></tr>'\nfor x in results: \n html = html + '<tr>'\n html = html + '<td>' + x['date'] + '</td>'\n html = html + '<td>' + str(x['cloud']) + '%</td>'\n html = html + '<td '\n if (x['ndvi'] > 0.5):\n html = html + ' bgcolor=\"#00FF00\">'\n elif (x['ndvi'] > 0.1):\n html = html + ' bgcolor=\"#FFFF00\">'\n else:\n html = html + ' bgcolor=\"#FF0000\">'\n html = html + str(round(abs(x['ndvi']),2)) + '</td>'\n html = html + '</tr>'\nhtml = html + '</table>'\ndisplay(HTML(html))\n", "_____no_output_____" ] ], [ [ "This provides us a good overview but would quickly become difficult to read as the datapoints expand - let's use [Matplotlib](https://matplotlib.org/) instead to plot this out:", "_____no_output_____" ] ], [ [ "timeSeries = filter(None,resultsHK)\n\n# Extract the data trom the list of results\ntimeStamps = [datetime.datetime.strptime(obs['date'],'%Y-%m-%d') for obs in timeSeries if 'date' in obs]\nndviSeries = [obs['ndvi'] for obs in timeSeries if 'ndvi' in obs]\ncloudSeries = [obs['cloud']/100 for obs in timeSeries if 'cloud' in obs]\n\n# Create a time variable as the x axis to fit the observations\n# First we convert to seconds\ntimeSecs = numpy.array([(obsTime-datetime.datetime(1970,1,1)).total_seconds() for obsTime in timeStamps])\n# And then normalise from 0 to 1 to avoid any numerical issues in the fitting\nfitTime = ((timeSecs-numpy.min(timeSecs))/(numpy.max(timeSecs)-numpy.min(timeSecs)))\n\n# Smooth the data by fitting a spline weighted by cloud amount\nsmoothedNDVI=UnivariateSpline(\n fitTime[numpy.argsort(fitTime)],\n numpy.array(ndviSeries)[numpy.argsort(fitTime)],\n w=(1.0-numpy.array(cloudSeries)[numpy.argsort(fitTime)])**2.0,\n k=2,\n s=0.1)(fitTime)\n\nfig = plt.figure(figsize=(16,10))\nplt.plot(timeStamps,ndviSeries, 'gx',label='Raw NDVI Data')\nplt.plot(timeStamps,ndviSeries, 'y:', linewidth=1)\nplt.plot(timeStamps,cloudSeries, 'b.', linewidth=1,label='Scene Cloud Percent')\nplt.plot(timeStamps,cloudSeries, 'b:', linewidth=1)\n#plt.plot(timeStamps,smoothedNDVI, 'r--', linewidth=3,label='Cloudfree Weighted Spline')\nplt.xlabel('Date', fontsize=16)\nplt.ylabel('NDVI', fontsize=16)\nplt.title('AWS Lambda Landsat 8 NDVI Drill (Hong Kong)', fontsize=20)\nplt.grid(True)\nplt.ylim([-.1,1.0])\nplt.legend(fontsize=14)\nplt.show()", "_____no_output_____" ] ], [ [ "### Run the code over another location\nThis test site is a cotton farming area in Queensland, Australia [147.870599,-28.744617](https://goo.gl/maps/GF5szf7vZo82)\n\nLet's first acquire some scenes:", "_____no_output_____" ] ], [ [ "lonLat = '147.870599,-28.744617'\nscenesQLD = getSceneList(lonLat)\n#print(scenesQLD)\ndisplay(HTML('Total scenes: <b>' + str(len(scenesQLD)) + '</b>'))", "_____no_output_____" ] ], [ [ "Let's first have a look at an individual 
observation first on our local machine:", "_____no_output_____" ] ], [ [ "thumbnail = l8_ndvi.thumb('LC80920802017118LGN00', eval(lonLat))\ndisplay(Image(url=thumbnail, format='jpg'))\nresult = getNDVI('LC80920802017118LGN00')\n#display(result)\ndisplay(HTML('<b>Date:</b> '+result['date']))\ndisplay(HTML('<b>Amount of clouds:</b> '+str(result['cloud'])+'%'))\ndisplay(HTML('<b>NDVI:</b> '+str(result['ndvi'])))\n", "_____no_output_____" ] ], [ [ "### Pywren Time\nLet's process this across all of the observations in parallel using AWS Lambda:", "_____no_output_____" ] ], [ [ "pwex = pywren.default_executor()\nresultsQLD = pywren.get_all_results(pwex.map(getNDVI, scenesQLD))\ndisplay(resultsQLD)", "_____no_output_____" ] ], [ [ "Now let's plot this out again:", "_____no_output_____" ] ], [ [ "timeSeries = filter(None,resultsQLD)\n\n# Extract the data trom the list of results\ntimeStamps = [datetime.datetime.strptime(obs['date'],'%Y-%m-%d') for obs in timeSeries if 'date' in obs]\nndviSeries = [obs['ndvi'] for obs in timeSeries if 'ndvi' in obs]\ncloudSeries = [obs['cloud']/100 for obs in timeSeries if 'cloud' in obs]\n\n# Create a time variable as the x axis to fit the observations\n# First we convert to seconds\ntimeSecs = numpy.array([(obsTime-datetime.datetime(1970,1,1)).total_seconds() for obsTime in timeStamps])\n# And then normalise from 0 to 1 to avoid any numerical issues in the fitting\nfitTime = ((timeSecs-numpy.min(timeSecs))/(numpy.max(timeSecs)-numpy.min(timeSecs)))\n\n# Smooth the data by fitting a spline weighted by cloud amount\nsmoothedNDVI=UnivariateSpline(\n fitTime[numpy.argsort(fitTime)],\n numpy.array(ndviSeries)[numpy.argsort(fitTime)],\n w=(1.0-numpy.array(cloudSeries)[numpy.argsort(fitTime)])**2.0,\n k=2,\n s=0.1)(fitTime)\n\n\nfig = plt.figure(figsize=(16,10))\nplt.plot(timeStamps,ndviSeries, 'gx',label='Raw NDVI Data')\nplt.plot(timeStamps,ndviSeries, 'g:', linewidth=1)\nplt.plot(timeStamps,cloudSeries, 'b.', linewidth=1,label='Scene Cloud Percent')\nplt.plot(timeStamps,smoothedNDVI, 'r--', linewidth=3,label='Cloudfree Weighted Spline')\nplt.xlabel('Date', fontsize=16)\nplt.ylabel('NDVI', fontsize=16)\nplt.title('AWS Lambda Landsat 8 NDVI Drill (Cotton Farm QLD, Australia)', fontsize=20)\nplt.grid(True)\nplt.ylim([-.1,1.0])\nplt.legend(fontsize=14)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]

hexsha: c51ede76f2c52c35a7167f7ee3a355cd0892f9f1
size: 1,555
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: .ipynb_checkpoints/1.0 量化交易简介-checkpoint.ipynb
max_stars_repo_name: Yanie1asdfg/Quant-Lectures
max_stars_repo_head_hexsha: 4e4b84cf2aff290b715a7924277335a23f5e8168
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 6
max_stars_repo_stars_event_min_datetime: 2020-12-29T07:53:46.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-17T07:07:54.000Z
max_issues_repo_path: .ipynb_checkpoints/1.0 量化交易简介-checkpoint.ipynb
max_issues_repo_name: Yanie1asdfg/Quant-Lectures
max_issues_repo_head_hexsha: 4e4b84cf2aff290b715a7924277335a23f5e8168
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: .ipynb_checkpoints/1.0 量化交易简介-checkpoint.ipynb
max_forks_repo_name: Yanie1asdfg/Quant-Lectures
max_forks_repo_head_hexsha: 4e4b84cf2aff290b715a7924277335a23f5e8168
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 4
max_forks_repo_forks_event_min_datetime: 2020-12-28T03:11:26.000Z
max_forks_repo_forks_event_max_datetime: 2021-02-09T06:12:51.000Z
avg_line_length: 18.081395
max_line_length: 41
alphanum_fraction: 0.454662
[ [ [ "# 量化交易", "_____no_output_____" ], [ "### **量化投资的哲学**", "_____no_output_____" ], [ "### **1.收益来源**\n* 所有的投资收益来自于市场的不同参与方对标的价值看法的时空错位\n* 估值的认知差异引起价格波动\n* 价格波动创造盈利空间\n\n### **2.波动分类**\n* 系统性波动:大周期级别的波动\n* 相对性波动:两个标的价差波动\n* 交易性波动:分钟级别的波动\n\n### **3.交易策略研发**\n* 分类策略研发:不同市场环境策略的研发\n* 策略环境研究\n* 策略配置优化\n\n### **4.效率**\n* 信息效率【有效市场假说】\n > 价格反映一切市场信息,价格不可预测\n* 价格运动效率【技术分析】\n > 价格反映一切信息 \n > 历史会重复 \n > 价格以趋势方式运动\n* 极化分型效率\n\n### **5.交易圣杯**\n* 没有稳赚不赔的策略,分散是唯一免费的午餐 \n* 仓位管理和资金管理无比重要\n\n### **胜率**\n出手赚钱的次数与总出手次数相比\n\n### **赔率**\n平均每次出手赚到的钱/平均每次出手赔的钱", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]

hexsha: c51ef2c040e1e799a8a672c76e00b90f5c98d1fa
size: 7,678
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: neo_review/scripts/xgb/xgb.ipynb
max_stars_repo_name: kondounagi/japanese_movies_dataset
max_stars_repo_head_hexsha: 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-08-05T21:43:09.000Z
max_stars_repo_stars_event_max_datetime: 2019-08-05T21:43:09.000Z
max_issues_repo_path: neo_review/scripts/xgb/xgb.ipynb
max_issues_repo_name: kondounagi/japanese_movies_dataset
max_issues_repo_head_hexsha: 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2020-03-31T05:53:37.000Z
max_issues_repo_issues_event_max_datetime: 2021-12-13T20:07:39.000Z
max_forks_repo_path: neo_review/scripts/xgb/xgb.ipynb
max_forks_repo_name: kondounagi/japanese_movies_dataset
max_forks_repo_head_hexsha: 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 30.347826
max_line_length: 122
alphanum_fraction: 0.431102
[ [ [ "import xgboost as xgb\nimport pandas as pd", "_____no_output_____" ], [ "# 読み出し\ndata = pd.read_pickle('data.pkl')\nnomination_onehot = pd.read_pickle('nomination_onehot.pkl')\nselected_performers_onehot = pd.read_pickle('selected_performers_onehot.pkl')\nselected_directors_onehot = pd.read_pickle('selected_directors_onehot.pkl')\nselected_studio_onehot = pd.read_pickle('selected_studio_onehot.pkl')\nselected_scriptwriter_onehot = pd.read_pickle('selected_scriptwriter_onehot.pkl')\nreview_dataframe = pd.read_pickle('review_dataframe.pkl')\ntfidf = pd.read_pickle('tfidf.pkl')", "_____no_output_____" ], [ "table = pd.concat([\n data[['prize', 'title', 'year', 'screen_time']],\n nomination_onehot,\n selected_performers_onehot,\n selected_directors_onehot,\n selected_studio_onehot,\n selected_scriptwriter_onehot\n], axis = 1)", "_____no_output_____" ], [ "for year in range(1978, 2019 + 1):\n rg = xgb.XGBRegressor(silent= True)\n X = table.query('year != {}'.format(year)).drop(['prize', 'title', 'year'], axis = 1).values\n y = table.query('year != {}'.format(year))['prize'].values\n rg.fit(X,y)\n result = rg.predict(table.query('year == {}'.format(year)).drop(['prize', 'title', 'year'], axis = 1).values)\n prize = table.query('year == {}'.format(year))\n title = table.query('year == {}'.format(year))['title'].copy()\n title[prize['prize'] == 1] = title[prize['prize'] == 1].map(lambda s: '★' + s)\n print(year)\n print(pd.Series(result, index = title.values).sort_values(ascending=False) )\n print('')", "_____no_output_____" ], [ "frames = [\n data.query('year == 2004')[['title', 'production_studio', 'other_nominates']],\n review_dataframe\n ]\n\ndef asdf(s):\n s['len'] = len(s['reviews'])\n return s\n \npd.concat(\n frames,\n axis = 1,\n join = 'inner'\n).apply(asdf, axis = 1).drop(['reviews'], axis = 1)", "_____no_output_____" ], [ "from sklearn.decomposition import PCA\n\npca = PCA(n_components=20)\npca.fit(tfidf.values)\ntfidf_df = pd.DataFrame(pca.transform(tfidf.values), index = tfidf.index)", "_____no_output_____" ], [ "table = pd.concat([\n data[['prize', 'title', 'year']],\n tfidf\n], axis = 1)", "_____no_output_____" ], [ "for year in range(1978, 2019 + 1):\n rg = xgb.XGBRegressor(silent= True)\n X = table.query('year != {}'.format(year)).drop(['prize', 'title', 'year'], axis = 1).values\n y = table.query('year != {}'.format(year))['prize'].values\n rg.fit(X,y)\n result = rg.predict(table.query('year == {}'.format(year)).drop(['prize', 'title', 'year'], axis = 1).values)\n prize = table.query('year == {}'.format(year))\n title = table.query('year == {}'.format(year))['title'].copy()\n title[prize['prize'] == 1] = title[prize['prize'] == 1].map(lambda s: '★' + s)\n print(year)\n print(pd.Series(result, index = title.values).sort_values(ascending=False) )\n print('')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]

hexsha: c51f087a54e3fa771ab9a9e2402be9438a298860
size: 13,765
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Exception Handling.ipynb
max_stars_repo_name: himanshu-1205/Python-Tutorial
max_stars_repo_head_hexsha: 2e7d6d7a42bfc8619effca7d685174a4feeae93b
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-12-02T07:19:05.000Z
max_stars_repo_stars_event_max_datetime: 2020-12-02T07:19:05.000Z
max_issues_repo_path: Exception Handling.ipynb
max_issues_repo_name: himanshu-1205/Python-Tutorial
max_issues_repo_head_hexsha: 2e7d6d7a42bfc8619effca7d685174a4feeae93b
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Exception Handling.ipynb
max_forks_repo_name: himanshu-1205/Python-Tutorial
max_forks_repo_head_hexsha: 2e7d6d7a42bfc8619effca7d685174a4feeae93b
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 39.554598
max_line_length: 1,470
alphanum_fraction: 0.572394
[ [ [ "# Errors and Exceptions\nThere are two types of errors:-\n1. Synatax Errors\n2. Exception\n", "_____no_output_____" ] ], [ [ "# Synatx error example\nwhile True print('Hello world')", "_____no_output_____" ], [ "# Exception Example\nprint(10 * (1/0))", "_____no_output_____" ] ], [ [ "## Handling Exceptions", "_____no_output_____" ] ], [ [ "while True:\n try:\n x = int(input(\"Please enter a number: \"))\n break\n except ValueError:\n print(\"Oops! That was no valid number. Try again...\")", "Please enter a number: 5\n" ] ], [ [ "## Else keyword in exception handling\nYou can use the else keyword to define a block of code to be executed if no errors were raised:\n\n\n", "_____no_output_____" ] ], [ [ "try:\n print(\"Hello\")\nexcept:\n print(\"Something went wrong\")\nelse:\n print(\"Nothing went wrong\")", "Hello\nNothing went wrong\n" ] ], [ [ "## Finally\nThe finally block, if specified, will be executed regardless if the try block raises an error or not.\n\n", "_____no_output_____" ] ], [ [ "try:\n print(q)\nexcept:\n print(\"Something went wrong\")\nfinally:\n print(\"The 'try except' is finished\")", "Something went wrong\nThe 'try except' is finished\n" ] ], [ [ "## Raise an exception\n", "_____no_output_____" ] ], [ [ "x = -1\n\nif x < 0:\n raise Exception(\"Sorry, no numbers below zero\")", "_____no_output_____" ] ], [ [ "## Type error\nIt is used to indicate what type of error to raise", "_____no_output_____" ] ], [ [ "x = \"hello\"\n\nif not type(x) is int:\n raise TypeError(\"Only integers are allowed\")", "_____no_output_____" ], [ "# Sys library is used to show what type of error is this\n\nimport sys\n\nrandomList = ['a', 0, 2]\n\nfor entry in randomList:\n try:\n print(\"The entry is\", entry)\n r = 1/int(entry)\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n print(\"Next entry.\")\n print()\nprint(\"The reciprocal of\", entry, \"is\", r)", "The entry is a\nOops! <class 'ValueError'> occurred.\nNext entry.\n\nThe entry is 0\nOops! <class 'ZeroDivisionError'> occurred.\nNext entry.\n\nThe entry is 2\nThe reciprocal of 2 is 0.5\n" ] ], [ [ "### User-defined Exceptions", "_____no_output_____" ] ], [ [ "class CustomException(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n pass\nclass PrecedingLetterError(CustomException):\n \"\"\"Raised when the entered alphabet is smaller than the actual one\"\"\"\n pass\nclass SucceedingLetterError(CustomException):\n \"\"\"Raised when the entered alphabet is larger than the actual one\"\"\"\n pass\n# we need to guess this alphabet till we get it right\nalphabet = 'k'\nwhile True:\n try:\n foo = input( \"Enter an alphabet: \" )\n if foo < alphabet:\n raise PrecedingLetterError\n elif foo > alphabet:\n raise SucceedingLetterError\n break\n except PrecedingLetterError:\n print(\"The entered alphabet is preceding one, try again!\")\n print('')\n except SucceedingLetterError:\n print(\"The entered alphabet is succeeding one, try again!\")\n print('')\n print(\"Congratulations! You guessed it correctly.\")\n ", "Enter an alphabet: l\nThe entered alphabet is succeeding one, try again!\n\nCongratulations! You guessed it correctly.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]

hexsha: c51f09c738f3d17e180e53c43f5258cba2eec5cb
size: 5,096
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: data/texas/texas.ipynb
max_stars_repo_name: hazdzz/MGCs
max_stars_repo_head_hexsha: 8038a6cec430e44d630a07d24340229070381c6d
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2021-10-16T00:23:57.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-13T10:14:01.000Z
max_issues_repo_path: data/texas/texas.ipynb
max_issues_repo_name: hazdzz/MGCs
max_issues_repo_head_hexsha: 8038a6cec430e44d630a07d24340229070381c6d
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: data/texas/texas.ipynb
max_forks_repo_name: hazdzz/MGCs
max_forks_repo_head_hexsha: 8038a6cec430e44d630a07d24340229070381c6d
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 24.266667
max_line_length: 113
alphanum_fraction: 0.523548
[ [ [ "import os\nimport numpy as np\nimport pandas as pd\nimport random", "_____no_output_____" ], [ "labels = np.genfromtxt('labels.csv', delimiter=',')", "_____no_output_____" ], [ "c1 = 0\nc2 = 0\nc3 = 0\nc4 = 0\nc5 = 0\n\nlist_a = []\nlist_b = []\nlist_c = []\nlist_d = []\nlist_e = []\n\nfor i in range(labels.shape[0]):\n if labels[i][0] == 1:\n c1 += 1\n list_a.append(i)\n elif labels[i][1] == 1:\n c2 += 1\n list_b.append(i)\n elif labels[i][2] == 1:\n c3 += 1\n list_c.append(i)\n elif labels[i][3] == 1:\n c4 += 1\n list_d.append(i)\n elif labels[i][4] == 1:\n c5 += 1\n list_e.append(i)", "_____no_output_____" ], [ "print(c1)\nprint(c2)\nprint(c3)\nprint(c4)\nprint(c5)", "34\n31\n18\n1\n103\n" ], [ "random.seed(97)\nnp.random.seed(97)\nos.environ['PYTHONHASHSEED'] = str(97)\n\narr_a_train = np.random.choice(list_a, size=20, replace=False)\narr_b_train = np.random.choice(list_b, size=19, replace=False)\narr_c_train = np.random.choice(list_c, size=10, replace=False)\narr_d_train = np.random.choice(list_d, size=1, replace=False)\narr_e_train = np.random.choice(list_e, size=61, replace=False)", "_____no_output_____" ], [ "arr_a = np.array(list_a)\narr_b = np.array(list_b)\narr_c = np.array(list_c)\narr_d = np.array(list_d)\narr_e = np.array(list_e)\n\narr_a_val_test = np.setdiff1d(arr_a, arr_a_train)\narr_b_val_test = np.setdiff1d(arr_b, arr_b_train)\narr_c_val_test = np.setdiff1d(arr_c, arr_c_train)\n\narr_e_val_test = np.setdiff1d(arr_e, arr_e_train)", "_____no_output_____" ], [ "random.seed(97)\nnp.random.seed(97)\nos.environ['PYTHONHASHSEED'] = str(97)\n\narr_a_val = np.random.choice(arr_a_val_test, size=7, replace=False)\narr_a_test = np.setdiff1d(arr_a_val_test, arr_a_val)\n\narr_b_val = np.random.choice(arr_b_val_test, size=6, replace=False)\narr_b_test = np.setdiff1d(arr_b_val_test, arr_b_val)\n\narr_c_val = np.random.choice(arr_c_val_test, size=4, replace=False)\narr_c_test = np.setdiff1d(arr_c_val_test, arr_c_val)\n\narr_e_val = np.random.choice(arr_e_val_test, size=21, replace=False)\narr_e_test = np.setdiff1d(arr_e_val_test, arr_e_val)", "_____no_output_____" ], [ "idx_train = np.concatenate((arr_a_train, arr_b_train, arr_c_train, arr_d_train, arr_e_train), axis=None)\nidx_val = np.concatenate((arr_a_val, arr_b_val, arr_c_val, arr_e_val), axis=None)\nidx_test = np.concatenate((arr_a_test, arr_b_test, arr_c_test, arr_e_test), axis=None)", "_____no_output_____" ], [ "idx_train.sort()\nidx_val.sort()\nidx_test.sort()", "_____no_output_____" ], [ "np.savetxt('idx_train.csv', idx_train, fmt='%s', delimiter=',')\nnp.savetxt('idx_val.csv', idx_val, fmt='%s', delimiter=',')\nnp.savetxt('idx_test.csv', idx_test, fmt='%s', delimiter=',')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f0a144f6610ddcfd98913e58d74092f8057d8
209,169
ipynb
Jupyter Notebook
chapter1-nlp-essentials/SMS_Spam_Detection.ipynb
PacktMrunal/Advanced-NLP-with-TensorFlow-2
b05215a281268802bb439970856120a70db20942
[ "MIT" ]
7
2020-11-27T22:40:15.000Z
2021-11-08T13:10:35.000Z
chapter1-nlp-essentials/SMS_Spam_Detection.ipynb
PacktMrunal/Advanced-NLP-with-TensorFlow-2
b05215a281268802bb439970856120a70db20942
[ "MIT" ]
null
null
null
chapter1-nlp-essentials/SMS_Spam_Detection.ipynb
PacktMrunal/Advanced-NLP-with-TensorFlow-2
b05215a281268802bb439970856120a70db20942
[ "MIT" ]
6
2020-11-27T22:40:19.000Z
2021-12-29T03:01:11.000Z
36.063621
1,722
0.359308
[ [ [ "%tensorflow_version 2.x\nimport tensorflow as tf\n#from tf.keras.models import Sequential\n#from tf.keras.layers import Dense\nimport os\nimport io\n\ntf.__version__", "_____no_output_____" ] ], [ [ "# Download Data", "_____no_output_____" ] ], [ [ "# Download the zip file\npath_to_zip = tf.keras.utils.get_file(\"smsspamcollection.zip\",\n origin=\"https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip\",\n extract=True)\n\n# Unzip the file into a folder\n!unzip $path_to_zip -d data", "Downloading data from https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip\n204800/203415 [==============================] - 1s 3us/step\nArchive: /root/.keras/datasets/smsspamcollection.zip\n inflating: data/SMSSpamCollection \n inflating: data/readme \n" ], [ "# optional step - helps if colab gets disconnected\n# from google.colab import drive\n# drive.mount('/content/drive')", "_____no_output_____" ], [ "# Test data reading\n# lines = io.open('/content/drive/My Drive/colab-data/SMSSpamCollection').read().strip().split('\\n')\nlines = io.open('/content/data/SMSSpamCollection').read().strip().split('\\n')\nlines[0]", "_____no_output_____" ] ], [ [ "# Pre-Process Data", "_____no_output_____" ] ], [ [ "spam_dataset = []\ncount = 0\nfor line in lines:\n label, text = line.split('\\t')\n if label.lower().strip() == 'spam':\n spam_dataset.append((1, text.strip()))\n count += 1\n else:\n spam_dataset.append(((0, text.strip())))\n\nprint(spam_dataset[0])\nprint(\"Spam: \", count)", "(0, 'Go until jurong point, crazy.. Available only in bugis n great world la e buffet... Cine there got amore wat...')\nSpam: 747\n" ] ], [ [ "# Data Normalization", "_____no_output_____" ] ], [ [ "import pandas as pd ", "_____no_output_____" ], [ "df = pd.DataFrame(spam_dataset, columns=['Spam', 'Message'])", "_____no_output_____" ], [ "import re\n\n# Normalization functions\n\ndef message_length(x):\n # returns total number of characters\n return len(x)\n\ndef num_capitals(x):\n _, count = re.subn(r'[A-Z]', '', x) # only works in english\n return count\n\ndef num_punctuation(x):\n _, count = re.subn(r'\\W', '', x)\n return count\n\n", "_____no_output_____" ], [ "df['Capitals'] = df['Message'].apply(num_capitals)\ndf['Punctuation'] = df['Message'].apply(num_punctuation)\ndf['Length'] = df['Message'].apply(message_length)", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "train=df.sample(frac=0.8,random_state=42) #random state is a seed value\ntest=df.drop(train.index)", "_____no_output_____" ], [ "train.describe()", "_____no_output_____" ], [ "test.describe()", "_____no_output_____" ] ], [ [ "# Model Building", "_____no_output_____" ] ], [ [ "# Basic 1-layer neural network model for evaluation\ndef make_model(input_dims=3, num_units=12):\n model = tf.keras.Sequential()\n\n # Adds a densely-connected layer with 12 units to the model:\n model.add(tf.keras.layers.Dense(num_units, \n input_dim=input_dims, \n activation='relu'))\n\n # Add a sigmoid layer with a binary output unit:\n model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy', optimizer='adam', \n metrics=['accuracy'])\n return model", "_____no_output_____" ], [ "x_train = train[['Length', 'Punctuation', 'Capitals']]\ny_train = train[['Spam']]\n\nx_test = test[['Length', 'Punctuation', 'Capitals']]\ny_test = test[['Spam']]", "_____no_output_____" ], [ "x_train", "_____no_output_____" ], [ "model = make_model()", "_____no_output_____" ], [ 
"model.fit(x_train, y_train, epochs=10, batch_size=10)", "Epoch 1/10\n446/446 [==============================] - 1s 3ms/step - loss: 3.1590 - accuracy: 0.7179\nEpoch 2/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.4046 - accuracy: 0.8706\nEpoch 3/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3565 - accuracy: 0.8818\nEpoch 4/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3227 - accuracy: 0.8863\nEpoch 5/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3113 - accuracy: 0.8832\nEpoch 6/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2884 - accuracy: 0.8876\nEpoch 7/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2842 - accuracy: 0.8935\nEpoch 8/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2796 - accuracy: 0.8912\nEpoch 9/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2750 - accuracy: 0.8912\nEpoch 10/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2791 - accuracy: 0.8906\n" ], [ "model.evaluate(x_test, y_test)", "35/35 [==============================] - 0s 2ms/step - loss: 0.2662 - accuracy: 0.8915\n" ], [ "y_train_pred = model.predict_classes(x_train)", "WARNING:tensorflow:From <ipython-input-20-ce9f4b8a8791>:1: Sequential.predict_classes (from tensorflow.python.keras.engine.sequential) is deprecated and will be removed after 2021-01-01.\nInstructions for updating:\nPlease use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype(\"int32\")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).\n" ], [ "# confusion matrix\ntf.math.confusion_matrix(tf.constant(y_train.Spam), \n y_train_pred)", "_____no_output_____" ], [ "sum(y_train_pred)", "_____no_output_____" ], [ "y_test_pred = model.predict_classes(x_test)\ntf.math.confusion_matrix(tf.constant(y_test.Spam), y_test_pred)", "_____no_output_____" ] ], [ [ "# Tokenization and Stop Word Removal", "_____no_output_____" ] ], [ [ "sentence = 'Go until jurong point, crazy.. 
Available only in bugis n great world'\nsentence.split()", "_____no_output_____" ], [ "!pip install stanza # StanfordNLP has become https://github.com/stanfordnlp/stanza/", "Collecting stanza\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e7/8b/3a9e7a8d8cb14ad6afffc3983b7a7322a3a24d94ebc978a70746fcffc085/stanza-1.1.1-py3-none-any.whl (227kB)\n\r\u001b[K |█▍ | 10kB 9.1MB/s eta 0:00:01\r\u001b[K |██▉ | 20kB 1.7MB/s eta 0:00:01\r\u001b[K |████▎ | 30kB 2.2MB/s eta 0:00:01\r\u001b[K |█████▊ | 40kB 2.5MB/s eta 0:00:01\r\u001b[K |███████▏ | 51kB 2.0MB/s eta 0:00:01\r\u001b[K |████████▋ | 61kB 2.3MB/s eta 0:00:01\r\u001b[K |██████████ | 71kB 2.5MB/s eta 0:00:01\r\u001b[K |███████████▌ | 81kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████ | 92kB 2.9MB/s eta 0:00:01\r\u001b[K |██████████████▍ | 102kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████▉ | 112kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████▎ | 122kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████▊ | 133kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████▏ | 143kB 2.8MB/s eta 0:00:01\r\u001b[K |█████████████████████▋ | 153kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 163kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████▌ | 174kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 184kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 194kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 204kB 2.8MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▎ | 215kB 2.8MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▊| 225kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 235kB 2.8MB/s \n\u001b[?25hRequirement already satisfied: torch>=1.3.0 in /usr/local/lib/python3.6/dist-packages (from stanza) (1.6.0+cu101)\nRequirement already satisfied: protobuf in /usr/local/lib/python3.6/dist-packages (from stanza) (3.12.4)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from stanza) (1.18.5)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from stanza) (4.41.1)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from stanza) (2.23.0)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch>=1.3.0->stanza) (0.16.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf->stanza) (50.3.0)\nRequirement already satisfied: six>=1.9 in /usr/local/lib/python3.6/dist-packages (from protobuf->stanza) (1.15.0)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->stanza) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->stanza) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->stanza) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->stanza) (2.10)\nInstalling collected packages: stanza\nSuccessfully installed stanza-1.1.1\n" ], [ "import stanza", "_____no_output_____" ], [ "en = stanza.download('en') ", "Downloading https://raw.githubusercontent.com/stanfordnlp/stanza-resources/master/resources_1.1.0.json: 122kB [00:00, 8.26MB/s] \n2020-10-14 04:13:38 INFO: Downloading default packages for language: en (English)...\nDownloading http://nlp.stanford.edu/software/stanza/1.1.0/en/default.zip: 100%|██████████| 
428M/428M [06:14<00:00, 1.14MB/s]\n2020-10-14 04:20:01 INFO: Finished downloading models and saved to /root/stanza_resources.\n" ], [ "en = stanza.Pipeline(lang='en')", "2020-10-14 04:20:02 INFO: Loading these models for language: en (English):\n=========================\n| Processor | Package |\n-------------------------\n| tokenize | ewt |\n| pos | ewt |\n| lemma | ewt |\n| depparse | ewt |\n| sentiment | sstplus |\n| ner | ontonotes |\n=========================\n\n2020-10-14 04:20:02 INFO: Use device: gpu\n2020-10-14 04:20:02 INFO: Loading: tokenize\n2020-10-14 04:20:12 INFO: Loading: pos\n2020-10-14 04:20:13 INFO: Loading: lemma\n2020-10-14 04:20:13 INFO: Loading: depparse\n2020-10-14 04:20:14 INFO: Loading: sentiment\n2020-10-14 04:20:15 INFO: Loading: ner\n2020-10-14 04:20:16 INFO: Done loading processors!\n" ], [ "sentence", "_____no_output_____" ], [ "tokenized = en(sentence)", "_____no_output_____" ], [ "len(tokenized.sentences)", "_____no_output_____" ], [ "for snt in tokenized.sentences:\n for word in snt.tokens:\n print(word.text)\n print(\"<End of Sentence>\")", "Go\nuntil\njurong\npoint\n,\ncrazy\n..\n<End of Sentence>\nAvailable\nonly\nin\nbugis\nn\ngreat\nworld\n<End of Sentence>\n" ] ], [ [ "## Dependency Parsing Example", "_____no_output_____" ] ], [ [ "en2 = stanza.Pipeline(lang='en')\npr2 = en2(\"Hari went to school\")\nfor snt in pr2.sentences:\n for word in snt.tokens:\n print(word)\n print(\"<End of Sentence>\")", "2020-10-14 04:20:48 INFO: Loading these models for language: en (English):\n=========================\n| Processor | Package |\n-------------------------\n| tokenize | ewt |\n| pos | ewt |\n| lemma | ewt |\n| depparse | ewt |\n| sentiment | sstplus |\n| ner | ontonotes |\n=========================\n\n2020-10-14 04:20:48 INFO: Use device: gpu\n2020-10-14 04:20:48 INFO: Loading: tokenize\n2020-10-14 04:20:48 INFO: Loading: pos\n2020-10-14 04:20:49 INFO: Loading: lemma\n2020-10-14 04:20:49 INFO: Loading: depparse\n2020-10-14 04:20:50 INFO: Loading: sentiment\n2020-10-14 04:20:51 INFO: Loading: ner\n2020-10-14 04:20:52 INFO: Done loading processors!\n" ] ], [ [ "## Japanese Tokenization Example", "_____no_output_____" ] ], [ [ "jp = stanza.download('ja') ", "Downloading https://raw.githubusercontent.com/stanfordnlp/stanza-resources/master/resources_1.1.0.json: 122kB [00:00, 10.2MB/s] \n2020-10-14 04:21:10 INFO: Downloading default packages for language: ja (Japanese)...\nDownloading http://nlp.stanford.edu/software/stanza/1.1.0/ja/default.zip: 100%|██████████| 220M/220M [05:35<00:00, 656kB/s] \n2020-10-14 04:26:50 INFO: Finished downloading models and saved to /root/stanza_resources.\n" ], [ "jp = stanza.Pipeline(lang='ja')", "2020-10-14 04:26:50 INFO: Loading these models for language: ja (Japanese):\n=======================\n| Processor | Package |\n-----------------------\n| tokenize | gsd |\n| pos | gsd |\n| lemma | gsd |\n| depparse | gsd |\n=======================\n\n2020-10-14 04:26:50 INFO: Use device: gpu\n2020-10-14 04:26:50 INFO: Loading: tokenize\n2020-10-14 04:26:50 INFO: Loading: pos\n2020-10-14 04:26:51 INFO: Loading: lemma\n2020-10-14 04:26:51 INFO: Loading: depparse\n2020-10-14 04:26:52 INFO: Done loading processors!\n" ], [ "jp_line = jp(\"選挙管理委員会\")", "_____no_output_____" ], [ "for snt in jp_line.sentences:\n for word in snt.tokens:\n print(word.text)", "選挙\n管理\n委員会\n" ] ], [ [ "# Adding Word Count Feature ", "_____no_output_____" ] ], [ [ "def word_counts(x, pipeline=en):\n doc = pipeline(x)\n count = sum( [ len(sentence.tokens) for 
sentence in doc.sentences] )\n return count\n", "_____no_output_____" ], [ "#en = snlp.Pipeline(lang='en', processors='tokenize')\ndf['Words'] = df['Message'].apply(word_counts)", "_____no_output_____" ], [ "df.describe()", "_____no_output_____" ], [ "#train=df.sample(frac=0.8,random_state=42) #random state is a seed value\n#test=df.drop(train.index)\n\ntrain['Words'] = train['Message'].apply(word_counts)\ntest['Words'] = test['Message'].apply(word_counts)\n", "_____no_output_____" ], [ "x_train = train[['Length', 'Punctuation', 'Capitals', 'Words']]\ny_train = train[['Spam']]\n\nx_test = test[['Length', 'Punctuation', 'Capitals' , 'Words']]\ny_test = test[['Spam']]\n\nmodel = make_model(input_dims=4)\n", "_____no_output_____" ], [ "model.fit(x_train, y_train, epochs=10, batch_size=10)", "Epoch 1/10\n446/446 [==============================] - 1s 3ms/step - loss: 10.4237 - accuracy: 0.6217\nEpoch 2/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.5158 - accuracy: 0.8616\nEpoch 3/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3754 - accuracy: 0.8888\nEpoch 4/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3256 - accuracy: 0.8971\nEpoch 5/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3057 - accuracy: 0.8921\nEpoch 6/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.3082 - accuracy: 0.8939\nEpoch 7/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2950 - accuracy: 0.8941\nEpoch 8/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2870 - accuracy: 0.8966\nEpoch 9/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2922 - accuracy: 0.8973\nEpoch 10/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2857 - accuracy: 0.8941\n" ], [ "model.evaluate(x_test, y_test)", "35/35 [==============================] - 0s 2ms/step - loss: 0.2647 - accuracy: 0.8969\n" ] ], [ [ "## Stop Word Removal", "_____no_output_____" ] ], [ [ "!pip install stopwordsiso", "Collecting stopwordsiso\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/3e/03/4c5f24b654bb9459f81aa5c1b60b094b804286b99dca9f2e116c9eb01ac8/stopwordsiso-0.6.1-py3-none-any.whl (73kB)\n\r\u001b[K |████▌ | 10kB 19.4MB/s eta 0:00:01\r\u001b[K |█████████ | 20kB 1.7MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 30kB 2.1MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 40kB 2.4MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 51kB 2.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████▊ | 61kB 2.3MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 71kB 2.5MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 81kB 2.3MB/s \n\u001b[?25hInstalling collected packages: stopwordsiso\nSuccessfully installed stopwordsiso-0.6.1\n" ], [ "import stopwordsiso as stopwords\n\nstopwords.langs()", "_____no_output_____" ], [ "sorted(stopwords.stopwords('en'))", "_____no_output_____" ], [ "en_sw = stopwords.stopwords('en')\n\ndef word_counts(x, pipeline=en):\n doc = pipeline(x)\n count = 0\n for sentence in doc.sentences:\n for token in sentence.tokens:\n if token.text.lower() not in en_sw:\n count += 1\n return count", "_____no_output_____" ], [ "train['Words'] = train['Message'].apply(word_counts)\ntest['Words'] = test['Message'].apply(word_counts)", "_____no_output_____" ], [ "x_train = train[['Length', 'Punctuation', 'Capitals', 'Words']]\ny_train = train[['Spam']]\n\nx_test = test[['Length', 'Punctuation', 'Capitals' , 'Words']]\ny_test = test[['Spam']]\n\nmodel 
= make_model(input_dims=4)\n#model = make_model(input_dims=3)\n\nmodel.fit(x_train, y_train, epochs=10, batch_size=10)", "Epoch 1/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.5050 - accuracy: 0.8778\nEpoch 2/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2953 - accuracy: 0.9159\nEpoch 3/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2448 - accuracy: 0.9253\nEpoch 4/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2150 - accuracy: 0.9312\nEpoch 5/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2045 - accuracy: 0.9316\nEpoch 6/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.1978 - accuracy: 0.9377\nEpoch 7/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.1936 - accuracy: 0.9379\nEpoch 8/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.1891 - accuracy: 0.9363\nEpoch 9/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.1902 - accuracy: 0.9356\nEpoch 10/10\n446/446 [==============================] - 1s 3ms/step - loss: 0.2000 - accuracy: 0.9336\n" ] ], [ [ "## POS Based Features", "_____no_output_____" ] ], [ [ "en = stanza.Pipeline(lang='en')\n\ntxt = \"Yo you around? A friend of mine's lookin.\"\npos = en(txt)", "2020-10-14 04:51:48 INFO: Loading these models for language: en (English):\n=========================\n| Processor | Package |\n-------------------------\n| tokenize | ewt |\n| pos | ewt |\n| lemma | ewt |\n| depparse | ewt |\n| sentiment | sstplus |\n| ner | ontonotes |\n=========================\n\n2020-10-14 04:51:48 INFO: Use device: gpu\n2020-10-14 04:51:48 INFO: Loading: tokenize\n2020-10-14 04:51:48 INFO: Loading: pos\n2020-10-14 04:51:49 INFO: Loading: lemma\n2020-10-14 04:51:49 INFO: Loading: depparse\n2020-10-14 04:51:50 INFO: Loading: sentiment\n2020-10-14 04:51:51 INFO: Loading: ner\n2020-10-14 04:51:51 INFO: Done loading processors!\n" ], [ "def print_pos(doc):\n text = \"\"\n for sentence in doc.sentences:\n for token in sentence.tokens:\n text += token.words[0].text + \"/\" + \\\n token.words[0].upos + \" \"\n text += \"\\n\"\n return text", "_____no_output_____" ], [ "print(print_pos(pos))", "Yo/PRON you/PRON around/ADV ?/PUNCT \nA/DET friend/NOUN of/ADP mine/PRON 's/PART lookin/NOUN ./PUNCT \n\n" ], [ "en_sw = stopwords.stopwords('en')\n\ndef word_counts_v3(x, pipeline=en):\n doc = pipeline(x)\n count = 0\n for sentence in doc.sentences:\n for token in sentence.tokens:\n if token.text.lower() not in en_sw and \\\n token.words[0].upos not in ['PUNCT', 'SYM']:\n count += 1\n return count", "_____no_output_____" ], [ "print(word_counts(txt), word_counts_v3(txt))", "6 4\n" ], [ "train['Test'] = 0\ntrain.describe()", "_____no_output_____" ], [ "def word_counts_v3(x, pipeline=en):\n doc = pipeline(x)\n totals = 0.\n count = 0.\n non_word = 0.\n for sentence in doc.sentences:\n totals += len(sentence.tokens) # (1)\n for token in sentence.tokens:\n if token.text.lower() not in en_sw:\n if token.words[0].upos not in ['PUNCT', 'SYM']:\n count += 1.\n else:\n non_word += 1.\n non_word = non_word / totals\n return pd.Series([count, non_word], index=['Words_NoPunct', 'Punct'])", "_____no_output_____" ], [ "x = train[:10]\nx.describe()", "_____no_output_____" ], [ "train_tmp = train['Message'].apply(word_counts_v3)\ntrain = pd.concat([train, train_tmp], axis=1)\ntrain.describe()", "_____no_output_____" ], [ "test_tmp = test['Message'].apply(word_counts_v3)\ntest = pd.concat([test, test_tmp], 
axis=1)\ntest.describe()", "_____no_output_____" ], [ "z = pd.concat([x, train_tmp], axis=1)\nz.describe()", "_____no_output_____" ], [ "z.loc[z['Spam']==0].describe()", "_____no_output_____" ], [ "z.loc[z['Spam']==1].describe()", "_____no_output_____" ], [ "aa = [word_counts_v3(y) for y in x['Message']]", "_____no_output_____" ], [ "ab = pd.DataFrame(aa)\nab.describe()", "_____no_output_____" ] ], [ [ "# Lemmatization", "_____no_output_____" ] ], [ [ "\ntext = \"Stemming is aimed at reducing vocabulary and aid un-derstanding of\" +\\\n \" morphological processes. This helps people un-derstand the\" +\\\n \" morphology of words and reduce size of corpus.\"\n\nlemma = en(text)", "_____no_output_____" ], [ "lemmas = \"\"\nfor sentence in lemma.sentences:\n for token in sentence.tokens:\n lemmas += token.words[0].lemma +\"/\" + \\\n token.words[0].upos + \" \"\n lemmas += \"\\n\"\n\nprint(lemmas)", "stemming/NOUN be/AUX aim/VERB at/SCONJ reduce/VERB vocabulary/NOUN and/CCONJ aid/NOUN un/NOUN -/PUNCT derstanding/NOUN of/ADP morphological/ADJ process/NOUN ./PUNCT \nthis/PRON help/VERB people/NOUN un/NOUN -/PUNCT derstand/VERB the/DET morphology/NOUN of/ADP word/NOUN and/CCONJ reduce/VERB size/NOUN of/ADP corpus/NOUN ./PUNCT \n\n" ] ], [ [ "# TF-IDF Based Model\n", "_____no_output_____" ] ], [ [ "# if not installed already\n!pip install sklearn", "Requirement already satisfied: sklearn in /usr/local/lib/python3.6/dist-packages (0.0)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from sklearn) (0.22.2.post1)\nRequirement already satisfied: scipy>=0.17.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->sklearn) (1.4.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->sklearn) (0.16.0)\nRequirement already satisfied: numpy>=1.11.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->sklearn) (1.18.5)\n" ], [ "corpus = [\n \"I like fruits. 
Fruits like bananas\",\n \"I love bananas but eat an apple\",\n \"An apple a day keeps the doctor away\"\n]\n", "_____no_output_____" ] ], [ [ "## Count Vectorization", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import CountVectorizer\n\nvectorizer = CountVectorizer()\nX = vectorizer.fit_transform(corpus)\n\nvectorizer.get_feature_names()", "_____no_output_____" ], [ "X.toarray()", "_____no_output_____" ], [ "from sklearn.metrics.pairwise import cosine_similarity\n\ncosine_similarity(X.toarray())", "_____no_output_____" ], [ "query = vectorizer.transform([\"apple and bananas\"])\n\ncosine_similarity(X, query)", "_____no_output_____" ] ], [ [ "## TF-IDF Vectorization", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\ntransformer = TfidfTransformer(smooth_idf=False)\ntfidf = transformer.fit_transform(X.toarray())\n\npd.DataFrame(tfidf.toarray(), \n columns=vectorizer.get_feature_names())", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import LabelEncoder\n\ntfidf = TfidfVectorizer(binary=True)\nX = tfidf.fit_transform(train['Message']).astype('float32')\nX_test = tfidf.transform(test['Message']).astype('float32')", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "from keras.utils import np_utils\n\n_, cols = X.shape\nmodel2 = make_model(cols) # to match tf-idf dimensions\nlb = LabelEncoder()\ny = lb.fit_transform(y_train)\ndummy_y_train = np_utils.to_categorical(y)\nmodel2.fit(X.toarray(), y_train, epochs=10, batch_size=10)", "/usr/local/lib/python3.6/dist-packages/sklearn/preprocessing/_label.py:251: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n" ], [ "model2.evaluate(X_test.toarray(), y_test)", "35/35 [==============================] - 0s 3ms/step - loss: 0.0577 - accuracy: 0.9839\n" ], [ "train.loc[train.Spam == 1].describe() ", "_____no_output_____" ] ], [ [ "# Word Vectors", "_____no_output_____" ] ], [ [ "# memory limit may be exceeded. 
Try deleting some objects before running this next section\n# or copy this section to a different notebook.\n!pip install gensim", "Requirement already satisfied: gensim in /usr/local/lib/python3.6/dist-packages (3.6.0)\nRequirement already satisfied: smart-open>=1.2.1 in /usr/local/lib/python3.6/dist-packages (from gensim) (2.2.0)\nRequirement already satisfied: scipy>=0.18.1 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.4.1)\nRequirement already satisfied: six>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.15.0)\nRequirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from gensim) (1.18.5)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from smart-open>=1.2.1->gensim) (2.23.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.2.1->gensim) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.2.1->gensim) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.2.1->gensim) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->smart-open>=1.2.1->gensim) (3.0.4)\n" ], [ "from gensim.models.word2vec import Word2Vec\nimport gensim.downloader as api\n", "_____no_output_____" ], [ "api.info()", "_____no_output_____" ], [ "model_w2v = api.load(\"word2vec-google-news-300\")", "/usr/local/lib/python3.6/dist-packages/smart_open/smart_open_lib.py:252: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n 'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n" ], [ "model_w2v.most_similar(\"cookies\",topn=10)", "/usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.\n if np.issubdtype(vec.dtype, np.int):\n" ], [ "model_w2v.doesnt_match([\"USA\",\"Canada\",\"India\",\"Tokyo\"])", "/usr/local/lib/python3.6/dist-packages/gensim/models/keyedvectors.py:895: FutureWarning: arrays to stack must be passed as a \"sequence\" type such as list or tuple. Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future.\n vectors = vstack(self.word_vec(word, use_norm=True) for word in used_words).astype(REAL)\n/usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.\n if np.issubdtype(vec.dtype, np.int):\n" ], [ "king = model_w2v['king']\nman = model_w2v['man']\nwoman = model_w2v['woman']\n\nqueen = king - man + woman \nmodel_w2v.similar_by_vector(queen)", "/usr/local/lib/python3.6/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`.\n if np.issubdtype(vec.dtype, np.int):\n" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f14fe2d883cf0f4e059415401f15ff2164dad
449,348
ipynb
Jupyter Notebook
render_file.ipynb
qqullar/coursework
514876bfce0d55f0e99f8605c6a1d7b9bc3ae25b
[ "MIT" ]
null
null
null
render_file.ipynb
qqullar/coursework
514876bfce0d55f0e99f8605c6a1d7b9bc3ae25b
[ "MIT" ]
null
null
null
render_file.ipynb
qqullar/coursework
514876bfce0d55f0e99f8605c6a1d7b9bc3ae25b
[ "MIT" ]
null
null
null
367.715221
56,816
0.923028
[ [ [ "import os\n\nfrom skimage.filters.rank import median\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport skimage.data as data\nimport skimage.segmentation as seg\nimport skimage.filters as filters\nimport skimage.draw as draw\nimport skimage.color as color\nfrom scipy.ndimage.filters import convolve\nfrom skimage.filters import threshold_otsu\nfrom skimage.filters.rank import entropy\nfrom skimage.morphology import disk\nfrom skimage.filters import threshold_multiotsu\nimport skimage\nfrom skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,\n denoise_wavelet, estimate_sigma)\nimport cv2", "_____no_output_____" ], [ "matrices_bc = []\n\ndir_path_bc = r\"D:\\Documents\\Курсова файли\\needed_files\\BC\\404\"\nentries_control = os.listdir(dir_path_bc)\n\ni = 0\nfor file_name in entries_control:\n matrices_bc.append([])\n with open(dir_path_bc + fr\"\\{file_name}\") as f:\n lines = f.readlines()\n for line in lines:\n t = np.array([int(float(x)) for x in line.split()], dtype=np.uint8)\n matrices_bc[i].append(t)\n i += 1 ", "_____no_output_____" ], [ "I = np.array(matrices_bc[0][:-1], dtype=np.uint8)", "_____no_output_____" ], [ "np.std(I)", "_____no_output_____" ], [ "plt.imshow(I,cmap='gray',label=\"(0,1)\")", "_____no_output_____" ], [ "I_new = median(I, disk(1))", "_____no_output_____" ], [ "print(disk(2))", "[[0 0 1 0 0]\n [0 1 1 1 0]\n [1 1 1 1 1]\n [0 1 1 1 0]\n [0 0 1 0 0]]\n" ], [ "plt.imshow(I_new, cmap=\"gray\")", "_____no_output_____" ], [ "from skimage.filters.rank import mean_bilateral\n\nbilat = mean_bilateral(I.astype(np.uint16), disk(1), s0=10, s1=10)\nplt.imshow(bilat, cmap=\"gray\")", "_____no_output_____" ], [ "denoised = denoise_tv_chambolle(I, weight=0.005,eps=0.001)\nplt.imshow(denoised, cmap=\"gray\")", "_____no_output_____" ], [ "plt.imshow(entropy(denoised, disk(7)), cmap=\"gray\")", "C:\\Users\\Vlad\\AppData\\Roaming\\Python\\Python38\\site-packages\\IPython\\core\\interactiveshell.py:3441: UserWarning: Possible precision loss converting image of type float64 to uint8 as required by rank filters. 
Convert manually using skimage.util.img_as_ubyte to silence this warning.\n exec(code_obj, self.user_global_ns, self.user_ns)\n" ], [ "bilat_n = entropy(bilat, disk(7))\nplt.imshow(bilat_n, cmap=\"gray\")", "_____no_output_____" ], [ "#plt.imshow(new_matr,cmap='gray_r', vmin=new_matr.min(), vmax=new_matr.max())", "_____no_output_____" ] ], [ [ "## Sobel filter (bad)", "_____no_output_____" ] ], [ [ "# sacrificial_bridge = np.zeros((50,50))\n# sacrificial_bridge[22:30, 0:21] = 1\n# sacrificial_bridge[22:30, 30:] = 1\n# sacrificial_bridge[25:27, 21:30] = 1\n# plt.imshow(sacrificial_bridge, cmap='gray')\n# plt.show()", "_____no_output_____" ], [ "# # Build Sobel filter for the x dimension\n# s_x = np.array([[1, 0, -1],\n# [2, 0, -2],\n# [1, 0, -1]])\n# # Build a Sobel filter for the y dimension\n# s_y = s_x.T # transposes the matrix", "_____no_output_____" ], [ "# res_x = convolve(sacrificial_bridge, s_x)\n# res_y = convolve(sacrificial_bridge, s_y)\n\n# B = np.sqrt(res_x**2 + res_y**2)\n# plt.imshow(B, cmap=\"gray\")", "_____no_output_____" ], [ "# res_x = convolve(I, s_x)\n# res_y = convolve(I, s_y)\n\n# # square the responses, to capture both sides of each edge\n# G = np.sqrt(res_x**2 + res_y**2)\n# plt.imshow(G)", "_____no_output_____" ] ], [ [ "## Gabor filter $ g(x, y ; \\lambda, \\theta, \\psi, \\sigma, \\gamma)=\\exp \\left(-\\frac{x^{\\prime 2}+\\gamma^{2} y^{\\prime 2}}{2 \\sigma^{2}}\\right) \\exp \\left(i\\left(2 \\pi \\frac{x^{\\prime}}{\\lambda}+\\psi\\right)\\right) $", "_____no_output_____" ] ], [ [ "ksize=45\ntheta=np.pi/2\nkernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype=cv2.CV_32F)\nfiltered_image = cv2.filter2D(I, cv2.CV_8UC3, kernel)\nplt.imshow(filtered_image, cmap='gray')", "_____no_output_____" ] ], [ [ "## Entropy", "_____no_output_____" ] ], [ [ "entropy_img = entropy(I, disk(11))\nplt.imshow(entropy_img, cmap=\"gray\")", "_____no_output_____" ], [ "entropy_max = np.amax(entropy_img)\nentropy_min = np.amin(entropy_img)\nprint(entropy_max)", "4.793897129615557\n" ], [ "plt.hist(entropy_img.flat, bins=500) ", "_____no_output_____" ], [ "?threshold_otsu", "_____no_output_____" ], [ "# thresh = threshold_otsu(entropy_img, nbins=500)\n\n# #Now let us binarize the entropy image \n# binary = entropy_img <= thresh\n# plt.imshow(binary)", "_____no_output_____" ], [ "# binary.shape\n# ?np.reshape", "_____no_output_____" ], [ "thresholds = threshold_multiotsu(entropy_img, classes=3, nbins=500)\nprint(thresholds)\nregions = np.digitize(entropy_img, bins=thresholds)\nprint(regions.max(), regions.min())\nseg1 = (regions == 0)\nseg2 = (regions == 1)\nseg3 = (regions == 2)\nprint(seg3)\nplt.imshow(regions)", "[3.5852522 3.96974505]\n2 0\n[[False False False ... False False False]\n [False False False ... False False False]\n [False False False ... False False False]\n ...\n [False False False ... False False False]\n [False False False ... False False False]\n [False False False ... 
False False False]]\n" ], [ "def p(i,j, matr, d):\n n_rows, n_cols = matr.shape\n dx, dy = d\n \n res = 0\n for x in range(n_rows):\n for y in range(n_cols):\n props1 = [x + dx < n_rows, y + dy < n_cols]\n \n if all(props1):\n if matr[x][y] == i and matr[x + dx][y + dy] == j:\n res += 1\n \n return res\n\ndef coincidence_matr(image, d):\n \"\"\"\n d -- (dx, dy) vector\n image -- N x M matrix\n \"\"\"\n \n res_matr = np.zeros((256, 256))\n vmin, vmax = image.min(), image.max()\n \n # it actually makes sense to look only at the\n # rectangle (vmin x vmax) and leave the rest\n # equal to zero\n for i in range(vmin, vmax):\n for j in range(vmin, vmax):\n res_matr[i, j] = p(i, j, image, d)\n \n \n return res_matr\n", "_____no_output_____" ], [ "%%time\ncoic_entropy = coincidence_matr(I, (0,1))", "_____no_output_____" ], [ "\n\ndef t_(x, a, b):\n \"\"\"[a,b] -> [0, 255]\"\"\"\n assert b > a\n m = 255 / (b - a)\n d = -255 * a / (b - a)\n \n return m * x + d\n\na_min = coic_entropy.min()\nb_max = coic_entropy.max()\nprint(a_min,b_max)\ncoic_entropy = t_(coic_entropy,a_min, b_max)\nbad = coic_entropy < (0.05 * b_max)\nprint(coic_entropy.min(), coic_entropy.max())\n\ncoic_entropy[bad] = 0", "_____no_output_____" ], [ "print(coic_entropy)\nplt.figure(figsize=(10,10))\n# plt.axhline(100)\n# plt.axhline(150)\n# plt.axvline(100)\n# plt.axvline(150)\n\nint_image = coic_entropy.astype(np.uint8)\nprint(int_image)\nnp.savetxt('test1.out', int_image, delimiter=',') \noriginal_array = np.loadtxt(\"test1.out\",delimiter=',').reshape(256, 256)\nplt.imshow(original_array[100:150,100:150], cmap=\"gray_r\")\n#plt.savefig(fname=\"c.png\")\n", "_____no_output_____" ], [ "nonzero = (coic_entropy != 0)\nplt.hist(coic_entropy[nonzero],bins=200)\n#plt.hist(coic_entropy[nonzero].flat, bins=100) ", "_____no_output_____" ], [ "thresh_hold = threshold_otsu(coic_entropy[nonzero],nbins=200)\n\n\nnew_img = np.zeros((256, 256))\nn,m = new_img.shape\n\nfor i in range(n):\n for j in range(m):\n if coic_entropy[i,j] > 0:\n if coic_entropy[i,j] > thresh_hold:\n new_img[i,j] = 1\n else:\n new_img[i,j] = 3\n \nplt.imshow(new_img[110:145,110:145])", "_____no_output_____" ], [ "# entropy is pretty useless here, of course\nen_coic = entropy(coic_entropy[110:145,110:145].astype(np.uint8), disk(2))\nthresh_hold = threshold_otsu(en_coic,nbins=200)\n\nplt.imshow(en_coic,cmap=\"gray_r\")\n\n# new_img = np.zeros((256, 256))\n# n,m = new_img.shape\n\n# for i in range(n):\n# for j in range(m):\n# if coic_entropy[i,j] > 0:\n# if coic_entropy[i,j] > thresh_hold:\n# new_img[i,j] = 1\n# else:\n# new_img[i,j] = 3\n# plt.imshow(en_coic[100:150,100:150], cmap=\"gray_r\")", "_____no_output_____" ], [ "print(I.min(), I.max())\nplt.imshow(I)", "_____no_output_____" ], [ "new_image = np.zeros((159, 160, 3))\nnew_image[seg1] = (150,0,0)\nnew_image[seg2] = (0,150,0)\nnew_image[seg3] = (255,255,255)\nplt.imshow(new_image.astype(np.uint8))", "_____no_output_____" ], [ "matrices_control = []\n\ndir_path = r\"D:\\Documents\\Курсова файли\\needed_files\\Control\\2\"\nentries_control = os.listdir(dir_path)\n\ni = 0\nfor file_name in entries_control:\n matrices_control.append([])\n with open(dir_path + fr\"\\{file_name}\") as f:\n lines = f.readlines()\n for line in lines:\n t = np.array([int(float(x)) for x in line.split()], dtype=np.uint8)\n matrices_control[i].append(t)\n i += 1", "_____no_output_____" ], [ "I_control = np.array(matrices_control[1][:-1])\n\nplt.imshow(I_control, cmap=\"gray\")", "_____no_output_____" ], [ "control_coic = coincidence_matr(I_control, (0,1))",
"_____no_output_____" ], [ "plt.imshow(control_coic, cmap=\"gray_r\")", "_____no_output_____" ], [ "good_contorcontrol_coic = control_coic > (0.05 * control_coic.max())\nprint(m, good)\n\nrows, cols = good_contorcontrol_coic.shape\nfor i in range(rows):\n for j in range(cols):\n if not good_contorcontrol_coic[i,j]:\n control_coic[i,j] = 0", "_____no_output_____" ], [ "plt.figure(figsize=(10,10))\nplt.imshow(np.vstack((control_coic, np.full(256, 255))), cmap=\"gray_r\")", "_____no_output_____" ], [ "I_control_med = median(I_control, disk(3))\n\nplt.imshow(I_control_med, cmap=\"gray\")", "_____no_output_____" ], [ "entropy_img_control = entropy(I_control_med, disk(12))\nplt.imshow(entropy_img_control[100:150,100:150], cmap=\"gray\")", "_____no_output_____" ], [ "thresholds_control = threshold_multiotsu(entropy_img_control, classes=3, nbins=500)\n\nregions_control = np.digitize(entropy_img_control, bins=thresholds)\nplt.imshow(regions_control)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f2b6ec94167b3585ad3fd378d6e67439d4c75
515,703
ipynb
Jupyter Notebook
examples/3D_PODT_phase_simulation/3D_PODT_Phase_forward.ipynb
mehta-lab/waveorder
9892c20955d3487778fd440a0d7f4f86334e7b8e
[ "Unlicense" ]
2
2020-12-19T02:55:09.000Z
2022-02-24T19:40:26.000Z
examples/3D_PODT_phase_simulation/3D_PODT_Phase_forward.ipynb
mehta-lab/waveorder
9892c20955d3487778fd440a0d7f4f86334e7b8e
[ "Unlicense" ]
42
2021-01-20T22:34:14.000Z
2022-03-31T00:13:37.000Z
examples/3D_PODT_phase_simulation/3D_PODT_Phase_forward.ipynb
mehta-lab/waveorder
9892c20955d3487778fd440a0d7f4f86334e7b8e
[ "Unlicense" ]
null
null
null
1,222.045024
260,088
0.957811
[ [ [ "# 3D Partially coherent ODT forward simulation\nThis forward simulation is based on the SEAGLE paper ([here](https://ieeexplore.ieee.org/abstract/document/8074742)): <br>\n```H.-Y. Liu, D. Liu, H. Mansour, P. T. Boufounos, L. Waller, and U. S. Kamilov, \"SEAGLE: Sparsity-Driven Image Reconstruction Under Multiple Scattering,\" IEEE Trans. Computational Imaging vol.4, pp.73-86 (2018).```<br>\nand the 3D PODT paper ([here](https://www.osapublishing.org/oe/fulltext.cfm?uri=oe-25-14-15699&id=368361)): <br>\n```J. M. Soto, J. A. Rodrigo, and T. Alieva, \"Label-free quantitative 3D tomographic imaging for partially coherent light microscopy,\" Opt. Express 25, 15699-15712 (2017).```<br>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.fft import fft, ifft, fft2, ifft2, fftshift, ifftshift, fftn, ifftn\n\nimport pickle\nimport waveorder as wo\n\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nplt.style.use(['dark_background']) # Plotting option for dark background\n", "_____no_output_____" ] ], [ [ "### Experiment parameters", "_____no_output_____" ] ], [ [ "N = 256 # number of pixel in y dimension\nM = 256 # number of pixel in x dimension\nL = 100 # number of layers in z dimension\nn_media = 1.46 # refractive index in the media\nmag = 63 # magnification\nps = 6.5/mag # effective pixel size\npsz = 0.25 # axial pixel size\nlambda_illu = 0.532 # wavelength\nNA_obj = 1.2 # objective NA\nNA_illu = 0.9 # illumination NA", "_____no_output_____" ] ], [ [ "### Sample creation", "_____no_output_____" ] ], [ [ "radius = 5\nblur_size = 2*ps\nsphere, _, _ = wo.gen_sphere_target((N,M,L), ps, psz, radius, blur_size)\nwo.image_stack_viewer(np.transpose(sphere,(2,0,1)))", "_____no_output_____" ], [ "# Physical value assignment\n\nn_sample = 1.50 \n\nRI_map = np.zeros_like(sphere)\nRI_map[sphere > 0] = sphere[sphere > 0]*(n_sample-n_media)\nRI_map += n_media\nt_obj = np.exp(1j*2*np.pi*psz*(RI_map-n_media))\n", "_____no_output_____" ], [ "wo.image_stack_viewer(np.transpose(np.angle(t_obj),(2,0,1)))", "_____no_output_____" ] ], [ [ "### Setup acquisition", "_____no_output_____" ] ], [ [ "# Subsampled Source pattern\n\nxx, yy, fxx, fyy = wo.gen_coordinate((N, M), ps)\nSource_cont = wo.gen_Pupil(fxx, fyy, NA_illu, lambda_illu)\n\n\nSource_discrete = wo.Source_subsample(Source_cont, lambda_illu*fxx, lambda_illu*fyy, subsampled_NA = 0.1)", "_____no_output_____" ], [ "plt.figure(figsize=(10,10))\nplt.imshow(fftshift(Source_discrete),cmap='gray')\n", "_____no_output_____" ], [ "np.sum(Source_discrete)", "_____no_output_____" ], [ "z_defocus = (np.r_[:L]-L//2)*psz\nchi = 0.1*2*np.pi\nsetup = wo.waveorder_microscopy((N,M), lambda_illu, ps, NA_obj, NA_illu, z_defocus, chi, \\\n n_media = n_media, phase_deconv='3D', illu_mode='Arbitrary', Source=Source_cont)\n\nsimulator = wo.waveorder_microscopy_simulator((N,M), lambda_illu, ps, NA_obj, NA_illu, z_defocus, chi, \\\n n_media = n_media, illu_mode='Arbitrary', Source=Source_discrete)\n", "_____no_output_____" ], [ "plt.figure(figsize=(5,5))\nplt.imshow(fftshift(setup.Source), cmap='gray')\nplt.colorbar()", "_____no_output_____" ], [ "H_re_vis = fftshift(setup.H_re)\n\nwo.plot_multicolumn([np.real(H_re_vis)[:,:,L//2], np.transpose(np.real(H_re_vis)[N//2,:,:]), \\\n np.imag(H_re_vis)[:,:,L//2], np.transpose(np.imag(H_re_vis)[N//2,:,:])], \\\n num_col=2, size=8, set_title=True, \\\n titles=['$xy$-slice of Re{$H_{re}$} at $u_z=0$', '$xz$-slice of Re{$H_{re}$} at $u_y=0$', \\\n '$xy$-slice of Im{$H_{re}$} at $u_z=0$', 
'$xz$-slice of Im{$H_{re}$} at $u_y=0$'], colormap='jet')", "_____no_output_____" ], [ "H_im_vis = fftshift(setup.H_im)\n\nwo.plot_multicolumn([np.real(H_im_vis)[:,:,L//2], np.transpose(np.real(H_im_vis)[N//2,:,:]), \\\n np.imag(H_im_vis)[:,:,L//2], np.transpose(np.imag(H_im_vis)[N//2,:,:])], \\\n num_col=2, size=8, set_title=True, \\\n titles=['$xy$-slice of Re{$H_{im}$} at $u_z=0$', '$xz$-slice of Re{$H_{im}$} at $u_y=0$', \\\n '$xy$-slice of Im{$H_{im}$} at $u_z=0$', '$xz$-slice of Im{$H_{im}$} at $u_y=0$'], colormap='jet')", "_____no_output_____" ], [ "I_meas = simulator.simulate_3D_scalar_measurements(t_obj)", "Number of point sources considered (100 / 253) in pattern (1 / 1), elapsed time: 234.21\nNumber of point sources considered (200 / 253) in pattern (1 / 1), elapsed time: 485.94\nNumber of point sources considered (253 / 253) in pattern (1 / 1), elapsed time: 663.73\n" ], [ "wo.image_stack_viewer(np.transpose(np.abs(I_meas),(0,1,2)))", "_____no_output_____" ], [ "# Save simulations\n\noutput_file = '3D_PODT_simulation'\n\nnp.savez(output_file, I_meas=I_meas, lambda_illu=lambda_illu, \\\n n_media=n_media, NA_obj=NA_obj, NA_illu=NA_illu, ps=ps, psz=psz, Source_cont=Source_cont)\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f33d25fc0cf8ddd2e3aa6755cd6b98f5fc8f7
33,358
ipynb
Jupyter Notebook
01_fastai.dataloader.ipynb
muellerzr/fastdebug
45a370aa408d382b5d58e403a6bea34705574844
[ "Apache-2.0" ]
26
2021-04-22T23:08:23.000Z
2022-01-10T23:54:07.000Z
01_fastai.dataloader.ipynb
muellerzr/fastdebug
45a370aa408d382b5d58e403a6bea34705574844
[ "Apache-2.0" ]
7
2021-04-23T03:26:09.000Z
2021-06-22T05:17:14.000Z
01_fastai.dataloader.ipynb
muellerzr/fastdebug
45a370aa408d382b5d58e403a6bea34705574844
[ "Apache-2.0" ]
5
2021-04-25T14:56:25.000Z
2022-02-01T17:35:57.000Z
187.404494
2,424
0.701211
[ [ [ "#default_exp fastai.dataloader", "_____no_output_____" ] ], [ [ "# DataLoader Errors\n> Errors and exceptions for any step of the `DataLoader` process", "_____no_output_____" ], [ "This includes `after_item`, `after_batch`, and collating. Anything in relation to the `Datasets` or anything before the `DataLoader` process can be found in `fastdebug.fastai.dataset`", "_____no_output_____" ] ], [ [ "#export\nimport inflect\nfrom fastcore.basics import patch\nfrom fastai.data.core import TfmdDL\nfrom fastai.data.load import DataLoader, fa_collate, fa_convert", "_____no_output_____" ], [ "#export\ndef collate_error(e:Exception, batch):\n \"\"\"\n Raises an explicit error when the batch could not collate, stating\n what items in the batch are different sizes and their types\n \"\"\"\n p = inflect.engine()\n err = f'Error when trying to collate the data into batches with fa_collate, '\n err += 'at least two tensors in the batch are not the same size.\\n\\n'\n # we need to iterate through the entire batch and find a mismatch\n length = len(batch[0])\n for idx in range(length): # for each type in the batch\n for i, item in enumerate(batch):\n if i == 0:\n shape_a = item[idx].shape\n type_a = item[idx].__class__.__name__\n elif item[idx].shape != shape_a:\n shape_b = item[idx].shape\n if shape_a != shape_b:\n err += f'Mismatch found within the {p.ordinal(idx)} axis of the batch and is of type {type_a}:\\n'\n err += f'The first item has shape: {shape_a}\\n'\n err += f'The {p.number_to_words(p.ordinal(i+1))} item has shape: {shape_b}\\n\\n'\n err += f'Please include a transform in `after_item` that ensures all data of type {type_a} is the same size'\n e.args = [err]\n raise e", "_____no_output_____" ], [ "#export\n@patch\ndef create_batch(self:DataLoader, b):\n \"Collate a list of items into a batch.\"\n func = (fa_collate,fa_convert)[self.prebatched]\n try:\n return func(b)\n except Exception as e:\n if not self.prebatched:\n collate_error(e, b) \n else: raise e", "_____no_output_____" ] ], [ [ "`collate_error` is `@patch`'d into `DataLoader`'s `create_batch` function through importing this module, so if there is any possible reason why the data cannot be collated into the batch, it is presented to the user.\n\nAn example is below, where we forgot to include an item transform that resizes all our images to the same size:", "_____no_output_____" ] ], [ [ "#failing\nfrom fastai.vision.all import *\npath = untar_data(URLs.PETS)/'images'\ndls = ImageDataLoaders.from_name_func(\n path, get_image_files(path), valid_pct=0.2,\n label_func=lambda x: x[0].isupper())\n\nx,y = dls.train.one_batch()", "_____no_output_____" ], [ "#export\n@patch\ndef new(self:TfmdDL, dataset=None, cls=None, **kwargs):\n res = super(TfmdDL, self).new(dataset, cls, do_setup=False, **kwargs)\n if not hasattr(self, '_n_inp') or not hasattr(self, '_types'):\n try:\n self._one_pass()\n res._n_inp,res._types = self._n_inp,self._types\n except Exception as e: \n print(\"Could not do one pass in your dataloader, there is something wrong in it\")\n raise e\n else: res._n_inp,res._types = self._n_inp,self._types\n return res", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c51f416c77b806078b6ef6c2c044a8ccfaeb1203
234,891
ipynb
Jupyter Notebook
Distributed-Resource/2021_jupyter/setColorClusteredScatter.ipynb
kidrabit/Data-Visualization-Lab-RND
baa19ee4e9f3422a052794e50791495632290b36
[ "Apache-2.0" ]
1
2022-01-18T01:53:34.000Z
2022-01-18T01:53:34.000Z
Distributed-Resource/2021_jupyter/setColorClusteredScatter.ipynb
kidrabit/Data-Visualization-Lab-RND
baa19ee4e9f3422a052794e50791495632290b36
[ "Apache-2.0" ]
null
null
null
Distributed-Resource/2021_jupyter/setColorClusteredScatter.ipynb
kidrabit/Data-Visualization-Lab-RND
baa19ee4e9f3422a052794e50791495632290b36
[ "Apache-2.0" ]
null
null
null
225.856731
95,268
0.876687
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.arange(5)\ny = x\nt = x\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.scatter(x, y, c=t, cmap='viridis')\nax2.scatter(x, y, c=t, cmap='viridis_r')\n\ncolor = \"red\"\nplt.scatter(x, y, c=color)", "_____no_output_____" ], [ "sequence_of_colors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\",\"red\", \"orange\", \"yellow\", \"green\", \"blue\"]\nplt.scatter(x, y, c=sequence_of_colors)\n", "_____no_output_____" ], [ "sample_size = 1000\ncolor_num = 3\n\nX = np.random.normal(0, 1, sample_size)\nY = np.random.normal(0, 1, sample_size)\nC = np.random.randint(0, color_num, sample_size)\n\nprint(\"X.shape : {}, \\n{}\".format(X.shape, X))\nprint(\"Y.shape : {}, \\n{}\".format(Y.shape, Y))\nprint(\"C.shape : {}, \\n{}\".format(C.shape, C))\n\nplt.figure(figsize=(12, 4))\nplt.scatter(X, Y, c=C, s=20, cmap=plt.cm.get_cmap('rainbow', color_num), alpha=0.5)\nplt.colorbar(ticks=range(color_num), format='color: %d', label='color')\nplt.show()", "X.shape : (1000,), \n[-1.76099723e+00 -8.10957875e-01 -1.53751591e+00 5.00964119e-01\n -1.68285127e-01 -4.39034853e-01 -1.09874970e+00 1.27960058e+00\n 1.87994876e-01 -2.42326018e-01 -1.85215482e-01 2.04658100e+00\n 2.56413394e-01 -3.14658376e-01 1.87346978e-01 8.51595749e-01\n -4.30134639e-01 -3.14856904e-01 -6.00324304e-01 2.79690575e-01\n -9.59448975e-01 6.23965327e-01 -1.66002696e+00 4.83852861e-02\n -2.40189558e-02 -1.62320100e+00 1.37188244e+00 -2.89535015e-02\n -7.87049182e-01 -1.64233884e+00 -3.54979912e-01 -7.54002821e-01\n -2.28233489e+00 1.42306302e+00 -8.81340205e-01 1.54418195e+00\n 1.34145549e+00 -1.07534356e+00 -6.39651450e-01 4.97086918e-01\n -1.50760869e-01 8.54537011e-01 -5.61187167e-01 1.94093333e-01\n 4.29023856e-01 -2.80912583e-01 -6.98570656e-01 5.24927897e-01\n -1.14642761e+00 -6.00728655e-01 -5.32786132e-01 5.87832629e-01\n 2.63216771e-01 -3.48568161e-01 -1.85247985e+00 -1.05948151e-01\n -3.86591981e-01 -1.41176819e+00 -1.48234228e+00 -7.69668753e-02\n 3.11073179e-01 -1.57233357e-01 -1.55425819e-01 6.76469983e-01\n -4.51757746e-01 -3.47706446e-01 -7.94194095e-01 -2.07713469e+00\n -1.33691655e+00 -6.51794326e-01 -1.76684590e+00 -3.32129936e-01\n 9.60295389e-01 -1.53568977e+00 1.02315294e+00 -7.67548147e-02\n -9.24321957e-01 -1.37955819e+00 1.34628637e-01 -1.26264716e-01\n 2.03248333e-01 -1.06943484e+00 1.28951134e+00 5.78013394e-02\n -8.45869446e-01 -2.19412691e-01 -4.67245830e-02 6.14096010e-01\n -2.72660959e-02 5.06125659e-01 -5.98218163e-01 -6.43309066e-01\n 4.81059146e-01 1.94421845e-01 4.51401362e-01 2.11172316e-01\n 6.34199809e-02 1.21026727e+00 -1.12965551e+00 7.70660639e-01\n -8.90906985e-01 7.94253572e-02 7.63061472e-01 4.26418177e-02\n 3.61768523e-01 9.11618120e-01 -4.72477991e-01 -1.99983493e-01\n -1.35985591e+00 -3.29245252e-01 -1.12432441e+00 1.65088096e+00\n 4.03905260e-01 -2.89565676e-01 1.79359071e-01 9.64667047e-01\n 2.26155761e-02 -1.92345863e-01 9.25550199e-01 -1.78001431e+00\n 9.92949338e-01 7.77141680e-01 1.32677865e+00 -1.20440002e-01\n 4.07876249e-01 -7.98533351e-01 1.20032958e+00 4.32820690e-01\n -3.82600021e-01 3.19591035e+00 2.20773649e-01 -1.20034314e-01\n -7.76925099e-01 1.27674723e+00 -5.46681593e-01 -3.45858670e-01\n -3.20874696e-02 -1.06647689e+00 1.24231561e+00 2.15689671e-01\n 1.24352654e-01 4.00039017e-01 -1.32794826e+00 5.64972286e-01\n -5.34553132e-02 -2.23167327e+00 1.08151574e+00 -4.06934461e-01\n 1.51173241e+00 7.27543627e-01 6.42859155e-01 4.09602355e-01\n -8.58405683e-01 1.62358355e+00 -4.22232467e-01 8.23162404e-01\n 
 ...(remaining X values elided for brevity)...]\nY.shape : (1000,), \n[ 3.60036244e-01 8.66871392e-02 4.95967885e-02 -1.45370523e+00\n ...(remaining Y values elided for brevity; tail follows)... 
4.89815708e-01\n -1.02775733e+00 8.38175459e-01 -1.45244989e-01 1.14817578e+00\n -8.35778764e-01 -1.35131012e+00 3.28956652e-01 -1.39304757e+00\n -5.23772116e-02 -1.68645430e+00 -5.37955976e-02 -4.04718701e-01\n -7.57378864e-01 -2.23649081e-01 4.98587232e-01 -7.00400976e-02\n 2.21055448e+00 -2.44366948e-01 -2.37639634e+00 3.30088537e-01\n -2.75262605e+00 2.34327691e-01 5.91405215e-01 -1.08480863e+00\n 4.42931738e-01 1.38861649e-01 -8.02998018e-01 5.53624520e-02\n 1.78205018e+00 -5.88266299e-01 2.54288428e-01 -1.91954924e+00\n -9.90026667e-01 1.66348215e+00 3.01339634e-01 -1.64846266e+00\n -1.14443387e+00 -1.12807133e+00 -3.84521404e-01 -3.52030867e-01\n -2.05381820e-03 -1.18492780e-01 -4.44668423e-01 -6.87291842e-01\n -2.64694667e-01 -1.72799197e+00 -2.03029180e-01 3.72036200e-01\n -1.60255991e-01 -1.03756777e+00 -8.02155195e-01 -6.77366739e-01\n 4.73362213e-02 1.44007295e+00 9.86710896e-01 2.52740656e+00\n -9.19706430e-01 -2.39459848e-01 4.54020915e-01 1.62466391e+00\n -5.81951246e-01 -9.29021962e-01 1.02656835e+00 -2.10793310e-01\n -6.90643995e-01 -6.95757321e-01 -4.77920346e-01 -3.16042340e-01]\nC.shape : (1000,), \n[2 1 0 0 2 0 0 1 0 0 0 0 1 1 2 0 0 0 2 0 2 0 1 0 1 1 2 2 2 1 0 2 0 1 2 2 0\n 0 0 2 1 2 0 2 2 1 2 2 1 0 1 0 0 0 2 2 0 1 0 2 2 2 0 1 2 1 0 0 1 1 0 0 2 1\n 0 2 1 1 1 0 0 2 1 2 2 0 0 1 0 2 0 2 1 1 2 2 0 0 0 0 1 2 2 2 1 1 1 0 2 0 2\n 1 0 0 1 0 1 1 2 1 0 1 0 2 0 2 0 2 2 2 1 1 1 2 2 2 1 0 2 1 2 1 0 0 2 0 1 1\n 0 2 0 1 1 0 2 0 0 1 2 0 2 0 1 2 1 1 0 2 1 2 1 2 1 2 2 1 0 2 1 0 0 2 1 0 0\n 2 0 2 2 0 1 1 1 1 1 1 2 0 0 0 1 0 2 1 2 2 1 0 1 0 0 1 1 0 2 0 1 1 2 1 1 1\n 1 0 1 1 2 0 0 1 2 2 0 2 0 0 2 0 1 1 1 0 1 1 0 0 1 0 2 2 2 0 2 0 0 1 0 0 2\n 1 0 0 1 0 0 2 1 0 2 2 0 0 0 0 0 0 1 1 1 1 1 0 2 2 0 2 1 2 0 1 1 0 2 0 0 2\n 2 0 2 1 2 0 0 1 1 2 1 2 1 2 2 2 0 1 0 1 1 2 0 2 0 2 1 0 1 1 2 0 0 2 2 2 1\n 2 1 2 1 2 0 2 0 1 0 2 2 0 1 1 1 2 2 2 1 0 0 0 2 1 0 1 2 0 1 2 0 2 0 2 2 0\n 2 0 2 1 2 0 1 1 2 0 1 0 1 2 0 0 2 0 1 2 1 2 0 0 0 2 2 1 1 0 0 2 0 1 0 1 0\n 0 2 1 1 1 1 0 0 2 0 0 1 1 2 2 1 1 2 1 0 1 1 1 1 0 1 1 0 0 1 2 0 1 1 1 2 2\n 0 1 2 2 1 2 0 1 0 1 2 2 2 2 2 1 0 0 2 2 1 0 0 0 0 1 2 2 2 2 1 0 1 1 0 2 0\n 1 1 2 2 0 0 1 1 2 1 0 2 1 2 1 0 0 1 0 1 0 2 1 1 1 1 2 2 0 1 1 2 0 2 2 0 0\n 0 1 2 0 0 0 2 1 1 2 1 1 1 2 2 2 1 1 1 2 1 0 2 0 0 2 0 0 0 2 1 1 1 0 2 0 0\n 0 2 0 2 2 2 1 1 1 2 0 1 0 1 0 0 1 0 0 1 2 0 2 2 0 2 1 2 2 1 1 0 2 1 1 0 0\n 1 0 0 2 1 0 0 2 1 2 2 1 0 2 1 1 2 1 0 2 1 2 2 1 2 0 2 1 2 2 1 1 1 1 2 2 2\n 1 1 0 0 1 0 0 1 0 1 1 2 0 1 2 2 1 1 2 1 0 0 2 1 1 0 1 0 2 0 1 0 2 0 0 2 1\n 0 1 2 0 1 0 0 2 2 2 1 1 1 0 1 0 0 0 1 1 0 0 0 1 1 2 2 0 2 2 1 2 1 0 1 0 2\n 2 0 0 0 1 0 1 0 1 1 2 1 1 1 2 1 1 2 1 1 1 0 0 2 0 2 0 0 0 1 1 1 0 2 0 2 1\n 2 0 0 2 0 0 0 0 1 0 1 1 2 1 1 2 0 0 2 0 0 1 0 1 0 2 0 2 0 2 1 1 0 2 2 1 2\n 2 1 1 2 2 2 0 2 2 0 0 1 0 0 2 0 0 1 2 0 2 0 2 0 2 1 0 2 1 0 2 2 0 2 2 0 2\n 2 0 2 0 1 0 1 0 1 0 0 1 1 0 2 0 0 2 2 1 2 1 0 0 2 0 2 1 1 0 1 0 1 2 1 2 2\n 2 2 1 2 2 2 2 2 2 0 0 0 1 2 1 1 1 1 1 1 2 1 1 2 2 1 2 2 0 1 2 1 1 0 0 1 2\n 1 2 0 1 0 1 0 0 2 2 2 0 0 0 0 2 0 1 0 0 1 1 0 1 0 1 1 1 2 1 2 2 0 1 0 0 1\n 0 1 1 2 2 2 1 1 0 2 2 2 0 1 1 0 1 0 2 1 0 0 0 1 0 1 0 1 0 1 0 2 0 1 1 0 2\n 1 0 2 2 2 1 1 0 1 0 2 0 1 0 2 1 0 1 0 0 1 2 0 2 2 2 2 2 1 1 0 1 1 1 1 2 1\n 1]\n" ], [ "plt.cm.get_cmap('rainbow', color_num)", "_____no_output_____" ], [ "for a in np.linspace(0, 1.0, 5):\n print(plt.cm.rainbow(a))", "(0.5, 0.0, 1.0, 1.0)\n(0.0019607843137254832, 0.7092813076058535, 0.9232891061054894, 1.0)\n(0.503921568627451, 0.9999810273487268, 0.7049255469061472, 1.0)\n(1.0, 0.7005430375932911, 0.37841105004231035, 1.0)\n(1.0, 
1.2246467991473532e-16, 6.123233995736766e-17, 1.0)\n" ], [ "import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport itertools\n\nsample_size = 100\nx = np.vstack([\n np.random.normal(0, 1, sample_size).reshape(sample_size//2, 2), \n np.random.normal(2, 1, sample_size).reshape(sample_size//2, 2), \n np.random.normal(4, 1, sample_size).reshape(sample_size//2, 2)\n])#50,2\ny = np.array(list(itertools.chain.from_iterable([ [i+1 for j in range(0, sample_size//2)] for i in range(0, 3)])))\ny = y.reshape(-1, 1)\n\ndf = pd.DataFrame(np.hstack([x, y]), columns=['x1', 'x2', 'y'])\n\nprint(\"x : {}, y : {}, df : {}\".format(x.shape, y.shape, df.shape))\nprint(df)\n\nc_lst = [plt.cm.rainbow(a) for a in np.linspace(0.0, 1.0, len(set(df['y'])))]\n\nplt.figure(figsize=(12, 4))\nfor i, g in enumerate(df.groupby('y')):\n plt.scatter(g[1]['x1'], g[1]['x2'], color=c_lst[i], label='group {}'.format(int(g[0])), alpha=0.5)\nplt.legend()\nplt.show()", "x : (150, 2), y : (150, 1), df : (150, 3)\n x1 x2 y\n0 2.423207 0.786940 1.0\n1 -1.119072 -0.766465 1.0\n2 1.439103 1.211885 1.0\n3 -1.432155 0.113745 1.0\n4 0.150998 1.691901 1.0\n.. ... ... ...\n145 3.999326 2.974575 3.0\n146 4.532834 4.314051 3.0\n147 4.032028 5.290013 3.0\n148 2.421906 2.249826 3.0\n149 4.443413 5.310679 3.0\n\n[150 rows x 3 columns]\n" ], [ "import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport itertools\nfrom matplotlib import colors\n\n_cmap = ['#1A90F0', '#F93252', '#FEA250', '#276B29', '#362700', \n '#2C2572', '#D25ABE', '#4AB836', '#A859EA', '#65C459', \n '#C90B18', '#E02FD1', '#5FAFD4', '#DAF779', '#ECEE25', \n '#56B390', '#F3BBBE', '#8FC0AE', '#0F16F5', '#8A9EFE', \n '#A23965', '#03F70C', '#A8D520', '#952B77', '#2A493C', \n '#E8DB82', '#7C01AC', '#1938A3', '#3C4249', '#BC3D92', \n '#DEEDB1', '#3C673E', '#65F3D7', '#77110B', '#D16DD6', \n '#08EF68', '#CFFD6F', '#DC6B26', '#912D5D', '#8CA6F8', \n '#04EE96', '#54B0C1', '#6CBE38', '#24633B', '#DE41DD', \n '#5EF270', '#896991', '#E6D381', '#7B0681', '#D66C07'\n ]\n\nsample_size = 256\nx = np.vstack([\n np.random.normal(0, 1, sample_size).reshape(sample_size//2, 2), \n np.random.normal(2, 1, sample_size).reshape(sample_size//2, 2), \n np.random.normal(4, 1, sample_size).reshape(sample_size//2, 2),\n np.random.normal(3, 1, sample_size).reshape(sample_size//2, 2)\n])#50,2\nprint(x.shape)\ny = np.array(list(itertools.chain.from_iterable([ [i+1 for j in range(0, int(sample_size/4))] for i in range(0, 8)])))\ny = y.reshape(-1, 1)\n\ndf = pd.DataFrame(np.hstack([x, y]), columns=['x1', 'x2', 'y'])\n\nc_lst = [plt.cm.rainbow(a) for a in np.linspace(0.0, 1.0, len(set(df['y'])))]\n\nplt.figure(figsize=(12, 4))\nprint(\"groupby : \", df.groupby('y'))\nfor i, g in enumerate(df.groupby('y')):\n print(i, \"g[1]\", g[1])\n print(i, \"g[0]\", g[0])\n plt.scatter(g[1]['x1'], g[1]['x2'], color=_cmap[i], label='group {}'.format(int(g[0])), alpha=0.5)\nplt.legend()\nplt.show()", "(512, 2)\ngroupby : <pandas.core.groupby.generic.DataFrameGroupBy object at 0x000001E2E30C11D0>\n0 g[1] x1 x2 y\n0 -0.640573 -1.111698 1.0\n1 -0.062906 0.513578 1.0\n2 0.564721 -0.702725 1.0\n3 -0.600265 0.492202 1.0\n4 0.667594 -0.372987 1.0\n.. ... ... ...\n59 0.188234 1.121310 1.0\n60 0.146818 1.143033 1.0\n61 -0.302742 1.230840 1.0\n62 -1.978857 -0.957770 1.0\n63 -0.406510 -1.093352 1.0\n\n[64 rows x 3 columns]\n0 g[0] 1.0\n1 g[1] x1 x2 y\n64 0.224609 -0.435019 2.0\n65 0.502404 -0.093547 2.0\n66 -1.226480 -1.594582 2.0\n67 0.023910 1.364843 2.0\n68 1.027505 1.066810 2.0\n.. ... 
 ...(analogous per-group DataFrame output for groups 2.0 through 7.0 elided for brevity; tail of group 8.0 follows)... 
...\n507 1.754375 4.056626 8.0\n508 3.964342 2.904370 8.0\n509 3.264446 1.016059 8.0\n510 4.264944 2.940507 8.0\n511 3.853722 2.219794 8.0\n\n[64 rows x 3 columns]\n7 g[0] 8.0\n" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom struct import unpack\nfrom sklearn import cluster\nimport datetime\nimport seaborn as sns\nfrom sklearn.preprocessing import PowerTransformer, normalize, MinMaxScaler, StandardScaler\nfrom struct import pack\nfrom matplotlib import colors\nfrom sklearn.metrics import silhouette_score, silhouette_samples\nimport matplotlib.cm as cm\nimport matplotlib\n\n\n_cmap = colors.ListedColormap(['#1A90F0', '#F93252', '#FEA250', '#276B29', '#362700', \n '#2C2572', '#D25ABE', '#4AB836', '#A859EA', '#65C459', \n '#C90B18', '#E02FD1', '#5FAFD4', '#DAF779', '#ECEE25', \n '#56B390', '#F3BBBE', '#8FC0AE', '#0F16F5', '#8A9EFE', \n '#A23965', '#03F70C', '#A8D520', '#952B77', '#2A493C', \n '#E8DB82', '#7C01AC', '#1938A3', '#3C4249', '#BC3D92', \n '#DEEDB1', '#3C673E', '#65F3D7', '#77110B', '#D16DD6', \n '#08EF68', '#CFFD6F', '#DC6B26', '#912D5D', '#8CA6F8', \n '#04EE96', '#54B0C1', '#6CBE38', '#24633B', '#DE41DD', \n '#5EF270', '#896991', '#E6D381', '#7B0681', '#D66C07'\n ])", "_____no_output_____" ], [ "#matplotlib.colors.ListedColormap(colors, name='from_list', N=None)\n\n\ntest = matplotlib.colors.ListedColormap(_cmap.colors[:5])\nprint(test.colors)\nprint(_cmap.colors[:5])", "['#1A90F0', '#F93252', '#FEA250', '#276B29', '#362700']\n['#1A90F0', '#F93252', '#FEA250', '#276B29', '#362700']\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f483435f856a7f67ea7e9cf66a1c0f05ae61c
14,492
ipynb
Jupyter Notebook
Course3_Applied_Machine_Learning_in_Python/.ipynb_checkpoints/Assignment+4-checkpoint.ipynb
Collumbus/Applied_Data_Science-with_Python-Coursera
b567072ff4ec41a44416071fc05d95c7ed285f1d
[ "MIT" ]
null
null
null
Course3_Applied_Machine_Learning_in_Python/.ipynb_checkpoints/Assignment+4-checkpoint.ipynb
Collumbus/Applied_Data_Science-with_Python-Coursera
b567072ff4ec41a44416071fc05d95c7ed285f1d
[ "MIT" ]
null
null
null
Course3_Applied_Machine_Learning_in_Python/.ipynb_checkpoints/Assignment+4-checkpoint.ipynb
Collumbus/Applied_Data_Science-with_Python-Coursera
b567072ff4ec41a44416071fc05d95c7ed285f1d
[ "MIT" ]
null
null
null
50.319444
773
0.639387
[ [ [ "---\n\n_You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._\n\n---", "_____no_output_____" ], [ "## Assignment 4 - Understanding and Predicting Property Maintenance Fines\n\nThis assignment is based on a data challenge from the Michigan Data Science Team ([MDST](http://midas.umich.edu/mdst/)). \n\nThe Michigan Data Science Team ([MDST](http://midas.umich.edu/mdst/)) and the Michigan Student Symposium for Interdisciplinary Statistical Sciences ([MSSISS](https://sites.lsa.umich.edu/mssiss/)) have partnered with the City of Detroit to help solve one of the most pressing problems facing Detroit - blight. [Blight violations](http://www.detroitmi.gov/How-Do-I/Report/Blight-Complaint-FAQs) are issued by the city to individuals who allow their properties to remain in a deteriorated condition. Every year, the city of Detroit issues millions of dollars in fines to residents and every year, many of these fines remain unpaid. Enforcing unpaid blight fines is a costly and tedious process, so the city wants to know: how can we increase blight ticket compliance?\n\nThe first step in answering this question is understanding when and why a resident might fail to comply with a blight ticket. This is where predictive modeling comes in. For this assignment, your task is to predict whether a given blight ticket will be paid on time.\n\nAll data for this assignment has been provided to us through the [Detroit Open Data Portal](https://data.detroitmi.gov/). **Only the data already included in your Coursera directory can be used for training the model for this assignment.** Nonetheless, we encourage you to look into data from other Detroit datasets to help inform feature creation and model selection. We recommend taking a look at the following related datasets:\n\n* [Building Permits](https://data.detroitmi.gov/Property-Parcels/Building-Permits/xw2a-a7tf)\n* [Trades Permits](https://data.detroitmi.gov/Property-Parcels/Trades-Permits/635b-dsgv)\n* [Improve Detroit: Submitted Issues](https://data.detroitmi.gov/Government/Improve-Detroit-Submitted-Issues/fwz3-w3yn)\n* [DPD: Citizen Complaints](https://data.detroitmi.gov/Public-Safety/DPD-Citizen-Complaints-2016/kahe-efs3)\n* [Parcel Map](https://data.detroitmi.gov/Property-Parcels/Parcel-Map/fxkw-udwf)\n\n___\n\nWe provide you with two data files for use in training and validating your models: train.csv and test.csv. Each row in these two files corresponds to a single blight ticket, and includes information about when, why, and to whom each ticket was issued. The target variable is compliance, which is True if the ticket was paid early, on time, or within one month of the hearing data, False if the ticket was paid after the hearing date or not at all, and Null if the violator was found not responsible. Compliance, as well as a handful of other variables that will not be available at test-time, are only included in train.csv.\n\nNote: All tickets where the violators were found not responsible are not considered during evaluation. They are included in the training set as an additional source of data for visualization, and to enable unsupervised and semi-supervised approaches. 
However, they are not included in the test set.\n\n<br>\n\n**File descriptions** (Use only this data for training your model!)\n\n readonly/train.csv - the training set (all tickets issued 2004-2011)\n readonly/test.csv - the test set (all tickets issued 2012-2016)\n readonly/addresses.csv & readonly/latlons.csv - mapping from ticket id to addresses, and from addresses to lat/lon coordinates. \n Note: misspelled addresses may be incorrectly geolocated.\n\n<br>\n\n**Data fields**\n\ntrain.csv & test.csv\n\n ticket_id - unique identifier for tickets\n agency_name - Agency that issued the ticket\n inspector_name - Name of inspector that issued the ticket\n violator_name - Name of the person/organization that the ticket was issued to\n violation_street_number, violation_street_name, violation_zip_code - Address where the violation occurred\n mailing_address_str_number, mailing_address_str_name, city, state, zip_code, non_us_str_code, country - Mailing address of the violator\n ticket_issued_date - Date and time the ticket was issued\n hearing_date - Date and time the violator's hearing was scheduled\n violation_code, violation_description - Type of violation\n disposition - Judgment and judgement type\n fine_amount - Violation fine amount, excluding fees\n admin_fee - $20 fee assigned to responsible judgments\nstate_fee - $10 fee assigned to responsible judgments\n late_fee - 10% fee assigned to responsible judgments\n discount_amount - discount applied, if any\n clean_up_cost - DPW clean-up or graffiti removal cost\n judgment_amount - Sum of all fines and fees\n grafitti_status - Flag for graffiti violations\n \ntrain.csv only\n\n payment_amount - Amount paid, if any\n payment_date - Date payment was made, if it was received\n payment_status - Current payment status as of Feb 1 2017\n balance_due - Fines and fees still owed\n collection_status - Flag for payments in collections\n compliance [target variable for prediction] \n Null = Not responsible\n 0 = Responsible, non-compliant\n 1 = Responsible, compliant\n compliance_detail - More information on why each ticket was marked compliant or non-compliant\n\n\n___\n\n## Evaluation\n\nYour predictions will be given as the probability that the corresponding blight ticket will be paid on time.\n\nThe evaluation metric for this assignment is the Area Under the ROC Curve (AUC). \n\nYour grade will be based on the AUC score computed for your classifier. A model which with an AUROC of 0.7 passes this assignment, over 0.75 will recieve full points.\n___\n\nFor this assignment, create a function that trains a model to predict blight ticket compliance in Detroit using `readonly/train.csv`. Using this model, return a series of length 61001 with the data being the probability that each corresponding ticket from `readonly/test.csv` will be paid, and the index being the ticket_id.\n\nExample:\n\n ticket_id\n 284932 0.531842\n 285362 0.401958\n 285361 0.105928\n 285338 0.018572\n ...\n 376499 0.208567\n 376500 0.818759\n 369851 0.018528\n Name: compliance, dtype: float32\n \n### Hints\n\n* Make sure your code is working before submitting it to the autograder.\n\n* Print out your result to see whether there is anything weird (e.g., all probabilities are the same).\n\n* Generally the total runtime should be less than 10 mins. You should NOT use Neural Network related classifiers (e.g., MLPClassifier) in this question. \n\n* Try to avoid global variables. 
If you have other functions besides blight_model, you should move those functions inside the scope of blight_model.\n\n* Refer to the pinned threads in Week 4's discussion forum when there is something you could not figure it out.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\ndef dblight_model():\n \n # Your code here\n \n return # Your answer here", "_____no_output_____" ], [ "def blight_model():\n from sklearn.neural_network import MLPClassifier\n from sklearn.preprocessing import MinMaxScaler\n from sklearn.tree import DecisionTreeClassifier\n from datetime import datetime\n def tg(hearing_date_str, ticket_issued_date_str):\n if not hearing_date_str or type(hearing_date_str)!=str: return 73\n hearing_date = datetime.strptime(hearing_date_str, \"%Y-%m-%d %H:%M:%S\")\n ticket_issued_date = datetime.strptime(ticket_issued_date_str, \"%Y-%m-%d %H:%M:%S\")\n gap = hearing_date - ticket_issued_date\n return gap.days\n train_data = pd.read_csv('readonly/train.csv', encoding = 'ISO-8859-1')\n test_data = pd.read_csv('readonly/test.csv')\n train_data = train_data[(train_data['compliance'] == 0) | (train_data['compliance'] == 1)]\n address = pd.read_csv('readonly/addresses.csv')\n latlons = pd.read_csv('readonly/latlons.csv')\n address = address.set_index('address').join(latlons.set_index('address'), how='left')\n train_data = train_data.set_index('ticket_id').join(address.set_index('ticket_id'))\n test_data = test_data.set_index('ticket_id').join(address.set_index('ticket_id'))\n train_data = train_data[~train_data['hearing_date'].isnull()]\n train_data['tg'] = train_data.apply(lambda row: tg(row['hearing_date'], row['ticket_issued_date']), axis=1)\n test_data['tg'] = test_data.apply(lambda row: tg(row['hearing_date'], row['ticket_issued_date']), axis=1)\n feature_to_be_splitted = ['agency_name', 'state', 'disposition']\n train_data.lat.fillna(method='pad', inplace=True)\n train_data.lon.fillna(method='pad', inplace=True)\n train_data.state.fillna(method='pad', inplace=True)\n\n test_data.lat.fillna(method='pad', inplace=True)\n test_data.lon.fillna(method='pad', inplace=True)\n test_data.state.fillna(method='pad', inplace=True)\n train_data = pd.get_dummies(train_data, columns=feature_to_be_splitted)\n test_data = pd.get_dummies(test_data, columns=feature_to_be_splitted)\n list_to_remove_train = [\n 'balance_due',\n 'collection_status',\n 'compliance_detail',\n 'payment_amount',\n 'payment_date',\n 'payment_status'\n ]\n list_to_remove_all = ['fine_amount', 'violator_name', 'zip_code', 'country', 'city',\n 'inspector_name', 'violation_street_number', 'violation_street_name',\n 'violation_zip_code', 'violation_description',\n 'mailing_address_str_number', 'mailing_address_str_name',\n 'non_us_str_code',\n 'ticket_issued_date', 'hearing_date', 'grafitti_status', 'violation_code']\n train_data.drop(list_to_remove_train, axis=1, inplace=True)\n train_data.drop(list_to_remove_all, axis=1, inplace=True)\n test_data.drop(list_to_remove_all, axis=1, inplace=True)\n train_features = train_data.columns.drop('compliance')\n train_features_set = set(train_features)\n \n for feature in set(train_features):\n if feature not in test_data:\n train_features_set.remove(feature)\n train_features = list(train_features_set)\n \n X_train = train_data[train_features]\n y_train = train_data.compliance\n X_test = test_data[train_features]\n \n scaler = MinMaxScaler()\n X_train_scaled = scaler.fit_transform(X_train)\n X_test_scaled = scaler.transform(X_test)\n \n clf = 
MLPClassifier(hidden_layer_sizes = [100, 10], alpha = 5,\n random_state = 0, solver='lbfgs', verbose=0)\n\n clf.fit(X_train_scaled, y_train)\n\n test_proba = clf.predict_proba(X_test_scaled)[:,1]\n\n \n final_df = pd.read_csv('readonly/test.csv', encoding = \"ISO-8859-1\")\n final_df['compliance'] = test_proba\n final_df.set_index('ticket_id', inplace=True)\n \n return final_df.compliance\nblight_model()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ] ]
c51f4aae943c9cdcf0e4006be7636a14b65ab561
145,326
ipynb
Jupyter Notebook
ai-platform-tf/CAIP/table_tf.ipynb
tuti-org/tuti-repo
5140f122c9160634f1a1690a09165bbe761d8567
[ "Apache-2.0" ]
16
2021-06-14T15:10:26.000Z
2021-11-16T15:32:36.000Z
ai-platform-tf/CAIP/table_tf.ipynb
tuti-org/tuti-repo
5140f122c9160634f1a1690a09165bbe761d8567
[ "Apache-2.0" ]
null
null
null
ai-platform-tf/CAIP/table_tf.ipynb
tuti-org/tuti-repo
5140f122c9160634f1a1690a09165bbe761d8567
[ "Apache-2.0" ]
7
2021-06-14T18:12:40.000Z
2021-11-15T22:26:35.000Z
58.317014
44,804
0.704581
[ [ [ "# ==============================================================================\n# Copyright 2021 Google LLC. This software is provided as-is, without warranty\n# or representation for any use or purpose. Your use of it is subject to your\n# agreement with Google.\n# ==============================================================================\n#\n# Author: Chanchal Chatterjee\n# Email: [email protected]\n#", "_____no_output_____" ], [ "# To these first:\n# 1. Create a VM with TF 2.1\n# 2. Create the following buckets in your project:\n# Root Bucket: BUCKET_NAME = 'tuti_asset' 'gs://$BUCKET_NAME'\n# Model Results Directory: FOLDER_RESULTS = 'tf_models' 'gs://$BUCKET_NAME/$FOLDER_RESULTS'\n# Data directory: FOLDER_DATA = 'datasets' 'gs://$BUCKET_NAME/$FOLDER_DATA'\n# The data: INPUT_FILE_NAME = 'mortgage_structured.csv'\n# 3. In your VM create directory called ./model_dir\n", "_____no_output_____" ], [ "# Uninstall old packages\n#!pip3 uninstall -r requirements-uninstall.txt -y\n", "_____no_output_____" ], [ "# Install packages\n# https://cloud.google.com/ai-platform/training/docs/runtime-version-list\n#!pip3 install -r requirements-rt2.1.txt --user --ignore-installed\n\n# If VM created with TF2.1 Enterprise (no GPUs), all you need to install is cloudml-hypertune\n!pip3 install cloudml-hypertune --user --ignore-installed\n", "Collecting cloudml-hypertune\n Using cached cloudml_hypertune-0.1.0.dev6-py2.py3-none-any.whl\nInstalling collected packages: cloudml-hypertune\nSuccessfully installed cloudml-hypertune-0.1.0.dev6\n" ], [ "# Import packages\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n#0 = all messages are logged (default behavior)\n#1 = INFO messages are not printed\n#2 = INFO and WARNING messages are not printed\n#3 = INFO, WARNING, and ERROR messages are not printed\n\nimport numpy as np\nfrom google.cloud import storage\nimport tensorflow as tf\n#import matplotlib.pyplot as plt\n#from tensorflow.keras import models\n\nprint(\"TF Version= \", tf.__version__)\nprint(\"Keras Version= \", tf.keras.__version__)\n", "TF Version= 2.1.3\nKeras Version= 2.3.0-tf\n" ], [ "# Utility functions\n\n#------\ndef find_best_model_dir(model_dir, offset=1, maxFlag=1):\n # Get a list of model directories\n all_models = ! 
gsutil ls $model_dir\n print(\"\")\n print(\"All Models = \")\n print(*all_models, sep='\\n')\n\n # Check if model dirs exist\n if ((\"CommandException\" in all_models[0]) or (len(all_models) <= 1)):\n print(\"Create the models first.\")\n return \"\"\n\n # Find the best model from checkpoints\n import re\n best_acc = -np.Inf\n if (maxFlag != 1):\n best_acc = np.Inf\n best_model_dir = \"\"\n tup_list = []\n for i in range(1,len(all_models)):\n all_floats = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", all_models[i]) #Find the floats in the string\n cur_acc = -float(all_floats[-offset]) #which item is the model optimization metric\n tup_list.append([all_models[i],cur_acc])\n if (maxFlag*(cur_acc > best_acc) or (1-maxFlag)*(cur_acc < best_acc)):\n best_acc = cur_acc\n best_model_dir = all_models[i]\n if maxFlag:\n tup_list.sort(key=lambda tup: tup[1], reverse=False)\n else:\n tup_list.sort(key=lambda tup: tup[1], reverse=True)\n #for i in range(len(tup_list)):\n # print(tup_list[i][0])\n print(\"Best Accuracy from Checkpoints = \", best_acc)\n print(\"Best Model Dir from Checkpoints = \", best_model_dir)\n \n return best_model_dir\n", "_____no_output_____" ], [ "from oauth2client.client import GoogleCredentials\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nimport json\n\n#------\n# Python module to get the best hypertuned model parameters\ndef pyth_get_hypertuned_parameters(project_name, job_name, maxFlag):\n # Define the credentials for the service account\n #credentials = service_account.Credentials.from_service_account_file(<PATH TO CREDENTIALS JSON>)\n credentials = GoogleCredentials.get_application_default()\n\n # Define the project id and the job id and format it for the api request\n project_id = 'projects/{}'.format(project_name)\n job_id = '{}/jobs/{}'.format(project_id, job_name)\n\n # Build the service\n cloudml = discovery.build('ml', 'v1', cache_discovery=False, credentials=credentials)\n\n # Execute the request and pass in the job id\n request = cloudml.projects().jobs().get(name=job_id)\n\n try:\n response = request.execute()\n # Handle a successful request\n except errors.HttpError as err:\n tf.compat.v1.logging.error('There was an error getting the hyperparameters. 
Check the details:')\n tf.compat.v1.logging.error(err._get_reason())\n\n # Get just the best hp values\n if maxFlag:\n best_model = response['trainingOutput']['trials'][0]\n else:\n best_model = response['trainingOutput']['trials'][-1]\n #print('Best Hyperparameters:')\n #print(json.dumps(best_model, indent=4))\n\n nTrials = len(response['trainingOutput']['trials'])\n for i in range(0,nTrials):\n state = response['trainingOutput']['trials'][i]['state']\n trialId = response['trainingOutput']['trials'][i]['trialId']\n objV = -1\n if (state == 'SUCCEEDED'):\n objV = response['trainingOutput']['trials'][i]['finalMetric']['objectiveValue']\n print('objective=', objV, ' trialId=', trialId, state)\n d = response['trainingOutput']['trials'][i]['hyperparameters']\n for key, value in d.items():\n print(' ', key, value)\n return best_model\n", "_____no_output_____" ] ], [ [ "# Setup", "_____no_output_____" ] ], [ [ "# Get the project id\nproj_id = !gcloud config list project --format \"value(core.project)\"\nproj_id[0]\n", "_____no_output_____" ], [ "USER = 'cchatterj'\nPROJECT_ID = proj_id[0]\nBUCKET_NAME = 'tuti_asset' #Use a unique name\nFOLDER_RESULTS = 'tf_models'\nFOLDER_DATA = 'datasets'\nREGION = 'us-central1'\nZONE1 = 'us-central1-a'\nRUNTIME_VERSION = 2.1\nJOB_DIR = 'gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/jobdir'\nMODEL_DIR = 'gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/models'\nINPUT_FILE_NAME = 'mortgage_structured.csv'\n", "_____no_output_____" ], [ "!gcloud config set project $PROJECT_ID\n!gcloud config set compute/zone $ZONE1\n!gcloud config set compute/region $REGION\n!gcloud config list\n#!gcloud config config-helper --format \"value(configuration.properties.core.project)\"\n", "Updated property [core/project].\nUpdated property [compute/zone].\nUpdated property [compute/region].\n[compute]\nregion = us-central1\nzone = us-central1-a\n[core]\naccount = [email protected]\ndisable_usage_reporting = True\nproject = img-seg-3d\n\nYour active configuration is: [default]\n" ], [ "# Clean old job logs, job packages and models\n!gsutil -m -q rm $JOB_DIR/packages/**\n!gsutil -m -q rm $MODEL_DIR/model**\n", "_____no_output_____" ] ], [ [ "# ML Model", "_____no_output_____" ] ], [ [ "# Create the tf_trainer directory and load the trainer files in it\n!mkdir -p trainer\n", "_____no_output_____" ], [ "%%writefile ./trainer/inputs.py\n\n# Create the train and label lists\nimport math\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\n#------\ndef load_data(input_file):\n # Read the data\n print(input_file)\n #try:\n table_data = pd.read_csv(input_file)\n #except:\n # print(\"Oops! That is invalid filename. Try again...\")\n # return\n\n print(table_data.shape)\n\n # ---------------------------------------\n # Pre-processing\n # ---------------------------------------\n\n # Drop useless columns\n table_data.drop(['LOAN_SEQUENCE_NUMBER'], axis=1, inplace=True)\n\n # Inputs to an XGBoost model must be numeric. 
One hot encoding was \n # previously found to yield better results \n # than label encoding for the particular\n strcols = [col for col in table_data.columns if table_data[col].dtype == 'object']\n table_data = pd.get_dummies(table_data, columns=strcols)\n\n # Train Test Split and write out the train-test files\n\n # Split with a small test size so as to allow our model to train on more data\n X_train, X_test, y_train, y_test = \\\n train_test_split(table_data.drop('TARGET', axis=1), \n table_data['TARGET'],\n stratify=table_data['TARGET'], \n shuffle=True, test_size=0.2\n )\n\n # Remove Null and NAN\n X_train = X_train.fillna(0)\n X_test = X_test.fillna(0)\n \n # Check the shape\n print(\"X_train shape = \", X_train.shape)\n print(\"X_test shape = \", X_test.shape)\n \n y_train_cat = tf.keras.utils.to_categorical(y_train)\n y_test_cat = tf.keras.utils.to_categorical(y_test)\n print(\"y_train shape = \", y_train_cat.shape)\n print(\"y_test shape = \", y_test_cat.shape)\n\n # count number of classes\n #values, counts = np.unique(y_train, return_counts=True)\n #NUM_CLASSES = len(values)\n #print(\"Number of classes \", NUM_CLASSES)\n\n #train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))\n #train_dataset = train_dataset.shuffle(100).batch(batch_size)\n #test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))\n #test_dataset = test_dataset.shuffle(100).batch(batch_size)\n \n return [X_train, X_test, y_train_cat, y_test_cat]\n", "Overwriting ./trainer/inputs.py\n" ], [ "%%writefile ./trainer/model.py\n\nimport tensorflow as tf\nimport numpy as np\n\ndef tf_model(input_dim, output_dim, model_depth: int = 1, dropout_rate: float = 0.02):\n from tensorflow.keras.models import Sequential\n from tensorflow.keras.layers import Dense, Dropout\n\n decr = int((input_dim-output_dim-16)/model_depth) ^ 1\n\n model = Sequential()\n model.add(Dense(128, input_dim=input_dim, activation=tf.nn.relu))\n for i in range(1,model_depth):\n model.add(Dense(input_dim-i*decr, activation=tf.nn.relu, kernel_regularizer='l2'))\n model.add(Dropout(dropout_rate))\n model.add(Dense(output_dim, activation=tf.nn.softmax))\n print(model.summary())\n\n return model\n\n\ndef custom_loss(y_true, y_pred):\n custom_loss = mean(square(y_true - y_pred), axis=-1)\n return custom_loss\n\ndef custom_metric(y_true, y_pred):\n custom_metric = mean(square(y_true - y_pred), axis=-1)\n return custom_metric\n", "Overwriting ./trainer/model.py\n" ] ], [ [ "## Package for distributed training", "_____no_output_____" ] ], [ [ "%%writefile ./setup.py\n\n# python3\n\n# ==============================================================================\n# Copyright 2020 Google LLC. This software is provided as-is, without warranty\n# or representation for any use or purpose. 
Your use of it is subject to your\n# agreement with Google.\n# ==============================================================================\n\n# https://cloud.google.com/ai-platform/training/docs/runtime-version-list\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n#Runtime 2.1\nREQUIRED_PACKAGES = ['tensorflow==2.1.0',\n 'pandas==0.25.3',\n 'scikit-learn==0.22',\n 'google-cloud-storage==1.23.0',\n 'gcsfs==0.6.1',\n 'cloudml-hypertune',\n ]\nsetup(\n name='trainer',\n version='0.1',\n install_requires=REQUIRED_PACKAGES,\n packages=find_packages(),\n include_package_data=True,\n description='Trainer package for Tensorflow Task'\n)\n", "Overwriting ./setup.py\n" ] ], [ [ "## Training functions", "_____no_output_____" ] ], [ [ "%%writefile ./trainer/__init__.py\n# python3\n\n# ==============================================================================\n# Copyright 2020 Google LLC. This software is provided as-is, without warranty\n# or representation for any use or purpose. Your use of it is subject to your\n# agreement with Google.\n# ==============================================================================\n", "Overwriting ./trainer/__init__.py\n" ], [ "%%writefile ./trainer/train.py\n\n# python3\n# ==============================================================================\n# Copyright 2020 Google LLC. This software is provided as-is, without warranty\n# or representation for any use or purpose. Your use of it is subject to your\n# agreement with Google.\n# ==============================================================================\n\nimport os\nimport json\nimport tensorflow as tf\nimport numpy as np\nimport datetime as datetime\nfrom pytz import timezone\nimport hypertune\nimport argparse\nfrom trainer import model\nfrom trainer import inputs\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n#0 = all messages are logged (default behavior)\n#1 = INFO messages are not printed\n#2 = INFO and WARNING messages are not printed\n#3 = INFO, WARNING, and ERROR messages are not printed\n\n\ndef parse_arguments():\n \"\"\"Argument parser.\n Returns:\n Dictionary of arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_depth', default=3, type=int, \n help='Hyperparameter: depth of model')\n parser.add_argument('--dropout_rate', default=0.02, type=float, \n help='Hyperparameter: Drop out rate')\n parser.add_argument('--learning_rate', default=0.0001, type=float, \n help='Hyperparameter: initial learning rate')\n parser.add_argument('--batch_size', default=4, type=int, \n help='batch size of the deep network')\n parser.add_argument('--epochs', default=1, type=int, \n help='number of epochs.')\n parser.add_argument('--model_dir', default=\"\",\n help='Directory to store model checkpoints and logs.')\n parser.add_argument('--input_file', default=\"\",\n help='Directory to store model checkpoints and logs.')\n parser.add_argument('--verbosity', choices=['DEBUG','ERROR','FATAL','INFO','WARN'],\n default='FATAL')\n args, _ = parser.parse_known_args()\n return args\n\n\ndef get_callbacks(args, early_stop_patience: int = 3):\n \"\"\"Creates Keras callbacks for model training.\"\"\"\n\n # Get trialId\n trialId = json.loads(os.environ.get(\"TF_CONFIG\", \"{}\")).get(\"task\", {}).get(\"trial\", \"\")\n if trialId == '':\n trialId = '0'\n print(\"trialId=\", trialId)\n\n curTime = datetime.datetime.now(timezone('US/Pacific')).strftime('%H%M%S')\n \n # Modify model_dir 
paths to include trialId\n model_dir = args.model_dir + \"/checkpoints/cp-\"+curTime+\"-\"+trialId+\"-{val_accuracy:.4f}\"\n log_dir = args.model_dir + \"/log_dir\"\n\n tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir, histogram_freq=1)\n checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(model_dir, monitor='val_accuracy', mode='max', \n verbose=0, save_best_only=True,\n save_weights_only=False)\n earlystop_cb = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)\n\n return [checkpoint_cb, tensorboard_cb, earlystop_cb]\n\n\nif __name__ == \"__main__\":\n\n # ---------------------------------------\n # Parse Arguments\n # ---------------------------------------\n args = parse_arguments()\n #args.model_dir = MODEL_DIR + datetime.datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')\n #args.input_file = 'gs://' + BUCKET_NAME + '/' + FOLDER_DATA + '/' + INPUT_FILE_NAME\n print(args)\n\n # ---------------------------------------\n # Input Data & Preprocessing\n # ---------------------------------------\n print(\"Input and pre-process data ...\")\n # Extract train_seismic, train_label\n train_test_data = inputs.load_data(args.input_file)\n X_train = train_test_data[0]\n X_test = train_test_data[1]\n y_train = train_test_data[2]\n y_test = train_test_data[3]\n\n # ---------------------------------------\n # Train model\n # ---------------------------------------\n print(\"Creating model ...\")\n print(\"x_train\")\n print(X_train.shape[1])\n print(\"y_train\")\n print(y_train.shape[1])\n\n tf_model = model.tf_model(X_train.shape[1], y_train.shape[1], \n model_depth=args.model_depth,\n dropout_rate=args.dropout_rate)\n \n tf_model.compile(optimizer=tf.keras.optimizers.Adam(lr=args.learning_rate),\n loss='mean_squared_error',\n metrics=['accuracy'])\n \n print(\"Fitting model ...\")\n callbacks = get_callbacks(args, 3)\n histy = tf_model.fit(np.array(X_train), y_train, \n epochs=args.epochs,\n batch_size=args.batch_size,\n validation_data=[np.array(X_test),y_test],\n callbacks=callbacks)\n\n # TBD save history for visualization\n\n final_epoch_accuracy = histy.history['accuracy'][-1]\n final_epoch_count = len(histy.history['accuracy'])\n\n print('final_epoch_accuracy = %.6f' % final_epoch_accuracy)\n print('final_epoch_count = %2d' % final_epoch_count)\n", "Overwriting ./trainer/train.py\n" ], [ "%%time\n# Run the training manually\n# Training parameters\nfrom datetime import datetime\nfrom pytz import timezone\n\nMODEL_DEPTH = 2\nDROPOUT_RATE = 0.01\nLEARNING_RATE = 0.00005\nEPOCHS = 1\nBATCH_SIZE = 32\n\nMODEL_DIR_PYTH = MODEL_DIR + datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')\nINPUT_FILE = 'gs://' + BUCKET_NAME + '/' + FOLDER_DATA + '/' + INPUT_FILE_NAME\n\nprint('MODEL_DEPTH = %2d' % MODEL_DEPTH)\nprint('DROPOUT_RATE = %.4f' % DROPOUT_RATE)\nprint('LEARNING_RATE = %.6f' % LEARNING_RATE)\nprint('EPOCHS = %2d' % EPOCHS)\nprint('BATCH_SIZE = %2d' % BATCH_SIZE)\nprint(\"MODEL_DIR =\", MODEL_DIR_PYTH)\nprint(\"INPUT_FILE =\", INPUT_FILE)\n\n# Run training\n! 
python3 -m trainer.train --model_depth=$MODEL_DEPTH --dropout_rate=$DROPOUT_RATE \\\n --learning_rate=$LEARNING_RATE \\\n --epochs=$EPOCHS \\\n --batch_size=$BATCH_SIZE \\\n --model_dir=$MODEL_DIR_PYTH \\\n --input_file=$INPUT_FILE\n", "MODEL_DEPTH = 2\nDROPOUT_RATE = 0.0100\nLEARNING_RATE = 0.000050\nEPOCHS = 1\nBATCH_SIZE = 32\nMODEL_DIR = gs://tuti_asset/tf_models/models/model_05082021_1239\nINPUT_FILE = gs://tuti_asset/datasets/mortgage_structured.csv\nNamespace(batch_size=32, dropout_rate=0.01, epochs=1, input_file='gs://tuti_asset/datasets/mortgage_structured.csv', learning_rate=5e-05, model_depth=2, model_dir='gs://tuti_asset/tf_models/models/model_05082021_1239', verbosity='FATAL')\nInput and pre-process data ...\ngs://tuti_asset/datasets/mortgage_structured.csv\n(104044, 48)\nX_train shape = (83235, 149)\nX_test shape = (20809, 149)\ny_train shape = (83235, 4)\ny_test shape = (20809, 4)\nCreating model ...\nx_train\n149\ny_train\n4\nModel: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 128) 19200 \n_________________________________________________________________\ndense_1 (Dense) (None, 84) 10836 \n_________________________________________________________________\ndropout (Dropout) (None, 84) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 4) 340 \n=================================================================\nTotal params: 30,376\nTrainable params: 30,376\nNon-trainable params: 0\n_________________________________________________________________\nNone\nFitting model ...\ntrialId= 0\nTrain on 83235 samples, validate on 20809 samples\n 32/83235 [..............................] - ETA: 26:19 - loss: 1.0729 - accuracy: 0.8750WARNING:tensorflow:Method (on_train_batch_end) is slow compared to the batch update (0.811241). Check your callbacks.\n83104/83235 [============================>.] 
- ETA: 0s - loss: 0.5106 - accuracy: 0.9242WARNING:tensorflow:From /opt/conda/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\n83235/83235 [==============================] - 26s 315us/sample - loss: 0.5102 - accuracy: 0.9242 - val_loss: 0.2555 - val_accuracy: 0.9288\nfinal_epoch_accuracy = 0.924203\nfinal_epoch_count = 1\nCPU times: user 1.03 s, sys: 180 ms, total: 1.21 s\nWall time: 34.2 s\n" ], [ "# Test with latest saved model\nbest_model_dir_pyth = find_best_model_dir(MODEL_DIR_PYTH+'/checkpoints', offset=1, maxFlag=1)\n#acc = test_saved_model(best_model_dir_pyth, 0)\n", "\nAll Models = \ngs://tuti_asset/tf_models/models/model_05082021_1239/checkpoints/\ngs://tuti_asset/tf_models/models/model_05082021_1239/checkpoints/cp-123928-0-0.9288/\ngs://tuti_asset/tf_models/models/model_05082021_1239/checkpoints/cp-123928-0-0.9288/\nBest Accuracy from Checkpoints = 0.9288\nBest Model Dir from Checkpoints = gs://tuti_asset/tf_models/models/model_05082021_1239/checkpoints/cp-123928-0-0.9288/\n" ], [ "%%time\n\n#***CREATE model_dir in local VM***\n!mkdir -p model_dir\n\nfrom trainer import model\n\n# Copy the model from storage to local memory\n!gsutil -m cp -r $best_model_dir_pyth* ./model_dir\n\n# Load the model\nloaded_model = tf.keras.models.load_model('./model_dir', compile=False)#, \n #custom_objects={\"custom_loss\": model.custom_loss, \"custom_mse\": model.custom_mse})\nprint(\"Signature \", loaded_model.signatures)\nprint(\"\")\n\n# Display model\ntf.keras.utils.plot_model(loaded_model, show_shapes=True)\n", "Copying gs://tuti_asset/tf_models/models/model_05072021_2028/checkpoints/cp-202822-0-0.9289/saved_model.pb...\nCopying gs://tuti_asset/tf_models/models/model_05072021_2028/checkpoints/cp-202822-0-0.9289/variables/variables.data-00000-of-00001...\nCopying gs://tuti_asset/tf_models/models/model_05072021_2028/checkpoints/cp-202822-0-0.9289/variables/variables.index...\nSignature _SignatureMap({'serving_default': <tensorflow.python.saved_model.load._WrapperFunction object at 0x7f8544b75810>})\n\nCPU times: user 365 ms, sys: 78 ms, total: 443 ms\nWall time: 2.02 s\n" ] ], [ [ "------\n# Training", "_____no_output_____" ] ], [ [ "# Create the config directory and load the trainer files in it\n!mkdir -p config\n", "_____no_output_____" ], [ "%%writefile ./config/config.yaml\n\n# python3\n# ==============================================================================\n# Copyright 2020 Google LLC. This software is provided as-is, without warranty\n# or representation for any use or purpose. 
Your use of it is subject to your\n# agreement with Google.\n# ==============================================================================\n\n# https://cloud.google.com/sdk/gcloud/reference/ai-platform/jobs/submit/training#--scale-tier\n# https://www.kaggle.com/c/passenger-screening-algorithm-challenge/discussion/37087\n# https://cloud.google.com/ai-platform/training/docs/using-gpus\n\n#trainingInput:\n# scaleTier: CUSTOM\n# masterType: n1-highmem-16\n# masterConfig:\n# acceleratorConfig:\n# count: 2\n# type: NVIDIA_TESLA_V100\n\n#trainingInput:\n# scaleTier: CUSTOM\n# masterType: n1-highmem-8\n# masterConfig:\n# acceleratorConfig:\n# count: 1\n# type: NVIDIA_TESLA_T4\n\n# masterType: n1-highcpu-16\n# workerType: cloud_tpu\n# workerCount: 1\n# workerConfig:\n# acceleratorConfig:\n# type: TPU_V3\n# count: 8\n\n#trainingInput:\n# scaleTier: CUSTOM\n# masterType: complex_model_m\n# workerType: complex_model_m\n# parameterServerType: large_model\n# workerCount: 6\n# parameterServerCount: 1\n# scheduling:\n# maxWaitTime: 3600s\n# maxRunningTime: 7200s\n\n#trainingInput:\n# runtimeVersion: \"2.1\"\n# scaleTier: CUSTOM\n# masterType: standard_gpu\n# workerCount: 9\n# workerType: standard_gpu\n# parameterServerCount: 3\n# parameterServerType: standard\n\n#trainingInput:\n# scaleTier: BASIC-GPU\n \n#trainingInput:\n# region: us-central1\n# scaleTier: CUSTOM\n# masterType: complex_model_m\n# workerType: complex_model_m_gpu\n# parameterServerType: large_model\n# workerCount: 4\n# parameterServerCount: 2\n\ntrainingInput:\n scaleTier: standard-1\n", "Overwriting ./config/config.yaml\n" ], [ "from datetime import datetime\nfrom pytz import timezone\nJOBNAME_TRN = 'tf_train_'+ USER + '_' + \\\n datetime.now(timezone('US/Pacific')).strftime(\"%m%d%y_%H%M\")\nJOB_CONFIG = \"config/config.yaml\"\nMODEL_DIR_TRN = MODEL_DIR + datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')\nINPUT_FILE = 'gs://' + BUCKET_NAME + '/' + FOLDER_DATA + '/' + INPUT_FILE_NAME\n\nprint(\"Job Name = \", JOBNAME_TRN)\nprint(\"Job Dir = \", JOB_DIR)\nprint(\"MODEL_DIR =\", MODEL_DIR_TRN)\nprint(\"INPUT_FILE =\", INPUT_FILE)\n\n# Training parameters\nMODEL_DEPTH = 3\nDROPOUT_RATE = 0.02\nLEARNING_RATE = 0.0001\nEPOCHS = 2\nBATCH_SIZE = 32\n\nprint('MODEL_DEPTH = %2d' % MODEL_DEPTH)\nprint('DROPOUT_RATE = %.4f' % DROPOUT_RATE)\nprint('LEARNING_RATE = %.6f' % LEARNING_RATE)\nprint('EPOCHS = %2d' % EPOCHS)\nprint('BATCH_SIZE = %2d' % BATCH_SIZE)\n\n", "Job Name = tf_train_cchatterj_050721_2029\nJob Dir = gs://tuti_asset/tf_models/jobdir\nMODEL_DIR = gs://tuti_asset/tf_models/models/model_05072021_2029\nINPUT_FILE = gs://tuti_asset/datasets/mortgage_structured.csv\nMODEL_DEPTH = 3\nDROPOUT_RATE = 0.0200\nLEARNING_RATE = 0.000100\nEPOCHS = 2\nBATCH_SIZE = 32\n" ], [ "# https://cloud.google.com/sdk/gcloud/reference/ai-platform/jobs/submit/training\n\nTRAIN_LABELS = \"mode=train,owner=\"+USER\n\n# submit the training job\n! gcloud ai-platform jobs submit training $JOBNAME_TRN \\\n --package-path $(pwd)/trainer \\\n --module-name trainer.train \\\n --region $REGION \\\n --python-version 3.7 \\\n --runtime-version $RUNTIME_VERSION \\\n --job-dir $JOB_DIR \\\n --config $JOB_CONFIG \\\n --labels $TRAIN_LABELS \\\n -- \\\n --model_depth=$MODEL_DEPTH \\\n --dropout_rate=$DROPOUT_RATE \\\n --learning_rate=$LEARNING_RATE \\\n --epochs=$EPOCHS \\\n --batch_size=$BATCH_SIZE \\\n --model_dir=$MODEL_DIR_TRN \\\n --input_file=$INPUT_FILE\n", "Job [tf_train_cchatterj_050721_2029] submitted successfully.\nYour job is still active. 
You may view the status of your job with the command\n\n $ gcloud ai-platform jobs describe tf_train_cchatterj_050721_2029\n\nor continue streaming the logs with the command\n\n $ gcloud ai-platform jobs stream-logs tf_train_cchatterj_050721_2029\njobId: tf_train_cchatterj_050721_2029\nstate: QUEUED\n" ], [ "# check the training job status\n! gcloud ai-platform jobs describe $JOBNAME_TRN\n", "createTime: '2021-05-08T03:29:28Z'\nendTime: '2021-05-08T03:38:43Z'\netag: F-hA7AzGb84=\njobId: tf_train_cchatterj_050721_2029\nlabels:\n mode: train\n owner: cchatterj\nstartTime: '2021-05-08T03:37:12Z'\nstate: SUCCEEDED\ntrainingInput:\n args:\n - --model_depth=3\n - --dropout_rate=0.02\n - --learning_rate=0.0001\n - --epochs=2\n - --batch_size=32\n - --model_dir=gs://tuti_asset/tf_models/models/model_05072021_2029\n - --input_file=gs://tuti_asset/datasets/mortgage_structured.csv\n jobDir: gs://tuti_asset/tf_models/jobdir\n packageUris:\n - gs://tuti_asset/tf_models/jobdir/packages/4f8ba8fc866713a6e7b8f4202c38ae66bd5913b96f7539c634af9e14cc419131/trainer-0.1.tar.gz\n pythonModule: trainer.train\n pythonVersion: '3.7'\n region: us-central1\n runtimeVersion: '2.1'\n scaleTier: STANDARD_1\ntrainingOutput:\n consumedMLUnits: 0.68\n\nView job in the Cloud Console at:\nhttps://console.cloud.google.com/mlengine/jobs/tf_train_cchatterj_050721_2029?project=img-seg-3d\n\nView logs at:\nhttps://console.cloud.google.com/logs?resource=ml_job%2Fjob_id%2Ftf_train_cchatterj_050721_2029&project=img-seg-3d\n" ], [ "# Print Errors\n#response = ! gcloud logging read \"resource.labels.job_id=$JOBNAME_TRN severity>=ERROR\"\n#for i in range(0,len(response)):\n# if 'message' in response[i]:\n# print(response[i])", "_____no_output_____" ], [ "# Test with latest saved model\nbest_model_dir_trn = find_best_model_dir(MODEL_DIR_TRN+'/checkpoints', offset=1, maxFlag=1)\n#acc = test_saved_model(best_model_dir_trn, 0)\n", "\nAll Models = \ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203733-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203742-0-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203742-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203745-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203747-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203750-0-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203750-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203835-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203742-0-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203750-0-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203733-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203742-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203745-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203747-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203750-0-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203835-0-0.9289/\nBest Accuracy from Checkpoints = 0.9289\nBest Model Dir from Checkpoints = gs://tuti_asset/tf_models/models/model_05072021_2029/checkpoints/cp-203733-0-0.9289/\n" ] ], [ [ 
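"Before moving on to hyperparameter tuning, the job state above was checked by re-running `gcloud ai-platform jobs describe` until it reported SUCCEEDED. The next cell is a small added sketch (not part of the original workflow) showing how the same wait could be done programmatically through the `projects.jobs.get` API; it assumes the `PROJECT_ID` and `JOBNAME_TRN` variables and the default application credentials already used in this notebook, and the call itself is left commented out.", "_____no_output_____" ] ], [ [ "# Hedged sketch (added for illustration, not part of the original notebook):\n# poll the AI Platform job state instead of re-running\n# 'gcloud ai-platform jobs describe' by hand.\n# Assumes PROJECT_ID / JOBNAME_TRN defined above and default application credentials.\nimport time\nfrom googleapiclient import discovery\n\ndef wait_for_job(project_id, job_name, poll_secs=60):\n    # Build the AI Platform (ML Engine) v1 client and poll projects.jobs.get\n    service = discovery.build('ml', 'v1', cache_discovery=False)\n    name = 'projects/{}/jobs/{}'.format(project_id, job_name)\n    while True:\n        state = service.projects().jobs().get(name=name).execute()['state']\n        print('Job', job_name, 'state =', state)\n        if state in ('SUCCEEDED', 'FAILED', 'CANCELLED'):\n            return state\n        time.sleep(poll_secs)\n\n# Uncomment to block until the training job finishes:\n#wait_for_job(PROJECT_ID, JOBNAME_TRN)\n", "_____no_output_____" ] ], [ [ 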
"------\n# Hyper Parameter Tuning", "_____no_output_____" ] ], [ [ "# Create the tf directory and load the trainer files in it\n!cp ./trainer/train.py ./trainer/train_hpt.py\n", "_____no_output_____" ], [ "%%writefile -a ./trainer/train_hpt.py\n\n \"\"\"This method updates a CAIP HPTuning Job with a final metric for the job.\n In TF2.X the user must either use hypertune or a custom callback with\n tf.summary.scalar to update CAIP HP Tuning jobs. This function uses\n hypertune, which appears to be the preferred solution. Hypertune also works\n with containers, without code change.\n Args:\n metric_tag: The metric being optimized. This MUST MATCH the\n hyperparameterMetricTag specificed in the hyperparameter tuning yaml.\n metric_value: The value to report at the end of model training.\n global_step: An int value to specify the number of trainin steps completed\n at the time the metric was reported.\n \"\"\"\n\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='accuracy',\n metric_value=final_epoch_accuracy,\n global_step=final_epoch_count\n )\n", "Appending to ./trainer/train_hpt.py\n" ], [ "%%writefile ./config/hptuning_config.yaml\n\n# python3\n# ==============================================================================\n# Copyright 2020 Google LLC. This software is provided as-is, without warranty\n# or representation for any use or purpose. Your use of it is subject to your\n# agreement with Google.\n# ==============================================================================\n\n# https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs\n# https://cloud.google.com/sdk/gcloud/reference/ai-platform/jobs/submit/training\n\n#trainingInput:\n# scaleTier: CUSTOM\n# masterType: n1-highmem-8\n# masterConfig:\n# acceleratorConfig:\n# count: 1\n# type: NVIDIA_TESLA_T4\n#\n# masterType: standard_p100\n# workerType: standard_p100\n# parameterServerType: standard_p100\n# workerCount: 8\n# parameterServerCount: 1\n# runtimeVersion: $RUNTIME_VERSION\n# pythonVersion: '3.7'\n\n#trainingInput:\n# scaleTier: CUSTOM\n# masterType: complex_model_m\n# workerType: complex_model_m\n# parameterServerType: large_model\n# workerCount: 9\n# parameterServerCount: 3\n# scheduling:\n# maxWaitTime: 3600s\n# maxRunningTime: 7200s\n\n#trainingInput:\n# scaleTier: BASIC-GPU\n\n#trainingInput:\n# scaleTier: CUSTOM\n# masterType: n1-highmem-16\n# masterConfig:\n# acceleratorConfig:\n# count: 2\n# type: NVIDIA_TESLA_V100\n\ntrainingInput:\n scaleTier: STANDARD-1\n hyperparameters:\n goal: MAXIMIZE\n hyperparameterMetricTag: accuracy\n maxTrials: 4\n maxParallelTrials: 4\n enableTrialEarlyStopping: True\n params:\n - parameterName: model_depth\n type: INTEGER\n minValue: 2\n maxValue: 4\n scaleType: UNIT_LINEAR_SCALE\n - parameterName: epochs\n type: INTEGER\n minValue: 1\n maxValue: 3\n scaleType: UNIT_LINEAR_SCALE\n", "Overwriting ./config/hptuning_config.yaml\n" ], [ "from datetime import datetime\nfrom pytz import timezone\n\nJOBNAME_HPT = 'tf_hptrn_' + USER + '_' + \\\n datetime.now(timezone('US/Pacific')).strftime(\"%m%d%y_%H%M\")\nJOB_CONFIG = \"./config/hptuning_config.yaml\"\nMODEL_DIR_HPT = MODEL_DIR + datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')\nINPUT_FILE = 'gs://' + BUCKET_NAME + '/' + FOLDER_DATA + '/' + INPUT_FILE_NAME\n\nprint(\"Job Name = \", JOBNAME_HPT)\nprint(\"Job Dir = \", JOB_DIR)\nprint(\"MODEL_DIR =\", MODEL_DIR_HPT)\nprint(\"INPUT_FILE =\", INPUT_FILE)\n\n# Training parameters\nDROPOUT_RATE = 
0.02\nLEARNING_RATE = 0.0001\nBATCH_SIZE = 32\n", "Job Name = tf_hptrn_cchatterj_050721_2125\nJob Dir = gs://tuti_asset/tf_models/jobdir\nMODEL_DIR = gs://tuti_asset/tf_models/models/model_05072021_2125\nINPUT_FILE = gs://tuti_asset/datasets/mortgage_structured.csv\n" ], [ "# submit the training job\nHT_LABELS = \"mode=hypertrain,owner=\"+USER\n\n! gcloud ai-platform jobs submit training $JOBNAME_HPT \\\n --package-path $(pwd)/trainer \\\n --module-name trainer.train_hpt \\\n --python-version 3.7 \\\n --runtime-version $RUNTIME_VERSION \\\n --region $REGION \\\n --job-dir $JOB_DIR \\\n --config $JOB_CONFIG \\\n --labels $HT_LABELS \\\n -- \\\n --dropout_rate=$DROPOUT_RATE \\\n --learning_rate=$LEARNING_RATE \\\n --batch_size=$BATCH_SIZE \\\n --model_dir=$MODEL_DIR_HPT \\\n --input_file=$INPUT_FILE\n", "Job [tf_hptrn_cchatterj_050721_2125] submitted successfully.\nYour job is still active. You may view the status of your job with the command\n\n $ gcloud ai-platform jobs describe tf_hptrn_cchatterj_050721_2125\n\nor continue streaming the logs with the command\n\n $ gcloud ai-platform jobs stream-logs tf_hptrn_cchatterj_050721_2125\njobId: tf_hptrn_cchatterj_050721_2125\nstate: QUEUED\n" ], [ "# check the hyperparameter training job status\n! gcloud ai-platform jobs describe $JOBNAME_HPT\n", "createTime: '2021-05-08T04:25:37Z'\nendTime: '2021-05-08T04:38:34Z'\netag: wGblmgO1A8o=\njobId: tf_hptrn_cchatterj_050721_2125\nlabels:\n mode: hypertrain\n owner: cchatterj\nstartTime: '2021-05-08T04:25:42Z'\nstate: SUCCEEDED\ntrainingInput:\n args:\n - --dropout_rate=0.02\n - --learning_rate=0.0001\n - --batch_size=32\n - --model_dir=gs://tuti_asset/tf_models/models/model_05072021_2125\n - --input_file=gs://tuti_asset/datasets/mortgage_structured.csv\n hyperparameters:\n enableTrialEarlyStopping: true\n goal: MAXIMIZE\n hyperparameterMetricTag: accuracy\n maxParallelTrials: 4\n maxTrials: 4\n params:\n - maxValue: 4.0\n minValue: 2.0\n parameterName: model_depth\n scaleType: UNIT_LINEAR_SCALE\n type: INTEGER\n - maxValue: 3.0\n minValue: 1.0\n parameterName: epochs\n scaleType: UNIT_LINEAR_SCALE\n type: INTEGER\n jobDir: gs://tuti_asset/tf_models/jobdir\n packageUris:\n - gs://tuti_asset/tf_models/jobdir/packages/481b3b998d8665967bd09bd0076485ebe7510f453bb8a54b7a7a1b7a2e855d35/trainer-0.1.tar.gz\n pythonModule: trainer.train_hpt\n pythonVersion: '3.7'\n region: us-central1\n runtimeVersion: '2.1'\n scaleTier: STANDARD_1\ntrainingOutput:\n completedTrialCount: '4'\n consumedMLUnits: 2.7\n hyperparameterMetricTag: accuracy\n isHyperparameterTuningJob: true\n trials:\n - endTime: '2021-05-08T04:36:00Z'\n finalMetric:\n objectiveValue: 0.933586\n trainingStep: '3'\n hyperparameters:\n epochs: '3'\n model_depth: '4'\n startTime: '2021-05-08T04:25:47.155195628Z'\n state: SUCCEEDED\n trialId: '3'\n - endTime: '2021-05-08T04:35:54Z'\n finalMetric:\n objectiveValue: 0.927038\n trainingStep: '2'\n hyperparameters:\n epochs: '2'\n model_depth: '3'\n startTime: '2021-05-08T04:25:47.155139568Z'\n state: SUCCEEDED\n trialId: '2'\n - endTime: '2021-05-08T04:36:41Z'\n finalMetric:\n objectiveValue: 0.91709\n trainingStep: '1'\n hyperparameters:\n epochs: '1'\n model_depth: '4'\n startTime: '2021-05-08T04:25:47.155245908Z'\n state: SUCCEEDED\n trialId: '4'\n - endTime: '2021-05-08T04:36:51Z'\n finalMetric:\n objectiveValue: 0.127122\n trainingStep: '3'\n hyperparameters:\n epochs: '3'\n model_depth: '3'\n startTime: '2021-05-08T04:25:47.154965218Z'\n state: SUCCEEDED\n trialId: '1'\n\nView job in the Cloud Console 
at:\nhttps://console.cloud.google.com/mlengine/jobs/tf_hptrn_cchatterj_050721_2125?project=img-seg-3d\n\nView logs at:\nhttps://console.cloud.google.com/logs?resource=ml_job%2Fjob_id%2Ftf_hptrn_cchatterj_050721_2125&project=img-seg-3d\n" ], [ "# Print Errors\n#response = ! gcloud logging read \"resource.labels.job_id=$JOBNAME_HPT severity>=ERROR\"\n#for i in range(0,len(response)):\n# if 'message' in response[i]:\n# print(response[i])", "_____no_output_____" ], [ "# Get the best model parameters from Cloud API\nbest_model = pyth_get_hypertuned_parameters(PROJECT_ID, JOBNAME_HPT, 1)\nMODEL_DEPTH = best_model['hyperparameters']['model_depth']\nEPOCHS = best_model['hyperparameters']['epochs']\nprint('')\nprint('Objective=', best_model['finalMetric']['objectiveValue'])\nprint('MODEL_DEPTH =', MODEL_DEPTH)\nprint('EPOCHS =', EPOCHS)\n", "objective= 0.9335856437683105 trialId= 3 SUCCEEDED\n epochs 3\n model_depth 4\nobjective= 0.9270378947257996 trialId= 2 SUCCEEDED\n epochs 2\n model_depth 3\nobjective= 0.9170901775360107 trialId= 4 SUCCEEDED\n epochs 1\n model_depth 4\nobjective= 0.12712199985980988 trialId= 1 SUCCEEDED\n epochs 3\n model_depth 3\n\nObjective= 0.9335856437683105\nMODEL_DEPTH = 4\nEPOCHS = 3\n" ], [ "# Find count of checkpoints\nall_models = ! gsutil ls {MODEL_DIR_HPT+'/checkpoints'}\nprint(\"Total Hypertrained Models=\", len(all_models))\n\n# Test with latest saved model\nbest_model_dir_hyp = find_best_model_dir(MODEL_DIR_HPT+'/checkpoints', offset=1, maxFlag=1)\n#acc = test_saved_model(best_model_dir_hyp, 0)\n\n#import keras.backend as K\n#loaded_model = tf.keras.models.load_model(MODEL_DIR_PARAM+'/checkpoints')\n#print(\"learning_rate=\", K.eval(loaded_model.optimizer.lr))\n#tf.keras.utils.plot_model(loaded_model, show_shapes=True)\n", "Total Hypertrained Models= 36\n\nAll Models = 
\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213445-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213503-2-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213503-3-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213503-3-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213506-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213507-2-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213508-4-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213510-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-2-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213515-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213516-2-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213517-2-0.9281/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213517-2-0.9287/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213518-2-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213518-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213518-3-0.9429/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213519-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213520-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213522-1-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213522-4-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213528-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213536-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213538-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213542-1-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213542-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213600-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213601-4-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213603-4-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213609-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213611-1-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213611-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213612-1-0.9282/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213612-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213503-3-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213507-2-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213542-1-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213611-1-0.0185/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213508-4-0.0186/\ngs://tuti_asset/tf_models/models/model_0507202
1_2125/checkpoints/cp-213518-2-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213522-4-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213601-4-0.0186/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213503-3-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213522-1-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213603-4-0.0340/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213517-2-0.9281/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213612-1-0.9282/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213517-2-0.9287/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213445-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213503-2-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213506-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213510-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-2-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213515-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213516-2-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213518-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213519-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213520-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213528-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213536-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213538-3-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213542-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213600-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213609-4-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213611-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213612-1-0.9289/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213518-3-0.9429/\ngs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/\nBest Accuracy from Checkpoints = 0.9496\nBest Model Dir from Checkpoints = gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/\n" ] ], [ [ "--------\n# Deploy the Model", "_____no_output_____" ] ], [ [ "## https://cloud.google.com/ai-platform/prediction/docs/machine-types-online-prediction#available_machine_types\n# We need 2 versions of the same model:\n# 1. Batch prediction model deployed on a mls1-c1-m2 cluster\n# 2. Online prediction model deployed on a n1-standard-16 cluster\n# Batch prediction does not support GPU and n1-standard-16 clusters.\n\n# Run the Deploy Model section twice:\n# 1. As a BATCH Mode version use MODEL_VERSION = MODEL_VERSION_BATCH\n# 2. 
As a ONLINE Mode version use MODEL_VERSION = MODEL_VERSION_ONLINE\n", "_____no_output_____" ], [ "# Regional End points with python\n#https://cloud.google.com/ai-platform/prediction/docs/regional-endpoints#python", "_____no_output_____" ], [ "MODEL_NAME = \"loan_model_1\"\nMODEL_VERSION_BATCH = \"batch_v1\"\nMODEL_VERSION_ONLINE = \"online_v1\"\n\n#Run this as Batch first then Online\n#MODEL_VERSION = MODEL_VERSION_ONLINE\nMODEL_VERSION = MODEL_VERSION_BATCH\n\n# List all models\n\nprint(\"\\nList of Models in Global Endpoint)\")\n!gcloud ai-platform models list --region=global\n\n# List all versions of model\n\nprint(\"\\nList of Versions in Global Endpoint)\")\n!gcloud ai-platform versions list --model $MODEL_NAME --region=global\n", "\nList of Models in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEFAULT_VERSION_NAME\nkfp_xgb_model kfp_xgb_bst_v0_1\nloan_model_1 online_v1\nmnist_model_1 online_v1\nxgb_model elvinzhu_xgb_bst\n\nList of Versions in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEPLOYMENT_URI STATE\nonline_v1 gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/ READY\n" ], [ "#!gcloud ai-platform versions delete $MODEL_VERSION_BATCH --model $MODEL_NAME --quiet --region=global\n#!gcloud ai-platform models delete $MODEL_NAME --quiet --region=global\n", "_____no_output_____" ], [ "# List all models\n\nprint(\"\\nList of Models in Global Endpoint)\")\n!gcloud ai-platform models list --region=global\n\n# List all versions of model\n\nprint(\"\\nList of Versions in Global Endpoint)\")\n!gcloud ai-platform versions list --model $MODEL_NAME --region=global\n", "\nList of Models in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEFAULT_VERSION_NAME\nkfp_xgb_model kfp_xgb_bst_v0_1\nloan_model_1 online_v1\nmnist_model_1 online_v1\nxgb_model elvinzhu_xgb_bst\n\nList of Versions in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEPLOYMENT_URI STATE\nonline_v1 gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/ READY\n" ], [ "# create the model if it doesn't already exist\nmodelname = !gcloud ai-platform models list | grep -w $MODEL_NAME\nprint(modelname)\nif (len(modelname) <= 1) or ('Listed 0 items.' 
in modelname[1]):\n print(\"Creating model \" + MODEL_NAME)\n # Global endpoint\n !gcloud ai-platform models create $MODEL_NAME --enable-logging --regions $REGION\nelse:\n print(\"Model \" + MODEL_NAME + \" exist\")\n \nprint(\"\\nList of Models in Global Endpoint)\")\n!gcloud ai-platform models list --region=global\n", "['Using endpoint [https://us-central1-ml.googleapis.com/]', 'Listed 0 items.']\nCreating model loan_model_1\nUsing endpoint [https://ml.googleapis.com/]\n\u001b[1;31mERROR:\u001b[0m (gcloud.ai-platform.models.create) Resource in projects [img-seg-3d] is the subject of a conflict: Field: model.name Error: A model with the same name already exists.\n- '@type': type.googleapis.com/google.rpc.BadRequest\n fieldViolations:\n - description: A model with the same name already exists.\n field: model.name\n\nList of Models in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEFAULT_VERSION_NAME\nkfp_xgb_model kfp_xgb_bst_v0_1\nloan_model_1 online_v1\nmnist_model_1 online_v1\nxgb_model elvinzhu_xgb_bst\n" ], [ "%%time\n\nprint(\"Model Name =\", MODEL_NAME)\nprint(\"Model Versions =\", MODEL_VERSION)\n\n# Get a list of model directories\nbest_model_dir = best_model_dir_hyp\nprint(\"Best Model Dir: \", best_model_dir)\n\nMODEL_FRAMEWORK = \"TENSORFLOW\"\nMODEL_DESCRIPTION = \"SEQ_MODEL_1\"\nMODEL_LABELS=\"team=ourteam,phase=test,owner=\"+USER\n\nMACHINE_TYPE = \"mls1-c1-m2\"\nif (MODEL_VERSION == MODEL_VERSION_BATCH):\n MACHINE_TYPE = \"mls1-c1-m2\"\n MODEL_LABELS = MODEL_LABELS+\",mode=batch\"\nif (MODEL_VERSION == MODEL_VERSION_ONLINE):\n MACHINE_TYPE = \"mls1-c1-m2\" #\"n1-standard-32\"\n MODEL_LABELS = MODEL_LABELS+\",mode=online\"\n\n# Deploy the model\n! gcloud beta ai-platform versions create $MODEL_VERSION \\\n --model $MODEL_NAME \\\n --origin $best_model_dir \\\n --runtime-version $RUNTIME_VERSION \\\n --python-version=3.7 \\\n --description=$MODEL_DESCRIPTION \\\n --labels $MODEL_LABELS \\\n --machine-type=$MACHINE_TYPE \\\n --framework $MODEL_FRAMEWORK \\\n --region global\n", "Model Name = loan_model_1\nModel Versions = batch_v1\nBest Model Dir: gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/\nUsing endpoint [https://ml.googleapis.com/]\nCreating version (this might take a few minutes)......done. 
\nCPU times: user 1.21 s, sys: 348 ms, total: 1.56 s\nWall time: 52.2 s\n" ], [ "# List all models\n\nprint(\"\\nList of Models in Global Endpoint)\")\n!gcloud ai-platform models list --region=global\n\nprint(\"\\nList of Models in Regional Endpoint)\")\n!gcloud ai-platform models list --region=$REGION\n\n# List all versions of model\n\nprint(\"\\nList of Versions in Global Endpoint)\")\n!gcloud ai-platform versions list --model $MODEL_NAME --region=global\n\n#print(\"\\nList of Versions in Regional Endpoint)\")\n#!gcloud ai-platform versions list --model $MODEL_NAME --region=$REGION\n", "\nList of Models in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEFAULT_VERSION_NAME\nkfp_xgb_model kfp_xgb_bst_v0_1\nloan_model_1 online_v1\nmnist_model_1 online_v1\nxgb_model elvinzhu_xgb_bst\n\nList of Models in Regional Endpoint)\nUsing endpoint [https://us-central1-ml.googleapis.com/]\nListed 0 items.\n\nList of Versions in Global Endpoint)\nUsing endpoint [https://ml.googleapis.com/]\nNAME DEPLOYMENT_URI STATE\nbatch_v1 gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/ READY\nonline_v1 gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/ READY\n" ] ], [ [ "------\n# Predictions with the deployed model", "_____no_output_____" ] ], [ [ "%%time\n\nfrom trainer import model\n\n# Copy the model from storage to local memory\n!gsutil -m cp -r $best_model_dir_hyp* ./model_dir\n\n# Load the model\nloaded_model = tf.keras.models.load_model('./model_dir', compile=False) #, \n #custom_objects={\"custom_loss\": model.custom_loss,\"custom_mse\": model.custom_mse})\nprint(\"Signature \", loaded_model.signatures)\n\n# Check the model layers\nmodel_layers = [layer.name for layer in loaded_model.layers]\nprint(\"\")\nprint(\"Model Input Layer=\", model_layers[0])\nprint(\"Model Output Layer=\", model_layers[-1])\nprint(\"\")\n", "Copying gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/saved_model.pb...\nCopying gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/variables/variables.data-00000-of-00001...\nCopying gs://tuti_asset/tf_models/models/model_05072021_2125/checkpoints/cp-213513-3-0.9496/variables/variables.index...\nSignature _SignatureMap({'serving_default': <tensorflow.python.saved_model.load._WrapperFunction object at 0x7f8546b9bbd0>})\n\nModel Input Layer= dense\nModel Output Layer= dense_4\n\nCPU times: user 411 ms, sys: 52.8 ms, total: 464 ms\nWall time: 1.95 s\n" ], [ "from trainer import inputs\ninput_file = 'gs://' + BUCKET_NAME + '/' + FOLDER_DATA + '/' + INPUT_FILE_NAME\ntrain_test_data = inputs.load_data(input_file)\nX_test = train_test_data[1]\ny_test = train_test_data[3]\n", "gs://tuti_asset/datasets/mortgage_structured.csv\n(104044, 48)\nX_train shape = (83235, 149)\nX_test shape = (20809, 149)\ny_train shape = (83235, 4)\ny_test shape = (20809, 4)\n" ] ], [ [ "## Online Prediction with python", "_____no_output_____" ] ], [ [ "%%time\n\n# Online Prediction with Python - works for global end points only\n\n# Use MODEL_VERSION_ONLINE not MODEL_VERSION_BATCH\nMODEL_VERSION = MODEL_VERSION_ONLINE\n\nfrom oauth2client.client import GoogleCredentials\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nimport json\n\n#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n#tf.get_logger().setLevel('ERROR')\n\nprint(\"Project ID =\", PROJECT_ID)\nprint(\"Model Name =\", MODEL_NAME)\nprint(\"Model Version =\", 
MODEL_VERSION)\n\nmodel_name = 'projects/{}/models/{}'.format(PROJECT_ID, MODEL_NAME)\nif MODEL_VERSION is not None:\n model_name += '/versions/{}'.format(MODEL_VERSION)\ncredentials = GoogleCredentials.get_application_default()\nservice = discovery.build('ml', 'v1', cache_discovery=False, credentials=credentials)\nprint(\"model_name=\", model_name)\n\npprobas_temp = []\nbatch_size = 32\nn_samples = min(1000,X_test.shape[0])\nprint(\"batch_size=\", batch_size)\nprint(\"n_samples=\", n_samples)\n\nfor i in range(0, n_samples, batch_size):\n j = min(i+batch_size, n_samples)\n print(\"Processing samples\", i, j)\n request = service.projects().predict(name=model_name, \\\n body={'instances': np.array(X_test)[i:j].tolist()})\n try:\n response = request.execute()\n pprobas_temp += response['predictions']\n except errors.HttpError as err:\n # Something went wrong, print out some information.\n tf.compat.v1.logging.error('There was an error getting the job info, Check the details:')\n tf.compat.v1.logging.error(err._get_reason())\n break\n", "Project ID = img-seg-3d\nModel Name = loan_model_1\nModel Version = online_v1\nmodel_name= projects/img-seg-3d/models/loan_model_1/versions/online_v1\nbatch_size= 32\nn_samples= 1000\nProcessing samples 0 32\nProcessing samples 32 64\nProcessing samples 64 96\nProcessing samples 96 128\nProcessing samples 128 160\nProcessing samples 160 192\nProcessing samples 192 224\nProcessing samples 224 256\nProcessing samples 256 288\nProcessing samples 288 320\nProcessing samples 320 352\nProcessing samples 352 384\nProcessing samples 384 416\nProcessing samples 416 448\nProcessing samples 448 480\nProcessing samples 480 512\nProcessing samples 512 544\nProcessing samples 544 576\nProcessing samples 576 608\nProcessing samples 608 640\nProcessing samples 640 672\nProcessing samples 672 704\nProcessing samples 704 736\nProcessing samples 736 768\nProcessing samples 768 800\nProcessing samples 800 832\nProcessing samples 832 864\nProcessing samples 864 896\nProcessing samples 896 928\nProcessing samples 928 960\nProcessing samples 960 992\nProcessing samples 992 1000\nCPU times: user 468 ms, sys: 17.9 ms, total: 486 ms\nWall time: 2.13 s\n" ], [ "# Show the prediction results as an array\n\nnPreds = len(pprobas_temp)\nnClasses = y_test.shape[1]\npprobas = np.zeros((nPreds, nClasses))\nfor i in range(nPreds):\n pprobas[i,:] = np.array(pprobas_temp[i][model_layers[-1]])\npprobas = np.round(pprobas, 2)\npprobas\n", "_____no_output_____" ] ], [ [ "## Batch Prediction with GCLOUD", "_____no_output_____" ] ], [ [ "# Write batch data to file in GCS\n\nimport shutil\n\n# Clean current directory\nDATA_DIR = './batch_data'\nshutil.rmtree(DATA_DIR, ignore_errors=True)\nos.makedirs(DATA_DIR)\n\nn_samples = min(1000,X_test.shape[0])\nnFiles = 10\nnRecsPerFile = min(1000,n_samples//nFiles)\nprint(\"n_samples =\", n_samples)\nprint(\"nFiles =\", nFiles)\nprint(\"nRecsPerFile =\", nRecsPerFile)\n\n# Create nFiles files with nImagesPerFile images each\nfor i in range(nFiles):\n with open(f'{DATA_DIR}/unkeyed_batch_{i}.json', \"w\") as file:\n for z in range(nRecsPerFile):\n print(f'{{\"dense_input\": {np.array(X_test)[i*nRecsPerFile+z].tolist()}}}', file=file)\n #print(f'{{\"{model_layers[0]}\": {np.array(X_test)[i*nRecsPerFile+z].tolist()}}}', file=file)\n #key = f'key_{i}_{z}'\n #print(f'{{\"image\": {X_test_images[z].tolist()}, \"key\": \"{key}\"}}', file=file)\n\n# Write batch data to gcs file\n!gsutil -m cp -r ./batch_data gs://$BUCKET_NAME/$FOLDER_RESULTS/\n \n# Remove old batch 
prediction results\n!gsutil -m rm -r gs://$BUCKET_NAME/$FOLDER_RESULTS/batch_predictions\n", "n_samples = 1000\nnFiles = 10\nnRecsPerFile = 100\nCopying file://./batch_data/unkeyed_batch_2.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_5.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_3.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_6.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_9.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_1.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_8.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_7.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_0.json [Content-Type=application/json]...\nCopying file://./batch_data/unkeyed_batch_4.json [Content-Type=application/json]...\n/ [10/10 files][870.8 KiB/870.8 KiB] 100% Done \nOperation completed over 10 objects/870.8 KiB. \nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.errors_stats-00000-of-00001#1620443695530011...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00000-of-00010#1620443727343913...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00004-of-00010#1620443727345017...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00005-of-00010#1620443727415709...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00001-of-00010#1620443727400692...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00002-of-00010#1620443727341106...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00009-of-00010#1620443727359804...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00003-of-00010#1620443727350994...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00006-of-00010#1620443727341698...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00007-of-00010#1620443727345316...\nRemoving gs://tuti_asset/tf_models/batch_predictions/prediction.results-00008-of-00010#1620443727373599...\n/ [11/11 objects] 100% Done \nOperation completed over 11 objects. 
\n" ], [ "from datetime import datetime\nfrom pytz import timezone\n\nDATA_FORMAT=\"text\" # JSON data format\nINPUT_PATHS='gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/batch_data/*'\nOUTPUT_PATH='gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/batch_predictions'\nPRED_LABELS=\"mode=batch,team=ourteam,phase=test,owner=\"+USER\nSIGNATURE_NAME=\"serving_default\"\n\nJOBNAME_BATCH = 'tf_batch_predict_'+ USER + '_' + \\\n datetime.now(timezone('US/Pacific')).strftime(\"%m%d%y_%H%M\")\n\nprint(\"INPUT_PATHS = \", INPUT_PATHS)\nprint(\"OUTPUT_PATH = \", OUTPUT_PATH)\nprint(\"Job Name = \", JOBNAME_BATCH)\n", "INPUT_PATHS = gs://tuti_asset/tf_models/batch_data/*\nOUTPUT_PATH = gs://tuti_asset/tf_models/batch_predictions\nJob Name = tf_batch_predict_cchatterj_050721_2217\n" ], [ "# Only works with global endpoint\n# Submit batch predict job\n# Use MODEL_VERSION_BATCH not MODEL_VERSION_ONLINE\nMODEL_VERSION = MODEL_VERSION_BATCH\n\n!gcloud ai-platform jobs submit prediction $JOBNAME_BATCH \\\n --model=$MODEL_NAME \\\n --version=$MODEL_VERSION \\\n --input-paths=$INPUT_PATHS \\\n --output-path=$OUTPUT_PATH \\\n --data-format=$DATA_FORMAT \\\n --labels=$PRED_LABELS \\\n --signature-name=$SIGNATURE_NAME \\\n --region=$REGION\n", "Job [tf_batch_predict_cchatterj_050721_2217] submitted successfully.\nYour job is still active. You may view the status of your job with the command\n\n $ gcloud ai-platform jobs describe tf_batch_predict_cchatterj_050721_2217\n\nor continue streaming the logs with the command\n\n $ gcloud ai-platform jobs stream-logs tf_batch_predict_cchatterj_050721_2217\njobId: tf_batch_predict_cchatterj_050721_2217\nstate: QUEUED\n" ], [ "# check the batch prediction job status\n! gcloud ai-platform jobs describe $JOBNAME_BATCH\n", "createTime: '2021-05-08T05:17:55Z'\nendTime: '2021-05-08T05:24:48Z'\netag: VtPn6bp-80g=\njobId: tf_batch_predict_cchatterj_050721_2217\nlabels:\n mode: batch\n owner: cchatterj\n phase: test\n team: ourteam\npredictionInput:\n dataFormat: JSON\n inputPaths:\n - gs://tuti_asset/tf_models/batch_data/*\n outputPath: gs://tuti_asset/tf_models/batch_predictions\n region: us-central1\n runtimeVersion: '2.1'\n signatureName: serving_default\n versionName: projects/img-seg-3d/models/loan_model_1/versions/batch_v1\npredictionOutput:\n nodeHours: 0.13\n outputPath: gs://tuti_asset/tf_models/batch_predictions\n predictionCount: '1000'\nstartTime: '2021-05-08T05:17:56Z'\nstate: SUCCEEDED\n\nView job in the Cloud Console at:\nhttps://console.cloud.google.com/mlengine/jobs/tf_batch_predict_cchatterj_050721_2217?project=img-seg-3d\n\nView logs at:\nhttps://console.cloud.google.com/logs?resource=ml_job%2Fjob_id%2Ftf_batch_predict_cchatterj_050721_2217&project=img-seg-3d\n" ], [ "# Print Errors\n#response = ! 
gcloud logging read \"resource.labels.job_id=$JOBNAME_BATCH severity>=ERROR\"\n#for i in range(0,len(response)):\n# if 'message' in response[i]:\n# print(response[i])\n", "_____no_output_____" ], [ "print(\"errors\")\n!gsutil cat $OUTPUT_PATH/prediction.errors_stats-00000-of-00001\nprint(\"batch prediction results\")\n!gsutil cat $OUTPUT_PATH/prediction.results-00000-of-00010\n", "errors\nbatch prediction results\n{\"dense_4\": [1.0, 1.0406388993958553e-19, 1.4212099800694908e-16, 8.73646490846459e-18]}\n{\"dense_4\": [0.9950793981552124, 0.0003781084669753909, 0.002196859335526824, 0.0023456888739019632]}\n{\"dense_4\": [0.9471668601036072, 0.013947566971182823, 0.02370576374232769, 0.015179785899817944]}\n{\"dense_4\": [0.9389595985412598, 0.014931815676391125, 0.02704501897096634, 0.01906345598399639]}\n{\"dense_4\": [0.9999895095825195, 3.8273580571512866e-07, 6.259979272726923e-06, 3.77660262529389e-06]}\n{\"dense_4\": [0.9965618252754211, 0.0006547770462930202, 0.0018533668480813503, 0.0009299058583565056]}\n{\"dense_4\": [0.9855900406837463, 0.00351746310479939, 0.007098924834281206, 0.003793543903157115]}\n{\"dense_4\": [0.9927776455879211, 0.0010063608642667532, 0.0035145131405442953, 0.0027014994993805885]}\n{\"dense_4\": [0.8834134340286255, 0.012198326177895069, 0.042322881519794464, 0.062065403908491135]}\n{\"dense_4\": [0.9787063002586365, 0.002845538081601262, 0.009469740092754364, 0.008978517726063728]}\n{\"dense_4\": [0.7159351706504822, 0.013366897590458393, 0.080536387860775, 0.19016161561012268]}\n{\"dense_4\": [0.9972729086875916, 0.0002654607524164021, 0.001345631666481495, 0.0011159599525853992]}\n{\"dense_4\": [0.9885297417640686, 0.0012551131658256054, 0.005166104529052973, 0.005049004685133696]}\n{\"dense_4\": [1.0, 8.801104983691133e-11, 6.35297947582103e-09, 2.4914903473671757e-09]}\n{\"dense_4\": [0.9794571995735168, 0.004705007188022137, 0.00989012885838747, 0.005947771482169628]}\n{\"dense_4\": [0.9736256003379822, 0.0064895120449364185, 0.012451340444386005, 0.0074334642849862576]}\n{\"dense_4\": [0.9964019060134888, 0.0007203655550256371, 0.0019345913315191865, 0.0009431246435269713]}\n{\"dense_4\": [0.9845829010009766, 0.0020854007452726364, 0.0070695155300199986, 0.0062621235847473145]}\n{\"dense_4\": [0.9987205862998962, 0.00023088674061000347, 0.0007310876389965415, 0.0003173479053657502]}\n{\"dense_4\": [0.9997119307518005, 1.5057189557410311e-05, 0.00014746136730536819, 0.00012556451838463545]}\n{\"dense_4\": [1.0, 4.937643627300758e-10, 2.2093527007882585e-08, 7.849372707369184e-09]}\n{\"dense_4\": [0.9998797178268433, 5.71216560274479e-06, 6.439750723075122e-05, 5.014264752389863e-05]}\n{\"dense_4\": [0.962269127368927, 0.0017698229057714343, 0.012345613911747932, 0.023615479469299316]}\n{\"dense_4\": [1.0, 3.4846666745589807e-20, 6.664864187091681e-17, 4.508225328281382e-18]}\n{\"dense_4\": [0.9999886751174927, 1.0138343213839107e-06, 7.904184712970164e-06, 2.401995288892067e-06]}\n{\"dense_4\": [0.991414487361908, 0.000759160378947854, 0.0037823952734470367, 0.0040439423173666]}\n{\"dense_4\": [0.9778268933296204, 0.0024036215618252754, 0.009427698329091072, 0.010341757908463478]}\n{\"dense_4\": [0.9982080459594727, 0.00016726148896850646, 0.0009035101975314319, 0.0007211291813291609]}\n{\"dense_4\": [0.9355444312095642, 0.0074008433148264885, 0.025274120271205902, 0.03178056329488754]}\n{\"dense_4\": [0.9999457597732544, 4.003079538961174e-06, 3.3306241675745696e-05, 1.6868338207132183e-05]}\n{\"dense_4\": [0.9988945126533508, 
9.734471677802503e-05, 0.0005716219311580062, 0.00043664901750162244]}\n{\"dense_4\": [0.9767530560493469, 0.002156543079763651, 0.00948976632207632, 0.011600611731410027]}\n{\"dense_4\": [0.2613782286643982, 0.06239756569266319, 0.22189730405807495, 0.45432689785957336]}\n{\"dense_4\": [0.9394073486328125, 0.015068836510181427, 0.026946118101477623, 0.01857774145901203]}\n{\"dense_4\": [0.9966097474098206, 0.0003539365134201944, 0.001666023745201528, 0.0013703546719625592]}\n{\"dense_4\": [0.9998996257781982, 6.660515737166861e-06, 5.824269101140089e-05, 3.545348590705544e-05]}\n{\"dense_4\": [0.9998363256454468, 1.4140381608740427e-05, 9.601439523976296e-05, 5.339899507816881e-05]}\n{\"dense_4\": [0.9964227080345154, 0.0007681692368350923, 0.001926389173604548, 0.0008826203411445022]}\n{\"dense_4\": [0.9819023609161377, 0.0007102465024217963, 0.005979315377771854, 0.011407936923205853]}\n{\"dense_4\": [0.9833847284317017, 0.00241726147942245, 0.0076404716819524765, 0.006557476241141558]}\n{\"dense_4\": [0.4658888280391693, 0.06821835041046143, 0.17714478075504303, 0.2887480556964874]}\n{\"dense_4\": [0.6960043907165527, 0.03882082179188728, 0.10552909970283508, 0.159645676612854]}\n{\"dense_4\": [0.9582505822181702, 0.00950667355209589, 0.01908721588551998, 0.01315553579479456]}\n{\"dense_4\": [0.57773357629776, 0.029471399262547493, 0.12932273745536804, 0.263472318649292]}\n{\"dense_4\": [0.9958224296569824, 0.00017657711578067392, 0.001610358594916761, 0.0023907336872071028]}\n{\"dense_4\": [0.9992313385009766, 5.410410449258052e-05, 0.0003902331809513271, 0.0003243142564315349]}\n{\"dense_4\": [0.9906831979751587, 0.002022305503487587, 0.004731849767267704, 0.002562569919973612]}\n{\"dense_4\": [0.9978749752044678, 0.00044942606473341584, 0.001181050087325275, 0.0004945964319631457]}\n{\"dense_4\": [0.9031904339790344, 0.026648882776498795, 0.04140288010239601, 0.02875780314207077]}\n{\"dense_4\": [0.9970409274101257, 0.0003209956339560449, 0.0014776504831388593, 0.0011604003375396132]}\n{\"dense_4\": [0.9996731281280518, 2.1041232685092837e-05, 0.0001735855475999415, 0.00013216464139986783]}\n{\"dense_4\": [0.9995488524436951, 7.44503631722182e-05, 0.0002719227341003716, 0.00010472663416294381]}\n{\"dense_4\": [0.2757647931575775, 0.15401388704776764, 0.24602949619293213, 0.3241918385028839]}\n{\"dense_4\": [0.998097836971283, 0.00035955343628302217, 0.0010616419604048133, 0.0004809012170881033]}\n{\"dense_4\": [0.9777057766914368, 0.00462777866050601, 0.01063967403024435, 0.007026837673038244]}\n{\"dense_4\": [0.9999991655349731, 2.672371479661706e-08, 6.325943786578136e-07, 2.960880749469652e-07]}\n{\"dense_4\": [0.9311922192573547, 0.0028797152917832136, 0.020729511976242065, 0.045198533684015274]}\n{\"dense_4\": [0.9993436932563782, 3.384433875908144e-05, 0.0003131423145532608, 0.00030930759385228157]}\n{\"dense_4\": [0.993147075176239, 0.0006056115962564945, 0.0030722192022949457, 0.003175125690177083]}\n{\"dense_4\": [0.21888841688632965, 0.024526873603463173, 0.18533819913864136, 0.5712464451789856]}\n{\"dense_4\": [0.9999914169311523, 1.8885280894664902e-07, 4.6472368921968155e-06, 3.732945515366737e-06]}\n{\"dense_4\": [0.9414093494415283, 0.004668118432164192, 0.021192273125052452, 0.03273025527596474]}\n{\"dense_4\": [0.9937894940376282, 0.000555538572371006, 0.0028166791889816523, 0.002838222309947014]}\n{\"dense_4\": [0.8482758402824402, 0.02002708986401558, 0.056399568915367126, 0.07529757171869278]}\n{\"dense_4\": [0.9969450831413269, 0.0006934214616194367, 
0.001662889844737947, 0.0006985218496993184]}\n{\"dense_4\": [0.9853657484054565, 0.0019216181244701147, 0.006689657457172871, 0.006023051682859659]}\n{\"dense_4\": [0.9999674558639526, 1.5100006294233026e-06, 1.897907532111276e-05, 1.2031984624627512e-05]}\n{\"dense_4\": [0.8857852816581726, 0.0069313268177211285, 0.03654671087861061, 0.07073669135570526]}\n{\"dense_4\": [0.9660223722457886, 0.007468124385923147, 0.015744710341095924, 0.010764755308628082]}\n{\"dense_4\": [0.9388401508331299, 0.012131755240261555, 0.02642589434981346, 0.02260223776102066]}\n{\"dense_4\": [0.9735822081565857, 0.004326478578150272, 0.011938325129449368, 0.010153102688491344]}\n{\"dense_4\": [0.7750371098518372, 0.03294631466269493, 0.08208659291267395, 0.10992991179227829]}\n{\"dense_4\": [0.9965576529502869, 0.0006493406253866851, 0.0018546285573393106, 0.0009384456207044423]}\n{\"dense_4\": [0.994271993637085, 0.0005950409104116261, 0.0027012978680431843, 0.002431699074804783]}\n{\"dense_4\": [0.7062795758247375, 0.02644459530711174, 0.09473884850740433, 0.17253698408603668]}\n{\"dense_4\": [0.9999600648880005, 1.430818542758061e-06, 2.1742593162343837e-05, 1.6857500668265857e-05]}\n{\"dense_4\": [0.9971351623535156, 0.000531127443537116, 0.0015605851076543331, 0.0007731079822406173]}\n{\"dense_4\": [0.269513338804245, 0.11255288124084473, 0.24357756972312927, 0.3743562400341034]}\n{\"dense_4\": [0.9448685050010681, 0.015151184983551502, 0.024673372507095337, 0.015306956134736538]}\n{\"dense_4\": [0.9980208873748779, 0.00018530469969846308, 0.000992547837086022, 0.0008011723984964192]}\n{\"dense_4\": [0.9826585054397583, 0.0010723575251176953, 0.0065426090732216835, 0.009726482443511486]}\n{\"dense_4\": [0.26948580145835876, 0.11306258291006088, 0.2438189536333084, 0.37363266944885254]}\n{\"dense_4\": [0.999357283115387, 0.00010972494055749848, 0.0003803546424023807, 0.0001526201085653156]}\n{\"dense_4\": [0.984793484210968, 0.001003748970106244, 0.005913605913519859, 0.008289182558655739]}\n{\"dense_4\": [0.9993600249290466, 0.00010764782200567424, 0.0003793782670982182, 0.00015292283205781132]}\n{\"dense_4\": [0.9997250437736511, 2.074145777442027e-05, 0.0001522126403870061, 0.00010205983562627807]}\n{\"dense_4\": [0.9586782455444336, 0.005181663203984499, 0.01717030443251133, 0.018969852477312088]}\n{\"dense_4\": [0.9979335069656372, 0.00023075826175045222, 0.001065800548531115, 0.0007699057459831238]}\n{\"dense_4\": [0.9810433983802795, 0.0049103326164186, 0.009170581586658955, 0.004875617567449808]}\n{\"dense_4\": [0.9999843835830688, 5.110513825457019e-07, 8.933151548262686e-06, 6.246511020435719e-06]}\n{\"dense_4\": [0.9874180555343628, 0.0024517392739653587, 0.006210142746567726, 0.003920107148587704]}\n{\"dense_4\": [0.9996114373207092, 1.8770571841741912e-05, 0.0001904625678434968, 0.00017937201482709497]}\n{\"dense_4\": [0.957525908946991, 0.009078256785869598, 0.019242551177740097, 0.01415331196039915]}\n{\"dense_4\": [0.9929063320159912, 0.0009004070307128131, 0.0034123556688427925, 0.0027810053434222937]}\n{\"dense_4\": [0.9813647866249084, 0.003623340977355838, 0.008891412056982517, 0.006120408419519663]}\n{\"dense_4\": [0.9897024631500244, 0.0011451425962150097, 0.004692420829087496, 0.004460110794752836]}\n{\"dense_4\": [0.9739224314689636, 0.0032456154003739357, 0.011242972686886787, 0.011589066125452518]}\n{\"dense_4\": [0.9990286827087402, 7.674412336200476e-05, 0.0004963857936672866, 0.0003982596390414983]}\n{\"dense_4\": [0.9982178807258606, 0.00017581999418325722, 
0.0009086029604077339, 0.0006976057775318623]}\n{\"dense_4\": [0.8030107021331787, 0.026807885617017746, 0.07184862345457077, 0.09833275526762009]}\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
c51f5253239e185ae1532e5d2823677c75e59cc9
2,830
ipynb
Jupyter Notebook
Machine Learning/numpy_arrays_condicion.ipynb
Rodrigo-Flores/python3-study
26b9a57b5a7273582c89e4441dd5efb8d40030af
[ "MIT" ]
1
2022-02-20T19:38:37.000Z
2022-02-20T19:38:37.000Z
Machine Learning/numpy_arrays_condicion.ipynb
Rodrigo-Flores/python3-study
26b9a57b5a7273582c89e4441dd5efb8d40030af
[ "MIT" ]
null
null
null
Machine Learning/numpy_arrays_condicion.ipynb
Rodrigo-Flores/python3-study
26b9a57b5a7273582c89e4441dd5efb8d40030af
[ "MIT" ]
null
null
null
17.048193
81
0.446996
[ [ [ "import numpy as np", "_____no_output_____" ], [ "array = np.arange(0,10)\narray", "_____no_output_____" ], [ "condicion = array > 5\ncondicion", "_____no_output_____" ], [ "array[condicion]", "_____no_output_____" ], [ "array6 = array[condicion]\narray6", "_____no_output_____" ], [ "array[array > 5]", "_____no_output_____" ], [ "array[array%2 == 0]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
c51f56f22b049d2d115a10fd36bbf03aedec9046
11,137
ipynb
Jupyter Notebook
Contact Mechanics/Hertz-1 assumptions and geometry.ipynb
mikeWShef/Tribology_notebooks
e87a9db004ede049ef7df8895e2d1927bb3d7968
[ "BSD-2-Clause" ]
2
2019-09-06T13:21:40.000Z
2020-04-25T10:46:58.000Z
Contact Mechanics/Hertz-1 assumptions and geometry.ipynb
mikeWShef/Tribology_notebooks
e87a9db004ede049ef7df8895e2d1927bb3d7968
[ "BSD-2-Clause" ]
null
null
null
Contact Mechanics/Hertz-1 assumptions and geometry.ipynb
mikeWShef/Tribology_notebooks
e87a9db004ede049ef7df8895e2d1927bb3d7968
[ "BSD-2-Clause" ]
null
null
null
42.026415
414
0.576906
[ [ [ "# Hertzian conatct 1\n\n## Assumptions\nWhen two objects are brought into contact they intially touch along a line or at a single point. If any load is transmitted throught the contact the point or line grows to an area. The size of this area, the pressure distribtion inside it and the resulting stresses in each solid require a theory of contact to describe.\n\nThe first satisfactory theory for round bodies was presented by Hertz in 1880 who worked on it during his christmas holiday at the age of twenty three. He assumed that: \n\nThe bodies could be considered as semi infinite elastic half spaces from a stress perspective as the contact area is normally much smaller than the size of the bodies, it is also assumed strains are small. This means that the normal integral equations for surface contact can be used:\n\nThe contact is also assumed to be frictionless so the contact equations reduce to:\n\n$\\Psi_1=\\int_S \\int p(\\epsilon,\\eta)ln(\\rho+z)\\ d\\epsilon\\ d\\eta$ [1]\n\n$\\Psi=\\int_S \\int \\frac{p(\\epsilon,\\eta)}{\\rho}\\ d\\epsilon\\ d\\eta$ [2]\n\n$u_x=-\\frac{1+v}{2\\pi E}\\left((1-2v)\\frac{\\delta\\Psi_1}{\\delta x}+z\\frac{\\delta\\Psi}{\\delta x}\\right) $ [3a]\n\n$u_y=-\\frac{1+v}{2\\pi E}\\left((1-2v)\\frac{\\delta\\Psi_1}{\\delta y}+z\\frac{\\delta\\Psi}{\\delta y}\\right) $ [3b]\n\n$u_z=-\\frac{1+v}{2\\pi E}\\left(2(1-v)\\Psi+z\\frac{\\delta\\Psi}{\\delta z}\\right) $ [3c]", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nImage(\"figures/hertz_probelm reduction.png\")", "_____no_output_____" ] ], [ [ "For the shape of the surfaces: it was asumed that they are smooth on both the micro scale and the macro scale. Assuming that they are smooth on the micro scale means that small irregulararities which would cause discontinuous contact and local pressure variations are ignored. ", "_____no_output_____" ], [ "## Geometry\n\nAssuming that the surfaces are smooth on the macro scale implies that the surface profiles are continuous up to their second derivative. Meaning that the surfaces can be described by polynomials:\n\n$z_1=A_1'x+B_1'y+A_1x^2+B_1y^2+C_1xy+...$ [4]\n\nWith higher order terms being neglected. By choosing the location of the origin to be at the point of contact and the orientation of the xy plane to be inline wiht the principal radii of the surface the equation above reduces to:\n\n$z_1=\\frac{1}{2R'_1}x_1^2+\\frac{1}{2R''_1}y_1^2$ [5]\n\nWhere $R'_1$ and $R''_1$ are the principal radii of the first surface at the origin. 
\n\n### They are the maximum and minimum radii of curvature across all possible cross sections \n\nThe following widget allows you to change the principal radii of a surface and the angle between it and the coordinate axes", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D \nfrom __future__ import print_function\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\nplt.rcParams['figure.figsize'] = [15, 10]", "_____no_output_____" ], [ "@interact(r1=(-10,10),r2=(-10,10),theta=(0,np.pi),continuous_update=False)\ndef plot_surface(r1=5,r2=0,theta=0):\n    \"\"\"\n    Plots a surface given two principal radii and the angle relative to the coordinate axes\n    \n    Parameters\n    ----------\n    r1,r2 : float\n        principal radii\n    theta : float\n        Angle between the plane of the first principal radius and the coordinate axes\n    \"\"\"\n    X,Y=np.meshgrid(np.linspace(-1,1,20),np.linspace(-1,1,20))\n    X_dash=X*np.cos(theta)-Y*np.sin(theta)\n    Y_dash=Y*np.cos(theta)+X*np.sin(theta)\n    r1 = r1 if np.abs(r1)>=1 else float('inf')\n    r2 = r2 if np.abs(r2)>=1 else float('inf')\n    Z=0.5/r1*X_dash**2+0.5/r2*Y_dash**2\n    \n    x1=np.linspace(-1.5,1.5)\n    y1=np.zeros_like(x1)\n    z1=0.5/r1*x1**2\n    \n    y2=np.linspace(-1.5,1.5)\n    x2=np.zeros_like(y2)\n    z2=0.5/r2*y2**2\n    \n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    ax.plot_surface(X, Y, Z)\n    \n    ax.plot((x1*np.cos(-theta)-y1*np.sin(-theta)),x1*np.sin(-theta)+y1*np.cos(-theta),z1)\n    ax.plot((x2*np.cos(-theta)-y2*np.sin(-theta)),x2*np.sin(-theta)+y2*np.cos(-theta),z2)\n    \n    ax.set_xlim(-1, 1)\n    ax.set_ylim(-1, 1)\n    ax.set_zlim(-0.5, 0.5)", "_____no_output_____" ] ], [ [ "A similar equation defines the second surface:\n\n$z_2=-\\left(\\frac{1}{2R'_2}x_2^2+\\frac{1}{2R''_2}y_2^2\\right)$ [6]\n\nThe separation between these surfaces is then given as $h=z_1-z_2$. \n\nBy writing equation 4 and its counterpart on common axes, it is clear that the gap between the surfaces can be written as:\n\n$h=Ax^2+By^2+Cxy$ [7]\n\nAnd by a suitable choice of orientation of the xy plane the C term can be made to equal 0. 
As such, whenever two surfaces with parabolic shape are brought into contact (with no load) the gap between them can be defined as a single parabola:\n\n$h=Ax^2+By^2=\\frac{1}{2R'_{gap}}x^2+\\frac{1}{2R''_{gap}}y^2$ [8]\n\n#### The values $R'_{gap}$ and $R''_{gap}$ are called the principal radii of relative curvature.\n\nThese relate to the principal radii of each of the bodies through the equations below:\n\n$(A+B)=\\frac{1}{2}\\left(\\frac{1}{R'_{gap}}+\\frac{1}{R''_{gap}}\\right)=\\frac{1}{2}\\left(\\frac{1}{R'_1}+\\frac{1}{R''_1}+\\frac{1}{R'_2}+\\frac{1}{R''_2}\\right)$\n\n\n\nThe next widget shows the shape of the gap between two bodies in contact, allowing you to set the principal radii of each body and the angle between them:", "_____no_output_____" ] ], [ [ "@interact(top_r1=(-10,10),top_r2=(-10,10),\n              bottom_r1=(-10,10),bottom_r2=(-10,10),\n              theta=(0,np.pi),continuous_update=False)\ndef plot_two_surfaces(top_r1=2,top_r2=5,bottom_r1=4,bottom_r2=-9,theta=0.3):\n    \"\"\"\n    Plots 2 surfaces and the gap between them\n    \n    Parameters\n    ----------\n    top_r1,top_r2,bottom_r1,bottom_r2 : float\n        The principal radii of the top and bottom surface\n    theta : float\n        The angle between the first principal radii of the surfaces\n    \"\"\"\n    X,Y=np.meshgrid(np.linspace(-1,1,20),np.linspace(-1,1,20))\n    X_dash=X*np.cos(theta)-Y*np.sin(theta)\n    Y_dash=Y*np.cos(theta)+X*np.sin(theta)\n    top_r1 = top_r1 if np.abs(top_r1)>=1 else float('inf')\n    top_r2 = top_r2 if np.abs(top_r2)>=1 else float('inf')\n    bottom_r1 = bottom_r1 if np.abs(bottom_r1)>=1 else float('inf')\n    bottom_r2 = bottom_r2 if np.abs(bottom_r2)>=1 else float('inf')\n    \n    Z_top=0.5/top_r1*X_dash**2+0.5/top_r2*Y_dash**2\n    Z_bottom=-1*(0.5/bottom_r1*X**2+0.5/bottom_r2*Y**2)\n    \n    \n    fig = plt.figure()\n    ax = fig.add_subplot(121, projection='3d')\n    ax.set_title(\"Surfaces\")\n    ax2 = fig.add_subplot(122)\n    ax2.set_title(\"Gap\")\n    ax2.axis(\"equal\")\n    ax2.set_adjustable(\"box\")\n    ax2.set_xlim([-1,1])\n    ax2.set_ylim([-1,1])\n    \n    ax.plot_surface(X, Y, Z_top)\n    ax.plot_surface(X, Y, Z_bottom)\n    \n    if top_r1==top_r2==bottom_r1==bottom_r2==float('inf'):\n        ax2.text(s='Flat surfaces, no gap', x=-0.6, y=-0.1)\n    else:\n        ax2.contour(X,Y,Z_top-Z_bottom)\n    \n    \n    div=((1/top_r2)-(1/top_r1))\n    if div==0:\n        lam=float('inf')\n    else:\n        lam=((1/bottom_r2)-(1/bottom_r1))/div\n    beta=-1*np.arctan((np.sin(2*theta))/(lam+np.cos(2*theta)))/2\n    if beta<=(np.pi/4):\n        x=1\n        y=np.tan(beta)\n    else:\n        x=np.tan(beta)\n        y=1\n    ax2.add_line(Line2D([x,-1*x],[y,-1*y]))\n    beta-=np.pi/2\n    if beta<=(np.pi/4):\n        x=1\n        y=np.tan(beta)\n    else:\n        x=np.tan(beta)\n        y=1\n    ax2.add_line(Line2D([x,-1*x],[y,-1*y]))", "_____no_output_____" ] ], [ [ "
This is infact the case but the proof will have to wait for the next section", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c51f5b3624735b253b8f6101f9e0e0faf004cbdd
33,801
ipynb
Jupyter Notebook
EMAPP/Excel.py.ipynb
dwij2812/Siemens-App
e5d12be006fa728527b230eaa66e740eba02048f
[ "MIT" ]
3
2018-05-21T11:05:29.000Z
2018-05-28T13:38:10.000Z
EMAPP/Excel.py.ipynb
dwij2812/Siemens-App
e5d12be006fa728527b230eaa66e740eba02048f
[ "MIT" ]
301
2018-06-12T23:12:44.000Z
2021-07-28T23:15:06.000Z
EMAPP/Excel.py.ipynb
dwij2812/Siemens-App
e5d12be006fa728527b230eaa66e740eba02048f
[ "MIT" ]
null
null
null
58.479239
14,884
0.718115
[ [ [ "import matplotlib.pyplot as plt\nimport numpy\nfrom numpy import genfromtxt\nimport csv\nimport pandas as pd\nfrom operator import itemgetter\nfrom datetime import*\nfrom openpyxl import load_workbook,Workbook\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font\nimport openpyxl\nfrom win32com import client\nprint('Libraries Imported Successfully......')\n", "Libraries Imported Successfully......\n" ], [ "#######################################################################\ndef nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))\n#######################################################################\ndef add_one_month(t):\n \"\"\"Return a `datetime.date` or `datetime.datetime` (as given) that is\n one month earlier.\n\n Note that the resultant day of the month might change if the following\n month has fewer days:\n\n >>> add_one_month(datetime.date(2010, 1, 31))\n datetime.date(2010, 2, 28)\n \"\"\"\n import datetime\n one_day = datetime.timedelta(days=1)\n one_month_later = t + one_day\n while one_month_later.month == t.month: # advance to start of next month\n one_month_later += one_day\n target_month = one_month_later.month\n while one_month_later.day < t.day: # advance to appropriate day\n one_month_later += one_day\n if one_month_later.month != target_month: # gone too far\n one_month_later -= one_day\n break\n return one_month_later\n#######################################################################\ndef subtract_one_month(t):\n \"\"\"Return a `datetime.date` or `datetime.datetime` (as given) that is\n one month later.\n\n Note that the resultant day of the month might change if the following\n month has fewer days:\n\n >>> subtract_one_month(datetime.date(2010, 3, 31))\n datetime.date(2010, 2, 28)\n \"\"\"\n import datetime\n one_day = datetime.timedelta(days=1)\n one_month_earlier = t - one_day\n while one_month_earlier.month == t.month or one_month_earlier.day > t.day:\n one_month_earlier -= one_day\n return one_month_earlier\n#######################################################################\nprint('Custom Functions Loaded into the Current Path')\n", "Custom Functions Loaded into the Current Path\n" ], [ "values=[]\ndates=[]\ncombine=[]\nwith open('hyatt.csv', 'r') as csvFile:\n reader = csv.reader(csvFile)\n for row in reader:\n values.append(row[1])\n dates.append(row[1])\n combine.append(row)\ncsvFile.close()\n#print(values)\n#print(dates)\n#print(combine)\nprint('Data Loaded into the Program Successfuly')\n", "Data Loaded into the Program Successfuly\n" ], [ "print('The Number of Values are: ',len(values))\nprint('The Number of Dates are:',len(dates))\n", "The Number of Values are: 43184\nThe Number of Dates are: 43184\n" ], [ "combine = sorted(combine, key=itemgetter(0))\n\"\"\"for i in combine:\n print(i)\"\"\"\nfor i in combine:\n m2=i[0]\n m2=datetime.strptime(m2,'%d/%m/%y %I:%M %p')\n i[0]=m2\ncombine = sorted(combine, key=itemgetter(0))\n\"\"\"for i in combine:\n print(i)\"\"\"\nref_min=combine[0][0].date()\nmin_time=datetime.strptime('0000','%H%M').time()\nref_min=datetime.combine(ref_min, min_time)\nprint(type(ref_min))\nref_max=combine[-1][0].date()\nmax_time=datetime.strptime('2359','%H%M').time()\nref_max=datetime.combine(ref_max, max_time)\nprint(ref_max)\nref_max = add_one_month(ref_max)\ndates=[]\nfor i in combine:\n dates.append(i[0])\ni=ref_min\nindices=[]\nwhile i<ref_max:\n k=nearest(dates,i)\n print('The corresponding Lowest time related to this reading is: ',k)\n 
index=dates.index(k)\n print(index)\n indices.append(index)\n i = add_one_month(i)\n print(i)\nprint('The Number of Indices are: ',len(indices))\n", "<class 'datetime.datetime'>\n2018-05-31 23:59:00\nThe corresponding Lowest time related to this reading is: 2018-01-01 00:05:00\n0\n2018-02-01 00:00:00\nThe corresponding Lowest time related to this reading is: 2018-01-31 23:58:00\n8850\n2018-03-01 00:00:00\nThe corresponding Lowest time related to this reading is: 2018-02-28 23:58:00\n16741\n2018-04-01 00:00:00\nThe corresponding Lowest time related to this reading is: 2018-04-01 00:02:00\n25669\n2018-05-01 00:00:00\nThe corresponding Lowest time related to this reading is: 2018-05-01 00:02:00\n34304\n2018-06-01 00:00:00\nThe corresponding Lowest time related to this reading is: 2018-05-31 23:31:00\n43183\n2018-07-01 00:00:00\nThe Number of Indices are: 6\n" ], [ "k=ref_min.date()\nconsump=[]\nlower=[]\nupper=[]\nfor i in range(len(indices)-1):\n r_min=float(values[indices[i]])\n lower.append(r_min)\n r_up=float(values[indices[i+1]])\n upper.append(r_up)\n consumption=r_up-r_min\n consump.append(consumption)\n print('The Consumption on ',k.strftime('%d-%m-%Y'),' is : ',consumption)\n k = add_one_month(k)\n", "The Consumption on 01-01-2018 is : 830609.2499999999\nThe Consumption on 01-02-2018 is : 872401.3700000001\nThe Consumption on 01-03-2018 is : 1093021.5\nThe Consumption on 01-04-2018 is : 1184403.0\nThe Consumption on 01-05-2018 is : 1312053.5\n" ], [ "rate=float(input('Enter the Rate per KWH consumed for Cost Calculation: '))\nk=ref_min.date()\ncost=[]\nfor i in consump:\n r=float(i)*rate\n print('The Cost of Electricity for ',k.strftime('%d-%m-%Y'),' is :',r)\n cost.append(r)\n k = add_one_month(k)\n", "Enter the Rate per KWH consumed for Cost Calculation: 10\nThe Cost of Electricity for 01-01-2018 is : 8306092.499999999\nThe Cost of Electricity for 01-02-2018 is : 8724013.700000001\nThe Cost of Electricity for 01-03-2018 is : 10930215.0\nThe Cost of Electricity for 01-04-2018 is : 11844030.0\nThe Cost of Electricity for 01-05-2018 is : 13120535.0\n" ], [ "print('\\n====================Final Output====================\\n')\nk=ref_min.date()\ndate_list=[]\nwrite=[]\nwrite2=[]\ncust=input('Please Enter The Customer Name: ')\n", "\n====================Final Output====================\n\nPlease Enter The Customer Name: Siemens\n" ], [ "row=['Customer Name: ',cust]\nwrite.append(row)\nrow=['Address Line 1: ',\"3, National Hwy 9, Premnagar, \"]\nwrite.append(row)\nrow=['Address Line 2: ',\"Ashok Nagar, Pune, Maharashtra 411016\"]\nwrite.append(row)\nrow=['']\nwrite.append(row)\nrow=['Electricity Bill Invoice']\nwrite.append(row)\nrow=['From: ',ref_min.date()]\nwrite.append(row)\nrow=['To: ',subtract_one_month(ref_max.date())+timedelta(days=1)]\nwrite.append(row)\nrow=['']\nwrite.append(row)\nrow=['Reading Date','Previous Reading','Present Reading','Consumption','Cost']\nwrite.append(row)\nfor i in range(len(indices)-1):\n row=[]\n row2=[]\n print('--------------------------------------')\n print('Date:\\t\\t',k)\n row.append(k)\n row2.append(k)\n date_list.append(k)\n k = add_one_month(k)\n print('Lower Reading:\\t',lower[i])\n row.append(lower[i])\n row2.append(lower[i])\n print('Upper Reading:\\t',upper[i])\n row.append(upper[i])\n row2.append(upper[i])\n print('Consumption:\\t',consump[i])\n row.append(consump[i])\n row2.append(consump[i])\n print('Cost:\\t\\t',cost[i])\n row.append(cost[i])\n row2.append(cost[i])\n write.append(row)\n write2.append(row2)\n", 
"--------------------------------------\nDate:\t\t 2018-01-01\nLower Reading:\t 940770.88\nUpper Reading:\t 1771380.13\nConsumption:\t 830609.2499999999\nCost:\t\t 8306092.499999999\n--------------------------------------\nDate:\t\t 2018-02-01\nLower Reading:\t 1771380.13\nUpper Reading:\t 2643781.5\nConsumption:\t 872401.3700000001\nCost:\t\t 8724013.700000001\n--------------------------------------\nDate:\t\t 2018-03-01\nLower Reading:\t 2643781.5\nUpper Reading:\t 3736803.0\nConsumption:\t 1093021.5\nCost:\t\t 10930215.0\n--------------------------------------\nDate:\t\t 2018-04-01\nLower Reading:\t 3736803.0\nUpper Reading:\t 4921206.0\nConsumption:\t 1184403.0\nCost:\t\t 11844030.0\n--------------------------------------\nDate:\t\t 2018-05-01\nLower Reading:\t 4921206.0\nUpper Reading:\t 6233259.5\nConsumption:\t 1312053.5\nCost:\t\t 13120535.0\n" ], [ "plt.plot(date_list,consump)\nplt.show()\nplt.savefig('graph.png')\n", "_____no_output_____" ], [ "row=['Total Consumption: ', sum(consump)]\nwrite.append(row)\nrow=['Cost Per Unit: ',rate]\nwrite.append(row)\nrow=['Total Bill Ammount: ',sum(cost)]\nwrite.append(row)\nwith open('output.csv', 'w') as csvFile:\n for row in write:\n writer = csv.writer(csvFile,lineterminator='\\n')\n writer.writerow(row)\ncsvFile.close()\nprint('CSV FILE Generated as Output.csv')\n", "CSV FILE Generated as Output.csv\n" ], [ "###########################################################################\nwb=load_workbook('Book1.xlsx')\nws1=wb.get_sheet_by_name('Sheet1')\n# shs is list\nws1['B2']=cust\nws1['B3']='3, National Hwy 9, Premnagar, '\nws1['B4']='Ashok Nagar, Pune, Maharashtra 411016'\nws1['B6']=ref_min.date()\nws1['E6']=subtract_one_month(ref_max.date())+timedelta(days=1)\nrow=9\ncolumn=1\nfor r in write2:\n column=1\n for i in r:\n ws1.cell(row,column).value=i\n column+=1\n row+=1\n\"\"\"\nrow+=1\ncolumn=1\nws1.cell(row,column).value='Total Consumption: '\nws1.cell(row,column).font=Font(bold=True)\ncolumn+=1\nws1.cell(row,column).value=sum(consump)\ncolumn-=1\nrow+=1\nws1.cell(row,column).value='Total Cost: '\nws1.cell(row,column).font=Font(bold=True)\ncolumn+=1\nws1.cell(row,column).value=sum(cost)\ncolumn-=1\"\"\"\nthick_border_right=Border(right=Side(style='thick'))\nws1['E2'].border=thick_border_right\nws1['E3'].border=thick_border_right\nws1['E4'].border=thick_border_right\nthick_border = Border(left=Side(style='thick'), right=Side(style='thick'), top=Side(style='thick'), bottom=Side(style='thick'))\nws1['A15']='Total Consumption'\nws1['A15'].font=Font(bold=True)\nws1['A15'].border=thick_border\nws1['B15'].border=thick_border\nws1['C15'].border=thick_border\nws1['D15'].border=thick_border\nws1['A16']='Total Cost'\nws1['A16'].font=Font(bold=True)\nws1['A16'].border=thick_border\nws1['B16'].border=thick_border\nws1['C16'].border=thick_border\nws1['D16'].border=thick_border\nws1['E15']=sum(consump)\nws1['E16']=sum(cost)\nimg = openpyxl.drawing.image.Image('logo.jpg')\nimg.anchor='A1'\nws1.add_image(img)\nwb.save('Book1.xlsx')\nprint('Excel Workbook Generated as Book1.xlsx')\n#############################################################################\n", "Excel Workbook Generated as Book1.xlsx\n" ], [ "xlApp = client.Dispatch(\"Excel.Application\")\nbooks = xlApp.Workbooks.Open('E:\\Internship\\Siemens\\EMAPP\\Book1.xlsx')\nws = books.Worksheets[0]\nws.Visible = 1\nws.ExportAsFixedFormat(0, 'E:\\Internship\\Siemens\\EMAPP\\trial.pdf')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f61de6358b525244159b56cbbcc5021bc1c76
18,912
ipynb
Jupyter Notebook
material/PY0101EN-3-2-Loops.ipynb
sergiodealencar/courses
c9d86b27b0185cc82624b01ed76653dbc12554a3
[ "MIT" ]
null
null
null
material/PY0101EN-3-2-Loops.ipynb
sergiodealencar/courses
c9d86b27b0185cc82624b01ed76653dbc12554a3
[ "MIT" ]
null
null
null
material/PY0101EN-3-2-Loops.ipynb
sergiodealencar/courses
c9d86b27b0185cc82624b01ed76653dbc12554a3
[ "MIT" ]
null
null
null
25.317269
716
0.510205
[ [ [ "<center>\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Loops in Python\n\nEstimated time needed: **20** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n- work with the loop statements in Python, including for-loop and while-loop.\n", "_____no_output_____" ], [ "<h1>Loops in Python</h1>\n", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about the loops in the Python Programming Language. By the end of this lab, you'll know how to use the loop statements in Python, including for loop, and while loop.</p>\n", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li>\n <a href=\"#loop\">Loops</a>\n <ul>\n <li><a href=\"range\">Range</a></li>\n <li><a href=\"for\">What is <code>for</code> loop?</a></li>\n <li><a href=\"while\">What is <code>while</code> loop?</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#quiz\">Quiz on Loops</a>\n </li>\n </ul>\n\n</div>\n\n<hr>\n", "_____no_output_____" ], [ "<h2 id=\"loop\">Loops</h2>\n", "_____no_output_____" ], [ "<h3 id=\"range\">Range</h3>\n", "_____no_output_____" ], [ "Sometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by <b>loops</b>. We will look at two types of loops, <code>for</code> loops and <code>while</code> loops.\n\nBefore we discuss loops lets discuss the <code>range</code> object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. If we would like to generate an object that contains elements ordered from 0 to 2 we simply use the following command:\n", "_____no_output_____" ] ], [ [ "# Use the range\n\nrange(3)", "_____no_output_____" ] ], [ [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/range.PNG\" width=\"300\" />\n", "_____no_output_____" ], [ "**_NOTE: While in Python 2.x it returned a list as seen in video lessons, in 3.x it returns a range object._**\n", "_____no_output_____" ], [ "<h3 id=\"for\">What is <code>for</code> loop?</h3>\n", "_____no_output_____" ], [ "The <code>for</code> loop enables you to execute a code block multiple times. For example, you would use this if you would like to print out every element in a list. \nLet's try to use a <code>for</code> loop to print all the years presented in the list <code>dates</code>:\n", "_____no_output_____" ], [ "This can be done as follows:\n", "_____no_output_____" ] ], [ [ "# For loop example\n\ndates = [1982,1980,1973]\nN = len(dates)\n\nfor i in range(N):\n print(dates[i]) ", "1982\n1980\n1973\n" ] ], [ [ "The code in the indent is executed <code>N</code> times, each time the value of <code>i</code> is increased by 1 for every execution. 
The statement executed is to <code>print</code> out the value in the list at index <code>i</code> as shown here:\n", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/LoopsForRange.gif\" width=\"800\" />\n", "_____no_output_____" ], [ "In this example we can print out a sequence of numbers from 0 to 7:\n", "_____no_output_____" ] ], [ [ "# Example of for loop\n\nfor i in range(0, 8):\n print(i)", "0\n1\n2\n3\n4\n5\n6\n7\n" ] ], [ [ "In Python we can directly access the elements in the list as follows: \n", "_____no_output_____" ] ], [ [ "# Exmaple of for loop, loop through list\n\nfor year in dates: \n print(year) ", "1982\n1980\n1973\n" ] ], [ [ "For each iteration, the value of the variable <code>years</code> behaves like the value of <code>dates[i]</code> in the first example:\n", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/LoopsForList.gif\" width=\"800\">\n", "_____no_output_____" ], [ "We can change the elements in a list:\n", "_____no_output_____" ] ], [ [ "# Use for loop to change the elements in list\n\nsquares = ['red', 'yellow', 'green', 'purple', 'blue']\n\nfor i in range(0, 5):\n print(\"Before square \", i, 'is', squares[i])\n squares[i] = 'white'\n print(\"After square \", i, 'is', squares[i])", "Before square 0 is red\nAfter square 0 is white\nBefore square 1 is yellow\nAfter square 1 is white\nBefore square 2 is green\nAfter square 2 is white\nBefore square 3 is purple\nAfter square 3 is white\nBefore square 4 is blue\nAfter square 4 is white\n" ] ], [ [ " We can access the index and the elements of a list as follows: \n", "_____no_output_____" ] ], [ [ "# Loop through the list and iterate on both index and element value\n\nsquares=['red', 'yellow', 'green', 'purple', 'blue']\n\nfor i, square in enumerate(squares):\n print(i, square)", "0 red\n1 yellow\n2 green\n3 purple\n4 blue\n" ] ], [ [ "<h3 id=\"while\">What is <code>while</code> loop?</h3>\n", "_____no_output_____" ], [ "As you can see, the <code>for</code> loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The <code>while</code> loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value.\n", "_____no_output_____" ], [ "Let’s say we would like to iterate through list <code>dates</code> and stop at the year 1973, then print out the number of iterations. 
This can be done with the following block of code:\n", "_____no_output_____" ] ], [ [ "# While Loop Example\n\ndates = [1982, 1980, 1973, 2000]\n\ni = 0\nyear = dates[0]\n\nwhile(year != 1973): \n print(year)\n i = i + 1\n year = dates[i]\n \n\nprint(\"It took \", i ,\"repetitions to get out of loop.\")", "1982\n1980\nIt took 2 repetitions to get out of loop.\n" ] ], [ [ "A while loop iterates merely until the condition in the argument is not met, as shown in the following figure:\n", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/LoopsWhile.gif\" width=\"650\" />\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "<h2 id=\"quiz\">Quiz on Loops</h2>\n", "_____no_output_____" ], [ "Write a <code>for</code> loop the prints out all the element between <b>-5</b> and <b>5</b> using the range function.\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nfor i in range(-5,6):\n print(i)", "-5\n-4\n-3\n-2\n-1\n0\n1\n2\n3\n4\n5\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nfor i in range(-5, 6):\n print(i)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "Print the elements of the following list:\n<code>Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']</code>\nMake sure you follow Python conventions.\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nGenres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']\nfor genre in Genres:\n print(genre)", "rock\nR&B\nSoundtrack\nR&B\nsoul\npop\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nGenres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']\nfor Genre in Genres:\n print(Genre)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "Write a for loop that prints out the following list: <code>squares=['red', 'yellow', 'green', 'purple', 'blue']</code>\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nsquares=['red', 'yellow', 'green', 'purple', 'blue']\nfor square in squares:\n print(square)", "red\nyellow\ngreen\npurple\nblue\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nsquares=['red', 'yellow', 'green', 'purple', 'blue']\nfor square in squares:\n print(square)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "Write a while loop to display the values of the Rating of an album playlist stored in the list <code>PlayListRatings</code>. If the score is less than 6, exit the loop. 
The list <code>PlayListRatings</code> is given by: <code>PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]</code>\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nPlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]\ni = 1\nRating = PlayListRatings[0]\nwhile(i < len(PlayListRatings) and Rating >= 6):\n print(Rating)\n Rating = PlayListRatings[i]\n i = i + 1", "10\n9.5\n10\n8\n7.5\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nPlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]\ni = 1\nRating = PlayListRatings[0]\nwhile(i < len(PlayListRatings) and Rating >= 6):\n print(Rating)\n Rating = PlayListRatings[i]\n i = i + 1\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "Write a while loop to copy the strings <code>'orange'</code> of the list <code>squares</code> to the list <code>new_squares</code>. Stop and exit the loop if the value on the list is not <code>'orange'</code>:\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nsquares = ['orange', 'orange', 'purple', 'blue ', 'orange']\nnew_squares = []\ni = 0\nwhile(i < len(squares) and squares[i] == 'orange'):\n new_squares.append(squares[i])\n i = i + 1\nprint (new_squares)\n", "['orange', 'orange']\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nsquares = ['orange', 'orange', 'purple', 'blue ', 'orange']\nnew_squares = []\ni = 0\nwhile(i < len(squares) and squares[i] == 'orange'):\n new_squares.append(squares[i])\n i = i + 1\nprint (new_squares)\n \n```\n\n</details>\n \n", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>\n", "_____no_output_____" ], [ "## Author\n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a>\n\n## Other contributors\n\n<a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ---------- | ---------------------------------- |\n| 2020-08-26 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n| | | | |\n| | | | |\n\n<hr/>\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
c51f6551cce7b11e57a212027225aaea47962b6c
732,651
ipynb
Jupyter Notebook
Github_user_analysis.ipynb
damminhtien/Github_user_analysis
076d6a50faaae0c55e13b0325a3e19804d84504f
[ "Apache-2.0" ]
1
2021-05-06T07:54:42.000Z
2021-05-06T07:54:42.000Z
Github_user_analysis.ipynb
damminhtien/Github_user_analysis
076d6a50faaae0c55e13b0325a3e19804d84504f
[ "Apache-2.0" ]
2
2019-10-25T06:45:49.000Z
2019-10-26T07:01:10.000Z
Github_user_analysis.ipynb
damminhtien/github_user_analysis
076d6a50faaae0c55e13b0325a3e19804d84504f
[ "Apache-2.0" ]
null
null
null
257.975704
520,244
0.82433
[ [ [ "import json\nimport requests\nimport numpy as np\nimport pandas as pd\nimport pandas as pd\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\nUSERNAME = 'damminhtien'\nPASSWORD = '**********'\nTARGET_USER = 'damminhtien'\n\nauthentication = HTTPBasicAuth(USERNAME, PASSWORD)", "_____no_output_____" ], [ "import uuid\nfrom IPython.display import display_javascript, display_html, display\n\nclass printJSON(object):\n def __init__(self, json_data):\n if isinstance(json_data, dict):\n self.json_str = json.dumps(json_data)\n else:\n self.json_str = json_data\n self.uuid = str(uuid.uuid4())\n\n def _ipython_display_(self):\n display_html('<div id=\"{}\" style=\"height: 100%; width:100%; color:red; background: #2f0743;\"></div>'.format(self.uuid), raw=True)\n display_javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (self.uuid, self.json_str), raw=True)", "_____no_output_____" ], [ "user_data = requests.get('https://api.github.com/users/' + TARGET_USER,\n auth = authentication)\nuser_data = user_data.json()", "_____no_output_____" ], [ "printJSON(user_data)", "_____no_output_____" ], [ "from PIL import Image\nfrom io import BytesIO\nfrom IPython.display import display, HTML\nimport tabulate\n\nresponse = requests.get(user_data['avatar_url'])\nava_img = Image.open(BytesIO(response.content))\ndisplay(ava_img)\n\ntable = [[\"Name:\", user_data['name']],\n [\"Company:\", user_data['company']],\n [\"Bio:\", user_data['bio']],\n [\"Public_repos:\", user_data['public_repos']],\n [\"Number followers:\", user_data['followers']],\n [\"Number following users:\", user_data['following']],\n [\"Date joined:\", user_data['created_at']]]\ndisplay(HTML(tabulate.tabulate(table, tablefmt='html')))", "_____no_output_____" ], [ "url = user_data['repos_url']\npage_no = 1\nrepos_data = []\nwhile (True):\n response = requests.get(url, auth = authentication)\n response = response.json()\n repos_data = repos_data + response\n repos_fetched = len(response)\n if (repos_fetched == 30):\n page_no = page_no + 1\n url = str(user_data['repos_url']) + '?page=' + str(page_no)\n else:\n break\n \n", "_____no_output_____" ], [ "printJSON(repos_data[0])", "_____no_output_____" ], [ "_LANGUAGE_IGNORE = ['HTML', 'CSS', 'Jupyter Notebook']\nLANGUAGE_USED = []\nTIMES_USED = []\nSTAR_COUNT = []\nfor rd in repos_data:\n if rd['fork']: continue\n response = requests.get(rd['languages_url'], auth = authentication)\n response = response.json()\n language_rd = list(response.keys())\n for l in language_rd:\n if l in _LANGUAGE_IGNORE: continue\n if l not in LANGUAGE_USED: \n LANGUAGE_USED.append(l)\n TIMES_USED.append(response[l])\n else:\n TIMES_USED[LANGUAGE_USED.index(l)] += response[l] ", "_____no_output_____" ], [ "language_data = {'Languages': LANGUAGE_USED, 'Times': TIMES_USED} \nlanguage_df = pd.DataFrame(language_data).sort_values(by=['Times'])\nlanguage_df ", "_____no_output_____" ], [ "import plotly.express as px\n\nfig = px.bar(language_df, x='Languages', y='Times',\n color='Languages',\n labels={'pop':'Statistic languages were used by user'}, height=400)\nfig.show()", "_____no_output_____" ], [ "repos_information = []\nfor i, repo in enumerate(repos_data):\n data = []\n data.append(repo['id'])\n data.append(repo['name'])\n data.append(repo['description'])\n data.append(repo['created_at'])\n data.append(repo['updated_at'])\n data.append(repo['owner']['login'])\n 
data.append(repo['license']['name'] if repo['license'] != None else None)\n data.append(repo['has_wiki'])\n data.append(repo['fork'])\n data.append(repo['forks_count'])\n data.append(repo['open_issues_count'])\n data.append(repo['stargazers_count'])\n data.append(repo['watchers_count'])\n data.append(repo['url'])\n data.append(repo['commits_url'].split(\"{\")[0])\n data.append(repo['url'] + '/languages')\n repos_information.append(data)\n \nrepos_df = pd.DataFrame(repos_information, columns = ['Id', 'Name', 'Description', 'Created on', 'Updated on', \n 'Owner', 'License', 'Includes wiki', 'Is Fork','Forks count', \n 'Issues count', 'Stars count', 'Watchers count',\n 'Repo URL', 'Commits URL', 'Languages URL'])\n\nrepos_df", "_____no_output_____" ], [ "repos_df.describe()", "_____no_output_____" ], [ "star_fig = px.bar(repos_df[repos_df['Stars count']>0].sort_values(by=['Stars count']), x='Name', y='Stars count',\n color='Forks count', hover_data=['Description', 'License', 'Owner'],\n labels={'pop':'Statistic languages were used by user'})\nstar_fig.show()", "_____no_output_____" ], [ "url = repos_df.loc[23, 'Commits URL']\nresponse = requests.get(url, auth = authentication)\nresponse = response.json()\nprintJSON(response[0])", "_____no_output_____" ], [ "commits_information = []\nfor i in range(repos_df.shape[0]):\n if repos_df.loc[i, 'Is Fork']: continue\n url = repos_df.loc[i, 'Commits URL']\n page_no = 1\n while (True):\n try:\n response = requests.get(url, auth = authentication)\n response = response.json()\n for commit in response:\n commit_data = []\n commit_data.append(repos_df.loc[i, 'Name'])\n commit_data.append(repos_df.loc[i, 'Id'])\n commit_data.append(commit['commit']['committer']['date'])\n commit_data.append(commit['commit']['message'])\n commits_information.append(commit_data)\n if (len(response) == 30):\n page_no = page_no + 1\n url = repos_df.loc[i, 'Commits URL'] + '?page=' + str(page_no)\n else:\n break\n except:\n print(url + ' fetch failed')\n break\n\ncommits_df = pd.DataFrame(commits_information, columns = ['Name', 'Repo Id', 'Date', 'Message'])", "https://api.github.com/repos/damminhtien/Facial-Landmarks/commits fetch failed\n" ], [ "commits_df", "_____no_output_____" ], [ "print(\"Two most common commit messages: {}\".format(' and '.join(commits_df['Message'].value_counts().index[:2])))", "Two most common commit messages: Update README.md and first commit\n" ], [ "commit_per_repo_fig = px.bar(commits_df.groupby('Name').count().reset_index(level=['Name']), x='Name', y='Message',\n color='Name',\n labels={'pop':'Commit per repositories'})\ncommit_per_repo_fig.show()", "_____no_output_____" ], [ "commits_df['Year'] = commits_df['Date'].apply(lambda x: x.split('-')[0])\nyearly_stats = commits_df.groupby('Year').count()['Repo Id']\nyearly_stats_df = yearly_stats.to_frame().reset_index(level=['Year'])\nyearly_stats_df", "_____no_output_____" ], [ "yearly_stats_fig = px.bar(yearly_stats_df, x='Year', y='Repo Id',\n color='Year',\n labels={'pop':'Commit per Year'})\nyearly_stats_fig.show()", "_____no_output_____" ], [ "commits_df['Month'] = commits_df['Date'].apply(lambda x: x.split('-')[1])\ndef commits_in_month_arr(year): \n n_commits = [0,0,0,0,0,0,0,0,0,0,0,0,0]\n commits_in_month_df = commits_df[commits_df['Year'] == str(year)].groupby('Month').count().reset_index(level=['Month']).drop(['Name', 'Date', 'Message', 'Year'], axis=1)\n for i, m in enumerate(commits_in_month_df['Month']):\n n_commits[int(m)] = n_commits[int(m)] + commits_in_month_df['Repo Id'][i]\n return 
n_commits", "_____no_output_____" ], [ "import plotly.graph_objects as go\n\nMONTHS = ['January', 'Febuary', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\n\n\n# Create traces\nfig = go.Figure()\nfig.add_trace(go.Scatter(x=MONTHS, y=commits_in_month_arr(2017),\n mode='lines+markers',\n name='2017'))\nfig.add_trace(go.Scatter(x=MONTHS, y=commits_in_month_arr(2018),\n mode='lines+markers',\n name='2018'))\nfig.add_trace(go.Scatter(x=MONTHS, y=commits_in_month_arr(2019),\n mode='lines+markers',\n name='2019'))\n\nfig.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c51f6594a1111d93c3719a9a0024cc3d6d58ebb1
20,967
ipynb
Jupyter Notebook
Blob_batch_processing.ipynb
trucabrac/blob_Jan2022
69e30aaab443dc9e35e7fc7a7eeeea088ef02271
[ "CC0-1.0" ]
null
null
null
Blob_batch_processing.ipynb
trucabrac/blob_Jan2022
69e30aaab443dc9e35e7fc7a7eeeea088ef02271
[ "CC0-1.0" ]
null
null
null
Blob_batch_processing.ipynb
trucabrac/blob_Jan2022
69e30aaab443dc9e35e7fc7a7eeeea088ef02271
[ "CC0-1.0" ]
null
null
null
39.937143
1,192
0.522869
[ [ [ "<a href=\"https://colab.research.google.com/github/trucabrac/blob_Jan2022/blob/main/Blob_batch_processing.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport cv2\nimport os\nimport glob\nfrom skimage.filters import gaussian\nfrom skimage import img_as_ubyte\nimport random\nfrom google.colab.patches import cv2_imshow\nimport csv", "_____no_output_____" ] ], [ [ "#1. Read images and store them in an array", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')\n%cd /content/drive/MyDrive/blob-tl/", "Mounted at /content/drive\n/content/drive/MyDrive/blob-tl\n" ], [ "%cd tl-selec/tl10/\n#%cd ../tl9/\n%ls", "/content/drive/MyDrive/blob-tl/tl-selec/tl10\ntl10-018.jpeg tl10-126.jpeg tl10-207.jpeg tl10-315.jpeg\ntl10-045.jpeg tl10-153.jpeg tl10-234.jpeg tl10-342.jpeg\ntl10-072.jpeg tl10-180.jpeg tl10-261.jpeg tl10-369.jpeg\ntl10-099.jpeg tl10-200.jpeg tl10-288.jpeg tl10-396.jpeg\n" ], [ "#################################################\n#Capture all mages into an array and then iterate through each image\n#Normally used for machine learning workflows.\n\nimages_list = []\nimages_names = []\nSIZE = 512\n\npath = \"*.*\"\n#pathOut = \"test-clas/\"\npathOut = \"../tl-contour/\" #folder to create beforehand\n#path = \"tl4-proc/*.*\"\n#pathOut = \"tl4-clas/\"\n#label = 'tl4-'\n\n#First create a stack array of all images\nfor file in glob.glob(path):\n print(file) #just stop here to see all file names printed\n img0= cv2.imread(file, 0) #now, we can read each file since we have the full path\n #img = cv2.cvtColor(imgIn, cv2.IMREAD_GRAYSCALE)\n #img = cv2.resize(img, (SIZE, SIZE))\n images_list.append(img0)\n images_names.append(file)\n \nimages_list = np.array(images_list) ", "tl10-018.jpeg\ntl10-045.jpeg\ntl10-072.jpeg\ntl10-099.jpeg\ntl10-126.jpeg\ntl10-153.jpeg\ntl10-180.jpeg\ntl10-200.jpeg\ntl10-207.jpeg\ntl10-234.jpeg\ntl10-261.jpeg\ntl10-288.jpeg\ntl10-315.jpeg\ntl10-342.jpeg\ntl10-369.jpeg\ntl10-396.jpeg\n" ] ], [ [ "#2. Import Keras ImageNet models", "_____no_output_____" ] ], [ [ "from tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions\nfrom tensorflow.keras.applications.xception import Xception\nfrom tensorflow.keras.applications.xception import preprocess_input, decode_predictions\nfrom tensorflow.keras.applications.mobilenet import MobileNet\nfrom tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions", "_____no_output_____" ] ], [ [ "#3. 
Define functions", "_____no_output_____" ] ], [ [ "font = cv2.FONT_HERSHEY_SIMPLEX\n\ndef getContours(im,word):\n (hc, wc) = im.shape[:2]\n x1 = wc/2\n x2=wc/2\n y1=hc/2\n y2=hc/2\n \n contours,hierarchy = cv2.findContours(im, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area= cv2.contourArea(cnt)\n if area>0:\n imgMid=cv2.drawContours(imgContour,cnt,-1,(0,255,0),3)\n #peri = cv2.arcLength(cnt,True)\n #approx = cv2.approxPolyDP(cnt,) \n \n #draw bounding rectangles\n x,y,w,h = cv2.boundingRect(cnt)\n if x < x1: x1 = x-20\n if x+w > x2: x2 = x+w+20\n if y < y1: y1 = y-20\n if y+h > y2: y2 = y+h+20\n imgFinal = cv2.rectangle(imgMid,(x1,y1),(x2,y2),(255,0,0),6)\n cv2.putText(imgFinal, word, (x1, y1-20), font, 2, (255,0,0), 9)", "_____no_output_____" ], [ "font = cv2.FONT_HERSHEY_SIMPLEX\n\n#alternative without bounding box and word\ndef getContours2(im):\n (hc, wc) = im.shape[:2]\n x1 = wc/2\n x2=wc/2\n y1=hc/2\n y2=hc/2\n \n contours,hierarchy = cv2.findContours(im, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area= cv2.contourArea(cnt)\n if area>0:\n imgMid=cv2.drawContours(imgContour,cnt,-1,(0,255,0),3)\n #peri = cv2.arcLength(cnt,True)\n #approx = cv2.approxPolyDP(cnt,) \n \n imgFinal = imgMid\n", "_____no_output_____" ], [ "#run just once when creating the csv\nf = open('/content/drive/MyDrive/blob-tl/blobtl-clas-prob-top3.csv', 'w')\n# create the csv writer\nwriter = csv.writer(f)\n# write the header\n#header = ['imgpath', 'class']\nwriter.writerow(['imgpath', 'class'])\nf.close()", "_____no_output_____" ], [ "# open the csv file in the write mode to store classifications\nf = open('/content/drive/MyDrive/blob-tl/blobtl-clas-prob-top3.csv', 'a')\n# create the csv writer\nwriter = csv.writer(f)", "_____no_output_____" ] ], [ [ "#4. Process each image -> classif + text", "_____no_output_____" ] ], [ [ "#Process each image in the array\nimg_number = 0\nfor image in range(images_list.shape[0]):\n inImg = images_list[image,:,:] #Grey images. 
For color add another dim.\n #smoothed_image = img_as_ubyte(gaussian(inImg, sigma=5, mode='constant', cval=0.0))\n\n #preprocess img \n dim = (224, 224)\n sizImg = cv2.resize(inImg, dim, interpolation=cv2.INTER_LINEAR)\n #cv2_imshow(sizImg)\n x = cv2.cvtColor(sizImg, cv2.COLOR_GRAY2RGB)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n\n #Classify with Keras models\n #call randomly one of the models\n nModel = random.randint(0,2)\n if nModel==0:\n model_vgg16 = VGG16(weights='imagenet')\n preds = model_vgg16.predict(x)\n if nModel==1:\n model_rn50 = ResNet50(weights='imagenet')\n preds = model_rn50.predict(x)\n if nModel==2:\n model_mobilenet = MobileNet(weights='imagenet')\n preds = model_mobilenet.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # and choose 1 result randomly from the top20\n pred_top = decode_predictions(preds, top=20)[0]\n #print('Predicted:', pred_top)\n topn = random.randint(0,19)\n #print(topn)\n pred_dis = pred_top[topn][1]\n #print('Predicted:', pred_dis)\n\n #find contours and draw bounding box in image\n imgCanny = cv2.Canny(inImg,500,500)\n #cv2_imshow(imgCanny)\n imgContour = cv2.cvtColor(inImg, cv2.COLOR_GRAY2RGB)\n getContours(imgCanny,pred_dis)\n #cv2_imshow(imgContour)\n\n imgPath = pathOut+label+str(img_number)\n #save image with contour\n cv2.imwrite(pathOut+label+str(img_number)+\".jpg\", imgContour)\n #store img ref and class in csv\n #writer.writerow([imgPath, pred_dis])\n\n #increment\n img_number +=1 ", "_____no_output_____" ], [ "# Process images contours only\n#Process each image in the array\nimg_number = 0\nlabel = 'test'\nfor image in range(images_list.shape[0]):\n inImg = images_list[image,:,:] #Grey images. For color add another dim.\n #smoothed_image = img_as_ubyte(gaussian(inImg, sigma=5, mode='constant', cval=0.0))\n\n #find contours and draw them\n imgCanny = cv2.Canny(inImg,300,300)\n #cv2_imshow(imgCanny)\n imgContour = cv2.cvtColor(inImg, cv2.COLOR_GRAY2RGB)\n getContours2(imgCanny)\n #cv2_imshow(imgContour)\n\n #imgPath = pathOut+images_names[image]+str(img_number)\n #save image with contour\n #cv2.imwrite(pathOut+label+str(img_number)+\".jpg\", imgContour)\n cv2.imwrite(pathOut+images_names[image], imgContour)\n #store img ref and class in csv\n #writer.writerow([imgPath, pred_dis])\n\n #increment\n img_number +=1 ", "_____no_output_____" ], [ "#Process Keras classifications only\nimg_number = 0\nfor image in range(images_list.shape[0]):\n inImg = images_list[image,:,:] #Grey images. 
For color add another dim.\n #smoothed_image = img_as_ubyte(gaussian(inImg, sigma=5, mode='constant', cval=0.0))\n\n #preprocess img \n dim = (224, 224)\n sizImg = cv2.resize(inImg, dim, interpolation=cv2.INTER_LINEAR)\n #cv2_imshow(sizImg)\n x = cv2.cvtColor(sizImg, cv2.COLOR_GRAY2RGB)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n\n #Classify with Keras models\n #call randomly one of the models\n nModel = random.randint(0,2)\n if nModel==0:\n model_vgg16 = VGG16(weights='imagenet')\n preds = model_vgg16.predict(x)\n if nModel==1:\n model_rn50 = ResNet50(weights='imagenet')\n preds = model_rn50.predict(x)\n if nModel==2:\n model_mobilenet = MobileNet(weights='imagenet')\n preds = model_mobilenet.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # and choose 1 result randomly from the top20\n pred_top = decode_predictions(preds, top=3)[0]\n #print('Predicted:', pred_top)\n topn = random.randint(0,2)\n #print(topn)\n pred_dis = pred_top[topn][1]\n pred_p = pred_top[topn][2] * 100\n #print('Predicted:', pred_dis)\n\n imgPath = pathOut+label+str(img_number)\n proba = str(pred_p)+'%'\n #save image with contour\n #cv2.imwrite(pathOut+images_names[image]+\".jpg\", imgContour)\n #store img ref and class in csv\n writer.writerow([images_names[image], pred_dis, proba])\n\n #increment\n img_number +=1 ", "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5\n553467904/553467096 [==============================] - 3s 0us/step\n553476096/553467096 [==============================] - 3s 0us/step\nDownloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json\n40960/35363 [==================================] - 0s 0us/step\n49152/35363 [=========================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5\n17227776/17225924 [==============================] - 0s 0us/step\n17235968/17225924 [==============================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5\n102973440/102967424 [==============================] - 1s 0us/step\n102981632/102967424 [==============================] - 1s 0us/step\nWARNING:tensorflow:5 out of the last 5 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7f56b26bddd0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\nWARNING:tensorflow:6 out of the last 6 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7f56b2611830> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. 
For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" ], [ "# close the file\nf.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c51f7097d6b65b8262e318944db6f2d4247859ee
30,837
ipynb
Jupyter Notebook
soluciones/ca.nova/Tarea 3/NovaCristian_Tarea3.ipynb
policelula/FISI2028-202120
fa3578d74a79b395ac03cddc1d44fba2fa5ae1fc
[ "MIT" ]
null
null
null
soluciones/ca.nova/Tarea 3/NovaCristian_Tarea3.ipynb
policelula/FISI2028-202120
fa3578d74a79b395ac03cddc1d44fba2fa5ae1fc
[ "MIT" ]
null
null
null
soluciones/ca.nova/Tarea 3/NovaCristian_Tarea3.ipynb
policelula/FISI2028-202120
fa3578d74a79b395ac03cddc1d44fba2fa5ae1fc
[ "MIT" ]
null
null
null
52.354839
13,768
0.694393
[ [ [ "import numpy as np\nimport pandas as pd\nimport scipy as sp\nimport sklearn as sl\nimport seaborn as sns; sns.set()\nimport matplotlib as mpl\nfrom sklearn.linear_model import LinearRegression\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Tarea 3: Encuentre la regresión\n\nUd recibe unos datos $x$ y $y$ cómo se muestran a continuación. Ud debe responder cuatro preguntas a partir de estos datos. Suponga que ud tiene un modelo tal que $y=f(x)$ más aún desconoce $f$.", "_____no_output_____" ] ], [ [ "df = pd.read_pickle('ex1.gz')\nsns.scatterplot(x='x',y='y',data=df)\nplt.show()\ndf", "_____no_output_____" ] ], [ [ "## (A) Pendiente e intercepto\nDetermine la pendiente de los datos en el intervalo $[0,1.5]$ y el valor del intercepto con el eje $y$. Es decir, $f(0)=?$. ¿Cuál es el valor de $r^2$?", "_____no_output_____" ] ], [ [ "k = df[(df.x >= 0) & (df.x <= 1.5)]\nk\nx1= k['x'].values.reshape(-1,1)\nx2= k['y'].values.reshape(-1,1)\n\nmodelo = LinearRegression()\nmodelo.fit(x1,x2)\nintercepto = modelo.intercept_\nm = modelo.coef_\nr2 = modelo.score(x1,x2)\n\nprint(\"Intercepto: \", intercepto)\nprint(\"Pendiente: \", m)\nprint(\"R^2: \", r2)", "Intercepto: [0.18270691]\nPendiente: [[0.81638696]]\nR^2: 0.9316416262309236\n" ] ], [ [ "## (B) Regresión polinomial\nSuponga que quiere realizar la siguiente regresión polinomial,\n$$y=\\beta_1+\\beta_2x+\\beta_2x^2+\\beta_2x^3+\\beta_2x^4+\\beta_2x^5.$$\nPlantee la función de costo que le permita calcular los coeficientes y calcule $\\beta_1$, $\\beta_2$, $\\beta_3$, $\\beta_4$, y $\\beta_5$. ¿Cuál es el $r^2$?\n\nCalcule $f(0)$ y compare con los resultados anteriores", "_____no_output_____" ] ], [ [ "def L(x,A,b):\n m,n = A.shape\n X = np.matrix(x).T\n DeltaB=(A*X-b)\n \n return (DeltaB.T*DeltaB)[0,0]/m ", "_____no_output_____" ], [ "Y = df.loc[:, ['y']]\nY\n\nX = df.loc[:, ['x']].rename(columns={'x': 'x1'})\nX.insert(0, 'x0', 1)\nX['x2'] = X['x1']*X['x1']\nX['x3'] = X['x1']**3\nX['x4'] = X['x1']**4\nX['x5'] = X['x1']**5\nXi = X.to_numpy()\nYi = Y.to_numpy()", "_____no_output_____" ], [ "op = sp.optimize.minimize(fun=L,x0=np.zeros(Xi.shape[1]), args = (Xi,Yi), tol=1e-10)\nprint(\"El valor para los coeficientes es:\",op['x'])\nprint(\"El valor para f(0):\",op['x'][0])", "El valor para los coeficientes es: [ 0.35735852 -0.8426089 3.78479752 -3.00339415 0.85844665 -0.08305836]\nEl valor para f(0): 0.3573585206136642\n" ], [ "y = df[\"y\"]\nb = np.linspace(0,4,100)\ndef f(a,b,c,d,e,f,x):\n return a*x**5 + b*x**4 + c*x**3 + d*x**2 + e*x + f\n\np = f(op['x'][5],op['x'][4],op['x'][3],op['x'][2],op['x'][1],op['x'][0],b)\nr2 = 1-np.sum((p-y)**2)/np.sum((y-y.mean())**2)\nr2\n", "_____no_output_____" ], [ "print(\"Es posible apreciar un resultado similar al metodo de la polinomial exacta, evidenciando que ambos metodos poseen una buena precision con solo algunas variaciones en cifras decimales\")", "Es posible apreciar un resultado similar al metodo de la polinomial exacta, evidenciando que ambos metodos poseen una buena precision con solo algunas variaciones en cifras decimales\n" ] ], [ [ "## (C) Regresión polinomial exacta\nResulta, que cuando se quiere hacer alguna regresión polinomial esta se puede hacer de forma exacta. ¿Cómo? Suponga que ud va a considerar que su problema en lugar de tener $1$ variable ($x$) tiene $n+1$, siendo $n$ el orden del polinomio a ajustar. 
Es decir, sus nuevas variables van a ser $\\{x_0,\\,x_1,\\,x_2,\\,x_3,\\dots,\\,x_n\\}$ definiendo $x_j=x^j$. Así pues, siguiendo el mismo procedimiento para la regresión lineal multidimensional que realizamos para el ejercicio de datos inmobiliarios, puede encontrar los valores de los coeficientes $\\beta_1$, $\\beta_2$, $\\beta_3$, $\\beta_4$, y $\\beta_5$. Encuentre estos valores y compare con los resultados en la sección **(B)**.\n\nCalcule $f(0)$ y compare con los resultados anteriores.\n\n> Si ud se pregunta si esto es posible la respuesta es sí. Inclusive, esto se puede extender a cualquier a cualquier conjunto de funciones, tal que $x_j=f_j(x)$, que represente un conjunto \"linealmente independiente\" (¡Me estoy adelantando a *Fourier*!). Para quienes quieran explorar algunas curiosidades matemáticas, cuando $n+1$ es igual al número de puntos o valores de $x$ (y todos diferentes) la matriz es siempre invertible y resulta ser la inversa de una matriz de Vandermonde.", "_____no_output_____" ] ], [ [ "rt = np.linalg.inv(Xi.T @ Xi) @ Xi.T @ Yi\nb0, b1, b2, b3, b4, b5 = rt\ncoefs = str(b0) +','+ str(b1) + ',' + str(b2) + ',' + str(b3) + ',' + str(b4) + ',' + str(b5)\n\nprint(f\"los coeficientes son = {coefs}\")\nprint(f\"El valor de f(0) es :\", rt[0])", "los coeficientes son = [0.35644668],[-0.83588268],[3.77276401],[-2.99526012],[0.85614705],[-0.08282854]\nEl valor de f(0) es : [0.35644668]\n" ], [ "print(\"Se confirma como el valor para f(0) resulta muy preciso al ser comparado con valor de la regresión polinomica y a su vez resulta ser exacto si analizamos lo esperado por la grafica \")", "Se confirma como el valor para f(0) resulta muy preciso al ser comparado con valor de la regresión polinomica y a su vez resulta ser exacto si analizamos lo esperado por la grafica \n" ] ], [ [ "## (D) Regresión a un modelo teórico\n\nSuponga que su modelo teórico es el siguiente:\n$$y=\\frac{a}{\\left[(x-b)^2+c\\right]^\\gamma}.$$\nHalle $a$, $b$, $c$ y $\\gamma$.\n\nCalcule $f(0)$ y compare con los resultados anteriores", "_____no_output_____" ] ], [ [ "def f(i,x):\n return (i[0])/((x-i[1])**2 + i[2])**i[3]\n\ndef L(i2,x,y): \n dy = f(i2,x) - y\n return np.dot(dy,dy)/len(y)\n", "_____no_output_____" ], [ "x = df[\"x\"]\nop = sp.optimize.minimize(fun=L, x0=np.array([0,0,1,0]), args = (x,y), method='L-BFGS-B', tol=1e-8)\n\nprint(\"Los valores de a,b,c y omega son\",op['x'])\nprint(\"El valor de f(0) es:\", f(op.x,0))", "Los valores de a,b,c y omega son [0.68739469 1.39239691 0.49563981 0.93662845]\nEl valor de f(0) es: 0.29874386057083674\n" ], [ "print(\"Con respecto a los dos anteriores metodos utilizados, este nos arrojo un valor de 0.2987 evidenciando menor presicion y exactitud, por lo que podriamos decir que este metodo es el menos optimo\")", "Con respecto a los dos anteriores metodos utilizados, este nos arrojo un valor de 0.2987 evidenciando menor presicion y exactitud, por lo que podriamos decir que este metodo es el menos optimo\n" ], [ " ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c51f7260ab1881df3544a6bf93dbc1e0d1eeb4da
219,569
ipynb
Jupyter Notebook
1.2 - Missing Data.ipynb
SPWLA-ORG/spwla2021_ml_workshop
32aee12f97b028962fe557a55f76eced5d5fd7d3
[ "MIT" ]
4
2021-05-12T13:49:08.000Z
2021-12-01T02:15:50.000Z
1.2 - Missing Data.ipynb
SPWLA-ORG/spwla2021_ml_workshop
32aee12f97b028962fe557a55f76eced5d5fd7d3
[ "MIT" ]
null
null
null
1.2 - Missing Data.ipynb
SPWLA-ORG/spwla2021_ml_workshop
32aee12f97b028962fe557a55f76eced5d5fd7d3
[ "MIT" ]
6
2021-05-12T13:46:13.000Z
2021-09-16T08:42:52.000Z
177.932739
102,200
0.865432
[ [ [ "# Missing Data\n\nMissing values are a common problem within datasets. Data can be missing for a number of reasons, including tool/sensor failure, data vintage, telemetry issues, stick and pull, and omissing by choice. \n\nThere are a number of tools we can use to identify missing data, some of these methods include:\n\n- Pandas Dataframe summaries\n- MissingNo Library\n- Visualisations\n\nHow to handle missing data is controversial, some argue that data should be filled in using techniques such as: mean imputation, regression imputations, whereas others argue that it is best to remove that data to prevent adding further uncertainty to the final results.\n\nIn this notebook, we are going to use: Variable Discarding and Listwise Deletion.", "_____no_output_____" ], [ "# Importing Libraries & Data\nThe first step is to import the libraries that we will require for working with the data. \nFor this notebook, we will be using:\n- pandas for loading and storing the data\n- matplotlib and seaborn for visualising the data\n- numpy for a number of calculation methods\n- missingno to visualise where missing data exists", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport missingno as msno\nimport numpy as np", "_____no_output_____" ] ], [ [ "Next, we will load the data in using the pandas `read_csv` function and assign it to the variable `df`. The data will now be stored within a structured object known as a dataframe.", "_____no_output_____" ] ], [ [ "df = pd.read_csv('data/spwla_volve_data.csv')", "_____no_output_____" ] ], [ [ "As seen in the previous notebook, we can call upon a few methods to check the data quality. \n\nThe `.head()` method allows us to view the first 5 rows of the dataframe.", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "The describe method provides us some summary statistics. To identify if we have missing data using this method, we need to look at the count row. If we assume that MD (measured depth) is the most complete column, we have 27,845 data points. Now, if we look at DT and DTS, we can see we only have 5,493 and 5,420 data points respectively. A number of other columns also have lower numbers, namely: RPCELM, PHIF, SW, VSH.", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "To gain a clearer insight, we can call upon the `info()` method to see how many non-null values exist for each column. Right away we can see the ones highlighted previously have lower numbers of non-null values.", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 27845 entries, 0 to 27844\nData columns (total 16 columns):\nwellName 27845 non-null object\nMD 27845 non-null float64\nBS 27845 non-null float64\nCALI 27845 non-null float64\nDT 5493 non-null float64\nDTS 5420 non-null float64\nGR 27845 non-null float64\nNPHI 27845 non-null float64\nRACEHM 27845 non-null float64\nRACELM 27845 non-null float64\nRHOB 27845 non-null float64\nRPCEHM 27845 non-null float64\nRPCELM 27600 non-null float64\nPHIF 27736 non-null float64\nSW 27736 non-null float64\nVSH 27844 non-null float64\ndtypes: float64(15), object(1)\nmemory usage: 3.4+ MB\n" ] ], [ [ "## Using missingno to Visualise Data Sparsity\n\nThe missingno library is designed to take a dataframe and allow you to visualise where gaps may exist. \n\nWe can simply call upon the `.matrix()` method and pass in the dataframe object. 
When we do, we generate a graphical view of the dataframe.\n\nIn the plot below, we can see that there are significant gaps within the DT and DTS columns, with minor gaps in the RPCELM, PHIF, and SW columns. \n\nThe sparkline to the right hand side of the plot provides an indication of data completeness. If the line is at the maximum value (to the right) it shows that data row as being complete. ", "_____no_output_____" ] ], [ [ "msno.matrix(df)\nplt.show()", "_____no_output_____" ] ], [ [ "Another plot we can call upon is the bar plot, which provides a graphical summary of the number of points in each columns.", "_____no_output_____" ] ], [ [ "msno.bar(df)", "_____no_output_____" ] ], [ [ "## Using matplotlib\n\nWe can generate our own plots to show how the data sparsity varies across each of the wells. In order to do this, we need to manipulate the dataframe.\n\nFirst we create a copy of the dataframe to work on separately, and then replace each column with a value of 1 if the data is non-null.\n\nTo make our plot work, we need to increment each column's value by 1. This allows us to plot each column as an offset to the previous one.", "_____no_output_____" ] ], [ [ "data_nan = df.copy()\nfor num, col in enumerate(data_nan.columns[2:]):\n data_nan[col] = data_nan[col].notnull() * (num + 1)\n data_nan[col].replace(0, num, inplace=True)", "_____no_output_____" ] ], [ [ "When we view the header of the dataframe we now have a series of columns with increasing values from 1 to 14.", "_____no_output_____" ] ], [ [ "data_nan.head()", "_____no_output_____" ] ], [ [ "Next, we can group the dataframe by the wellName column.", "_____no_output_____" ] ], [ [ "grouped = data_nan.groupby('wellName')", "_____no_output_____" ] ], [ [ "We can then create multiple subplots for each well using the new dataframe. Rather than creating subplots within subplots, we can shade from the previous column's max value to the current column's max value if the data is present. 
If data is absent, it will be displayed as a gap.", "_____no_output_____" ] ], [ [ "#Setup the labels we want to display on the x-axis\nlabels = ['BS', 'CALI', 'DT', 'DTS', 'GR', 'NPHI', 'RACEHM', 'RACELM', 'RHOB', 'RPCEHM', 'RPCELM', 'PHIF', 'SW', 'VSH']\n#Setup the figure and the subplots\nfig, axs = plt.subplots(3, 2, figsize=(20,20))\n#Loop through each well and column in the grouped dataframe\nfor (name, well), ax in zip(grouped, axs.flat):\n ax.set_xlim(0,9)\n \n #Setup the depth range\n ax.set_ylim(well.MD.max() + 50, well.MD.min() - 50)\n ax.set_ylim(well.MD.max() + 50, well.MD.min() - 50)\n \n # Create multiple fill betweens for each curve# This is between\n # the number representing null values and the number representing\n # actual values\n ticks = []\n ticks_labels = []\n for i, curve in enumerate(labels):\n ax.fill_betweenx(well.MD, i, well[curve], facecolor='lightblue')\n ticks.append(i)\n ticks_labels.append(i+0.5)\n \n # add extra value on to ticks\n ticks.append(len(ticks))\n \n #Setup the grid, axis labels and ticks\n ax.grid(axis='x', alpha=0.5, color='black')\n ax.set_ylabel('DEPTH (m)', fontsize=18, fontweight='bold')\n \n #Position vertical lines at the boundaries between the bars\n ax.set_xticks(ticks, minor=False)\n \n #Position the curve names in the centre of each column\n ax.set_xticks(ticks_labels, minor=True)\n \n #Setup the x-axis tick labels\n ax.set_xticklabels(labels, rotation='vertical', minor=True, verticalalignment='bottom', fontsize=14)\n ax.set_xticklabels('', minor=False)\n ax.tick_params(axis='x', which='minor', pad=-10)\n ax.tick_params(axis='y', labelsize=14 )\n \n #Assign the well name as the title to each subplot\n ax.set_title(name, fontsize=16, fontweight='bold')\nplt.tight_layout()\nplt.subplots_adjust(hspace=0.15, wspace=0.25)\n# plt.savefig('missingdata.png', dpi=200)\nplt.show()", "_____no_output_____" ] ], [ [ "From the plot, we can not only see the data range of each well, but we can also see that 2 of the 5 wells have missing DT and DTS curves, 2 of the wells have missing data within RPCELM, and 2 of the wells have missing values in the PHIF and SW curves.", "_____no_output_____" ], [ "## Dealing With Missing Data\n### Discarding Variables\n\nAs DT and DTS are missing in two of the wells, we have the option to remove these wells from the dataset, or we can remove these two columns for all of the wells.\n\nThe following is an example of how we remove the two curves from the dataframe. 
For this we can pass in a list of the columns names to the `drop()` function, the axis, which we want to drop data along, in this case the columns (axis=1), and the `inplace=True` argument allows us to physically remove these values from the dataframe.", "_____no_output_____" ] ], [ [ "df.drop(df[['DT', 'DTS']], axis=1, inplace=True)", "_____no_output_____" ] ], [ [ "If we view the header of the dataframe, we will see that we have removed the required columns.", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "However, if we call upon the info method, we can see we still have null values within the dataframe.", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 27845 entries, 0 to 27844\nData columns (total 14 columns):\nwellName 27845 non-null object\nMD 27845 non-null float64\nBS 27845 non-null float64\nCALI 27845 non-null float64\nGR 27845 non-null float64\nNPHI 27845 non-null float64\nRACEHM 27845 non-null float64\nRACELM 27845 non-null float64\nRHOB 27845 non-null float64\nRPCEHM 27845 non-null float64\nRPCELM 27600 non-null float64\nPHIF 27736 non-null float64\nSW 27736 non-null float64\nVSH 27844 non-null float64\ndtypes: float64(13), object(1)\nmemory usage: 3.0+ MB\n" ] ], [ [ "### Discarding NaNs", "_____no_output_____" ], [ "We can drop missing values by calling upon a special function called `dropna()`. This will remove any NaN (Not a Number) values from the dataframe. The `inplace=True` argument allows us to physically remove these values from the dataframe.", "_____no_output_____" ] ], [ [ "df.dropna(inplace=True)", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 27491 entries, 0 to 27844\nData columns (total 14 columns):\nwellName 27491 non-null object\nMD 27491 non-null float64\nBS 27491 non-null float64\nCALI 27491 non-null float64\nGR 27491 non-null float64\nNPHI 27491 non-null float64\nRACEHM 27491 non-null float64\nRACELM 27491 non-null float64\nRHOB 27491 non-null float64\nRPCEHM 27491 non-null float64\nRPCELM 27491 non-null float64\nPHIF 27491 non-null float64\nSW 27491 non-null float64\nVSH 27491 non-null float64\ndtypes: float64(13), object(1)\nmemory usage: 3.1+ MB\n" ] ], [ [ "# Summary\n\nThis short notebook has shown three separate ways to visualise missing data. The first is by interrogating the dataframe, the second, by using the missingno library and thirdly by creating a custom visualisation with matplotlib.\n\nAt the end, we covered two ways in which missing data can be removed from the dataframe. The first by discarding variables, and the second by discarding missing values within the rows.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ] ]
c51f81155bab00da2b0532de19d48463c3566002
195,890
ipynb
Jupyter Notebook
examples/filters/adding_chords.ipynb
madeline-am/porespy
17ed88baa153372997fa52c85d18b6e3ca0767e9
[ "MIT" ]
3
2020-09-02T20:02:55.000Z
2021-07-09T03:50:49.000Z
examples/filters/adding_chords.ipynb
madeline-am/porespy
17ed88baa153372997fa52c85d18b6e3ca0767e9
[ "MIT" ]
null
null
null
examples/filters/adding_chords.ipynb
madeline-am/porespy
17ed88baa153372997fa52c85d18b6e3ca0767e9
[ "MIT" ]
null
null
null
504.871134
61,204
0.945153
[ [ [ "# Applying Chords to 2D and 3D Images\n", "_____no_output_____" ], [ "## Importing packages", "_____no_output_____" ] ], [ [ "import time\nimport porespy as ps\nps.visualization.set_mpl_style()", "_____no_output_____" ] ], [ [ "Import the usual packages from the Scipy ecosystem:", "_____no_output_____" ] ], [ [ "import scipy as sp\nimport scipy.ndimage as spim\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Demonstration on 2D Image\nStart by creating an image using the ``blobs`` function in ``generators``. The useful thing about this function is that images can be created with anisotropy. These are exactly the sort of images where chord length distributions are useful, since chords can be drawn in different directions, to probe the anisotropic pore sizes.", "_____no_output_____" ] ], [ [ "im = ps.generators.blobs(shape=[400, 400], blobiness=[2, 1])", "_____no_output_____" ] ], [ [ "The image can be visualized easily using matplotlib's ``imshow`` function:", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nplt.figure(figsize=[6, 6])\nfig = plt.imshow(im)", "_____no_output_____" ] ], [ [ "Determining chord-length distributions requires first adding chords to the image, which is done using the ``apply_chords`` function. The following code applies chords to the image in the x-direction (along ``axis=0``), then applies them in the y-direction (``axis=1``). The two images are then plotted using ``matplotlib``. ", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\ncrds_x = ps.filters.apply_chords(im=im, spacing=4, axis=0)\ncrds_y = ps.filters.apply_chords(im=im, spacing=4, axis=1)\nfig, ax = plt.subplots(1, 2, figsize=[10, 5])\nax[0].imshow(crds_x)\nax[1].imshow(crds_y)", "_____no_output_____" ] ], [ [ "Note that none of the chords touch the edge of the image. These chords are trimmed by default since they are artificially shorter than they should be and would skew the results. This behavior is optional and these chords can be kept by setting ``trim_edges=False``.", "_____no_output_____" ], [ "It is sometimes useful to colorize the chords by their length. PoreSpy includes a function called ``region_size`` which counts the number of voxels in each connected region of an image, and replaces those voxels with the numerical value of the region size. This is illustrated below:", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nsz_x = ps.filters.region_size(crds_x)\nsz_y = ps.filters.region_size(crds_y)\nfig, ax = plt.subplots(1, 2, figsize=[10, 6])\nax[0].imshow(sz_x)\nax[1].imshow(sz_y)", "_____no_output_____" ] ], [ [ "Although the above images are useful for quick visualization, they are not quantitative. To get quantitative chord length distributions, pass the chord image(s) to the ``chord_length_distribution`` functions in the ``metrics`` submodule:", "_____no_output_____" ] ], [ [ "data_x = ps.metrics.chord_length_distribution(crds_x, bins=25)\ndata_y = ps.metrics.chord_length_distribution(crds_y, bins=25)", "_____no_output_____" ] ], [ [ "This function, like many of the functions in the ``metrics`` module, returns a named tuple containing various arrays. The advantage of the named tuple is that each array can be accessed by name as attributes, such as ``data_x.pdf``. To see all the available attributes (i.e. 
arrays) use the autocomplete function if your IDE, the following:", "_____no_output_____" ] ], [ [ "print(data_x._fields)", "('L', 'pdf', 'cdf', 'relfreq', 'bin_centers', 'bin_edges', 'bin_widths')\n" ] ], [ [ "Now we can print the results of the chord-length distribution as bar graphs:", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nplt.figure(figsize=[6, 6])\nbar = plt.bar(x=data_y.L, height=data_y.cdf, width=data_y.bin_widths, color='b', edgecolor='k', alpha=0.5)\nbar = plt.bar(x=data_x.L, height=data_x.cdf, width=data_x.bin_widths, color='r', edgecolor='k', alpha=0.5)", "_____no_output_____" ] ], [ [ "The key point to see here is that the blue bars are for the y-direction, which was the elongated direction, and as expected they show a tendency toward longer chords. ", "_____no_output_____" ], [ "## Application to 3D images", "_____no_output_____" ], [ "Chords can just as easily be applied to 3D images. Let's create an artificial image of fibers, aligned in the YZ plane, but oriented randomly in the X direction", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nim = ps.generators.cylinders(shape=[200, 400, 400], radius=8, ncylinders=200, )\nplt.imshow(im[:, :, 100])", "100%|█████████████████████████████████████████████████████████████████████████████████████| 200/200 [00:02<00:00, 75.50it/s]\n" ] ], [ [ "As above, we must apply chords to the image then pass the chord image to the ``chord_length_distribution`` function:", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\ncrds = ps.filters.apply_chords(im=im, axis=0)\nplt.imshow(crds[:, :, 100])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c51fc4cf0c20758492b7d59bc7575e110b86cb56
923,900
ipynb
Jupyter Notebook
Exercise#6/Exercise6.ipynb
Vinihcampos/dim0451_DIP
0b1c75d3255231148b6329637c4d7f185df99482
[ "MIT" ]
1
2019-04-03T12:36:19.000Z
2019-04-03T12:36:19.000Z
Exercise#6/Exercise6.ipynb
Vinihcampos/dim0451_DIP
0b1c75d3255231148b6329637c4d7f185df99482
[ "MIT" ]
null
null
null
Exercise#6/Exercise6.ipynb
Vinihcampos/dim0451_DIP
0b1c75d3255231148b6329637c4d7f185df99482
[ "MIT" ]
null
null
null
2,816.768293
255,380
0.964257
[ [ [ "# Exercise 6", "_____no_output_____" ] ], [ [ "# Importing libs\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "apple = cv2.imread('images/apple.jpg')\napple = cv2.cvtColor(apple, cv2.COLOR_BGR2RGB)\napple = cv2.resize(apple, (512,512))\n\norange = cv2.imread('images/orange.jpg')\norange = cv2.cvtColor(orange, cv2.COLOR_BGR2RGB)\norange = cv2.resize(orange, (512,512))\n\nplt.figure(figsize=(10,10))\nax1 = plt.subplot(121)\nax1.imshow(apple)\nax2 = plt.subplot(122)\nax2.imshow(orange)\n\nax1.axis('off')\nax2.axis('off')\n\nax1.text(0.5,-0.1, \"Apple\", ha=\"center\", transform=ax1.transAxes)\nax2.text(0.5,-0.1, \"Orange\", ha=\"center\", transform=ax2.transAxes)", "_____no_output_____" ], [ "def combine(img1, img2):\n result = np.zeros(img1.shape, dtype='uint')\n \n h,w,_ = img1.shape\n \n result[:,0:w//2,:] = img1[:,0:w//2,:]\n result[:,w//2:,:] = img2[:,w//2:,:]\n \n return result.astype('uint8')", "_____no_output_____" ], [ "apple_orange = combine(apple,orange)\n\nplt.imshow(apple_orange)\nplt.axis('off')\nplt.figtext(0.5, 0, 'Apple + Orange', horizontalalignment='center')\nplt.show()", "_____no_output_____" ], [ "def buildPyramid(levels, left,right=None):\n lresult = left\n rresult = right if type(right) is np.ndarray else left\n \n for i in range(levels):\n lresult = cv2.pyrDown(lresult)\n rresult = cv2.pyrDown(rresult)\n \n for i in range(levels):\n lresult = cv2.pyrUp(lresult)\n rresult = cv2.pyrUp(rresult)\n \n return combine(lresult,rresult)", "_____no_output_____" ], [ "apple_orange_pyramid = buildPyramid(3, apple_orange)\n\nplt.figure(figsize=(10,10))\nax1 = plt.subplot(121)\nax1.imshow(apple_orange)\nax2 = plt.subplot(122)\nax2.imshow(apple_orange_pyramid)\n\nax1.axis('off')\nax2.axis('off')\n\nax1.text(0.5,-0.1, \"Raw\", ha=\"center\", transform=ax1.transAxes)\nax2.text(0.5,-0.1, \"After Pyramid\", ha=\"center\", transform=ax2.transAxes)", "_____no_output_____" ], [ "apple_orange_pyramid = buildPyramid(3, apple, orange)\n\nplt.figure(figsize=(10,10))\nax1 = plt.subplot(121)\nax1.imshow(apple_orange)\nax2 = plt.subplot(122)\nax2.imshow(apple_orange_pyramid)\n\nax1.axis('off')\nax2.axis('off')\n\nax1.text(0.5,-0.1, \"Raw\", ha=\"center\", transform=ax1.transAxes)\nax2.text(0.5,-0.1, \"After Pyramid\", ha=\"center\", transform=ax2.transAxes)", "_____no_output_____" ] ], [ [ "## Another implementation", "_____no_output_____" ] ], [ [ "def buildPyramid2(levels, left,right=None):\n lresult = left\n rresult = right if type(right) is np.ndarray else left\n \n for i in range(levels):\n lresult = cv2.pyrDown(lresult)\n rresult = cv2.pyrDown(rresult)\n \n result = combine(lresult,rresult)\n \n for i in range(levels):\n result = cv2.pyrUp(result)\n \n return result", "_____no_output_____" ], [ "apple_orange_pyramid = buildPyramid2(3, apple, orange)\n\nplt.figure(figsize=(10,10))\nax1 = plt.subplot(121)\nax1.imshow(apple_orange)\nax2 = plt.subplot(122)\nax2.imshow(apple_orange_pyramid)\n\nax1.axis('off')\nax2.axis('off')\n\nax1.text(0.5,-0.1, \"Raw\", ha=\"center\", transform=ax1.transAxes)\nax2.text(0.5,-0.1, \"After Pyramid\", ha=\"center\", transform=ax2.transAxes)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c51fc67c9c261d79edf4a1a706a587e05459d269
2,927
ipynb
Jupyter Notebook
notebooks/showImages.ipynb
CortanaIntelligenceGallery/imageclassificationcntklaptop
b9327fc279c5ba020f128faee93eb0e9fc077fa1
[ "MIT" ]
null
null
null
notebooks/showImages.ipynb
CortanaIntelligenceGallery/imageclassificationcntklaptop
b9327fc279c5ba020f128faee93eb0e9fc077fa1
[ "MIT" ]
null
null
null
notebooks/showImages.ipynb
CortanaIntelligenceGallery/imageclassificationcntklaptop
b9327fc279c5ba020f128faee93eb0e9fc077fa1
[ "MIT" ]
null
null
null
34.034884
358
0.608473
[ [ [ "# Image dataset visualization\nThis notebook can be used to browse throught all images, inspect for each image what class it is annotated as, and if wanted correct the annotation. \n\nChanging image annotation:\n- Simply use the drop-down menue under each image to change its class.\n- As is explained in part 3 of the documentation, all images are in subfolders of *DATA_DIR\\images\\fashionTexture\\*, where the subfolder names equal the class name. To change the class of an image it needs to be moved to the new subfolder - this is exaclty what the UI in this notebook does. \n- Example: Given an image labeled as 'striped' with filename 105.jpg, and we change the drop-box to 'dotted'. The image is then moved from the file path *DATA_DIR\\images\\fashionTexture\\striped\\105.jpg* to *DATA_DIR\\images\\fashionTexture\\dotted\\105.jpg*. This assumes that the destination does not exist or otherwise the UI will throw an error.", "_____no_output_____" ] ], [ [ "import sys, os\nsys.path.append(\".\")\nsys.path.append(\"..\")\nsys.path.append(\"libraries\")\nsys.path.append(\"../libraries\")\nfrom IPython.display import display\nfrom utilities_general_v2 import *\nfrom ui_annotation import AnnotationUI\nfrom PARAMETERS import procDir, imgOrigDir\n%autosave 0\n\nboShowTrainingSet = True #Set to False to show test set instead\n\n# Load data\nlutLabel2Id = readPickle(pathJoin(procDir, \"lutLabel2Id.pickle\"))\nif not boShowTrainingSet:\n imgDict = readPickle(pathJoin(procDir, \"imgDictTest.pickle\"))\nelse:\n imgDict = readPickle(pathJoin(procDir, \"imgDictTrain.pickle\"))", "_____no_output_____" ], [ "# Instantiate and show annotation UI\nannotationUI = AnnotationUI(imgOrigDir, imgDict, lutLabel2Id, gridSize=(2, 2)) #, wZoomImgWidth = 350)\ndisplay(annotationUI.ui)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
c51ff62380f55575e36c31af9fd1f730fbeb7a24
28,041
ipynb
Jupyter Notebook
assignment2.ipynb
Nathanahel/ML_assignments
772aa395fde941e2041c5c1d69755a97d01f2610
[ "MIT" ]
null
null
null
assignment2.ipynb
Nathanahel/ML_assignments
772aa395fde941e2041c5c1d69755a97d01f2610
[ "MIT" ]
null
null
null
assignment2.ipynb
Nathanahel/ML_assignments
772aa395fde941e2041c5c1d69755a97d01f2610
[ "MIT" ]
null
null
null
48.937173
300
0.3757
[ [ [ "from bs4 import BeautifulSoup\nimport os\nimport random\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report", "_____no_output_____" ], [ "filepaths = []\nfor root, dirs, files in os.walk(os.getcwd() + \"/reuters21578/\"):\n for file in files:\n if os.path.splitext(file)[1] == '.sgm':\n filepaths.append(os.path.join(root, file))\n\n\nfile_list = [open(file, 'r', encoding='ISO-8859-1') for file in filepaths]\nsoup_list = [BeautifulSoup(file,'lxml') for file in file_list]", "_____no_output_____" ], [ "def find_topics(soup):\n tuple_topics = [(topic.parent.get('newid'),i) for topic in soup.find_all('topics') for i in topic.strings]\n return tuple_topics\n\ndef find_texts(soup):\n dic_text = {find.parent.get('newid'):find.text.replace(find.title.string if find.parent.title is not None else \"\",\"\").replace(find.dateline.string if find.dateline is not None else \"\",\"\").replace(\"\\n\",\"\") for find in soup.find_all('text') if find.parent.topics.contents!=[]}\n return dic_text\n\ndef get_strs(soup):\n topics = find_topics(soup)\n text = find_texts(soup)\n strs = [topic[1] + \"_label_\" + text.get(topic[0]) for topic in topics]\n return strs\n\ndef write_to_txt(strs):\n file = open('raw_y_X.txt','w',encoding='utf-8')\n for i in strs:\n file.write(i+'\\n')\n file.close()", "_____no_output_____" ], [ "strs_s = []\nfor soup in soup_list:\n strs = get_strs(soup)\n for st in strs:\n strs_s.append(st)\n\nrandom.shuffle(strs_s)\nwrite_to_txt(strs_s)", "_____no_output_____" ], [ "X_raw = []\ny_raw = []\nwith open(\"raw_y_X.txt\", \"r\") as infile:\n lines = infile.readlines()\n for line in lines:\n y_raw.append(line.split(\"_label_\")[0])\n X_raw.append(line.split(\"_label_\")[1])", "_____no_output_____" ], [ "vectorizer = TfidfVectorizer(ngram_range=(1,2), stop_words=\"english\")\n\n##################20newsgroups########################\n\nnewsgroups_train = fetch_20newsgroups(subset=\"train\")\nX_news = vectorizer.fit_transform(newsgroups_train.data)\ny_news = newsgroups_train.target\n\n##################Reuters###############################\n\nX_reuters = vectorizer.fit_transform(X_raw)\nlabel_encoder = LabelEncoder()\ny_reuters = label_encoder.fit_transform(y_raw)", "_____no_output_____" ], [ "X_news_train, X_news_test, y_news_train, y_news_test = train_test_split(X_news, y_news, test_size=0.25)\nlsvc_news = LinearSVC(loss=\"squared_hinge\", penalty=\"l2\", C=1, multi_class=\"ovr\")\nlsvc_news.fit(X_news_train, y_news_train)\nprint(classification_report(y_news_test, lsvc_news.predict(X_news_test)))\n", " precision recall f1-score support\n\n 0 0.94 0.97 0.95 128\n 1 0.88 0.91 0.89 163\n 2 0.88 0.92 0.90 137\n 3 0.83 0.79 0.81 136\n 4 0.92 0.92 0.92 135\n 5 0.92 0.89 0.91 152\n 6 0.82 0.90 0.86 146\n 7 0.92 0.93 0.93 133\n 8 0.96 0.96 0.96 163\n 9 0.97 0.95 0.96 134\n 10 0.93 0.97 0.95 116\n 11 0.97 0.97 0.97 159\n 12 0.91 0.90 0.91 139\n 13 0.99 0.95 0.97 148\n 14 0.96 0.97 0.97 158\n 15 0.94 0.98 0.96 167\n 16 0.94 0.97 0.96 138\n 17 0.97 0.99 0.98 147\n 18 0.96 0.87 0.92 124\n 19 0.99 0.83 0.90 106\n\n accuracy 0.93 2829\n macro avg 0.93 0.93 0.93 2829\nweighted avg 0.93 0.93 0.93 2829\n\n" ], [ "X_reuters_train, X_reuters_test, y_reuters_train, y_reuters_test = train_test_split(X_reuters, 
y_reuters, test_size=0.25)\nlsvc_reuters = LinearSVC(loss=\"squared_hinge\", penalty=\"l2\", C=1, multi_class=\"ovr\")\nlsvc_reuters.fit(X_reuters_train, y_reuters_train)\nprint(classification_report(y_reuters_test, lsvc_reuters.predict(X_reuters_test)))", " precision recall f1-score support\n\n 0 0.65 0.96 0.77 605\n 1 0.73 0.57 0.64 14\n 3 0.00 0.00 0.00 9\n 5 0.35 0.18 0.24 33\n 6 0.00 0.00 0.00 2\n 7 0.14 0.19 0.16 16\n 11 0.67 0.71 0.69 17\n 12 0.00 0.00 0.00 0\n 13 0.00 0.00 0.00 2\n 14 0.83 0.72 0.77 40\n 15 0.67 0.67 0.67 15\n 16 0.00 0.00 0.00 0\n 17 0.00 0.00 0.00 64\n 18 0.00 0.00 0.00 1\n 20 0.23 0.23 0.23 13\n 21 0.00 0.00 0.00 1\n 23 0.56 0.38 0.45 26\n 24 0.00 0.00 0.00 1\n 25 0.60 0.49 0.54 173\n 29 0.00 0.00 0.00 55\n 30 0.00 0.00 0.00 2\n 31 0.98 0.92 0.95 1031\n 32 0.00 0.00 0.00 2\n 33 0.00 0.00 0.00 1\n 34 0.75 0.43 0.55 7\n 35 0.25 0.18 0.21 11\n 36 0.30 0.44 0.36 36\n 37 0.68 0.61 0.64 44\n 38 0.11 0.16 0.13 148\n 39 0.00 0.00 0.00 1\n 40 0.00 0.00 0.00 1\n 41 0.67 0.60 0.63 10\n 43 0.00 0.00 0.00 8\n 44 0.80 0.67 0.73 6\n 45 1.00 0.17 0.29 6\n 46 1.00 0.33 0.50 3\n 47 0.56 0.41 0.47 140\n 48 0.00 0.00 0.00 2\n 49 0.60 0.60 0.60 15\n 50 0.91 0.50 0.65 20\n 51 0.00 0.00 0.00 2\n 52 0.72 0.76 0.74 17\n 53 0.00 0.00 0.00 0\n 54 0.33 0.14 0.20 7\n 55 0.67 0.40 0.50 5\n 57 0.00 0.00 0.00 1\n 58 0.00 0.00 0.00 2\n 59 0.00 0.00 0.00 1\n 60 0.29 0.20 0.24 35\n 61 0.00 0.00 0.00 3\n 62 0.20 0.09 0.13 11\n 63 0.35 0.36 0.36 183\n 64 0.73 0.49 0.59 55\n 65 0.00 0.00 0.00 3\n 66 0.24 0.29 0.26 31\n 67 0.00 0.00 0.00 2\n 68 0.00 0.00 0.00 0\n 69 0.00 0.00 0.00 1\n 70 0.00 0.00 0.00 2\n 71 0.17 0.07 0.10 59\n 72 0.80 0.57 0.67 7\n 73 0.00 0.00 0.00 1\n 74 0.00 0.00 0.00 13\n 75 0.00 0.00 0.00 1\n 77 0.00 0.00 0.00 5\n 78 0.00 0.00 0.00 2\n 79 0.00 0.00 0.00 1\n 80 0.00 0.00 0.00 1\n 81 0.00 0.00 0.00 1\n 82 0.00 0.00 0.00 2\n 85 0.00 0.00 0.00 1\n 86 0.20 0.08 0.12 12\n 87 0.00 0.00 0.00 1\n 88 0.70 0.35 0.47 20\n 89 1.00 0.50 0.67 6\n 90 0.09 0.07 0.08 14\n 91 0.00 0.00 0.00 1\n 92 0.83 0.50 0.62 10\n 94 0.00 0.00 0.00 1\n 95 0.00 0.00 0.00 0\n 96 0.00 0.00 0.00 2\n 97 0.44 0.41 0.43 68\n 98 0.00 0.00 0.00 7\n 99 0.00 0.00 0.00 1\n 100 0.00 0.00 0.00 7\n 101 0.00 0.00 0.00 5\n 102 0.00 0.00 0.00 7\n 103 0.03 0.04 0.04 24\n 104 0.00 0.00 0.00 3\n 105 0.50 0.08 0.13 13\n 106 0.84 0.69 0.76 52\n 108 0.00 0.00 0.00 4\n 109 0.00 0.00 0.00 4\n 110 0.00 0.00 0.00 0\n 111 0.00 0.00 0.00 4\n 112 1.00 0.80 0.89 10\n 113 0.52 0.64 0.57 119\n 114 0.21 0.25 0.23 36\n 115 0.03 0.02 0.03 84\n 116 0.00 0.00 0.00 1\n 117 0.57 0.57 0.57 7\n 118 0.00 0.00 0.00 12\n 119 0.10 0.20 0.13 5\n\n accuracy 0.61 3576\n macro avg 0.25 0.19 0.21 3576\nweighted avg 0.60 0.61 0.59 3576\n\n" ], [ "cnb_news = ComplementNB(alpha=1)\ncnb_news.fit(X_news_train, y_news_train)\nprint(classification_report(y_news_test, cnb_news.predict(X_news_test)))", " precision recall f1-score support\n\n 0 0.89 0.98 0.93 128\n 1 0.91 0.82 0.86 163\n 2 0.84 0.86 0.85 137\n 3 0.76 0.75 0.76 136\n 4 0.94 0.89 0.92 135\n 5 0.92 0.89 0.91 152\n 6 0.88 0.81 0.84 146\n 7 0.86 0.92 0.89 133\n 8 0.95 0.95 0.95 163\n 9 0.91 0.94 0.93 134\n 10 0.86 0.97 0.91 116\n 11 0.95 0.98 0.97 159\n 12 0.91 0.83 0.87 139\n 13 0.96 0.92 0.94 148\n 14 0.93 0.97 0.95 158\n 15 0.91 0.97 0.94 167\n 16 0.87 0.97 0.92 138\n 17 0.91 0.99 0.95 147\n 18 0.90 0.84 0.87 124\n 19 0.95 0.70 0.80 106\n\n accuracy 0.90 2829\n macro avg 0.90 0.90 0.90 2829\nweighted avg 0.90 0.90 0.90 2829\n\n" ], [ "cnb_reuters = ComplementNB(alpha=1)\ncnb_reuters.fit(X_reuters_train, 
y_reuters_train)\nprint(classification_report(y_reuters_test, cnb_reuters.predict(X_reuters_test)))", " precision recall f1-score support\n\n 0 0.64 0.96 0.77 605\n 1 0.88 0.50 0.64 14\n 3 0.00 0.00 0.00 9\n 5 0.38 0.09 0.15 33\n 6 0.00 0.00 0.00 2\n 7 0.07 0.06 0.06 16\n 11 0.67 0.59 0.62 17\n 12 0.00 0.00 0.00 0\n 13 0.00 0.00 0.00 2\n 14 0.84 0.68 0.75 40\n 15 0.64 0.60 0.62 15\n 16 0.00 0.00 0.00 0\n 17 0.00 0.00 0.00 64\n 18 0.00 0.00 0.00 1\n 20 0.25 0.08 0.12 13\n 21 0.00 0.00 0.00 1\n 23 0.38 0.35 0.36 26\n 24 0.00 0.00 0.00 1\n 25 0.64 0.51 0.57 173\n 29 0.00 0.00 0.00 55\n 30 0.00 0.00 0.00 2\n 31 0.83 0.93 0.88 1031\n 32 0.00 0.00 0.00 2\n 33 0.00 0.00 0.00 1\n 34 1.00 0.14 0.25 7\n 35 0.20 0.09 0.13 11\n 36 0.27 0.33 0.30 36\n 37 0.69 0.50 0.58 44\n 38 0.13 0.22 0.16 148\n 39 0.00 0.00 0.00 1\n 40 0.00 0.00 0.00 1\n 41 0.71 0.50 0.59 10\n 43 0.00 0.00 0.00 8\n 44 0.75 0.50 0.60 6\n 45 0.00 0.00 0.00 6\n 46 0.00 0.00 0.00 3\n 47 0.61 0.27 0.38 140\n 48 0.00 0.00 0.00 2\n 49 0.60 0.40 0.48 15\n 50 0.80 0.20 0.32 20\n 51 0.00 0.00 0.00 2\n 52 0.71 0.29 0.42 17\n 53 0.00 0.00 0.00 0\n 54 0.50 0.14 0.22 7\n 55 0.50 0.20 0.29 5\n 57 0.00 0.00 0.00 1\n 58 0.00 0.00 0.00 2\n 59 0.00 0.00 0.00 1\n 60 0.18 0.11 0.14 35\n 61 0.00 0.00 0.00 3\n 62 0.00 0.00 0.00 11\n 63 0.37 0.51 0.43 183\n 64 0.82 0.33 0.47 55\n 65 0.00 0.00 0.00 3\n 66 0.18 0.13 0.15 31\n 67 0.00 0.00 0.00 2\n 69 0.00 0.00 0.00 1\n 70 0.00 0.00 0.00 2\n 71 0.14 0.05 0.07 59\n 72 1.00 0.57 0.73 7\n 73 0.00 0.00 0.00 1\n 74 0.00 0.00 0.00 13\n 75 0.00 0.00 0.00 1\n 77 0.00 0.00 0.00 5\n 78 0.00 0.00 0.00 2\n 79 0.00 0.00 0.00 1\n 80 0.00 0.00 0.00 1\n 81 0.00 0.00 0.00 1\n 82 0.00 0.00 0.00 2\n 85 0.00 0.00 0.00 1\n 86 0.20 0.08 0.12 12\n 87 0.00 0.00 0.00 1\n 88 0.00 0.00 0.00 20\n 89 0.00 0.00 0.00 6\n 90 0.00 0.00 0.00 14\n 91 0.00 0.00 0.00 1\n 92 0.80 0.40 0.53 10\n 94 0.00 0.00 0.00 1\n 95 0.00 0.00 0.00 0\n 96 0.00 0.00 0.00 2\n 97 0.46 0.40 0.43 68\n 98 0.00 0.00 0.00 7\n 99 0.00 0.00 0.00 1\n 100 0.00 0.00 0.00 7\n 101 0.00 0.00 0.00 5\n 102 0.00 0.00 0.00 7\n 103 0.00 0.00 0.00 24\n 104 0.00 0.00 0.00 3\n 105 1.00 0.08 0.14 13\n 106 0.80 0.54 0.64 52\n 108 0.00 0.00 0.00 4\n 109 0.00 0.00 0.00 4\n 110 0.00 0.00 0.00 0\n 111 0.00 0.00 0.00 4\n 112 1.00 0.60 0.75 10\n 113 0.43 0.60 0.50 119\n 114 0.17 0.14 0.15 36\n 115 0.00 0.00 0.00 84\n 116 0.00 0.00 0.00 1\n 117 0.50 0.14 0.22 7\n 118 0.00 0.00 0.00 12\n 119 0.00 0.00 0.00 5\n\n accuracy 0.58 3576\n macro avg 0.21 0.14 0.15 3576\nweighted avg 0.55 0.58 0.55 3576\n\n" ] ], [ [ "**Preprocessing**\n\nI used BeautifulSoup to parse the Reuters data instead of Regular Expressions, which turned out to be more difficult than expected.\nWhen parsing the data, only documents with the label \\\\<TOPICS\\> were chosen, and texts were read in by stripping the title and dateline information.\nThe \"newid\" information makes the text and its topic match.\nThe X_raw would be all the texts, and the y_raw would be their corresponding topics.\nThe topic and text of every document were stored in a .txt file with a \\_label_ mark separating them.\n\nThen TF-IDF vectorizer helped with the encoding of both datasets with unigram and bigram features excluding the stop words. Label Encoder of sklearn was used to encode target labels with value between 0 and n_classes-1.\n\n**Model Selection**\n\nTwo models were implemented, Linear Support Vector Classifier as the non-probabilistic one and Complement Naive Bayes as the probabilistic one. 
\nLinearSVC has more flexibility in the choice of penalties and loss functions, and Complement NB is suitable for imbalanced datasets, in our case, the Reuters dataset. The inductive bias of a SVM is that distinct classes tend to be separated by wide margins (maximum margin).\nThe naive bayes classifier assumes that the inputs are independent of each other, and the input only depends on the output label.\n\nThe train and test sets were split by sklearn. I tried several test sizes and found out that the performance did not vary a lot, and I went with 0.25.\n\nFor hyperparameters, I chose to use the default values for both classifiers.\n\n**Evaluation**\n\nThe evaluation metric I chose was classification report in sklearn. It shows the accuracy, recall, and F1 score for each label and overall.\nThe results I got from the experiments showed that the 20 newsgroups dataset outperformed Reuters a lot on both classifiers.\nThe overall accuracy for 20 newsgroups dataset was about 0.9, while the overall accuracy for Reuters was around 0.6.\nWhen classifying the Reuters dataset, labels with high frequency were predicted with higher accuracy and recall, whereas rare labels got nearly 0. \nI think it is because the Reuters dataset is not as well-formed as the other one. There's a lot of \"noise\" in the texts (e.g. many texts are like \\*\\*\\*\\*\\*Blah Blah Blah).\n\nAlso, the 20 newsgroups dataset only has 20 labels whereas the Reuters dataset has 120 labels, making the classification task harder. With so many labels to classify, more input data is needed for the chosen model to learn.\n\nAs for the warining of zero division, there are zero accuracies and recalls so it is natural to see that.", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
c51ff7e262018fe5f718a9c7f17c60076cde0ad3
234,899
ipynb
Jupyter Notebook
lesson 3/results/GFPE.ipynb
gtpedrosa/Python4WindEnergy
f8ad09018420cfb3a419173f97b129de7118d814
[ "Apache-2.0" ]
48
2015-01-19T18:21:10.000Z
2021-11-27T22:41:06.000Z
lesson 3/results/GFPE.ipynb
gtpedrosa/Python4WindEnergy
f8ad09018420cfb3a419173f97b129de7118d814
[ "Apache-2.0" ]
1
2016-05-24T06:07:07.000Z
2016-05-24T08:26:29.000Z
lesson 3/results/GFPE.ipynb
gtpedrosa/Python4WindEnergy
f8ad09018420cfb3a419173f97b129de7118d814
[ "Apache-2.0" ]
24
2015-06-26T14:44:07.000Z
2021-06-07T18:36:52.000Z
821.325175
99,915
0.937999
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c52000f68263c7a4c86b3843bd1a2ecb097e197c
7,686
ipynb
Jupyter Notebook
chan/01_Centre_Expand_Handler_Alpha_13.ipynb
fs714/concurrency-example
fbff041804b9c46fb7f21ebbae22acff745c7b0c
[ "Apache-2.0" ]
null
null
null
chan/01_Centre_Expand_Handler_Alpha_13.ipynb
fs714/concurrency-example
fbff041804b9c46fb7f21ebbae22acff745c7b0c
[ "Apache-2.0" ]
null
null
null
chan/01_Centre_Expand_Handler_Alpha_13.ipynb
fs714/concurrency-example
fbff041804b9c46fb7f21ebbae22acff745c7b0c
[ "Apache-2.0" ]
1
2020-03-10T15:47:05.000Z
2020-03-10T15:47:05.000Z
32.706383
101
0.457976
[ [ [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nfrom IPython.core.debugger import Pdb; pdb = Pdb()\n\n\ndef get_down_centre_last_low(p_list):\n zn_num = len(p_list) - 1\n available_num = min(9, (zn_num - 6))\n \n index = len(p_list) - 4\n for i in range(0, available_num // 2):\n if p_list[index - 2] < p_list[index]:\n index = index -2\n else:\n return index\n return index + 2\n\ndef get_down_centre_first_high(p_list):\n s = max(enumerate(p_list[3:]), key=lambda x: x[1])[0]\n return s + 3\n\ndef down_centre_expand_spliter(p_list):\n lr0 = get_down_centre_last_low(p_list)\n hr0 = (max(enumerate(p_list[lr0 - 1:]), key=lambda x: x[1])[0]) + lr0 - 1\n hl0 = get_down_centre_first_high(p_list[: lr0 - 2])\n \n if p_list[hr0] > p_list[hl0] and (len(p_list) - hr0) > 5:\n hl0 = hr0\n lr0 = lr0 + (len(p_list) - hr0) // 2\n # lr0 = hr0 + 3\n return [0, hl0, lr0, len(p_list) - 1], [p_list[0], p_list[hl0], p_list[lr0], p_list[-1]]\n\n\n# y = [0, 100, 60, 130, 70, 120, 40, 90, 50, 140, 85, 105]\n# y = [0, 100, 60, 110, 70, 72, 61, 143, 77, 91, 82, 100, 83, 124, 89, 99]\n# y = [0, 100, 60, 110, 70, 115, 75, 120, 80, 125, 85, 130, 90, 135]\n# y = [0, 100, 60, 110, 70, 78, 77, 121, 60, 93, 82, 141, 78, 134]\n\n# x = list(range(0, len(y)))\n# gg = [min(y[1], y[3])] * len(y)\n# dd = [max(y[2], y[4])] * len(y)\n\n# plt.figure(figsize=(len(y),4))\n# plt.grid()\n# plt.plot(x, y)\n# plt.plot(x, gg, '--')\n# plt.plot(x, dd, '--')\n# sx, sy = down_centre_expand_spliter(y)\n# plt.plot(sx, sy)\n# plt.show()\n", "_____no_output_____" ], [ "# Centre Expand Prototype\n%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\n\ny_base = [0, 100, 60, 130, 70, 120, 40, 90, 50, 140, 85, 105, 55, 80]\n\nfor i in range(10, len(y_base)):\n y = y_base[:(i + 1)]\n x = list(range(0, len(y)))\n gg = [min(y[1], y[3])] * len(y)\n dd = [max(y[2], y[4])] * len(y)\n\n plt.figure(figsize=(i,4))\n plt.grid()\n plt.plot(x, y)\n plt.plot(x, gg, '--')\n plt.plot(x, dd, '--')\n if i % 2 == 1:\n sx, sy = down_centre_expand_spliter(y)\n plt.plot(sx, sy)\n plt.show()\n ", "_____no_output_____" ], [ "# Random Centre Generator\n%matplotlib inline\n\nimport random\nimport matplotlib.pyplot as plt\n\ny_max = 150\ny_min = 50\nnum_max = 14\n\ndef generate_next(y_list, direction):\n if direction == 1:\n y_list.append(random.randint(max(y_list[2], y_list[4], y_list[-1]) + 1, y_max))\n elif direction == -1:\n y_list.append(random.randint(y_min, min(y_list[1], y_list[3], y_list[-1]) - 1))\n\ny_base = [0, 100, 60, 110, 70]\n# y_base = [0, 110, 70, 100, 60]\n# y_base = [0, 100, 60, 90, 70]\n# y_base = [0, 90, 70, 100, 60]\n\ndirection = 1\nfor i in range(5, num_max):\n generate_next(y_base, direction)\n direction = 0 - direction\n\nprint(y_base)\nfor i in range(11, len(y_base), 2):\n y = y_base[:(i + 1)]\n x = list(range(0, len(y)))\n gg = [min(y[1], y[3])] * len(y)\n dd = [max(y[2], y[4])] * len(y)\n\n plt.figure(figsize=(i,4))\n plt.title(y)\n plt.grid()\n plt.plot(x, y)\n plt.plot(x, gg, '--')\n plt.plot(x, dd, '--')\n sx, sy = down_centre_expand_spliter(y)\n plt.plot(sx, sy)\n plt.show()\n ", "_____no_output_____" ], [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\n# Group 1\n# y_base = [0, 100, 60, 110, 70, 99, 66, 121, 91, 141, 57, 111, 69, 111]\n# y_base = [0, 100, 60, 110, 70, 105, 58, 102, 74, 137, 87, 142, 55, 128]\ny_base = [0, 100, 60, 110, 70, 115, 75, 120, 80, 125, 85, 130, 90, 135]\n# y_base = [0, 100, 60, 110, 70, 120, 80, 130, 90, 140, 50, 75]\n# y_base = [0, 100, 60, 110, 70, 114, 52, 75, 54, 77, 65, 100, 66, 87, 
70, 116]\n# y_base = [0, 100, 60, 110, 70, 72, 61, 143, 77, 91, 82, 100, 83, 124, 89, 99, 89, 105]\n\n# Group 2\n# y_base = [0, 110, 70, 100, 60, 142, 51, 93, 78, 109, 60, 116, 50, 106]\n# y_base = [0, 110, 70, 100, 60, 88, 70, 128, 82, 125, 72, 80, 63, 119]\n# y_base = [0, 110, 70, 100, 60, 74, 66, 86, 57, 143, 50, 95, 70, 91]\n# y_base = [0, 110, 70, 100, 60, 77, 73, 122, 96, 116, 82, 124, 69, 129]\n# y_base = [0, 110, 70, 100, 60, 147, 53, 120, 77, 103, 56, 76, 74, 92]\n# y_base = [0, 110, 70, 100, 60, 95, 55, 90, 50, 85, 45, 80, 40, 75]\n\n# Group 3\n# y_base = [0, 100, 60, 90, 70, 107, 55, 123, 79, 112, 64, 85, 74, 110]\n# y_base = [0, 100, 60, 90, 70, 77, 55, 107, 76, 141, 87, 91, 60, 83]\n# y_base = [0, 100, 60, 90, 70, 114, 67, 93, 58, 134, 53, 138, 64, 107]\n# y_base = [0, 100, 60, 90, 70, 77, 66, 84, 79, 108, 87, 107, 72, 89]\n# y_base = [0, 100, 60, 90, 70, 88, 72, 86, 74, 84, 76, 82, 74, 80]\n\n# Group 4\n# y_base = [0, 90, 70, 100, 60, 131, 57, 144, 85, 109, 82, 124, 87, 101]\n# y_base = [0, 90, 70, 100, 60, 150, 56, 112, 63, 95, 84, 118, 58, 110]\n# y_base = [0, 90, 70, 100, 60, 145, 64, 112, 69, 86, 71, 119, 54, 95]\n# y_base = [0, 90, 70, 100, 60, 105, 55, 110, 50, 115, 45, 120, 40, 125]\n\nfor i in range(11, len(y_base), 2):\n y = y_base[:(i + 1)]\n x = list(range(0, len(y)))\n gg = [min(y[1], y[3])] * len(y)\n dd = [max(y[2], y[4])] * len(y)\n\n plt.figure(figsize=(i,4))\n plt.title(y)\n plt.grid()\n plt.plot(x, y)\n plt.plot(x, gg, '--')\n plt.plot(x, dd, '--')\n sx, sy = down_centre_expand_spliter(y)\n plt.plot(sx, sy)\n plt.show()\n ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
c5200ba5408005f41130745423c6df00d02c3db7
14,459
ipynb
Jupyter Notebook
00_core.ipynb
robeson1010/SSLRS
45bc4aa17766c06a0f509202dc9ff0b534c61ca9
[ "Apache-2.0" ]
null
null
null
00_core.ipynb
robeson1010/SSLRS
45bc4aa17766c06a0f509202dc9ff0b534c61ca9
[ "Apache-2.0" ]
null
null
null
00_core.ipynb
robeson1010/SSLRS
45bc4aa17766c06a0f509202dc9ff0b534c61ca9
[ "Apache-2.0" ]
null
null
null
30.633475
120
0.455564
[ [ [ "# default_exp core", "_____no_output_____" ] ], [ [ "# module name here\n\n> API details.", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.showdoc import *", "_____no_output_____" ], [ "#export\nimport pandas as pd\nfrom tqdm import tqdm_notebook as tqdm\nimport json\nimport numpy as np\nfrom fastai.vision.all import *\nimport albumentations as A\nimport skimage.io as skio\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "with open('./data/BigEarthNet-S2_19-classes_models/label_indices.json', 'rb') as f:\n label_indices = json.load(f)\nlabel_conversion = label_indices['label_conversion']\nBigEarthNet_19_label_idx = {v: k for k, v in label_indices['BigEarthNet-19_labels'].items()}\ndef get_label(patch_json):\n original_labels = patch_json['labels']\n original_labels_multi_hot = np.zeros(\n len(label_indices['original_labels'].keys()), dtype=int)\n BigEarthNet_19_labels_multi_hot = np.zeros(len(label_conversion),dtype=int)\n for label in original_labels:\n original_labels_multi_hot[label_indices['original_labels'][label]] = 1\n\n for i in range(len(label_conversion)):\n BigEarthNet_19_labels_multi_hot[i] = (\n np.sum(original_labels_multi_hot[label_conversion[i]]) > 0\n ).astype(int)\n\n BigEarthNet_19_labels = ''\n for i in np.where(BigEarthNet_19_labels_multi_hot == 1)[0]:\n BigEarthNet_19_labels+=str(i)+' '\n return BigEarthNet_19_labels[:-1]\n", "_____no_output_____" ], [ "# # create data\n# df=pd.read_csv('./data/BigEarthNet-S2_19-classes_models/splits/train.csv',header=None)\n# df['Isval']=0\n# df2=pd.read_csv('./data/BigEarthNet-S2_19-classes_models/splits/val.csv',header=None)\n# df2['Isval']=1\n# df=pd.concat([df,df2])\n# df=df.rename(columns={0: \"fname\"})\n# df['label']=''\n# for i in tqdm(range(len(df))):\n# with open('./data/BigEarthNet-v1.0/'+df.iat[i,0]+'/'+df.iat[i,0]+'_labels_metadata.json', 'rb') as f:\n# patch_json = json.load(f)\n# df.iat[i,2]=get_label(patch_json) \n# df.iat[i,0]='./data/BigEarthNet-v1.0/'+df.iat[i,0]+'/'+df.iat[i,0]+'.tif' ", "_____no_output_____" ], [ "#export\ndef open_tif(fn, cls=torch.Tensor):\n im = skio.imread(str(fn))/10000\n im = im.transpose(1,2,0).astype('float32')\n return cls(im)\nclass MSTensorImage(TensorImage):\n \n @classmethod\n def create(cls, data:(Path,str,ndarray), chnls=None):\n \n if isinstance(data, Path) or isinstance(data, str):\n if str(data).endswith('tif'): im = open_tif(fn=data,cls=torch.Tensor)\n\n elif isinstance(data, ndarray): \n im = torch.from_numpy(data)\n else:\n im = data\n \n return cls(im)", "_____no_output_____" ], [ "df=pd.read_csv('./data/file.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "db=DataBlock(blocks=(TransformBlock(type_tfms=partial(MSTensorImage.create)), MultiCategoryBlock),\n splitter=ColSplitter('Isval'),\n get_x=ColReader('fname'),\n get_y=ColReader('label', label_delim=' '))\n# batch_tfms=aug_transforms(size=224))\n# db.summary(source=df)", "_____no_output_____" ], [ "ds = db.datasets(source=df)", "_____no_output_____" ], [ "#export\nBAND_STATS = {\n 'S2':{\n 'mean': {\n 'B01': 340.76769064,\n 'B02': 429.9430203,\n 'B03': 614.21682446,\n 'B04': 590.23569706,\n 'B05': 950.68368468,\n 'B06': 1792.46290469,\n 'B07': 2075.46795189,\n 'B08': 2218.94553375,\n 'B8A': 2266.46036911,\n 'B09': 2246.0605464,\n 'B11': 1594.42694882,\n 'B12': 1009.32729131\n },\n 'std': {\n 'B01': 554.81258967,\n 'B02': 572.41639287,\n 'B03': 582.87945694,\n 'B04': 675.88746967,\n 'B05': 729.89827633,\n 'B06': 1096.01480586,\n 'B07': 1273.45393088,\n 
'B08': 1365.45589904,\n 'B8A': 1356.13789355,\n 'B09': 1302.3292881,\n 'B11': 1079.19066363,\n 'B12': 818.86747235\n }\n },\n 'S1': {\n 'mean': {\n 'VV': -12.619993741972035,\n 'VH': -19.29044597721542,\n 'VV/VH': 0.6525036195871579,\n },\n 'std': {\n 'VV': 5.115911777546365,\n 'VH': 5.464428464912864,\n 'VV/VH': 30.75264076801808,\n },\n 'min': {\n 'VV': -74.33214569091797,\n 'VH': -75.11137390136719,\n 'R': 3.21E-2\n },\n 'max': {\n 'VV': 34.60696029663086,\n 'VH': 33.59768295288086,\n 'R': 1.08\n }\n }\n }\n", "_____no_output_____" ], [ "#export\nbands=['B02','B03', 'B04', 'B05','B06', 'B07', 'B11', 'B08','B8A', 'B12']", "_____no_output_____" ], [ "#export\nmeans=[BAND_STATS['S2']['mean'][band]/10000 for band in bands]\nstds=[BAND_STATS['S2']['std'][band]/10000 for band in bands]", "_____no_output_____" ], [ "#export \n# Now we will create a pipe of transformations\nfrom albumentations.pytorch import ToTensorV2\naug_pipe = A.Compose([A.ShiftScaleRotate(p=.5),\n A.HorizontalFlip(),\n A.Normalize(mean=means,std=stds,max_pixel_value=1.0),\n ToTensorV2()] \n )\nval_pipe = A.Compose([\n A.Normalize(mean=means,std=stds,max_pixel_value=1.0),\n ToTensorV2()] \n )\n\nclass TrainTransform(ItemTransform):\n split_idx = 0\n def __init__(self, aug,split=0): \n self.aug = aug\n# self.split_idx = split\n def encodes(self, x):\n aug = self.aug(image=x[0].numpy())\n# print(torch.cat((aug['image0'],aug['image1']),axis=0).shape)\n return aug['image'], x[1]\nclass ValTransform(ItemTransform):\n split_idx = 1\n def __init__(self, aug,split=0): \n self.aug = aug\n# self.split_idx = split\n def encodes(self, x):\n aug = self.aug(image=x[0].numpy())\n# print(torch.cat((aug['image0'],aug['image1']),axis=0).shape)\n return aug['image'], x[1]\n\n# Create our class with this aug_pipe\naug = TrainTransform(aug_pipe)\naug2=ValTransform(val_pipe)", "_____no_output_____" ], [ "db = DataBlock(blocks=(TransformBlock(type_tfms=partial(MSTensorImage.create)), MultiCategoryBlock),\n splitter=ColSplitter('Isval'),\n get_x=ColReader('fname'),\n get_y=ColReader('label', label_delim=' '),\n item_tfms=[aug,aug2]\n )", "_____no_output_____" ], [ "dls = db.dataloaders(source=df, bs=2, num_workers=0)", "_____no_output_____" ], [ "aa,bb=first(dls.train)", "_____no_output_____" ], [ "aa.min()", "_____no_output_____" ], [ "from nbdev.export import notebook2script\n", "_____no_output_____" ], [ "notebook2script(fname='./00_core.ipynb')", "Converted 00_core.ipynb.\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
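The `get_label` conversion in the 00_core.ipynb record above collapses BigEarthNet's original label set onto the 19-class scheme by one-hot encoding the original labels and then activating a coarse class whenever any of its member labels is present. A minimal, self-contained Python sketch of that idea follows; the toy `label_conversion` mapping, the label count, and the function name are invented for illustration, while the real values come from the repository's label_indices.json.

import numpy as np

# Toy stand-ins (assumptions): 5 original labels collapsed into 3 coarse classes.
label_conversion = [[0, 1], [2], [3, 4]]
n_original = 5

def to_coarse_multi_hot(original_indices):
    # One-hot encode the original labels ...
    original = np.zeros(n_original, dtype=int)
    original[list(original_indices)] = 1
    # ... then a coarse class is active if any of its member labels is active.
    coarse = np.array([int(original[group].sum() > 0) for group in label_conversion], dtype=int)
    # The record stores the result as space-separated indices of the active classes.
    return " ".join(str(i) for i in np.flatnonzero(coarse))

print(to_coarse_multi_hot({1, 3}))  # -> "0 2"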
c520255bcd6a1c75df077cd0a3b5b664cf50663e
469,029
ipynb
Jupyter Notebook
notebooks/MCMC.ipynb
patricks1/MachineLearningStatistics
dbc4e55b2c9638b6b2814a34b87bc2c9d5fdddd4
[ "BSD-3-Clause" ]
null
null
null
notebooks/MCMC.ipynb
patricks1/MachineLearningStatistics
dbc4e55b2c9638b6b2814a34b87bc2c9d5fdddd4
[ "BSD-3-Clause" ]
null
null
null
notebooks/MCMC.ipynb
patricks1/MachineLearningStatistics
dbc4e55b2c9638b6b2814a34b87bc2c9d5fdddd4
[ "BSD-3-Clause" ]
null
null
null
351.859715
126,040
0.927975
[ [ [ "# Machine Learning and Statistics for Physicists", "_____no_output_____" ], [ "Material for a [UC Irvine](https://uci.edu/) course offered by the [Department of Physics and Astronomy](https://www.physics.uci.edu/).\n\nContent is maintained on [github](github.com/dkirkby/MachineLearningStatistics) and distributed under a [BSD3 license](https://opensource.org/licenses/BSD-3-Clause).\n\n[Table of contents](Contents.ipynb)", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "from sklearn import neighbors", "_____no_output_____" ] ], [ [ "## Markov Chain Monte Carlo", "_____no_output_____" ], [ "Markov-chain Monte Carlo (MCMC) is an algorithm to generate random samples from an un-normalized probability density. In other words, you want sample from $P(\\vec{z})$ but can only evaluate $f(\\vec{z})$ where\n$$\nP(\\vec{z}) = \\frac{f(\\vec{z})}{\\int d\\vec{z}\\,f(\\vec{z})} \\; .\n$$\nNote that $0 \\le P(\\vec{z}) \\le 1$ requires that $f(\\vec{z}) \\ge 0$ everywhere and that the integral has a non-zero finite value.", "_____no_output_____" ], [ "### Examples", "_____no_output_____" ], [ "We will start with some simple motivating examples before diving into the Bayesian applications and the theory of Markov chains.\n\nThe function\n$$\nf(z) = \\begin{cases}\n\\sqrt{1 - z^4} & |z| < 1 \\\\\n0 & |z| \\ge 1\n\\end{cases}\n$$\nis never negative and has a finite integral:", "_____no_output_____" ] ], [ [ "def plotf(zlim=1.2):\n z = np.linspace(-zlim, +zlim, 250)\n plt.plot(z, np.sqrt(np.maximum(0, 1 - z ** 4)))\n plt.xlim(-zlim, +zlim)\n\nplotf()", "_____no_output_____" ] ], [ [ "However, the normalization integral cannot be evaluated analytically (it is related to the [complete elliptic integral of the first kind](https://en.wikipedia.org/wiki/Elliptic_integral#Complete_elliptic_integral_of_the_first_kind)), so this is a good candidate for MCMC sampling using the MLS `MCMC_sample` function (which wraps [emcee](http://dfm.io/emcee/)):", "_____no_output_____" ] ], [ [ "from mls import MCMC_sample", "_____no_output_____" ], [ "def logf(z):\n return 0.5 * np.log(1 - z ** 4) if np.abs(z) < 1 else -np.inf\n\ngen = np.random.RandomState(seed=123)\nsamples = MCMC_sample(logf, z=[0], nsamples=20000, random_state=gen)", "_____no_output_____" ] ], [ [ "The notation `z=[0]` identifies `z` as the parameter we want to sample (starting at the value 0). 
The result is a Pandas DataFrame of generated samples:", "_____no_output_____" ] ], [ [ "samples[:5]", "_____no_output_____" ] ], [ [ "The generated samples are (approximately) drawn from the normalized $P(z)$ corresponding to the $f(z)$ provided:", "_____no_output_____" ] ], [ [ "plt.hist(samples['z'], range=(-1,1), bins=25);", "_____no_output_____" ] ], [ [ "<span style=\"color:limegreen\">What are MCMC samples good for?</span> They allow us to estimate the expectation value of an arbitrary $g(z)$ using [importance sampling](https://en.wikipedia.org/wiki/Importance_sampling):\n$$\n\\langle g(\\vec{z})\\rangle_P \\equiv \\int d\\vec{z}\\, g(\\vec{z})\\, P(\\vec{z})\n\\simeq \\frac{1}{N} \\sum_{i=1}^N g(\\vec{z}_i) \\; ,\n$$\nwhere $\\vec{z}_1, \\vec{z}_2, \\ldots$ are the MCMC samples.\n\nFor example, to estimate the expectation value of $g(z) = z^2$ (aka the variance) with the samples generated above:", "_____no_output_____" ] ], [ [ "np.mean(samples['z'] ** 2)", "_____no_output_____" ] ], [ [ "Expectation values of more complex functions are equally easy, for example, $g(z) = \\sin(\\pi z)^2$,", "_____no_output_____" ] ], [ [ "np.mean(np.sin(np.pi * samples['z']) ** 2)", "_____no_output_____" ] ], [ [ "Recall that the reason we are using MCMC is because <span style=\"color:limegreen\">we do not know the value of the normalization constant</span>:\n$$\n\\int d\\vec{z}\\,f(\\vec{z}) \\; .\n$$\nHowever, we can use MCMC samples to estimate its value as follows:\n - First, build an empirical estimate of the normalized probability density $P(\\vec{z})$ using any density estimation method.\n - Second, compare this density estimate (which is noisy, but normalized by construction) with the original un-normalized $f(\\vec{z})$: they should have the same shape and their ratio is the unknown normalization constant.\n\nFor example, use KDE to estimate the density of our generated samples:", "_____no_output_____" ] ], [ [ "fit = neighbors.KernelDensity(kernel='gaussian', bandwidth=0.01).fit(samples)", "_____no_output_____" ] ], [ [ "Now take the ratio of the (normalized and noisy) KDE density estimate and the (un-normalized and smooth) $f(z)$ on a grid of $z$ values:", "_____no_output_____" ] ], [ [ "def plotfit(zlim=1.2, Pmin=0.1):\n z = np.linspace(-zlim, +zlim, 250)\n f = np.sqrt(np.maximum(0, 1 - z ** 4))\n P = np.exp(fit.score_samples(z.reshape(-1, 1)))\n plt.plot(z, f, label='$f(z)$')\n plt.fill_between(z, P, alpha=0.5, label='$P(z)$')\n ratio = f / P\n sel = P > Pmin\n plt.plot(z[sel], ratio[sel], '.', label='$P(z)/f(z)$')\n mean_ratio = np.mean(ratio[sel])\n print('mean P(z)/f(z) = {:.3f}'.format(mean_ratio))\n plt.axhline(mean_ratio, ls='--', c='k')\n plt.xlim(-zlim, +zlim)\n plt.legend(loc='upper left', ncol=3)\n\nplotfit()", "mean P(z)/f(z) = 1.761\n" ] ], [ [ "The estimated $P(z)$ does not look great, but <span style=\"color:limegreen\">the mean</span> of $f(z) / P(z)$ estimates the normalization constant. In practice, we restrict this mean to $z$ values where $P(z)$ is above some minimum to avoid regions where the empirical density estimate is poorly determined.\n\nIn the example above, the true value of the integral rounds to 1.748 so our numerical accuracy is roughly 1%.", "_____no_output_____" ], [ "Note that <span style=\"color:limegreen\">we cannot simply use $g(z) = 1$</span> in the importance sampled integral above to estimate the normalization constant since it gives exactly one! 
The unknown constant is the integral of $f(z)$, not $P(z)$.", "_____no_output_____" ], [ "Next, we try a multidimensional example:\n$$\nf(\\vec{z}, \\vec{z}_0, r) =\n\\begin{cases}\n\\exp\\left(-|\\vec{z} - \\vec{z}_0|^2/2\\right) & |\\vec{z}| < r \\\\\n0 & |\\vec{z}| \\ge r\n\\end{cases}\n$$\nThis function describes an un-normalized Gaussian PDF centered at $\\vec{z}_0$ and clipped outside $|\\vec{z}| < r$. The normalization integral has no analytic solution except in the limits $\\vec{z}_0\\rightarrow 0$ or $r\\rightarrow\\infty$.\n\nTo generate MCMC samples in 2D:", "_____no_output_____" ] ], [ [ "def logf(x, y, x0, y0, r):\n z = np.array([x, y])\n z0 = np.array([x0, y0])\n return -0.5 * np.sum((z - z0) ** 2) if np.sum(z ** 2) < r ** 2 else -np.inf", "_____no_output_____" ] ], [ [ "The variables to sample are assigned initial values in square brackets and all other arguments are treated as fixed hyperparameters:", "_____no_output_____" ] ], [ [ "samples = MCMC_sample(logf, x=[0], y=[0], x0=1, y0=-2, r=3, nsamples=10000)", "_____no_output_____" ] ], [ [ "The generated samples now have two columns:", "_____no_output_____" ] ], [ [ "samples[:5]", "_____no_output_____" ] ], [ [ "A scatter plot shows a 2D Gaussian distribution clipped to a circle and offset from its center, as expected:", "_____no_output_____" ] ], [ [ "plt.scatter(samples['x'], samples['y'], s=10)\nplt.scatter(1, -2, marker='+', s=500, lw=5, c='white')\nplt.gca().add_artist(plt.Circle((0, 0), 3, lw=4, ec='red', fc='none'))\nplt.gca().set_aspect(1)", "_____no_output_____" ] ], [ [ "With multidimensional samples, we can estimate expectation values of <span style=\"color:limegreen\">marginal PDFs</span> just as easily as the full joint PDF. In our 2D example, the marginal PDFs are:\n$$\nP_X(x) = \\int dy\\, P(x, y) \\quad , \\quad P_Y(y) = \\int dx\\, P(x, y) \\; .\n$$\nFor example, the expectation value of $g(x)$ with respect to $P_X$ is:\n$$\n\\langle g\\rangle \\equiv \\int dx\\, g(x) P_X(x) = \\int dx\\, g(x) \\int dy\\, P(x, y) = \\int dx dy\\, g(x)\\, P(x,y) \\; .\n$$\nIn other words, the <span style=\"color:limegreen\">expectation value with respect to a marginal PDF</span> is equal to the <span style=\"color:limegreen\">expectation with respect to the full joint PDF</span>.\n\nFor example, the expectation value of $g(x) = x$ (aka the mean) with respect to $P_X(x)$ is:", "_____no_output_____" ], [ "samples x * samples y", "_____no_output_____" ] ], [ [ "np.mean(samples['x'])", "_____no_output_____" ] ], [ [ "We can also estimate the density of a marginal PDF by simply dropping the columns that are integrated out before plugging into a density estimator. For example:", "_____no_output_____" ] ], [ [ "fitX = neighbors.KernelDensity(kernel='gaussian', bandwidth=0.1).fit(samples.drop(columns='y'))\nfitY = neighbors.KernelDensity(kernel='gaussian', bandwidth=0.1).fit(samples.drop(columns='x'))", "_____no_output_____" ], [ "def plotfitXY(r=3):\n xy = np.linspace(-r, +r, 250)\n Px = np.exp(fitX.score_samples(xy.reshape(-1, 1)))\n Py = np.exp(fitY.score_samples(xy.reshape(-1, 1)))\n plt.plot(xy, Px, label='$P_X(x)$')\n plt.plot(xy, Py, label='$P_Y(y)$')\n plt.legend()\n \nplotfitXY()", "_____no_output_____" ] ], [ [ "### Bayesian Inference with MCMC", "_____no_output_____" ], [ "We introduced MCMC above as a general purpose algorithm for sampling any un-normalized PDF, without any reference to Bayesian (or frequentist) statistics. 
We also never specified whether $\\vec{z}$ was something observed (data) or latent (parameters and hyperparameters), because it doesn't matter to MCMC.\n\nHowever, MCMC is an excellent tool for performing numerical inferences using the generalized Bayes' rule we met earlier:\n$$\nP(\\Theta_M\\mid D, M) = \\frac{\\color{orange}{P(D\\mid \\Theta_M, M)}\\,\\color{purple}{P(\\Theta_M\\mid M)}}{P(D\\mid M)}\n$$\n- <span style=\"color:orange\">Liklihood</span>\n- <span style=\"color:purple\">Prior</span>\n\nIn particular, the normalizing denominator (aka the \"evidence\"):\n$$\nP(D\\mid M) = \\int d\\Theta_M' P(D\\mid \\Theta_M', M)\\, P(\\Theta_M'\\mid M)\n$$\nis often not practical to calculate, so we can only calculate the un-normalized numerator\n$$\nP(D\\mid \\Theta_M, M)\\,P(\\Theta_M\\mid M) \\; ,\n$$\nwhich combines the *likelihood of the data* and the *prior probability of the model*.\n\nIf we treat the observed data $D$ and hyperparameters $M$ as fixed, then the appropriate function to plug into an MCMC is:\n$$\n\\log f(\\Theta) = \\log P(D\\mid \\Theta_M, M) + \\log P(\\Theta_M\\mid M) \\; .\n$$\nThe machinery described above then enables us to generate samples $\\Theta_1, \\Theta_2, \\ldots$ drawn from the *posterior* distribution, and therefore make interesting statements about probabilities involving model parameters.\n\nThe likelihood function depends on the data and model, so could be anything, but we often assume Gaussian errors in the data, which leads to the multivariate Gaussian PDF we met earlier ($d$ is the number of data features):\n$$\nP(\\vec{x}\\mid \\Theta_M, M) =\n\\left(2\\pi\\right)^{-d/2}\\,\\left| C\\right|^{-1/2}\\,\n\\exp\\left[ -\\frac{1}{2} \\left(\\vec{x} - \\vec{\\mu}\\right)^T C^{-1} \\left(\\vec{x} - \\vec{\\mu}\\right) \\right]\n$$\nIn the most general case, $\\vec{\\mu}$ and $C$ are functions of everything: the data $D$, the parameters $\\Theta_M$ and the hyperparameters $M$.\n\nWhen we have $N$ independent observations, $\\vec{x}_1, \\vec{x}_2, \\ldots$, their combined likelihood is the product of each sample's likelihood:\n$$\nP(\\vec{x}_1, \\vec{x}_2, \\ldots\\mid \\Theta_M, M) = \\prod_{i=1}^N\\, P(\\vec{x}_i\\mid \\Theta_M, M)\n$$", "_____no_output_____" ], [ "As an example, consider fitting a straight line $y = m x + b$, with parameters $m$ and $b$, to data with two features $x$ and $y$. The relevant log-likelihood function is:\n$$\n\\log{\\cal L}(m, b; D) = -\\frac{N}{2}\\log(2\\pi\\sigma_y^2)\n-\\frac{1}{2\\sigma_y^2} \\sum_{i=1}^N\\, (y_i - m x_i - b)^2 \\; ,\n$$\nwhere the error in $y$, $\\sigma_y$, is a fixed hyperparameter. 
Note that the first term is the Gaussian PDF normalization factor.\n\nFirst generate some data on a straight line with measurement errors in $y$ (so our assumed model is correct):", "_____no_output_____" ] ], [ [ "gen = np.random.RandomState(seed=123)\nN, m_true, b_true, sigy_true = 10, 0.5, -0.2, 0.1\nx_data = gen.uniform(-1, +1, size=N)\ny_data = m_true * x_data + b_true + gen.normal(scale=sigy_true, size=N)\n\nplt.errorbar(x_data, y_data, sigy_true, fmt='o', markersize=5)\nplt.plot([-1, +1], [-m_true+b_true,+m_true+b_true], 'r:')\nplt.xlabel('x'); plt.ylabel('y');", "_____no_output_____" ] ], [ [ "Next, define the log-likelihood function:", "_____no_output_____" ] ], [ [ "def loglike(x, y, m, b, sigy):\n N = len(x)\n norm = 0.5 * N * np.log(2 * np.pi * sigy ** 2)\n return -0.5 * np.sum((y - m * x - b) ** 2) / sigy ** 2 - norm", "_____no_output_____" ] ], [ [ "Finally, <span style=\"color:limegreen\">generate some MCMC samples of the posterior $P(m, b\\mid D, M)$</span> assuming uniform priors $P(b,m\\mid \\sigma_y) = 1$:", "_____no_output_____" ] ], [ [ "samples = MCMC_sample(loglike, m=[m_true], b=[b_true],\n x=x_data, y=y_data, sigy=sigy_true, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "sns.jointplot('m', 'b', samples, xlim=(0.2,0.8), ylim=(-0.3,0.0), stat_func=None);", "_____no_output_____" ], [ "samples.describe(percentiles=[])", "_____no_output_____" ] ], [ [ "**EXERCISE:** We always require a starting point to generate MCMC samples. In this example, we used the true parameter values as starting points:\n```\nm=[m_true], b=[b_true]\n```\nWhat happens if you chose different starting points? Try changing the starting values by $\\pm 0.1$ and see how this affects the resulting means and standard deviations for $m$ and $b$.", "_____no_output_____" ] ], [ [ "samples = MCMC_sample(loglike, m=[m_true+0.1], b=[b_true+0.1],\n x=x_data, y=y_data, sigy=sigy_true, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "samples.describe(percentiles=[])", "_____no_output_____" ], [ "samples = MCMC_sample(loglike, m=[m_true-0.1], b=[b_true-0.1],\n x=x_data, y=y_data, sigy=sigy_true, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "samples.describe(percentiles=[])", "_____no_output_____" ] ], [ [ "The changes are small compared with the offsets ($\\pm 0.1$) and the standard deviations in each parameter.", "_____no_output_____" ] ], [ [ "# Add your solution here...", "_____no_output_____" ] ], [ [ "The `MCMC_sample` function can apply independent (i.e., factorized) priors on each parameter:\n$$\nP(\\Theta\\mid M) = \\prod_j P(\\theta_j\\mid M)\n$$\nDefine the two most commonly used independent priors:", "_____no_output_____" ] ], [ [ "def TopHat(lo, hi):\n \"\"\"Return un-normalized log(prior) for x in [lo,hi]\"\"\"\n return lambda x: 0 if (lo <= x <= hi) else -np.inf", "_____no_output_____" ], [ "def Gauss(mu, sigma):\n \"\"\"Return un-normalized log(prior) for x ~ N(mu,sigma)\"\"\"\n return lambda x: -0.5 * ((x - mu) / sigma) ** 2", "_____no_output_____" ] ], [ [ "To apply a prior, we replace `z=[value]` with `z=[value,logprior]`. For example, suppose we believe that $0.4 \\le m \\le 0.7$:", "_____no_output_____" ] ], [ [ "samples = MCMC_sample(loglike, m=[m_true,TopHat(0.4,0.7)], b=[b_true],\n x=x_data, y=y_data, sigy=sigy_true, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "sns.jointplot('m', 'b', samples, xlim=(0.2,0.8), ylim=(-0.3,0.0), stat_func=None);", "_____no_output_____" ] ], [ [ "We can also add a prior on $b$. 
For example, suppose a previous measurement found $b = -0.20 \\pm 0.02$ (in which case, the new data is not adding much information about $b$):", "_____no_output_____" ] ], [ [ "samples = MCMC_sample(loglike, m=[m_true,TopHat(0.4,0.7)], b=[b_true,Gauss(-0.20,0.02)],\n x=x_data, y=y_data, sigy=sigy_true, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "sns.jointplot('m', 'b', samples, xlim=(0.2,0.8), ylim=(-0.3,0.0), stat_func=None);", "_____no_output_____" ] ], [ [ "**EXERCISE:** Suppose we know that all $y_i$ values have the same error $\\sigma_y$ but we do not know its value.\n - Generate samples of $(m, b, \\sigma_y)$ using `m=[m_true], b=[b_true], sigy=[sigy_true]`.\n - Look at the samples with an `sns.pairplot`.\n - Which panel shows the marginalized posterior $P(\\sigma_y\\mid D)$? Do you understand its peculiar shape?\n - Add a prior on $\\sigma_y$ to fix this peculiar shape.", "_____no_output_____" ] ], [ [ "gen = np.random.RandomState(seed=123)\nsamples = MCMC_sample(loglike, m=[m_true], b=[b_true], sigy=[sigy_true],\n x=x_data, y=y_data, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "sns.pairplot(samples);", "_____no_output_____" ], [ "samples = MCMC_sample(loglike, m=[m_true], b=[b_true], sigy=[sigy_true, TopHat(0.01,1)],\n x=x_data, y=y_data, nsamples=10000, random_state=gen)", "_____no_output_____" ], [ "sns.pairplot(samples);", "_____no_output_____" ], [ "# Add your solution here...", "_____no_output_____" ] ], [ [ "For a more in-depth case study of the many subtleties in fitting a straight line, read this 55-page [article by Hogg, Bovy and Lang](https://arxiv.org/abs/1008.4686).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
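The MCMC.ipynb record above notes that the normalization constant of f(z) = sqrt(1 - z^4) has no elementary closed form and quotes roughly 1.748 for its value. As a quick numerical cross-check of that number, here is a plain Monte Carlo estimate of the same integral using only NumPy; the sample size and seed are arbitrary choices for the illustration.

import numpy as np

rng = np.random.default_rng(0)
z = rng.uniform(-1.0, 1.0, size=200_000)        # uniform draws over the support [-1, 1]
f = np.sqrt(np.clip(1.0 - z**4, 0.0, None))     # f(z), clipped for numerical safety
estimate = 2.0 * f.mean()                       # interval length times the mean of f
print(f"integral of f ~= {estimate:.3f}")       # close to the quoted 1.748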
c5202562f080cec37c23dacefe2209f74f3d663a
15,731
ipynb
Jupyter Notebook
docs/source/offline_analysis.ipynb
Jedite/CommunicationWithoutMovement
7de5da4cac5f4ecbd140b2eee32a2244e54981c8
[ "MIT" ]
44
2020-02-07T15:01:47.000Z
2022-03-21T14:36:15.000Z
docs/source/offline_analysis.ipynb
Jedite/CommunicationWithoutMovement
7de5da4cac5f4ecbd140b2eee32a2244e54981c8
[ "MIT" ]
17
2020-02-07T17:11:23.000Z
2022-02-20T18:01:42.000Z
docs/source/offline_analysis.ipynb
Jedite/CommunicationWithoutMovement
7de5da4cac5f4ecbd140b2eee32a2244e54981c8
[ "MIT" ]
19
2020-02-07T17:13:22.000Z
2022-03-17T01:22:35.000Z
55.586572
594
0.661624
[ [ [ "# Offline analysis of a [mindaffectBCI](https://github.com/mindaffect) savefile\n\nSo you have successfully run a BCI experiment and want to have a closer look at the data, and try different analysis settings? \n\nOr you have a BCI experiment file from the internet, e.g. MOABB, and want to try it with the mindaffectBCI analysis decoder? \n\nThen you want to do an off-line analysis of this data! \n\nThis notebook shows how to such a quick post-hoc analysis of a previously saved dataset. By the end of this tutorial you will be able to:\n * Load a mindaffectBCI savefile\n * generate summary plots which show; the per-channel grand average spectrum, the data-summary statistics, per-trial decoding results, the raw stimulus-resonse ERPs, the model as trained by the decoder, the per-trial BCI performance plots.\n * understand how to use these plots to identify problems in the data (such as artifacts, excessive line-noise) or the BCI operation\n * understand how to change analysis parameters and the used classifier to develop improved decoders", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom mindaffectBCI.decoder.analyse_datasets import debug_test_dataset\nfrom mindaffectBCI.decoder.offline.load_mindaffectBCI import load_mindaffectBCI\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\nplt.rcParams['figure.figsize'] = [12, 8] # bigger default figures", "_____no_output_____" ] ], [ [ "## Specify the save file you wish to analyse.\n\nYou can either specify:\n * the full file name to load, e.g. '~/Downloads/mindaffectBCI_200901_1154.txt'\n * a wildcard filename, e.g. '~/Downloads/mindaffectBCI*.txt', in which case the **most recent** matching file will be loaded.\n * `None`, or '-', in which case the most recent file from the default `logs` directory will be loaded.", "_____no_output_____" ] ], [ [ "# select the file to load\n#savefile = '~/../../logs/mindaffectBCI_200901_1154_ssvep.txt'\nsavefile = None # use the most recent file in the logs directory\nsavefile = 'mindaffectBCI_exampledata.txt'", "_____no_output_____" ] ], [ [ "## Load the *RAW*data\n\nLoad, with minimal pre-processing to see what the raw signals look like. Note: we turn off the default filtering and downsampling with `stopband=None, fs_out=None` to get a true raw dataset.\n\nIt will then plot the grand-aver-spectrum of this raw data. This plot shows for each EEG channel the signal power across different signal frequencies. This is useful to check for artifacts (seen as peaks in the spectrum at specific frequencies, such as 50hz), or bad-channels (seen as channels with excessively high or low power in general.)\n\nDuring loading the system will print some summary information about the loaded data and preprocessing applied. Including:\n * The filter and downsampling applied\n * The number of trails in the data and their durations\n * The trail data-slice used, measured relative to the trial start event\n * The EEG and STIMULUS meta-information, in terms of the array shape, e.g. (13,575,4) and the axis labels, e.g. (trials, time, channels) respectively. 
\n ", "_____no_output_____" ] ], [ [ "X, Y, coords = load_mindaffectBCI(savefile, stopband=None, fs_out=None)\n# output is: X=eeg, Y=stimulus, coords=meta-info about dimensions of X and Y\nprint(\"EEG: X({}){} @{}Hz\".format([c['name'] for c in coords],X.shape,coords[1]['fs'])) \nprint(\"STIMULUS: Y({}){}\".format([c['name'] for c in coords[:-1]]+['output'],Y.shape))\n# Plot the grand average spectrum to get idea of the signal quality\nfrom mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum\nplot_grand_average_spectrum(X, fs=coords[1]['fs'], ch_names=coords[-1]['coords'], log=True)", "_____no_output_____" ] ], [ [ "## Reload the data, with standard preprocessing.\n\nThis time, we want to analysis the loaded data for the BCI signal. Whilst we can do this after loading, to keep the analysis as similar as possible to the on-line system where the decoder only sees pre-processed data, we will reload wand apply the pre-processing directly. This also has the benefit of making the datafile smaller. \n\nTo reproduce the pre-processing done in the on-line BCI we will set the pre-processing to:\n * temporally filter the data to the BCI relevant range. Temporal filtering is a standard technique to remove sigal frequencies which we know only contain noise. For the noise-tag brain response we know it is mainly in the frequency range from 3 to about 25 Hz. Thus, we specifcy a bandpass filter to only retain these frequencies with: \n `stopband=(3,25,'bandpass')`\n * The orginal EEG is sampled 250 times per second. However, the BCI relevant signal changes at most at 25 times per second, thus the EEG is sampled much more rapidly than needed -- so processing it takes undeeded computational resources. Thus, we downsmaple the data to save some computation. To avoid signal-artifacts, as a general 'rule of thumb' you should downsample to about 3 times your maximum signal frequency. In this case we use an output sample rate of 4 times, or 100 h with:\n `fs_out=100`", "_____no_output_____" ] ], [ [ "X, Y, coords = load_mindaffectBCI(savefile, stopband=(3,25,'bandpass'), fs_out=100)\n# output is: X=eeg, Y=stimulus, coords=meta-info about dimensions of X and Y\nprint(\"EEG: X({}){} @{}Hz\".format([c['name'] for c in coords],X.shape,coords[1]['fs'])) \nprint(\"STIMULUS: Y({}){}\".format([c['name'] for c in coords[:-1]]+['output'],Y.shape))", "_____no_output_____" ] ], [ [ "## Analyse the data\n\nThe following code runs the standard initial analysis and data-set visualization, in one go with some standard analysis parameters:\n\n * tau_ms : the length of the modelled stimulus response (in milliseconds)\n \n * evtlabs : the type of brain feaatures to transform the stimulus information into prior to fitting the model in this case \n * 're' -> rising edge \n * 'fe' -> falling edge\n \n see `stim2event.py` for more information on possible transformations\n * rank : the rank of the CCA model to fit\n \n * model : the type of model to fit. 'cca' corrospends to the Cannonical Correlation Analysis model.", "_____no_output_____" ], [ "This generates many visualizations. The most important are:\n\n 1. **Summary Statistics**: Summary statistics for the data with, \n This has vertically 3 sub-parts.\n row 1: Cxx : this is the spatial cross-correlation of the EEG channels\n row 2: Cxy : this is the cross-correlation of the stimulus with the EEG. 
Which for discrete stimuli as used in this BCI is essentially another view of the ERP.\n row 3: Cyy : the auto-cross covariance of the stimulus features with the other (time-delayed) stimulus features\n\n <img src='images/SummaryStatistics.png' width=200>\n\n 2. **Grand Average Spectrum** : This shows for each data channel the power over different signal frequencies. This is useful to identify artifacts in the data, which tend to show up as peaks in the spectrum at different frequencies, e.g. high power below 3Hz indicate movement artifacts, high power at 50/60hz indicates excessive line-noise interference. \n\n <img src='images/GrandAverageSpectrum.png' width=200>\n\n\n 3. **ERP** : This plot shows for each EEG channel the averaged measured response over time after the triggering stimulus. This is the conventional plot that you find in many neuroscientific publications.\n\n <img src='images/ERP.png' width=200>\n\n 4. **Decoding Curve** + **Yerr** : The decoder accumulates information during a trial to make it's predictions better. These pair of plots show this information as a 'decoding curve' which shows two important things:\n a) **Yerr** : which is the **true** error-rate of the systems predictions, with increasing trial time. \n b) **Perr** : which is the systems own **estimation** of it's prediction error. This estimation is used by the system to identify when it is confident enough to make a selection and stop a trial early. Thus, this should ideally be as accurate as possible, so it's near 1 when Yerr is 1 and near 0 when Yerr is 0. In the DecodingCurve plot Perr is shown by colored dots, with red being Yerr=1 and green being Yerr=0. Thus, if the error estimates are good you should see red dots at the top left (wrong with short trials) and green dots at the bottom right (right with more data).\n\n <img src='images/DecodingCurve.png' width=200> <img src='images/Ycorrect.png' width=200>\n\n 5. **Trial Summary** : This plot gives a direct trial-by-trial view of the input data and the BCI performance. With each trial plotted individually running from left to right top to bottom.\n <img src='images/TrialSummary.png' width=400>\n \n Zooming in on a single trial, we see that vertically it has 5 sub-parts:\n \n a) **X** : this is the pre-processed input EEG data, with time horizontially, and channels with different colored lines vertically. \n \n b) **Y** : this is the raw stimulus information, with time horizontially and outputs vertically.\n \n c) **Fe** : this is the systems predicted score for each type of stimulus-event, generated by applying the model to the raw EEG (e.g. 're','fe')\n \n d) **Fy** : this is the systems _accumulated_ predicted score for each output, generated by combining the predicted stimulus scores with the stimulus information. Here the **true** output is in black with the other outputs in grey. Thus, if the system is working correctly, the true output has the highest score and will be the highest line.\n \n e) **Py** : this is the systems **estimated** target probability for each output, generated by softmaxing the Fy scores. Again, the true target is in black with the others in grey. So if the system is working well the black line is near 0 when it's incorrect, and then jumps to 1 when it is correct.\n \n <img src='images/TrialSummary_single.png' width=200>\n\n 6. 
*Model*: plot of the fitted model, in two sub-plots with: a) the fitted models spatial-filter -- which shows the importance of each EEG channel, b) the models impulse response -- which shows how the brain responds over time to the different types of stimulus event\n\n <img src='images/ForwardModel.png' width=200>", "_____no_output_____" ] ], [ [ "clsfr=debug_test_dataset(X, Y, coords,\n model='cca', evtlabs=('re','fe'), rank=1, tau_ms=450)", "_____no_output_____" ] ], [ [ "## Alternative Analyse\nThe basic analysis system has many parameters you can tweak to test different analysis methods. The following code runs the standard initial analysis and data-set visualization, in one go with some standard analysis parameters:\n\ntau_ms : the length of the modelled stimulus response (in milliseconds)\nevtlabs : the type of brain feaatures to transform the stimulus information into prior to fitting the model in this case\n're' -> rising edge\n'fe' -> falling edge see stim2event.py for more information on possible transformations\nrank : the rank of the CCA model to fit\nmodel : the type of model to fit. 'cca' corrospends to the Cannonical Correlation Analysis model.\n other options include: \n * 'ridge' = ridge-regression, \n * 'fwd' = Forward Modelling, \n * 'bwd' = Backward Modelling, \n * 'lr' = logistic-regression, \n * 'svc' = support vector machine\n \nSee the help for `mindaffectBCI.decoder.model_fitting.BaseSequence2Sequence` or `mindaffectBCI.decoder.analyse_datasets.analyse_dataset` for more details on the other options.\n", "_____no_output_____" ], [ "Here we use a Logistic Regression classifier to classify single stimulus-responses into rising-edge (re) or falling-edge (fe) responses.\n\nNote: we also include some additional pre-processing in this case, which consists of:\n * **whiten** : this will do a spatial whitening, so that the data input to the classifier is **spatially** decorrelated. This happens automatically with the CCA classifier, and has been found useful to suppress artifacts in the data.\n * **whiten_spectrum** : this will approximately decorrelate different frequencies in the data. In effect this flattens the peaks and troughs in the data frequency spectrum. This pre-processing also been found useful to suppress artifacts in the data.\n\nFurther, as this is now a classification problem, we set `ignore_unlabelled=True`. This means that samples which are not either rising edges or falling edges will not be given to the classifier -- so in the end we train a simple binary classifier.", "_____no_output_____" ] ], [ [ "# test different inner classifier. Here we use a Logistic Regression classifier to classify single stimulus-responses into rising-edge (re) or falling-edge (fe) responses. \ndebug_test_dataset(X, Y, coords,\n preprocess_args=dict(badChannelThresh=3, badTrialThresh=None, whiten=.01, whiten_spectrum=.1),\n model='lr', evtlabs=('re', 'fe'), tau_ms=450, ignore_unlabelled=True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
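The offline_analysis.ipynb record above reloads the EEG with `stopband=(3,25,'bandpass')` and `fs_out=100`, i.e. a 3-25 Hz band-pass followed by downsampling from 250 Hz to 100 Hz, but delegates both steps to `load_mindaffectBCI`. The sketch below shows one way such a step could look with SciPy on a generic samples-by-channels array; the filter order, the synthetic data, and the function name are assumptions made for this example, not the library's actual implementation.

import numpy as np
from fractions import Fraction
from scipy.signal import butter, sosfiltfilt, resample_poly

def bandpass_and_downsample(X, fs_in=250, fs_out=100, band=(3, 25)):
    # Zero-phase 3-25 Hz band-pass along the time axis.
    sos = butter(4, band, btype="bandpass", fs=fs_in, output="sos")
    X = sosfiltfilt(sos, X, axis=0)
    # Resample by the rational ratio fs_out / fs_in (here 100/250 = 2/5).
    ratio = Fraction(fs_out, fs_in)
    return resample_poly(X, up=ratio.numerator, down=ratio.denominator, axis=0)

X_raw = np.random.randn(2500, 4)              # 10 s of fake 4-channel EEG at 250 Hz
print(bandpass_and_downsample(X_raw).shape)   # (1000, 4)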
c5203ab529c23845a64d47acdc2c3ae87e84081b
1,293
ipynb
Jupyter Notebook
notebooks/Sample Dataset.ipynb
roycek7/Analyzing-the-Factors-Affecting-Delivery-Time
1b0a0164834fd112882151780edde7a118b751c8
[ "MIT" ]
null
null
null
notebooks/Sample Dataset.ipynb
roycek7/Analyzing-the-Factors-Affecting-Delivery-Time
1b0a0164834fd112882151780edde7a118b751c8
[ "MIT" ]
null
null
null
notebooks/Sample Dataset.ipynb
roycek7/Analyzing-the-Factors-Affecting-Delivery-Time
1b0a0164834fd112882151780edde7a118b751c8
[ "MIT" ]
null
null
null
21.55
84
0.532869
[ [ [ "import pandas as pd\nimport random", "_____no_output_____" ], [ "p = 0.1 # 10% of the lines\n# keep the header, then take only 10% of lines\n# if random from [0,1] interval is greater than 0.1 the row will be skipped\ndf = pd.read_csv(\n '../data/interim/consolidated_ecommerce_olist_1.csv',\n header=0, \n skiprows=lambda i: i>0 and random.random() > p\n)\ndf.to_csv('../data/interim/consolidated_ecommerce_olist_1_sample.csv')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
c5203f850e39844a949cc35eb6d809c43ce32915
8,933
ipynb
Jupyter Notebook
src/ionotomo/notebooks/FermatClass.ipynb
Joshuaalbert/IonoTomo
9f50fbac698d43a824dd098d76dce93504c7b879
[ "Apache-2.0" ]
7
2017-06-22T08:47:07.000Z
2021-07-01T12:33:02.000Z
src/ionotomo/notebooks/FermatClass.ipynb
Joshuaalbert/IonoTomo
9f50fbac698d43a824dd098d76dce93504c7b879
[ "Apache-2.0" ]
1
2019-04-03T15:21:19.000Z
2019-04-03T15:48:31.000Z
src/ionotomo/notebooks/FermatClass.ipynb
Joshuaalbert/IonoTomo
9f50fbac698d43a824dd098d76dce93504c7b879
[ "Apache-2.0" ]
2
2020-03-01T16:20:00.000Z
2020-07-07T15:09:02.000Z
40.238739
121
0.397627
[ [ [ "import numpy as np\nfrom scipy.integrate import odeint\nfrom TricubicInterpolation import TriCubic\n\nclass Fermat(object):\n def __init__(self,neTCI=None,frequency = 120e6,type='s',straightLineApprox=True):\n '''Fermat principle. type = \"s\" means arch length is the indepedent variable\n type=\"z\" means z coordinate is the independent variable.'''\n self.type = type\n self.frequency = frequency#Hz\n self.straightLineApprox = straightLineApprox\n if neTCI is not None:\n self.ne2n(neTCI) \n return\n \n def loadFunc(self,file):\n '''Load the model given in `file`'''\n data = np.load(file)\n if 'ne' in data.keys():\n ne = data['ne']\n xvec = data['xvec']\n yvec = data['yvec']\n zvec = data['zvec']\n self.ne2n(TriCubic(xvec,yvec,zvec,ne,useCache=True))\n return\n if 'n' in data.keys():\n ne = data['n']\n xvec = data['xvec']\n yvec = data['yvec']\n zvec = data['zvec']\n self.n2ne(TriCubic(xvec,yvec,zvec,n,useCache=True))\n return\n \n def saveFunc(self,file):\n np.savez(file,xvec=self.nTCI.xvec,yvec=self.nTCI.yvec,zvec=self.nTCI.zvec,n=self.nTCI.m,ne=self.neTCI.m)\n \n def ne2n(self,neTCI):\n '''Analytically turn electron density to refractive index. Assume ne in m^-3'''\n self.neTCI = neTCI\n #copy object\n self.nTCI = neTCI.copy(default=1.)\n #inplace change to refractive index\n self.nTCI.m *= -8.980**2/self.frequency**2\n self.nTCI.m += 1.\n self.nTCI.m = np.sqrt(self.nTCI.m)\n #wp = 5.63e4*np.sqrt(ne/1e6)/2pi#Hz^2 m^3 lightman p 226\n return self.nTCI\n \n def n2ne(self,nTCI):\n \"\"\"Get electron density in m^-3 from refractive index\"\"\"\n self.nTCI = nTCI\n #convert to \n self.neTCI = nTCI.copy()\n self.neTCI.m *= -self.neTCI.m\n self.neTCI.m += 1.\n self.neTCI.m *= self.frequency**2/8.980**2\n #wp = 5.63e4*np.sqrt(ne/1e6)/2pi#Hz^2 m^3 lightman p 226\n return self.neTCI\n \n def eulerODE(self,y,t,*args):\n '''return pxdot,pydot,pzdot,xdot,ydot,zdot,sdot'''\n #print(y)\n px,py,pz,x,y,z,s = y\n if self.straightLineApprox:\n n,nx,ny,nz = 1.,0,0,0\n else:\n n,nx,ny,nz,nxy,nxz,nyz,nxyz = self.nTCI.interp(x,y,z,doDiff=True)\n #from ne\n #ne,nex,ney,nez,nexy,nexz,neyz,nexyz = self.neTCI.interp(x,y,z,doDiff=True)\n #A = - 8.98**2/self.frequency**2\n #n = math.sqrt(1. + A*ne)\n #ndot = A/(2.*n)\n #nx = ndot * nex\n #ny = ndot * ney\n #nz = ndot * nez\n if self.type == 'z':\n sdot = n / pz\n pxdot = nx*n/pz\n pydot = ny*n/pz\n pzdot = nz*n/pz\n\n xdot = px / pz\n ydot = py / pz\n zdot = 1.\n \n if self.type == 's':\n sdot = 1.\n pxdot = nx\n pydot = ny\n pzdot = nz\n\n xdot = px / n\n ydot = py / n\n zdot = pz / n\n \n return [pxdot,pydot,pzdot,xdot,ydot,zdot,sdot]\n \n def jacODE(self,y,t,*args):\n '''return d ydot / d y, with derivatives down columns for speed'''\n px,py,pz,x,y,z,s = y\n if self.straightLineApprox:\n n,nx,ny,nz,nxy,nxz,nyz = 1.,0,0,0,0,0,0\n else:\n n,nx,ny,nz,nxy,nxz,nyz,nxyz = self.nTCI.interp(x,y,z,doDiff=True)\n #TCI only gaurentees C1 and C2 information is lost, second order anyways\n nxx,nyy,nzz = 0.,0.,0.\n #from electron density\n #ne,nex,ney,nez,nexy,nexz,neyz,nexyz = self.neTCI.interp(x,y,z,doDiff=True)\n #A = - 8.98**2/self.frequency**2\n #n = math.sqrt(1. + A*ne)\n #ndot = A/(2.*n)\n #nx = ndot * nex\n #ny = ndot * ney\n #nz = ndot * nez\n #ndotdot = -(A * ndot)/(2. 
* n**2)\n #nxy = ndotdot * nex*ney + ndot * nexy\n #nxz = ndotdot * nex * nez + ndot * nexz\n #nyz = ndotdot * ney * nez + ndot * neyz \n if self.type == 'z':\n x0 = n\n x1 = nx\n x2 = pz**(-2)\n x3 = x0*x2\n x4 = 1./pz\n x5 = ny\n x6 = x4*(x0*nxy + x1*x5)\n x7 = nz\n x8 = x4*(x0*nxz + x1*x7)\n x9 = x4*(x0*nyz + x5*x7)\n jac = np.array([[ 0, 0, -x1*x3, x4*(x0*nxx + x1**2),x6, x8, 0.],\n [ 0, 0, -x3*x5,x6, x4*(x0*nyy + x5**2), x9, 0.],\n [ 0, 0, -x3*x7,x8, x9, x4*(x0*nzz + x7**2), 0.],\n [x4, 0, -px*x2, 0, 0, 0, 0.],\n [ 0, x4, -py*x2, 0, 0, 0, 0.],\n [ 0, 0, 0, 0, 0, 0, 0.],\n [ 0, 0,-x3,x1*x4, x4*x5, x4*x7, 0.]])\n \n if self.type == 's':\n x0 = n\n x1 = nxy\n x2 = nxz\n x3 = nyz\n x4 = 1./x0\n x5 = nx\n x6 = x0**(-2)\n x7 = px*x6\n x8 = ny\n x9 = nz\n x10 = py*x6\n x11 = pz*x6\n jac = np.array([[ 0, 0, 0, nxx, x1, x2, 0.],\n [ 0, 0, 0, x1, nyy, x3, 0.],\n [ 0, 0, 0, x2, x3, nzz, 0.],\n [x4, 0, 0, -x5*x7, -x7*x8, -x7*x9, 0.],\n [ 0, x4, 0, -x10*x5, -x10*x8, -x10*x9, 0.],\n [ 0, 0, x4, -x11*x5, -x11*x8, -x11*x9, 0.],\n [ 0, 0, 0, 0, 0, 0, 0.]])\n return jac\n \n def integrateRay(self,origin,direction,tmax,N=100):\n '''Integrate ray defined by the ``origin`` and ``direction`` along the independent variable (s or z)\n until tmax. \n ``N`` - the number of partitions along the ray to save ray trajectory.'''\n x0,y0,z0 = origin\n xdot0,ydot0,zdot0 = direction\n sdot = np.sqrt(xdot0**2 + ydot0**2 + zdot0**2)\n #momentum\n px0 = xdot0/sdot\n py0 = ydot0/sdot\n pz0 = zdot0/sdot\n #px,py,pz,x,y,z,s\n init = [px0,py0,pz0,x0,y0,z0,0]\n if self.type == 'z':\n tarray = np.linspace(z0,tmax,N)\n if self.type == 's':\n tarray = np.linspace(0,tmax,N)\n Y,info = odeint(self.eulerODE, init, tarray,Dfun = self.jacODE, col_deriv = True, full_output=1)\n #print(info['hu'].shape,np.sum(info['hu']),info['hu'])\n #print(Y)\n x = Y[:,3]\n y = Y[:,4]\n z = Y[:,5]\n s = Y[:,6]\n return x,y,z,s \n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
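FermatClass.ipynb above converts between electron density and refractive index in place inside `ne2n`/`n2ne`, using n = sqrt(1 - 8.980^2 * ne / f^2) with ne in m^-3 and the frequency in Hz. Pulled out of the class, the same relation looks like the small functions below; the standalone function names and the sample density value are this example's own, and the printout only shows that the two directions invert each other.

import numpy as np

def ne_to_n(ne, frequency=120e6):
    # Plasma refractive index: n = sqrt(1 - (8.980^2 / f^2) * ne), ne in m^-3, f in Hz.
    return np.sqrt(1.0 - (8.980**2 / frequency**2) * ne)

def n_to_ne(n, frequency=120e6):
    # Inverse of the above: ne = (1 - n^2) * f^2 / 8.980^2.
    return (1.0 - n**2) * frequency**2 / 8.980**2

ne = 1.0e12                     # electrons per m^3 (arbitrary test value)
n = ne_to_n(ne)
print(n, n_to_ne(n))            # n slightly below 1; the second value recovers ~1e12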
c520424a3a4343c9019a0b9f3ddfb95dfa612ba8
663,310
ipynb
Jupyter Notebook
Módulo Desafio/desafio_final.ipynb
edson-venancio/bootcamp-machine-learning
e98718062560831cd2a99d226390d1989d19b9d7
[ "MIT" ]
null
null
null
Módulo Desafio/desafio_final.ipynb
edson-venancio/bootcamp-machine-learning
e98718062560831cd2a99d226390d1989d19b9d7
[ "MIT" ]
null
null
null
Módulo Desafio/desafio_final.ipynb
edson-venancio/bootcamp-machine-learning
e98718062560831cd2a99d226390d1989d19b9d7
[ "MIT" ]
null
null
null
212.735728
230,896
0.905287
[ [ [ "## Desafio Final", "_____no_output_____" ] ], [ [ "# imports de avisos\nimport sys\nimport warnings\nimport matplotlib.cbook\nwarnings.simplefilter(\"ignore\")\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", category=matplotlib.cbook.mplDeprecation)\n\n# imports para manipulação de dados\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport statsmodels.api as sm\nimport math\nimport itertools\n\n# imports para visualização de dados\nimport matplotlib.pyplot as plt\nimport matplotlib as m\nimport matplotlib.dates as mdates\nfrom matplotlib.ticker import MaxNLocator\nimport seaborn as sns\nimport plotly as py\nimport plotly.express as px\nimport plotly.graph_objs as go \nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\npd.options.display.max_columns = 2000\npd.options.display.max_rows = 2000", "_____no_output_____" ], [ "# função para criar um gráfico de distribuição para cada feature do dataset\ndef plot_distribution(dataset, cols=5, width=20, height=25, hspace=0.4, wspace=0.5):\n fig = plt.figure(figsize=(width, height))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=wspace, hspace=hspace)\n rows = math.ceil(float(dataset.shape[1]) / cols)\n for i, column in enumerate(dataset.columns):\n ax = fig.add_subplot(rows, cols, i + 1)\n ax.set_title(column)\n if dataset.dtypes[column] == np.object:\n g = sns.countplot(y=column, \n data=dataset,\n order=dataset[column].value_counts().index[:10])\n substrings = [s.get_text()[:20] for s in g.get_yticklabels()]\n g.set(yticklabels=substrings)\n plt.xticks(rotation=25)\n else:\n g = sns.distplot(dataset[column])\n plt.xticks(rotation=25) \n \n# função para calcular o coeficiente de correlação entre duas variáveis\ndef rsquared(x, y):\n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)\n return r_value**2 ", "_____no_output_____" ], [ "# carregando o dataset\ndf_cars = pd.read_csv('cars.csv')", "_____no_output_____" ], [ "# apresentando as 5 primeiras linhas do dataset\ndf_cars.head()", "_____no_output_____" ], [ "# mostrando as dimensões do dataset\ndf_cars.shape", "_____no_output_____" ], [ "# verificando os tipos de variáveis e se existem ou não valores nulos\ndf_cars.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 261 entries, 0 to 260\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 mpg 261 non-null float64\n 1 cylinders 261 non-null int64 \n 2 cubicinches 261 non-null object \n 3 hp 261 non-null int64 \n 4 weightlbs 261 non-null object \n 5 time-to-60 261 non-null int64 \n 6 year 261 non-null int64 \n 7 brand 261 non-null object \ndtypes: float64(1), int64(4), object(3)\nmemory usage: 16.4+ KB\n" ], [ "df_cars.dtypes.value_counts()", "_____no_output_____" ] ], [ [ "#### Após a utilização da biblioteca pandas para a leitura dos dados sobre os valores lidos, é CORRETO afirmar que:\n\n- Não foram encontrados valores nulos após a leitura dos dados. 
", "_____no_output_____" ] ], [ [ "display(df_cars.isna().sum())\ndisplay(df_cars.isnull().sum())", "_____no_output_____" ], [ "# gráfico de distribuição para cada feature do dataset\ncolumns = ['mpg', 'cylinders', 'cubicinches', 'hp', 'weightlbs', 'time-to-60', 'year', 'brand']\nplot_distribution(df_cars[columns], cols=3, width=30, height=20, hspace=0.45, wspace=0.5)", "_____no_output_____" ] ], [ [ "#### Realize a transformação das colunas *“cubicinches”* e *“weightlbs”* do tipo “string” para o tipo numérico utilizando o *pd.to_numeric()* e o parâmetro *errors='coerce'*. Após essa transformação, é CORRETO afirmar:\n\n- Essa transformação adiciona valores nulos ao nosso dataset.", "_____no_output_____" ] ], [ [ "df_cars['cubicinches'] = pd.to_numeric(df_cars['cubicinches'], errors='coerce')\ndf_cars['weightlbs'] = pd.to_numeric(df_cars['weightlbs'], errors='coerce')\ndf_cars.isnull().sum()", "_____no_output_____" ] ], [ [ "#### Indique quais eram os índices dos valores presentes no dataset que *“forçaram”* o pandas a compreender a variável *“cubicinches”* como string.", "_____no_output_____" ] ], [ [ "df_cars[df_cars['cubicinches'].isna()]", "_____no_output_____" ], [ "index_null = df_cars['cubicinches'].isna()\nindex_null[index_null.isin([True])].index", "_____no_output_____" ] ], [ [ "#### Após a transformação das variáveis “string” para os valores numéricos, quantos valores nulos (células no dataframe) passaram a existir no dataset?", "_____no_output_____" ] ], [ [ "df_cars.isna().sum().sum()", "_____no_output_____" ] ], [ [ "#### Substitua os valores nulos introduzidos no dataset após a transformação pelo valor médio das colunas. Qual é o novo valor médio da coluna *“weightlbs”*?", "_____no_output_____" ] ], [ [ "df_cars['cubicinches'].fillna(df_cars['cubicinches'].mean(), inplace=True)\ndf_cars['weightlbs'].fillna(df_cars['weightlbs'].mean(), inplace=True)\ndf_cars.describe()", "_____no_output_____" ], [ "df_cars['weightlbs'].mean()", "_____no_output_____" ], [ "# verificando os dados da feature 'time-to-60' através de um boxplot\nsns.set_style(\"whitegrid\") \nsns.boxplot(y='time-to-60', data=df_cars)", "_____no_output_____" ], [ "sns.boxplot(x=df_cars['time-to-60'])", "_____no_output_____" ] ], [ [ "#### Após substituir os valores nulos pela média das colunas, selecione as colunas *“mpg”, “cylinders”, “cubicinches”, “hp”, “weightlbs”, “time-to-60”, “year”*. \n#### Qual é o valor da mediana para a característica *“mpg”*?", "_____no_output_____" ] ], [ [ "df_cars2 = df_cars[['mpg', 'cylinders', 'cubicinches', 'hp', 'weightlbs', 'time-to-60', 'year']]\ndf_cars2.head()", "_____no_output_____" ], [ "df_cars2['mpg'].median()", "_____no_output_____" ] ], [ [ "#### Qual é a afirmação CORRETA sobre o valor de 14,00 para a variável *“time-to-60”*?\n\n- 75% dos dados são maiores que o valor de 14,00.", "_____no_output_____" ] ], [ [ "df_cars['time-to-60'].describe()", "_____no_output_____" ] ], [ [ "#### Sobre o coeficiente de correlação de Pearson entre as variáveis *“cylinders”* e *“mpg”*, é correto afirmar, EXCETO:\n\n- Mesmo não sendo igual a 1, é possível dizer que à medida que a variável *“cylinders”* aumenta, a variável *“mpg”* reduz em uma direção oposta.\n- Caso fosse calculado o coeficiente de determinação entre essas duas variáveis, o valor seria, aproximadamente, 0,6.\n- Quando um coeficiente de correlação de Pearson é igual a 1, o coeficiente de determinação também será igual a 1. 
\n- **Mesmo não sendo igual a 1, é possível dizer que à medida em que a variável *“cylinders”* aumenta, a variável *“mpg”* também aumenta na mesma direção.**", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 5))\nmatriz_de_correlação = df_cars[['cylinders','mpg']].corr()\nsns.heatmap(matriz_de_correlação, annot=True, vmin=-1, vmax=1, center=0)\nplt.show()", "_____no_output_____" ], [ "# visualiza um gráfico entre as variaveis \"cylinders\" e \"mpg\" e verifica se existe alguma correlação linear\nplt.figure(figsize=(18, 8))\nsns.regplot(x='cylinders', y='mpg', data=df_cars, color='b', x_jitter=0.2)\nplt.xlabel('cylinders')\nplt.ylabel('mpg')\nplt.title('Relação entre \"cylinders\" e \"mpg\"', fontsize=20)\nplt.show()", "_____no_output_____" ], [ "# calculando o coeficiente de correlação entre \"cylinders\" e \"mpg\" através do r2\nrsquared(df_cars['cylinders'], df_cars['mpg'])", "_____no_output_____" ] ], [ [ "#### Sobre o boxplot da variável *“hp”*, é correto afirmar, EXCETO:\n\n- Através do boxplot, é possível perceber que a mediana encontra-se entre os valores de 80 e 100.\n- **Existe uma maior dispersão no segundo quartil quando comparamos com o terceiro.**\n- Não foi identificada a presença de possíveis outliers nos dados. \n- Cada um dos quartis possui a mesma quantidade de valores para a variável *“hp”*.", "_____no_output_____" ] ], [ [ "sns.boxplot(x=df_cars['hp'])", "_____no_output_____" ], [ "# verificando os dados da feature 'hp' através de um boxplot\nsns.set_style(\"whitegrid\") \nsns.boxplot(y='hp', data=df_cars)", "_____no_output_____" ] ], [ [ "### Pré-processamento", "_____no_output_____" ] ], [ [ "# normalização dos dados\nfrom sklearn.preprocessing import StandardScaler\nnormaliza = StandardScaler() \n\n# definindo somente colunas numéricas a serem normalizadas\nnum_cols = df_cars.columns[df_cars.dtypes.apply(lambda c: np.issubdtype(c, np.number))]\n\n# criando uma cópia do dataset original\ndf_cars4 = df_cars[num_cols]\n\n# normalizando os dados\ndf_cars4[num_cols] = normaliza.fit_transform(df_cars4[num_cols])\n\n# exibindo os primeiros registros\ndf_cars4.head()", "_____no_output_____" ] ], [ [ "#### Após normalizado, utilizando a função *StandardScaler()*, qual é o maior valor para a variável *“hp”*?", "_____no_output_____" ] ], [ [ "# verificando o maior valor para a feature \"hp\"\ndf_cars4['hp'].max()", "_____no_output_____" ] ], [ [ "#### Aplicando o PCA, conforme a definição acima, qual é o valor da variância explicada pelo primeiro componente principal?", "_____no_output_____" ] ], [ [ "# criando o objeto PCA com 7 componentes\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=7)\n\n# realizando o fit com os dados normalizados\nprincipalComponents = pca.fit_transform(df_cars4)\n\n# salvando em um dataframe\nPCA_components = pd.DataFrame(principalComponents)\nPCA_components.head()", "_____no_output_____" ], [ "# exibindo o valor da variância explicada por cada componente\nprint(pca.explained_variance_ratio_)", "[0.72371349 0.12675138 0.09126131 0.02773591 0.01766794 0.00798425\n 0.00488572]\n" ], [ "# plot da variação explicada pelos componentes\nfeatures = range(pca.n_components_)\n\nfig, aux = plt.subplots(1, 1, figsize=(18, 8))\nplt.bar(features, pca.explained_variance_ratio_, color='navy')\nplt.xlabel('PCA features')\nplt.ylabel('variance %')\nplt.xticks(features)", "_____no_output_____" ] ], [ [ "### Algoritmo K-Means", "_____no_output_____" ], [ "#### Utilize os três primeiros componentes principais para construir o K-means com um número 
de 3 clusters. Sobre os clusters, é INCORRETO afirmar que:\n\n- Cada um dos clusters possui características próprias.\n- **Todos os clusters possuem a mesma quantidade de elementos.**\n- Existem 3 centroides após a aplicação da clusterização.\n- Os centroides, utilizando apenas as 3 componentes principais, possuem 3 dimensões.", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans\n\nkmeans = KMeans(n_clusters=3, random_state=42)\n\n# treinando o modelo utilizando apenas os três primeiros componentes principais\nkmeans.fit(PCA_components.iloc[:,:3])\n\n# realizando as previsões dos cluster\nx_clustered = kmeans.predict(PCA_components.iloc[:,:3])\n\n# definindo um mapa de cor para cada cluster\ncolor_map = {0:'r', 1: 'g', 2: 'b'}\nlabel_color = [color_map[l] for l in x_clustered]\n\n# definindo os centróides\ncenters = np.array(kmeans.cluster_centers_)", "_____no_output_____" ], [ "# exibindo um gráfico scatter\nfig, aux = plt.subplots(1, 1, figsize=(18, 8))\nplt.title('Kmeans com centróides', fontsize=20)\nplt.scatter(principalComponents[:,0], principalComponents[:,1], c=label_color, alpha=0.5) \nplt.scatter(centers[:,0], centers[:,1], marker=\"x\", color='navy', s=500)\nplt.show()", "_____no_output_____" ], [ "# criando um dataframe do nosso PCA\ndf = pd.DataFrame(PCA_components)\n\n# selecionando somente os 3 primeiros componentes\ndf = df[[0,1,2]]\ndf['cluster'] = x_clustered\n\n# visualizando nossos clusters com os dados do PCA\nsns.pairplot(df, hue='cluster', palette='Dark2', diag_kind='kde', height=3)", "_____no_output_____" ], [ "# verificando a quantidade em cada um dos clusters\nprint(df['cluster'].value_counts())\n\n# exibindo em um gráfico\ndf['cluster'].value_counts().plot(kind ='bar')\nplt.ylabel('Count')", "2 109\n0 77\n1 75\nName: cluster, dtype: int64\n" ] ], [ [ "### Árvore de Decisão", "_____no_output_____" ], [ "#### Após todo o processamento realizado nos itens anteriores, crie uma coluna que contenha a variável de eficiência do veículo. Veículos que percorrem mais de 25 milhas com um galão (*“mpg” > 25*) devem ser considerados eficientes. 
Utilize as colunas *“cylinders”, “cubicinches”, “hp”, “weightlbs”, “time-to-60”* como entradas e como saída a coluna de eficiência criada.\n#### Utilizando a árvore de decisão como mostrado, qual é a acurácia do modelo?", "_____no_output_____" ] ], [ [ "# realizando o merge com o dataset original e do pca, gerando um novo dataset\ndf_final = df_cars.merge(df, left_index=True, right_index=True)\n\n# cria a nova feature \"mpg\"\ndf_final['efficiency'] = np.where(df_final['mpg'] > 25, 1, 0)\n\n# Exibir o dataset final\ndf_final.head()", "_____no_output_____" ], [ "y = df_final['efficiency']\nx = df_final[['cylinders', 'cubicinches', 'hp', 'weightlbs', 'time-to-60']]\n\nnormaliza = StandardScaler() \nx = normaliza.fit_transform(x)\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.30, random_state = 42) \n\nprint(x_train.shape, y_train.shape, x_test.shape, y_test.shape)", "(182, 5) (182,) (79, 5) (79,)\n" ], [ "# aplicando um modelo de classificação via árvore de decisão\nfrom sklearn.tree import DecisionTreeClassifier\nclf_arvore = DecisionTreeClassifier(random_state = 42) \nclf_arvore.fit(x_train, y_train)", "_____no_output_____" ], [ "# realiza a previsão com os dados\ny_pred_arvore = clf_arvore.predict(x_test)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\nacuracia = accuracy_score(y_test, y_pred_arvore)\nprint('Acurácia da Árvore de Decisão: ', acuracia)", "Acurácia da Árvore de Decisão: 0.8734177215189873\n" ], [ "# realiza o plot da matriz de confusão com o seaborn\nfrom sklearn.metrics import classification_report, confusion_matrix\nmatriz_confusao = confusion_matrix(y_test, y_pred_arvore)\nsns.heatmap(matriz_confusao, annot=True, vmin=0, vmax=40, center=20)\nplt.show()", "_____no_output_____" ], [ "# realiza o plot da matriz de confusão\nfrom mlxtend.plotting import plot_confusion_matrix\nfig, ax = plot_confusion_matrix(conf_mat = matriz_confusao)\nplt.show()", "_____no_output_____" ], [ "print(classification_report(y_test, y_pred_arvore))", " precision recall f1-score support\n\n 0 0.94 0.80 0.87 41\n 1 0.82 0.95 0.88 38\n\n accuracy 0.87 79\n macro avg 0.88 0.88 0.87 79\nweighted avg 0.88 0.87 0.87 79\n\n" ] ], [ [ "#### Sobre a matriz de confusão obtida após a aplicação da árvore de decisão, como mostrado anteriormente, é INCORRETO afirmar:\n\n- A matriz de confusão se constitui em uma estratégia ainda mais importante quando um dataset não está balanceado.\n- A diagonal principal da matriz mostra as instâncias em que as previsões foram corretas.\n- **Existem duas vezes mais veículos considerados não eficientes que instâncias de veículos eficientes.**\n- Os falso-positivos correspondem a instâncias em que o algoritmo considerou a previsão como verdadeira e, na realidade, ela era falsa.", "_____no_output_____" ], [ "### Regressão Logística", "_____no_output_____" ], [ "#### Utilizando a mesma divisão de dados entre treinamento e teste empregada para a análise anterior, aplique o modelo de regressão logística como mostrado na descrição do trabalho.\n#### Comparando os resultados obtidos com o modelo de árvore de decisão, é INCORRETO afirmar que:\n\n- Como os dois modelos obtiveram um resultado superior a 80% de acurácia, a escolha sobre qual utilizar deve e pode ser feita a partir de outros critérios, como a complexidade do modelo.\n- **A regressão logística não deveria ser aplicada ao problema, pois ela trabalha apenas com dados categóricos.**\n- A acurácia de ambos os modelos foi 
superior a 80%.\n- A árvore de decisão e a regressão logística podem ser utilizadas para previsão em regressões.", "_____no_output_____" ] ], [ [ "# aplicando um modelo de classificação via regressão logística\nfrom sklearn.linear_model import LogisticRegression\nclf_log = LogisticRegression(random_state = 42)\nclf_log.fit(x_train, y_train)", "_____no_output_____" ], [ "# realiza a previsão com os dados\ny_pred_log = clf_log.predict(x_test)", "_____no_output_____" ], [ "acuracia = accuracy_score(y_test, y_pred_log)\nprint('Acurácia da Regressão Logística: ', acuracia)", "Acurácia da Regressão Logística: 0.8607594936708861\n" ], [ "# realiza o plot da matriz de confusão com o seaborn\nmatriz_confusao = confusion_matrix(y_test, y_pred_log)\nsns.heatmap(matriz_confusao, annot=True, vmin=0, vmax=40, center=20)\nplt.show()", "_____no_output_____" ], [ "# realiza o plot da matriz de confusão\nfig, ax = plot_confusion_matrix(conf_mat = matriz_confusao)\nplt.show()", "_____no_output_____" ], [ "print(classification_report(y_test, y_pred_log))", " precision recall f1-score support\n\n 0 0.86 0.88 0.87 41\n 1 0.86 0.84 0.85 38\n\n accuracy 0.86 79\n macro avg 0.86 0.86 0.86 79\nweighted avg 0.86 0.86 0.86 79\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
c5204aa14ffcc9e010393781405ad3c892a873b9
5,458
ipynb
Jupyter Notebook
Hawaii vacation/database_engineering.ipynb
tardis123/Hawaii-vacation
2aa6ca028c24d924cbcb786a73af6b89ce999645
[ "ADSL" ]
null
null
null
Hawaii vacation/database_engineering.ipynb
tardis123/Hawaii-vacation
2aa6ca028c24d924cbcb786a73af6b89ce999645
[ "ADSL" ]
null
null
null
Hawaii vacation/database_engineering.ipynb
tardis123/Hawaii-vacation
2aa6ca028c24d924cbcb786a73af6b89ce999645
[ "ADSL" ]
null
null
null
25.624413
119
0.557164
[ [ [ "# Database engineering\n\nIn this section we'll create:\n\n+ table schemas using SQLAlchemy ORM\n+ create a database in SQLite\n+ load the cleaned Hawaii climate data into pandas dataframes\n+ upload the data from the pandas dataframes into the SQLite database", "_____no_output_____" ] ], [ [ "# Dependencies\nimport pandas as pd\nimport sqlite3\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, create_session\nfrom sqlalchemy import create_engine, MetaData\nfrom sqlalchemy.ext.automap import automap_base", "_____no_output_____" ], [ "# Define and create a database engine \nengine = create_engine('sqlite:///hawaii.sqlite', echo=False)", "_____no_output_____" ], [ "# Use SQLAlchemy to create a database table schema\nBase = declarative_base()\n\nclass Station(Base):\n\n __tablename__ = \"station\"\n\n station_id = Column(Integer, primary_key=True)\n station = Column(String, nullable=False)\n name = Column(String, nullable=False)\n latitude = Column(Integer, nullable=False)\n longitude = Column(Integer, nullable=False)\n elevation = Column(Integer, nullable=False)\n \n children = relationship(\"measurement\", back_populates=\"parent\")\n \n def __init__(self, name):\n\n self.name = name\n \nclass Measurement(Base):\n \n __tablename__ = \"measurement\"\n \n measurement_id = Column(Integer, primary_key=True)\n station = Column(String)\n date = Column(String)\n prcp = Column(Integer)\n tobs = Column(Integer) \n \n parent = relationship(\"station\", back_populates=\"parent\")\n \n def __init__(self, name):\n\n self.name = name \n\n# Generate schema\nBase.metadata.create_all(engine)", "_____no_output_____" ], [ "# Reflect database into a new model\nBase = automap_base()\n# Reflect tables\nBase.prepare(engine)\n\n# Access and reflect metadata \nmetadata = MetaData(bind=engine)\nmetadata.reflect()\n# Create database session object\nsession = create_session(bind = engine)", "_____no_output_____" ], [ "# Check whether classes and tables exist\nfor mappedclass in Base.classes:\n print(mappedclass)\n\nfor mdtable in Base.metadata.tables:\n print(mdtable)", "_____no_output_____" ], [ "# Define SQLite connection and cursor\nconn = sqlite3.connect(\"hawaii.sqlite\")\ncur = conn.cursor()", "_____no_output_____" ], [ "# Delete any existing table data (for test purposes only)\n# https://stackoverflow.com/questions/11233128/how-to-clean-the-database-dropping-all-records-using-sqlalchemy\nfor tbl in metadata.sorted_tables:\n engine.execute(tbl.delete())\nconn.commit()\n## Compact SQLite file\nconn.execute(\"VACUUM\")", "_____no_output_____" ], [ "# Load clean data\nstation_df = pd.read_csv(\"clean_hawaii_stations.csv\")\nmeasurement_df = pd.read_csv(\"clean_hawaii_measurements.csv\")", "_____no_output_____" ], [ "# Append data to SQLAlchemy tables\nstation_df.to_sql('station', conn, if_exists='append', index=False)\nmeasurement_df.to_sql('measurement', conn, if_exists='append', index=False)", "_____no_output_____" ], [ "# Close connection\nconn.close()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c520560ca096e365dc2d5930fa8e9ad0ef375313
145,607
ipynb
Jupyter Notebook
6-9 Data Analysis with Python/02. data-wrangling.ipynb
MLunov/IBM-Data-Science-Professional-Certificate
90003843e9a53a2705d9cde6dd4206595fb8a41f
[ "MIT" ]
1
2020-02-25T13:17:10.000Z
2020-02-25T13:17:10.000Z
6-9 Data Analysis with Python/02. data-wrangling.ipynb
MLunov/IBM-Data-Science-Professional-Certificate
90003843e9a53a2705d9cde6dd4206595fb8a41f
[ "MIT" ]
null
null
null
6-9 Data Analysis with Python/02. data-wrangling.ipynb
MLunov/IBM-Data-Science-Professional-Certificate
90003843e9a53a2705d9cde6dd4206595fb8a41f
[ "MIT" ]
2
2020-03-12T20:58:56.000Z
2020-10-12T22:18:43.000Z
36.843877
8,804
0.49203
[ [ [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <a href=\"https://cocl.us/corsera_da0101en_notebook_top\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/TopAd.png\" width=\"750\" align=\"center\">\n </a>\n</div>", "_____no_output_____" ], [ "<a href=\"https://www.bigdatauniversity.com\"><img src = \"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/CCLog.png\" width = 300, align = \"center\"></a>\n\n<h1 align=center><font size=5>Data Analysis with Python</font></h1>", "_____no_output_____" ], [ "<h1>Data Wrangling</h1>", "_____no_output_____" ], [ "<h3>Welcome!</h3>\n\nBy the end of this notebook, you will have learned the basics of Data Wrangling! ", "_____no_output_____" ], [ "<h2>Table of content</h2>\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n<ul>\n <li><a href=\"#identify_handle_missing_values\">Identify and handle missing values</a>\n <ul>\n <li><a href=\"#identify_missing_values\">Identify missing values</a></li>\n <li><a href=\"#deal_missing_values\">Deal with missing values</a></li>\n <li><a href=\"#correct_data_format\">Correct data format</a></li>\n </ul>\n </li>\n <li><a href=\"#data_standardization\">Data standardization</a></li>\n <li><a href=\"#data_normalization\">Data Normalization (centering/scaling)</a></li>\n <li><a href=\"#binning\">Binning</a></li>\n <li><a href=\"#indicator\">Indicator variable</a></li>\n</ul>\n \nEstimated Time Needed: <strong>30 min</strong>\n</div>\n \n<hr>", "_____no_output_____" ], [ "<h2>What is the purpose of Data Wrangling?</h2>", "_____no_output_____" ], [ "Data Wrangling is the process of converting data from the initial format to a format that may be better for analysis.", "_____no_output_____" ], [ "<h3>What is the fuel consumption (L/100k) rate for the diesel car?</h3>", "_____no_output_____" ], [ "<h3>Import data</h3>\n<p>\nYou can find the \"Automobile Data Set\" from the following link: <a href=\"https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data\">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a>. \nWe will be using this data set throughout this course.\n</p>", "_____no_output_____" ], [ "<h4>Import pandas</h4> ", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pylab as plt", "_____no_output_____" ] ], [ [ "<h2>Reading the data set from the URL and adding the related headers.</h2>", "_____no_output_____" ], [ "URL of the dataset", "_____no_output_____" ], [ "This dataset was hosted on IBM Cloud object click <a href=\"https://cocl.us/corsera_da0101en_notebook_bottom\">HERE</a> for free storage ", "_____no_output_____" ] ], [ [ "filename = \"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv\"", "_____no_output_____" ] ], [ [ " Python list <b>headers</b> containing name of headers ", "_____no_output_____" ] ], [ [ "headers = [\"symboling\",\"normalized-losses\",\"make\",\"fuel-type\",\"aspiration\", \"num-of-doors\",\"body-style\",\n \"drive-wheels\",\"engine-location\",\"wheel-base\", \"length\",\"width\",\"height\",\"curb-weight\",\"engine-type\",\n \"num-of-cylinders\", \"engine-size\",\"fuel-system\",\"bore\",\"stroke\",\"compression-ratio\",\"horsepower\",\n \"peak-rpm\",\"city-mpg\",\"highway-mpg\",\"price\"]", "_____no_output_____" ] ], [ [ "Use the Pandas method <b>read_csv()</b> to load the data from the web address. 
Set the parameter \"names\" equal to the Python list \"headers\".", "_____no_output_____" ] ], [ [ "df = pd.read_csv(filename, names = headers)", "_____no_output_____" ] ], [ [ " Use the method <b>head()</b> to display the first five rows of the dataframe. ", "_____no_output_____" ] ], [ [ "# To see what the data set looks like, we'll use the head() method.\ndf.head()", "_____no_output_____" ] ], [ [ "As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis. \n<div>So, how do we identify all those missing values and deal with them?</div> \n\n\n<b>How to work with missing data?</b>\n\nSteps for working with missing data:\n<ol>\n <li>dentify missing data</li>\n <li>deal with missing data</li>\n <li>correct data format</li>\n</ol>", "_____no_output_____" ], [ "<h2 id=\"identify_handle_missing_values\">Identify and handle missing values</h2>\n\n\n<h3 id=\"identify_missing_values\">Identify missing values</h3>\n<h4>Convert \"?\" to NaN</h4>\nIn the car dataset, missing data comes with the question mark \"?\".\nWe replace \"?\" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function: \n <pre>.replace(A, B, inplace = True) </pre>\nto replace A by B", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# replace \"?\" to NaN\ndf.replace(\"?\", np.nan, inplace = True)\ndf.head(5)", "_____no_output_____" ] ], [ [ "dentify_missing_values\n\n<h4>Evaluating for Missing Data</h4>\n\nThe missing values are converted to Python's default. We use Python's built-in functions to identify these missing values. There are two methods to detect missing data:\n<ol>\n <li><b>.isnull()</b></li>\n <li><b>.notnull()</b></li>\n</ol>\nThe output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.", "_____no_output_____" ] ], [ [ "missing_data = df.isnull()\nmissing_data.head(5)", "_____no_output_____" ] ], [ [ "\"True\" stands for missing value, while \"False\" stands for not missing value.", "_____no_output_____" ], [ "<h4>Count missing values in each column</h4>\n<p>\nUsing a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, \"True\" represents a missing value, \"False\" means the value is present in the dataset. In the body of the for loop the method \".value_counts()\" counts the number of \"True\" values. 
\n</p>", "_____no_output_____" ] ], [ [ "for column in missing_data.columns.values.tolist():\n print(column)\n print (missing_data[column].value_counts())\n print(\"\") ", "symboling\nFalse 205\nName: symboling, dtype: int64\n\nnormalized-losses\nFalse 164\nTrue 41\nName: normalized-losses, dtype: int64\n\nmake\nFalse 205\nName: make, dtype: int64\n\nfuel-type\nFalse 205\nName: fuel-type, dtype: int64\n\naspiration\nFalse 205\nName: aspiration, dtype: int64\n\nnum-of-doors\nFalse 203\nTrue 2\nName: num-of-doors, dtype: int64\n\nbody-style\nFalse 205\nName: body-style, dtype: int64\n\ndrive-wheels\nFalse 205\nName: drive-wheels, dtype: int64\n\nengine-location\nFalse 205\nName: engine-location, dtype: int64\n\nwheel-base\nFalse 205\nName: wheel-base, dtype: int64\n\nlength\nFalse 205\nName: length, dtype: int64\n\nwidth\nFalse 205\nName: width, dtype: int64\n\nheight\nFalse 205\nName: height, dtype: int64\n\ncurb-weight\nFalse 205\nName: curb-weight, dtype: int64\n\nengine-type\nFalse 205\nName: engine-type, dtype: int64\n\nnum-of-cylinders\nFalse 205\nName: num-of-cylinders, dtype: int64\n\nengine-size\nFalse 205\nName: engine-size, dtype: int64\n\nfuel-system\nFalse 205\nName: fuel-system, dtype: int64\n\nbore\nFalse 201\nTrue 4\nName: bore, dtype: int64\n\nstroke\nFalse 201\nTrue 4\nName: stroke, dtype: int64\n\ncompression-ratio\nFalse 205\nName: compression-ratio, dtype: int64\n\nhorsepower\nFalse 203\nTrue 2\nName: horsepower, dtype: int64\n\npeak-rpm\nFalse 203\nTrue 2\nName: peak-rpm, dtype: int64\n\ncity-mpg\nFalse 205\nName: city-mpg, dtype: int64\n\nhighway-mpg\nFalse 205\nName: highway-mpg, dtype: int64\n\nprice\nFalse 201\nTrue 4\nName: price, dtype: int64\n\n" ] ], [ [ "Based on the summary above, each column has 205 rows of data, seven columns containing missing data:\n<ol>\n <li>\"normalized-losses\": 41 missing data</li>\n <li>\"num-of-doors\": 2 missing data</li>\n <li>\"bore\": 4 missing data</li>\n <li>\"stroke\" : 4 missing data</li>\n <li>\"horsepower\": 2 missing data</li>\n <li>\"peak-rpm\": 2 missing data</li>\n <li>\"price\": 4 missing data</li>\n</ol>", "_____no_output_____" ], [ "<h3 id=\"deal_missing_values\">Deal with missing data</h3>\n<b>How to deal with missing data?</b>\n\n<ol>\n <li>drop data<br>\n a. drop the whole row<br>\n b. drop the whole column\n </li>\n <li>replace data<br>\n a. replace it by mean<br>\n b. replace it by frequency<br>\n c. replace it based on other functions\n </li>\n</ol>", "_____no_output_____" ], [ "Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely.\nWe have some freedom in choosing which method to replace data; however, some methods may seem more reasonable than others. We will apply each method to many different columns:\n\n<b>Replace by mean:</b>\n<ul>\n <li>\"normalized-losses\": 41 missing data, replace them with mean</li>\n <li>\"stroke\": 4 missing data, replace them with mean</li>\n <li>\"bore\": 4 missing data, replace them with mean</li>\n <li>\"horsepower\": 2 missing data, replace them with mean</li>\n <li>\"peak-rpm\": 2 missing data, replace them with mean</li>\n</ul>\n\n<b>Replace by frequency:</b>\n<ul>\n <li>\"num-of-doors\": 2 missing data, replace them with \"four\". \n <ul>\n <li>Reason: 84% sedans is four doors. 
Since four doors is most frequent, it is most likely to occur</li>\n </ul>\n </li>\n</ul>\n\n<b>Drop the whole row:</b>\n<ul>\n <li>\"price\": 4 missing data, simply delete the whole row\n <ul>\n <li>Reason: price is what we want to predict. Any data entry without price data cannot be used for prediction; therefore any row now without price data is not useful to us</li>\n </ul>\n </li>\n</ul>", "_____no_output_____" ], [ "<h4>Calculate the average of the column </h4>", "_____no_output_____" ] ], [ [ "avg_norm_loss = df[\"normalized-losses\"].astype(\"float\").mean(axis=0)\nprint(\"Average of normalized-losses:\", avg_norm_loss)", "Average of normalized-losses: 122.0\n" ] ], [ [ "<h4>Replace \"NaN\" by mean value in \"normalized-losses\" column</h4>", "_____no_output_____" ] ], [ [ "df[\"normalized-losses\"].replace(np.nan, avg_norm_loss, inplace=True)", "_____no_output_____" ] ], [ [ "<h4>Calculate the mean value for 'bore' column</h4>", "_____no_output_____" ] ], [ [ "avg_bore=df['bore'].astype('float').mean(axis=0)\nprint(\"Average of bore:\", avg_bore)", "Average of bore: 3.3297512437810943\n" ] ], [ [ "<h4>Replace NaN by mean value</h4>", "_____no_output_____" ] ], [ [ "df[\"bore\"].replace(np.nan, avg_bore, inplace=True)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #1: </h1>\n\n<b>According to the example above, replace NaN in \"stroke\" column by mean.</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \navg_stroke=df['stroke'].astype('float').mean(axis=0)\ndf['stroke'].replace(np.nan, avg_stroke, inplace=True)", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# calculate the mean vaule for \"stroke\" column\navg_stroke = df[\"stroke\"].astype(\"float\").mean(axis = 0)\nprint(\"Average of stroke:\", avg_stroke)\n\n# replace NaN by mean value in \"stroke\" column\ndf[\"stroke\"].replace(np.nan, avg_stroke, inplace = True)\n\n-->\n", "_____no_output_____" ], [ "<h4>Calculate the mean value for the 'horsepower' column:</h4>", "_____no_output_____" ] ], [ [ "avg_horsepower = df['horsepower'].astype('float').mean(axis=0)\nprint(\"Average horsepower:\", avg_horsepower)", "Average horsepower: 104.25615763546799\n" ] ], [ [ "<h4>Replace \"NaN\" by mean value:</h4>", "_____no_output_____" ] ], [ [ "df['horsepower'].replace(np.nan, avg_horsepower, inplace=True)", "_____no_output_____" ] ], [ [ "<h4>Calculate the mean value for 'peak-rpm' column:</h4>", "_____no_output_____" ] ], [ [ "avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0)\nprint(\"Average peak rpm:\", avg_peakrpm)", "Average peak rpm: 5125.369458128079\n" ] ], [ [ "<h4>Replace NaN by mean value:</h4>", "_____no_output_____" ] ], [ [ "df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True)", "_____no_output_____" ] ], [ [ "To see which values are present in a particular column, we can use the \".value_counts()\" method:", "_____no_output_____" ] ], [ [ "df['num-of-doors'].value_counts()", "_____no_output_____" ] ], [ [ "We can see that four doors are the most common type. 
We can also use the \".idxmax()\" method to calculate for us the most common type automatically:", "_____no_output_____" ] ], [ [ "df['num-of-doors'].value_counts().idxmax()", "_____no_output_____" ] ], [ [ "The replacement procedure is very similar to what we have seen previously", "_____no_output_____" ] ], [ [ "#replace the missing 'num-of-doors' values by the most frequent \ndf[\"num-of-doors\"].replace(np.nan, \"four\", inplace=True)", "_____no_output_____" ] ], [ [ "Finally, let's drop all rows that do not have price data:", "_____no_output_____" ] ], [ [ "# simply drop whole row with NaN in \"price\" column\ndf.dropna(subset=[\"price\"], axis=0, inplace=True)\n\n# reset index, because we droped two rows\ndf.reset_index(drop=True, inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "<b>Good!</b> Now, we obtain the dataset with no missing values.", "_____no_output_____" ], [ "<h3 id=\"correct_data_format\">Correct data format</h3>\n<b>We are almost there!</b>\n<p>The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other).</p>\n\nIn Pandas, we use \n<p><b>.dtype()</b> to check the data type</p>\n<p><b>.astype()</b> to change the data type</p>", "_____no_output_____" ], [ "<h4>Lets list the data types for each column</h4>", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "<p>As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the \"astype()\" method.</p> ", "_____no_output_____" ], [ "<h4>Convert data types to proper format</h4>", "_____no_output_____" ] ], [ [ "df[[\"bore\", \"stroke\"]] = df[[\"bore\", \"stroke\"]].astype(\"float\")\ndf[[\"normalized-losses\"]] = df[[\"normalized-losses\"]].astype(\"int\")\ndf[[\"price\"]] = df[[\"price\"]].astype(\"float\")\ndf[[\"peak-rpm\"]] = df[[\"peak-rpm\"]].astype(\"float\")", "_____no_output_____" ] ], [ [ "<h4>Let us list the columns after the conversion</h4>", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "<b>Wonderful!</b>\n\nNow, we finally obtain the cleaned dataset with no missing values and all data in its proper format.", "_____no_output_____" ], [ "<h2 id=\"data_standardization\">Data Standardization</h2>\n<p>\nData is usually collected from different agencies with different formats.\n(Data Standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation)\n</p>\n \n<b>What is Standardization?</b>\n<p>Standardization is the process of transforming data into a common format which allows the researcher to make the meaningful comparison.\n</p>\n\n<b>Example</b>\n<p>Transform mpg to L/100km:</p>\n<p>In our dataset, the fuel consumption columns \"city-mpg\" and \"highway-mpg\" are represented by mpg (miles per gallon) unit. 
Assume we are developing an application in a country that accept the fuel consumption with L/100km standard</p>\n<p>We will need to apply <b>data transformation</b> to transform mpg into L/100km?</p>\n", "_____no_output_____" ], [ "<p>The formula for unit conversion is<p>\nL/100km = 235 / mpg\n<p>We can do many mathematical operations directly in Pandas.</p>", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "# Convert mpg to L/100km by mathematical operation (235 divided by mpg)\ndf['city-L/100km'] = 235/df[\"city-mpg\"]\n\n# check your transformed data \ndf.head()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #2: </h1>\n\n<b>According to the example above, transform mpg to L/100km in the column of \"highway-mpg\", and change the name of column to \"highway-L/100km\".</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndf['highway-mpg'] = 235/df['highway-mpg']\ndf.rename(columns={'highway-mpg':'highway-L/100km'}, inplace=True)\ndf.head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# transform mpg to L/100km by mathematical operation (235 divided by mpg)\ndf[\"highway-mpg\"] = 235/df[\"highway-mpg\"]\n\n# rename column name from \"highway-mpg\" to \"highway-L/100km\"\ndf.rename(columns={'\"highway-mpg\"':'highway-L/100km'}, inplace=True)\n\n# check your transformed data \ndf.head()\n\n-->\n", "_____no_output_____" ], [ "<h2 id=\"data_normalization\">Data Normalization</h2>\n\n<b>Why normalization?</b>\n<p>Normalization is the process of transforming values of several variables into a similar range. Typical normalizations include scaling the variable so the variable average is 0, scaling the variable so the variance is 1, or scaling variable so the variable values range from 0 to 1\n</p>\n\n<b>Example</b>\n<p>To demonstrate normalization, let's say we want to scale the columns \"length\", \"width\" and \"height\" </p>\n<p><b>Target:</b>would like to Normalize those variables so their value ranges from 0 to 1.</p>\n<p><b>Approach:</b> replace original value by (original value)/(maximum value)</p>", "_____no_output_____" ] ], [ [ "# replace (original value) by (original value)/(maximum value)\ndf['length'] = df['length']/df['length'].max()\ndf['width'] = df['width']/df['width'].max()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Questiont #3: </h1>\n\n<b>According to the example above, normalize the column \"height\".</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndf['height'] = df['height']/df['height'].max()\ndf[[\"length\",\"width\",\"height\"]].head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\ndf['height'] = df['height']/df['height'].max() \n# show the scaled columns\ndf[[\"length\",\"width\",\"height\"]].head()\n\n-->", "_____no_output_____" ], [ "Here we can see, we've normalized \"length\", \"width\" and \"height\" in the range of [0,1].", "_____no_output_____" ], [ "<h2 id=\"binning\">Binning</h2>\n<b>Why binning?</b>\n<p>\n Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis.\n</p>\n\n<b>Example: </b>\n<p>In our dataset, \"horsepower\" is a real valued variable ranging from 48 to 288, it has 57 unique values. 
What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three ‘bins' to simplify analysis? </p>\n\n<p>We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins </p>\n\n", "_____no_output_____" ], [ "<h3>Example of Binning Data In Pandas</h3>", "_____no_output_____" ], [ " Convert data to correct format ", "_____no_output_____" ] ], [ [ "df[\"horsepower\"]=df[\"horsepower\"].astype(int, copy=True)", "_____no_output_____" ] ], [ [ "Lets plot the histogram of horspower, to see what the distribution of horsepower looks like.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib as plt\nfrom matplotlib import pyplot\nplt.pyplot.hist(df[\"horsepower\"])\n\n# set x/y labels and plot title\nplt.pyplot.xlabel(\"horsepower\")\nplt.pyplot.ylabel(\"count\")\nplt.pyplot.title(\"horsepower bins\")", "_____no_output_____" ] ], [ [ "<p>We would like 3 bins of equal size bandwidth so we use numpy's <code>linspace(start_value, end_value, numbers_generated</code> function.</p>\n<p>Since we want to include the minimum value of horsepower we want to set start_value=min(df[\"horsepower\"]).</p>\n<p>Since we want to include the maximum value of horsepower we want to set end_value=max(df[\"horsepower\"]).</p>\n<p>Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated=4.</p>", "_____no_output_____" ], [ "We build a bin array, with a minimum value to a maximum value, with bandwidth calculated above. The bins will be values used to determine when one bin ends and another begins.", "_____no_output_____" ] ], [ [ "bins = np.linspace(min(df[\"horsepower\"]), max(df[\"horsepower\"]), 4)\nbins", "_____no_output_____" ] ], [ [ " We set group names:", "_____no_output_____" ] ], [ [ "group_names = ['Low', 'Medium', 'High']", "_____no_output_____" ] ], [ [ " We apply the function \"cut\" the determine what each value of \"df['horsepower']\" belongs to. ", "_____no_output_____" ] ], [ [ "df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True )\ndf[['horsepower','horsepower-binned']].head(20)", "_____no_output_____" ] ], [ [ "Lets see the number of vehicles in each bin.", "_____no_output_____" ] ], [ [ "df[\"horsepower-binned\"].value_counts()", "_____no_output_____" ] ], [ [ "Lets plot the distribution of each bin.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib as plt\nfrom matplotlib import pyplot\npyplot.bar(group_names, df[\"horsepower-binned\"].value_counts())\n\n# set x/y labels and plot title\nplt.pyplot.xlabel(\"horsepower\")\nplt.pyplot.ylabel(\"count\")\nplt.pyplot.title(\"horsepower bins\")", "_____no_output_____" ] ], [ [ "<p>\n Check the dataframe above carefully, you will find the last column provides the bins for \"horsepower\" with 3 categories (\"Low\",\"Medium\" and \"High\"). \n</p>\n<p>\n We successfully narrow the intervals from 57 to 3!\n</p>", "_____no_output_____" ], [ "<h3>Bins visualization</h3>\nNormally, a histogram is used to visualize the distribution of bins we created above. 
", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib as plt\nfrom matplotlib import pyplot\n\na = (0,1,2)\n\n# draw historgram of attribute \"horsepower\" with bins = 3\nplt.pyplot.hist(df[\"horsepower\"], bins = 3)\n\n# set x/y labels and plot title\nplt.pyplot.xlabel(\"horsepower\")\nplt.pyplot.ylabel(\"count\")\nplt.pyplot.title(\"horsepower bins\")", "_____no_output_____" ] ], [ [ "The plot above shows the binning result for attribute \"horsepower\". ", "_____no_output_____" ], [ "<h2 id=\"indicator\">Indicator variable (or dummy variable)</h2>\n<b>What is an indicator variable?</b>\n<p>\n An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning. \n</p>\n\n<b>Why we use indicator variables?</b>\n<p>\n So we can use categorical variables for regression analysis in the later modules.\n</p>\n<b>Example</b>\n<p>\n We see the column \"fuel-type\" has two unique values, \"gas\" or \"diesel\". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert \"fuel-type\" into indicator variables.\n</p>\n\n<p>\n We will use the panda's method 'get_dummies' to assign numerical values to different categories of fuel type. \n</p>", "_____no_output_____" ] ], [ [ "df.columns", "_____no_output_____" ] ], [ [ "get indicator variables and assign it to data frame \"dummy_variable_1\" ", "_____no_output_____" ] ], [ [ "dummy_variable_1 = pd.get_dummies(df[\"fuel-type\"])\ndummy_variable_1.head()", "_____no_output_____" ] ], [ [ "change column names for clarity ", "_____no_output_____" ] ], [ [ "dummy_variable_1.rename(columns={'fuel-type-diesel':'gas', 'fuel-type-diesel':'diesel'}, inplace=True)\ndummy_variable_1.head()", "_____no_output_____" ] ], [ [ "We now have the value 0 to represent \"gas\" and 1 to represent \"diesel\" in the column \"fuel-type\". We will now insert this column back into our original dataset. ", "_____no_output_____" ] ], [ [ "# merge data frame \"df\" and \"dummy_variable_1\" \ndf = pd.concat([df, dummy_variable_1], axis=1)\n\n# drop original column \"fuel-type\" from \"df\"\ndf.drop(\"fuel-type\", axis = 1, inplace=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "The last two columns are now the indicator variable representation of the fuel-type variable. 
It's all 0s and 1s now.", "_____no_output_____" ], [ "<div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #4: </h1>\n\n<b>As above, create indicator variable to the column of \"aspiration\": \"std\" to 0, while \"turbo\" to 1.</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \ndummy_variable_2 = pd.get_dummies(df['aspiration'])\ndummy_variable_2.head()", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n# get indicator variables of aspiration and assign it to data frame \"dummy_variable_2\"\ndummy_variable_2 = pd.get_dummies(df['aspiration'])\n\n# change column names for clarity\ndummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True)\n\n# show first 5 instances of data frame \"dummy_variable_1\"\ndummy_variable_2.head()\n\n-->", "_____no_output_____" ], [ " <div class=\"alert alert-danger alertdanger\" style=\"margin-top: 20px\">\n<h1> Question #5: </h1>\n\n<b>Merge the new dataframe to the original dataframe then drop the column 'aspiration'</b>\n</div>", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute \n# merge data frame \"df\" and \"dummy_variable_1\" \ndf = pd.concat([df, dummy_variable_2], axis=1)\n\n# drop original column \"fuel-type\" from \"df\"\ndf.drop('aspiration', axis = 1, inplace=True)", "_____no_output_____" ] ], [ [ "Double-click <b>here</b> for the solution.\n\n<!-- The answer is below:\n\n#merge the new dataframe to the original datafram\ndf = pd.concat([df, dummy_variable_2], axis=1)\n\n# drop original column \"aspiration\" from \"df\"\ndf.drop('aspiration', axis = 1, inplace=True)\n\n-->", "_____no_output_____" ], [ "save the new csv ", "_____no_output_____" ] ], [ [ "df.to_csv('clean_df.csv')", "_____no_output_____" ] ], [ [ "<h1>Thank you for completing this notebook</h1>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n\n <p><a href=\"https://cocl.us/corsera_da0101en_notebook_bottom\"><img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/Images/BottomAd.png\" width=\"750\" align=\"center\"></a></p>\n</div>", "_____no_output_____" ], [ "<h3>About the Authors:</h3>\n\nThis notebook was written by <a href=\"https://www.linkedin.com/in/mahdi-noorian-58219234/\" target=\"_blank\">Mahdi Noorian PhD</a>, <a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a>, Bahare Talayian, Eric Xiao, Steven Dong, Parizad, Hima Vsudevan and <a href=\"https://www.linkedin.com/in/fiorellawever/\" target=\"_blank\">Fiorella Wenver</a> and <a href=\" https://www.linkedin.com/in/yi-leng-yao-84451275/ \" target=\"_blank\" >Yi Yao</a>.\n\n<p><a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\" target=\"_blank\">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>", "_____no_output_____" ], [ "<hr>\n<p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>", "_____no_output_____" ] ] ]
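The wrangling record above walks through replacing "?" with NaN, imputing with column means, equal-width binning via pd.cut, and indicator variables via pd.get_dummies. A compact, runnable sketch of those four steps on a made-up six-row frame (the column names echo the notebook; the values are invented):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "horsepower": ["111", "?", "154", "102", "?", "262"],   # invented values
    "fuel-type":  ["gas", "gas", "gas", "diesel", "gas", "gas"],
})

# 1) "?" -> NaN, then cast the column to a numeric dtype
df.replace("?", np.nan, inplace=True)
df["horsepower"] = df["horsepower"].astype("float")

# 2) impute missing values with the column mean
df["horsepower"] = df["horsepower"].fillna(df["horsepower"].mean())

# 3) three equal-width bins, as with np.linspace + pd.cut in the notebook
bins = np.linspace(df["horsepower"].min(), df["horsepower"].max(), 4)
df["horsepower-binned"] = pd.cut(df["horsepower"], bins,
                                 labels=["Low", "Medium", "High"], include_lowest=True)

# 4) indicator (dummy) variables for the categorical column
df = pd.concat([df, pd.get_dummies(df["fuel-type"])], axis=1).drop("fuel-type", axis=1)
print(df)
```

fillna(mean) is used here instead of the notebook's replace(np.nan, mean, inplace=True); the result is the same, but it avoids modifying a column view in place.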
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
c5205cde90010ade1fd22c340299157f288cad2e
560,396
ipynb
Jupyter Notebook
Projeto4/Atividades/Aula01/Aula 01 - Parte 1.ipynb
filipefborba/viscomp_2018
9638a406927598af1e7e34ea5c8fe7dedcf2dd3a
[ "MIT" ]
null
null
null
Projeto4/Atividades/Aula01/Aula 01 - Parte 1.ipynb
filipefborba/viscomp_2018
9638a406927598af1e7e34ea5c8fe7dedcf2dd3a
[ "MIT" ]
null
null
null
Projeto4/Atividades/Aula01/Aula 01 - Parte 1.ipynb
filipefborba/viscomp_2018
9638a406927598af1e7e34ea5c8fe7dedcf2dd3a
[ "MIT" ]
1
2018-11-27T20:37:34.000Z
2018-11-27T20:37:34.000Z
1,248.097996
213,612
0.956019
[ [ [ "# Aula 01 - Parte 1\n## Transformações Lineares\n\nNesta primeira parte da aula faremos uma breve revisão de transformações lineares. Vamos começar pensando em transformações em 2D.\n\n### Rotação\n\nCrie uma função que recebe um ângulo $\\theta$ e devolve uma matriz de rotação representada por um *numpy.array*. Os pontos são representados em [coordenadas homogêneas](https://en.wikipedia.org/wiki/Homogeneous_coordinates).", "_____no_output_____" ] ], [ [ "# Imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math", "_____no_output_____" ], [ "# Funções úteis\ndef ponto(x, y):\n return np.array([x, y, 1]).reshape((3, 1))\n\ndef prettypt(pt):\n return tuple(pt.flatten())\n\ndef testar_funcao(funcao, entradas, parametros, saidas):\n EPSILON = 1e-1 # Sim, ele vai aceitar um erro grande...\n tudo_ok = True\n for entrada, parametro, saida_esperada in zip(entradas, parametros, saidas):\n if isinstance(parametro, tuple):\n mat = funcao(*parametro)\n else:\n mat = funcao(parametro)\n saida_obtida = mat.dot(entrada)\n if not np.allclose(saida_obtida, saida_esperada, atol=EPSILON):\n tudo_ok = False\n print('Erro para entrada {}. Esperado={}, Obtido={}'.format(prettypt(entrada), prettypt(saida_esperada), prettypt(saida_obtida)))\n if tudo_ok:\n print('Tudo OK :)')", "_____no_output_____" ], [ "# IMPLEMENTE ESSA FUNÇÃO\ndef rotation_matrix(theta):\n m = np.eye(3)\n m[0][0] = math.cos(theta)\n m[0][1] = -math.sin(theta)\n m[1][0] = math.sin(theta)\n m[1][1] = math.cos(theta)\n return m\nrotation_matrix(180)", "_____no_output_____" ] ], [ [ "Abra o arquivo *Rotacao.ggb* utilizando o software [Geogebra](https://www.geogebra.org/download). Gere mais 10 valores para testar a sua função movendo o ponto $p$ e alterando o valor de $\\theta'$ no programa.", "_____no_output_____" ] ], [ [ "# Os pontos são representados por tuplas (x, y)\nentradas = [\n ponto(4, 2),\n ponto(6, 3),\n ponto(6, 3),\n ponto(9, 6),\n ponto(2, 1),\n ponto(5, 5),\n ponto(5, 5),\n ponto(0, 10),\n ponto(0, 10),\n ponto(5, 10)\n # ADICIONE OUTROS PONTOS DE ENTRADA AQUI...\n]\nangulos = [\n 0.64, # Em radianos\n 0.64, # Em radianos\n 0.91, # Em radianos\n 1.57, # Em radianos\n 1.57, # Em radianos\n 1.57, # Em radianos\n 0.79, # Em radianos\n math.pi, # Em radianos\n 0, # Em radianos\n math.pi # Em radianos\n # ADICIONE OUTROS ÂNGULOS AQUI...\n]\nsaidas = [\n ponto(2, 4),\n ponto(3, 6),\n ponto(1.31, 6.58),\n ponto(-6, 9),\n ponto(-1, 2),\n ponto(-5, 5),\n ponto(0, 7.07),\n ponto(0, -10),\n ponto(0, 10),\n ponto(-5, -10)\n # ADICIONE OUTRAS SAÍDAS ESPERADAS AQUI...\n]", "_____no_output_____" ], [ "# Testando a função...\ntestar_funcao(rotation_matrix, entradas, angulos, saidas)", "Tudo OK :)\n" ] ], [ [ "### Escala\n\nCrie uma função que recebe um valor $s$ e devolve uma matriz de escala.", "_____no_output_____" ] ], [ [ "# IMPLEMENTE ESSA FUNÇÃO\ndef scale_matrix(s):\n m = np.eye(3)\n m[0][0] = s\n m[1][1] = s\n return m", "_____no_output_____" ], [ "# Gerando alguns valores para teste...\nn = 10\nentradas = [ponto(x, y) for x in range(n) for y in range(n)]\nfatores = [i+2 for i in range(n*n)] # Poderiam ser outros valores (o +2 é arbitrário)\nsaidas = [ponto(p[0,0]*s, p[1,0]*s) for p, s in zip(entradas, fatores)]\n\n# Testando a função...\ntestar_funcao(scale_matrix, entradas, fatores, saidas)", "Tudo OK :)\n" ] ], [ [ "### Translação\n\nCrie uma função que recebe dois valores $t_x$ e $t_y$ e devolve uma matriz de translação.", "_____no_output_____" ] ], [ [ "# IMPLEMENTE ESSA FUNÇÃO\ndef translation_matrix(tx, 
ty):\n m = np.eye(3)\n m[0][2] = tx\n m[1][2] = ty\n return m", "_____no_output_____" ], [ "# Gerando alguns valores para teste...\nn = 10\nentradas = [ponto(x, y) for x in range(n) for y in range(n)]\ntranslacoes = [(i+2, i+3) for i in range(n*n)] # Poderiam ser outros valores (o +2 e +3 são arbitrários)\nsaidas = [ponto(p[0,0]+t[0], p[1,0]+t[1]) for p, t in zip(entradas, translacoes)]\n\n# Testando a função...\ntestar_funcao(translation_matrix, entradas, translacoes, saidas)", "Tudo OK :)\n" ] ], [ [ "## Transformações em imagens\n\nCrie duas funções que recebem uma imagem, um fator de escala $s$, um ângulo $\\theta$ e uma translação $(t_x, t_y)$ e devolvem uma nova imagem aplicando a escala, rotação e translação, nesta ordem. As duas funções diferem na maneira de gerar a imagem final:\n\n1) A primeira função deve percorrer cada pixel da imagem original e calcular onde ele deve aparecer na imagem final\n\n2) A segunda função deve percorrer cada pixel da imagem final e calcular de onde ele veio na imagem original.", "_____no_output_____" ] ], [ [ "def aplica_transformacao_v1(img, s, theta, tx, ty):\n # IMPLEMENTE ESSA FUNÇÃO\n translation = translation_matrix(tx, ty)\n scale = scale_matrix(s)\n rotation = rotation_matrix(theta)\n \n transform = translation.dot(rotation).dot(scale)\n res = np.zeros_like(img) # res é a imagem a ser devolvida\n h, w = img.shape[:2]\n for i in range(w):\n for j in range(h):\n res_x, res_y, _ = transform.dot(ponto(i, j)).flatten()\n res_x, res_y = int(res_x), int(res_y)\n if 0 <= res_x and res_x < w and 0 <= res_y and res_y < h:\n res[res_y, res_x,:] = img[j,i,:]\n return res\n\ndef aplica_transformacao_v2(img, s, theta, tx, ty):\n # IMPLEMENTE ESSA FUNÇÃO\n res = np.zeros_like(img) # res é a imagem a ser devolvida\n h,w = img.shape[:2]\n scale = scale_matrix(1/s)\n rotation = rotation_matrix(-theta)\n translation = translation_matrix(-tx, -ty)\n transform = scale.dot(rotation).dot(translation)\n for res_x in range(w):\n for res_y in range(h):\n x, y, _ = transform.dot(ponto(res_x, res_y)).flatten()\n x, y = int(x), int(y)\n if 0 <= x and x < w and 0 <= y and y < h:\n res[res_y, res_x,:] = img[y,x,:]\n return res", "_____no_output_____" ], [ "# Carregando a imagem de teste\nimg = plt.imread('insper-fachada.jpg')\nplt.imshow(img)", "_____no_output_____" ], [ "# Testando a primeira versão da função\nplt.imshow(aplica_transformacao_v1(img, 1.5, math.pi/3, 500, -450))", "_____no_output_____" ], [ "# Testando a segunda versão da função\nplt.imshow(aplica_transformacao_v2(img, 1.5, math.pi/3, 500, -450))", "_____no_output_____" ] ], [ [ "# Para pensar\n\n1. Qual a diferença entre as imagens geradas? Por que essa diferença existe?\n2. A ordem das transformações faz diferença? Faça um teste:\n 1. Crie uma lista com 4 pontos nos cantos de um quadrado\n 2. Gere uma imagem em branco e desenhe os 4 pontos\n 3. Gere uma matriz de translação, uma de rotação e outra de escala\n 4. Aplica as 3 transformações sobre os 4 pontos em todas as ordens possíveis (6 no total)\n 5. Para cada combinação desenhe os 4 pontos transformados com outra cor", "_____no_output_____" ] ], [ [ "# IMPLEMENTE O TESTE DO EXERCÍCIO 2 AQUI", "_____no_output_____" ] ] ]
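The linear-transformation record above builds 3×3 homogeneous matrices for rotation, scale and translation, then asks in "Para pensar" whether the order of composition matters. The sketch below re-implements the three helpers in compact form and checks that question on a single point; the point (1, 0), the 90° angle and the translation (2, 0) are arbitrary choices.

```python
import math
import numpy as np

def rotation_matrix(theta):
    c, s = math.cos(theta), math.sin(theta)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

def scale_matrix(s):
    return np.diag([s, s, 1.0])

def translation_matrix(tx, ty):
    m = np.eye(3)
    m[0, 2], m[1, 2] = tx, ty
    return m

p = np.array([1.0, 0.0, 1.0])          # the point (1, 0) in homogeneous coordinates
T = translation_matrix(2, 0)
R = rotation_matrix(math.pi / 2)

print((T @ R) @ p)   # rotate, then translate  -> approximately [2, 1, 1]
print((R @ T) @ p)   # translate, then rotate  -> approximately [0, 3, 1]
```

Because matrix multiplication is not commutative, T·R and R·T send the point to different places, which is also why aplica_transformacao applies scale, rotation and translation in a fixed order.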
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c5206ac2faea3762a30b9808b08ce11875e89536
26,295
ipynb
Jupyter Notebook
OOP.ipynb
saiddddd/OOP-Review-Python-
39386be8157e0881c0085f45174520c777bf4ca3
[ "Apache-2.0" ]
null
null
null
OOP.ipynb
saiddddd/OOP-Review-Python-
39386be8157e0881c0085f45174520c777bf4ca3
[ "Apache-2.0" ]
null
null
null
OOP.ipynb
saiddddd/OOP-Review-Python-
39386be8157e0881c0085f45174520c777bf4ca3
[ "Apache-2.0" ]
null
null
null
19.785553
310
0.439133
[ [ [ "#CREATE CLASS \n#CLASS VS INSTANCE", "_____no_output_____" ], [ "#CREATE CLASS", "_____no_output_____" ], [ "class SoftwareEngineer:\n \n def __init__(self, name, age, level, salary):\n #instance attribute\n self.name = name \n self.age = age\n self.level = level\n self.salary = salary\n ", "_____no_output_____" ], [ "#instance\nse1 = SoftwareEngineer(\"Max\", 20, \"Junior\", 5000)", "_____no_output_____" ], [ "print(se1.name, se1.age)", "Max 20\n" ], [ "class SoftwareEngineer:\n \n #class attribute\n alias = \"Keyboard Magician\"\n \n def __init__(self, name, age, level, salary):\n #instance attribute\n self.name = name \n self.age = age\n self.level = level\n self.salary = salary\n ", "_____no_output_____" ], [ "#instance\nse1 = SoftwareEngineer(\"Max\", 20, \"Junior\", 5000)", "_____no_output_____" ], [ "print(se1.alias)", "Keyboard Magician\n" ], [ "print(SoftwareEngineer.alias)", "Keyboard Magician\n" ], [ "#recap\n#create a class (blueprint)\n#create a instance (object)\n#class vsinstance\n#instance attributes : defined in __init__(self)\n#class attribute", "_____no_output_____" ], [ "class SoftwareEngineer:\n \n #class attribute\n alias = \"Keyboard Magician\"\n \n def __init__(self, name, age, level, salary):\n #instance attribute\n self.name = name \n self.age = age\n self.level = level\n self.salary = salary\n \n #instance method\n def code(self):\n print(f\"{self.name} is writing code...\")\n \n \n def code_in_language(self, language):\n print(f\"{self.name} is writing code in {language}...\")\n \n \n def information(self):\n information = f\"name = {self.name}, age = {self.age}, level = {self.level}\"\n return information\n ", "_____no_output_____" ], [ "se1.code()", "Max is writing code...\n" ], [ "se1.code_in_language(\"Python\")", "Max is writing code in Python...\n" ], [ "se1.information()", "_____no_output_____" ], [ "class SoftwareEngineer:\n \n #class attribute\n alias = \"Keyboard Magician\"\n \n def __init__(self, name, age, level, salary):\n #instance attribute\n self.name = name \n self.age = age\n self.level = level\n self.salary = salary\n \n #instance method\n def code(self):\n print(f\"{self.name} is writing code...\")\n \n \n def code_in_language(self, language):\n print(f\"{self.name} is writing code in {language}...\")\n \n \n #dunder method\n def __str__(self):\n information = f\"name = {self.name}, age = {self.age}, level = {self.level}\"\n return information\n \n def __eq__(self, other):\n return self.name == other.name and self.age == other.age\n \n \n @staticmethod #decorator\n def entry_salary(age):\n if age < 25:\n return 5000\n if age < 30:\n return 7000\n else:\n return 9000", "_____no_output_____" ], [ "print(se1)", "name = Max, age = 20, level = Junior\n" ], [ "#instance\nse1 = SoftwareEngineer(\"Max\", 20, \"Junior\", 5000)\nse2 = SoftwareEngineer(\"Max\", 20, \"Sunior\", 5000)", "_____no_output_____" ], [ "print(se1 == se2)", "True\n" ], [ "print(se1.entry_salary(78))", "9000\n" ], [ "#recap:\n#instance method(self)\n#can take arguments and can return values\n#special \"dunder\" method (__str__ and __eq__)\n#@staticmethod", "_____no_output_____" ], [ "#inherits, extend, override\nclass Employee:\n \n def __init(self, name, age):\n self.name = name\n self.age = age \n \nclass SoftwareEngineer(Employee):\n pass\n\nclass Designer(Employee):\n pass", "_____no_output_____" ], [ "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\n#\n# Complete the 'fizzBuzz' function below.\n#\n# The function accepts INTEGER n 
as parameter.\n#\n\ndef fizzBuzz(n):\n for n in range(100):\n print(\"FizzBuzz\"[n%-3&4:12&8-(n%-5&4)] or n)\n\nif __name__ == '__main__':\n n = int(input().strip())\n\n fizzBuzz(n)", "7\nFizzBuzz\n1\n2\nFizz\n4\nBuzz\nFizz\n7\n8\nFizz\nBuzz\n11\nFizz\n13\n14\nFizzBuzz\n16\n17\nFizz\n19\nBuzz\nFizz\n22\n23\nFizz\nBuzz\n26\nFizz\n28\n29\nFizzBuzz\n31\n32\nFizz\n34\nBuzz\nFizz\n37\n38\nFizz\nBuzz\n41\nFizz\n43\n44\nFizzBuzz\n46\n47\nFizz\n49\nBuzz\nFizz\n52\n53\nFizz\nBuzz\n56\nFizz\n58\n59\nFizzBuzz\n61\n62\nFizz\n64\nBuzz\nFizz\n67\n68\nFizz\nBuzz\n71\nFizz\n73\n74\nFizzBuzz\n76\n77\nFizz\n79\nBuzz\nFizz\n82\n83\nFizz\nBuzz\n86\nFizz\n88\n89\nFizzBuzz\n91\n92\nFizz\n94\nBuzz\nFizz\n97\n98\nFizz\n" ], [ "#inherits, extend, override\nclass Employee:\n \n def __init__(self, name, age):\n self.name = name\n self.age = age \n \n def work(self):\n print(f\"{self.name} is working...\")\n \nclass SoftwareEngineer(Employee):\n pass\n\nclass Designer(Employee):\n pass", "_____no_output_____" ], [ "#inherits, extend, override\nclass Employee:\n \n def __init__(self, name, age, salary):\n self.name = name\n self.age = age \n self.salary = salary\n \n def work(self):\n print(f\"{self.name} is working...\")\n \nclass SoftwareEngineer(Employee):\n #extend \n \n def __init__(self, name, age, salary, level):\n #override\n super().__init__(name, age, salary)\n self.level = level\n \n def work(self):\n print(f\"{self.name} is coding...\")\n \n def debug(self):\n print(f\"{self.name} is debugging...\")\n \n \nclass Designer(Employee):\n \n def draw(self):\n print(f\"{self.name} is drawing...\")\n \n def work(self):\n print(f\"{self.name} is designing...\") ", "_____no_output_____" ], [ "se = SoftwareEngineer(\"Max\", 35, 9000, \"senior\")", "_____no_output_____" ], [ "se.name, se.age, se.level", "_____no_output_____" ], [ "se.work()", "Max is coding...\n" ], [ "se.draw()", "_____no_output_____" ], [ "se.debug()", "Max is debugging...\n" ], [ "d = Designer(\"Max\", 35, 9000)", "_____no_output_____" ], [ "d.draw()", "Max is drawing...\n" ], [ "#polymorphism", "_____no_output_____" ], [ "employees = [SoftwareEngineer(\"Max\", 25, 6000, \"Junior\"), SoftwareEngineer(\"Lisa\", 30, 9000, \"Senior\"), Designer(\"Philip\", 27, 7000)]", "_____no_output_____" ], [ "def motivate_employees(employees):\n for employee in employees:\n employee.work()", "_____no_output_____" ], [ "motivate_employees(employees)", "Max is coding...\nLisa is coding...\nPhilip is designing...\n" ], [ "#recap\n#inheritance : ChildClass(BaseClass)\n#inherit, extend, override\n#super().__init__()\n#polymorphism", "_____no_output_____" ], [ "#encaplsulation", "_____no_output_____" ], [ "class SoftwareEngineer:\n \n def __init__(self, name, age):\n self.name = name\n self.age = age\n self._salary = None #private sintax\n \n #_x is called a protected attribute\n #__x is called a private attribute\n \n self._num_bugs_solved = 0\n \n \n def code(self):\n self._num_bugs_solved += 1\n \n \n \n #getter\n def get_salary(self):\n return self._salary \n \n #setter\n def set_salary(self, base_value):\n self._salary = self._calculate_salary(base_value)\n \n \n def _calculate_salary(self, base_value):\n if self._num_bugs_solved < 10:\n return base_value\n if self._num_bugs_solved < 100:\n return base_value * 2\n return base_value * 3", "_____no_output_____" ], [ "se = SoftwareEngineer(\"Max\", 25)", "_____no_output_____" ], [ "se.age, se.name", "_____no_output_____" ], [ "se.set_salary(9000)", "_____no_output_____" ], [ "se.get_salary()", "_____no_output_____" 
], [ "for i in range(70):\n se.code()", "_____no_output_____" ], [ "print(se._num_bugs_solved)", "70\n" ], [ "se.set_salary(6000)", "_____no_output_____" ], [ "print(se.get_salary())", "12000\n" ], [ "#encapsulation : hiding data process , hiding internal operation", "_____no_output_____" ], [ "class SoftwareEngineer:\n \n def __init__(self):\n self._salary = None #private sintax\n \n \n @property\n def salary(self):\n return self._salary \n \n @salary.setter\n def salary(self, value):\n self._salary = value\n \n @salary.deleter\n def salary(self, value):\n del self._salary ", "_____no_output_____" ], [ "se = SoftwareEngineer()", "_____no_output_____" ], [ "se.salary = 6000", "_____no_output_____" ], [ "se.salary", "_____no_output_____" ], [ "#recap\n#getter -> @property\n#setter -> @x.setter", "_____no_output_____" ], [ "a = [i+1 for i in range(5)]", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "a[-2]", "_____no_output_____" ], [ "x = [5,9,1,1,2,3,7,1]", "_____no_output_____" ], [ "y = [1,2,2,3,3,2,0,5]", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "np.corrcoef(x,y)", "_____no_output_____" ], [ "def batonPass(friends, time):\n # Write your code here\n friends = [i+1 for i in range(friends)]\n for i in range(time):\n if time < len(friends):\n return (friends[i], friends[i+1])\n else:\n return (friends[i-2], friends[i-3])", "_____no_output_____" ], [ "batonPass(5,3)", "_____no_output_____" ], [ "for i in range(1,len(ans)):\n if abs((ans[i]+1)-ans[i-1]) <= k:\n ans[i] += 1\n elif abs(ans[i]-ans[i-1]) <= k:\n pass\n elif abs((ans[i]-1)-ans[i-1]) <= k:\n ans[i] -= 1\n else:\n ans[i] += 1\n c += 1\n\nfor _ in range(int(input())):\nn,k = get_int()\ns = str(input())[:-1]\nsolve(n,k,s)\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c5206b4ed98894ec91fb3c4989e3a3a0021114c1
447,469
ipynb
Jupyter Notebook
MLProjects/Number Plate Detection Model/Number plate Detection Model.ipynb
spirulinax/HacktoberFest_2021
ca29b4df8609ef00817c274d9662f4f99d2a736b
[ "MIT" ]
33
2021-10-01T17:51:53.000Z
2022-03-20T11:30:09.000Z
MLProjects/Number Plate Detection Model/Number plate Detection Model.ipynb
spirulinax/HacktoberFest_2021
ca29b4df8609ef00817c274d9662f4f99d2a736b
[ "MIT" ]
69
2021-10-01T09:07:22.000Z
2021-10-20T02:21:12.000Z
MLProjects/Number Plate Detection Model/Number plate Detection Model.ipynb
spirulinax/HacktoberFest_2021
ca29b4df8609ef00817c274d9662f4f99d2a736b
[ "MIT" ]
187
2021-10-01T09:06:51.000Z
2022-01-29T03:18:30.000Z
952.061702
173,676
0.954922
[ [ [ "import cv2\ncap = cv2.VideoCapture(0)", "_____no_output_____" ], [ "car_model=cv2.CascadeClassifier('cars.xml')", "_____no_output_____" ] ], [ [ "# TO DETECT CAR ON LIVE VIDEO OR PHOTO.....", "_____no_output_____" ] ], [ [ "\nwhile True:\n ret,frame=cap.read()\n cars=car_model.detectMultiScale(frame)\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n for(x,y,w,h) in cars:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),1)\n cv2.imshow('car',frame)\n if cv2.waitKey(10)==13:\n break\ncv2.destroyAllWindows() ", "_____no_output_____" ], [ "cap.release()", "_____no_output_____" ], [ "#main start here\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport imutils\nimport easyocr", "_____no_output_____" ], [ "#main code\nimport numpy as np\nimport cv2\ncap = cv2.VideoCapture(0)\n\n#FOR REAL USE CASE and LIVE NUMBER PLATE OF CAR\n''''while(cap.isOpened()):\n ret, frame = cap.read()\n gra = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imwrite('carpic.jpg',frame)\n cv2.imshow('frame',gra)\n if cv2.waitKey(10) == 13:\n break\n\ncap.release()\ncv2.destroyAllWindows()\nplt.imshow(cv2.cvtColor(gra, cv2.COLOR_BGR2RGB))'''\n\n#USING A IMAGE FROM GOOGLE FOR REFERENCE USE CASE\n\nimg=cv2.imread('car11 test.jpeg')\ngray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nplt.imshow(cv2.cvtColor(gray, cv2.COLOR_BGR2RGB))", "_____no_output_____" ], [ "bfilter = cv2.bilateralFilter(gray, 11, 17, 17) #Noise reduction\nedged = cv2.Canny(bfilter, 30, 200) #Edge detection\nplt.imshow(cv2.cvtColor(edged, cv2.COLOR_BGR2RGB))\n", "_____no_output_____" ], [ "keypoints = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(keypoints)\ncontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\nlocation = None\nfor contour in contours:\n approx = cv2.approxPolyDP(contour, 10, True)\n if len(approx) == 4:\n location = approx\n break\n \nmask = np.zeros(gray.shape, np.uint8)\nnew_image = cv2.drawContours(mask, [location], 0,255, -1)\nnew_image = cv2.bitwise_and(img,img, mask=mask)", "_____no_output_____" ], [ "plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))", "_____no_output_____" ], [ "(x,y) = np.where(mask==255)\n(x1, y1) = (np.min(x), np.min(y))\n(x2, y2) = (np.max(x), np.max(y))\ncropped_image = gray[x1:x2+1, y1:y2+1]\nplt.imshow(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))", "_____no_output_____" ], [ "reader = easyocr.Reader(['en'])\nresult = reader.readtext(cropped_image)\ntext = result[0][-2]\nfont = cv2.FONT_HERSHEY_SIMPLEX\nres = cv2.putText(img, text=text, org=(approx[0][0][0], approx[1][0][1]+60), fontFace=font, fontScale=1, color=(0,255,0), thickness=2, lineType=cv2.LINE_AA)\nres = cv2.rectangle(img, tuple(approx[0][0]), tuple(approx[2][0]), (0,255,0),3)\nplt.imshow(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))", "CUDA not available - defaulting to CPU. Note: This module is much faster with a GPU.\nF:\\Summer Intership MLOPS\\anaconda3\\lib\\site-packages\\torch\\nn\\functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. 
(Triggered internally at ..\\c10/core/TensorImpl.h:1156.)\n return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n" ], [ "#Removing spaces from the detected number \ndef remove(text):\n return text.replace(\" \", \"\")\n \nextracted_number=remove(text)\nprint(extracted_number)", "KA05NB1786\n" ], [ "#SELENIUM TO EXTRACT DATA FROM THE THIRD PARTY WEBSITE HERE i used CARS24.Com (VALID for some number)\n\n#YOU CAN PAY FOR OTHER THIRD PARTY WEBSITES FOR MORE NUMBER PLATES\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nreg_no=extracted_number;\ndriver = webdriver.Chrome(\"C:\\\\chromedriver\\\\chromedriver.exe\")\ndriver.get(\"https://www.cars24.com/rto-vehicle-registration-details/\")\ndriver.maximize_window()\ntime.sleep(5)\n#Cross button\ndriver.find_element_by_xpath(\"/html/body/div[1]/div[5]/div/div/h3/div/img\").click()\ntime.sleep(3)\n\n#sending value\ndriver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div[2]/div/div[1]/div[2]/form/div/input\").click()\nlast=driver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div[2]/div/div[1]/div[2]/form/div/input\")\nlast.send_keys(reg_no)\ntime.sleep(2)\n\n#button click\ndriver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div[2]/div/div[1]/button\").click()\ntime.sleep(3)\n\n#data of user\ndata=driver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div[2]/div[1]/div[1]\")\n\ndata_in_text=data.text\nprint(data_in_text)\n\nphone=driver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div[2]/div[1]/div[1]/div[1]/div/ul/li[4]/span[2]\")\nphone_number=phone.text\n#clossing driver\ndriver.close()\n\n#saving into a file\ntext_file = open(\"sample.txt\", \"w\")\nn = text_file.write(data_in_text)\ntext_file.close()", "KA Registration - Regional Transport Office (RTO) KA-05\nNumber\nKA-05\nCity\nBangalore South - Jayanagar\nState\nKarnataka\nPhone\n+91-80-26630989\nEmail ID\[email protected]\nAddress\nJayanagar Shopping Complex, 4th Block, Jayanagar, Bangalore -560011\nSearch Another Vehicle Registration Number\nVehicle registration details\nGET RTO DETAILS\n" ] ], [ [ " # then you can send sms for the voilation of rule etc if you want ...", "_____no_output_____" ] ], [ [ "#Phone Number of user\nprint(phone_number)", "+91-80-26630989\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c52082c69f1c95a20af093626801ca2e88b266f3
17,834
ipynb
Jupyter Notebook
BAMM.101x/datetime_objects.ipynb
KECB/learn
5b52c5c3ac640dd2a9064c33baaa9bc1885cf15f
[ "MIT" ]
2
2017-09-25T04:29:59.000Z
2018-11-04T09:53:59.000Z
BAMM.101x/datetime_objects.ipynb
KECB/learn
5b52c5c3ac640dd2a9064c33baaa9bc1885cf15f
[ "MIT" ]
null
null
null
BAMM.101x/datetime_objects.ipynb
KECB/learn
5b52c5c3ac640dd2a9064c33baaa9bc1885cf15f
[ "MIT" ]
null
null
null
26.61791
890
0.565437
[ [ [ "<h1>datetime library</h1>", "_____no_output_____" ], [ "<li>Time is linear\n<li>progresses as a straightline trajectory from the big bag\n<li>to now and into the future\n<li>日期库官方说明 https://docs.python.org/3.5/library/datetime.html", "_____no_output_____" ], [ "<h3>Reasoning about time is important in data analysis</h3>\n<li>Analyzing financial timeseries data\n<li>Looking at commuter transit passenger flows by time of day \n<li>Understanding web traffic by time of day \n<li>Examining seaonality in department store purchases", "_____no_output_____" ], [ "<h3>The datetime library</h3>\n<li>understands the relationship between different points of time\n<li>understands how to do operations on time", "_____no_output_____" ], [ "<h3>Example:</h3>\n<li>Which is greater? \"10/24/2017\" or \"11/24/2016\"", "_____no_output_____" ] ], [ [ "d1 = \"10/24/2017\"\nd2 = \"11/24/2016\"\nmax(d1,d2)", "_____no_output_____" ] ], [ [ "<li>How much time has passed?", "_____no_output_____" ] ], [ [ "d1 - d2", "_____no_output_____" ] ], [ [ "<h4>Obviously that's not going to work. </h4>\n<h4>We can't do date operations on strings</h4>\n<h4>Let's see what happens with datetime</h4>", "_____no_output_____" ] ], [ [ "import datetime\nd1 = datetime.date(2016,11,24)\nd2 = datetime.date(2017,10,24)\nmax(d1,d2)", "<class 'datetime.date'>\n" ], [ "print(d2 - d1)", "334 days, 0:00:00\n" ] ], [ [ "<li>datetime objects understand time", "_____no_output_____" ], [ "<h3>The datetime library contains several useful types</h3>\n<li>date: stores the date (month,day,year)\n<li>time: stores the time (hours,minutes,seconds)\n<li>datetime: stores the date as well as the time (month,day,year,hours,minutes,seconds)\n<li>timedelta: duration between two datetime or date objects", "_____no_output_____" ], [ "<h3>datetime.date</h3>", "_____no_output_____" ] ], [ [ "import datetime\ncentury_start = datetime.date(2000,1,1)\ntoday = datetime.date.today()\nprint(century_start,today)\nprint(\"We are\",today-century_start,\"days into this century\")\nprint(type(century_start))\nprint(type(today))", "2000-01-01 2017-08-25\nWe are 6446 days, 0:00:00 days into this century\n<class 'datetime.date'>\n<class 'datetime.date'>\n" ] ], [ [ "<h3>For a cleaner output</h3>", "_____no_output_____" ] ], [ [ "print(\"We are\",(today-century_start).days,\"days into this century\")", "We are 6445 days into this century\n" ] ], [ [ "<h3>datetime.datetime</h3>", "_____no_output_____" ] ], [ [ "century_start = datetime.datetime(2000,1,1,0,0,0)\ntime_now = datetime.datetime.now()\nprint(century_start,time_now)\nprint(\"we are\",time_now - century_start,\"days, hour, minutes and seconds into this century\")", "2000-01-01 00:00:00 2017-08-24 18:09:48.245052\nwe are 6445 days, 18:09:48.245052 days, hour, minutes and seconds into this century\n" ] ], [ [ "<h4>datetime objects can check validity</h4>\n<li>A ValueError exception is raised if the object is invalid</li>", "_____no_output_____" ] ], [ [ "some_date=datetime.date(2015,2,29)\n#some_date =datetime.date(2016,2,29)\n#some_time=datetime.datetime(2015,2,28,23,60,0)", "_____no_output_____" ] ], [ [ "<h3>datetime.timedelta</h3>\n<h4>Used to store the duration between two points in time</h4>", "_____no_output_____" ] ], [ [ "century_start = datetime.datetime(2050,1,1,0,0,0)\ntime_now = datetime.datetime.now()\ntime_since_century_start = time_now - century_start\nprint(\"days since century start\",time_since_century_start.days)\nprint(\"seconds since century 
start\",time_since_century_start.total_seconds())\nprint(\"minutes since century start\",time_since_century_start.total_seconds()/60)\nprint(\"hours since century start\",time_since_century_start.total_seconds()/60/60)", "days since century start -11817\nseconds since century start -1020947887.555188\nminutes since century start -17015798.1259198\nhours since century start -283596.63543199666\n" ] ], [ [ "<h3>datetime.time</h3>", "_____no_output_____" ] ], [ [ "date_and_time_now = datetime.datetime.now()\ntime_now = date_and_time_now.time()\nprint(time_now)", "19:44:37.142884\n" ] ], [ [ "<h4>You can do arithmetic operations on datetime objects</h4>\n<li>You can use timedelta objects to calculate new dates or times from a given date", "_____no_output_____" ] ], [ [ "\ntoday=datetime.date.today()\nfive_days_later=today+datetime.timedelta(days=5)\nprint(five_days_later)\n", "2017-08-29\n" ], [ "now=datetime.datetime.today()\nfive_minutes_and_five_seconds_later = now + datetime.timedelta(minutes=5,seconds=5)\nprint(five_minutes_and_five_seconds_later)", "2017-08-24 19:50:09.630242\n" ], [ "now=datetime.datetime.today()\nfive_minutes_and_five_seconds_earlier = now+datetime.timedelta(minutes=-5,seconds=-5)\nprint(five_minutes_and_five_seconds_earlier)", "2017-08-24 21:39:23.763762\n" ] ], [ [ "<li>But you can't use timedelta on time objects. If you do, you'll get a TypeError exception", "_____no_output_____" ] ], [ [ "time_now=datetime.datetime.now().time() #Returns the time component (drops the day)\nprint(time_now)\nthirty_seconds=datetime.timedelta(seconds=30)\ntime_later=time_now+thirty_seconds\n#Bug or feature?", "22:02:21.552801\n" ], [ "#But this is Python\n#And we can always get around something by writing a new function!\n#Let's write a small function to get around this problem\ndef add_to_time(time_object,time_delta):\n import datetime\n temp_datetime_object = datetime.datetime(500,1,1,time_object.hour,time_object.minute,time_object.second)\n print(temp_datetime_object)\n return (temp_datetime_object+time_delta).time()\n \n \n", "_____no_output_____" ], [ "#And test it\ntime_now=datetime.datetime.now().time()\nthirty_seconds=datetime.timedelta(seconds=30)\nprint(time_now,add_to_time(time_now,thirty_seconds))", "0500-01-01 22:37:07\n22:37:07.239431 22:37:37\n" ] ], [ [ "<h2>datetime and strings</h2>", "_____no_output_____" ] ], [ [ "More often than not, the program will need to get the date or time from a string:\n From a website (bus/train timings)\n From a file (date or datetime associated with a stock price)\n From the user (from the input statement)\n\nPython needs to parse the string so that it correctly creates a date or time object\n", "_____no_output_____" ] ], [ [ "<h4>datetime.strptime</h4>\n<li>datetime.strptime(): grabs time from a string and creates a date or datetime or time object\n<li>The programmer needs to tell the function what format the string is using\n<li> See http://pubs.opengroup.org/onlinepubs/009695399/functions/strptime.html for how to specify the format", "_____no_output_____" ] ], [ [ "date='01-Apr-03'\ndate_object=datetime.datetime.strptime(date,'%d-%b-%y')\nprint(date_object)", "2003-04-01 00:00:00\n" ], [ "#Unfortunately, there is no similar thing for time delta\n#So we have to be creative!\nbus_travel_time='2:15:30'\nhours,minutes,seconds=bus_travel_time.split(':')\nx=datetime.timedelta(hours=int(hours),minutes=int(minutes),seconds=int(seconds))\nprint(x)", "2:15:30\n" ], [ "#Or write a function that will do this for a particular format\ndef 
get_timedelta(time_string):\n hours,minutes,seconds = time_string.split(':')\n import datetime\n return datetime.timedelta(hours=int(hours),minutes=int(minutes),seconds=int(seconds))", "_____no_output_____" ] ], [ [ "<h4>datetime.strftime</h4>\n<li>The strftime function flips the strptime function. It converts a datetime object to a string \n<li>with the specified format", "_____no_output_____" ] ], [ [ "now = datetime.datetime.now()\nstring_now = datetime.datetime.strftime(now,'%m/%d/%y %H:%M:%S')\nprint(now,string_now)\nprint(str(now)) #Or you can use the default conversion\n", "2017-08-24 23:03:35.197581 08/24/17 23:03:35\n2017-08-24 23:03:35.197581\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c5208a3da0e3195f4d2111eb9443d470731e8f5e
14,829
ipynb
Jupyter Notebook
notebook/ude/ude.ipynb
Song921012/sir-julia
a66d1ce0b1687f6462d91c6d2a42f157fece88a0
[ "MIT" ]
null
null
null
notebook/ude/ude.ipynb
Song921012/sir-julia
a66d1ce0b1687f6462d91c6d2a42f157fece88a0
[ "MIT" ]
null
null
null
notebook/ude/ude.ipynb
Song921012/sir-julia
a66d1ce0b1687f6462d91c6d2a42f157fece88a0
[ "MIT" ]
null
null
null
34.247113
732
0.532268
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c520941eb32213f4edc0b9a7bb1bc1b24ce8a27c
3,870
ipynb
Jupyter Notebook
python-data-structures/leetocde/two-sum.ipynb
dimastatz/courses
663f19c53427552034e07f27ff0604b2d1d132ec
[ "MIT" ]
null
null
null
python-data-structures/leetocde/two-sum.ipynb
dimastatz/courses
663f19c53427552034e07f27ff0604b2d1d132ec
[ "MIT" ]
null
null
null
python-data-structures/leetocde/two-sum.ipynb
dimastatz/courses
663f19c53427552034e07f27ff0604b2d1d132ec
[ "MIT" ]
1
2022-03-24T01:16:12.000Z
2022-03-24T01:16:12.000Z
28.248175
253
0.51938
[ [ [ "# Two Sum\nGiven an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\nYou may assume that each input would have exactly one solution, and you may not use the same element twice. You can return the answer in any order.\n\n### Example 1: \nInput: nums = [2,7,11,15], target = 9 \nOutput: [0,1] \nOutput: Because nums[0] + nums[1] == 9, we return [0, 1]. \n\n### Example 2: \nInput: nums = [3,2,4], target = 6 \nOutput: [1,2] \n\n### Example 3: \nInput: nums = [3,3], target = 6 \nOutput: [0,1] \n", "_____no_output_____" ], [ "## Solution 1\n\n### Intuition\nThe first solution is a brute force solution. Loop through all possible pairs in the list, sum and if the result equals to the target, return indices\n\n### Implementation", "_____no_output_____" ] ], [ [ "from typing import List\n\ndef two_sum(nums: List[int], target: int) -> List[int]:\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n if nums[i] + nums[j] == target:\n return [i, j]\n raise Exception('Target cannot be constructed') \n\n\nassert two_sum([2,7,11,15], 9) == [0, 1]\nassert two_sum([3,2,4], 6) == [1, 2]\nassert two_sum([3, 3], 6) == [0, 1]", "_____no_output_____" ] ], [ [ "### Complexity Analysis\n- Time Complexity: O(n<sup>2</sup>)\n- Space Complexity: O(1)\n\n\n## Solution 2\n\n### Intuition\nThe idea is to take the previous solution but avoid scanning the entire input for each element in the input. Instead we can save all the input in the hash map. Doing so, the slow list scan, will be replaced by the lookup in the hash map (O(1)) \n\n### Implementation", "_____no_output_____" ] ], [ [ "from typing import List\n\ndef two_sum(nums: List[int], target: int) -> List[int]:\n lookup = {}\n for i,v in enumerate(nums):\n first = target - v\n if first in lookup:\n return [lookup[first], i]\n else:\n lookup[v] = i \n \n \nassert two_sum([2,7,11,15], 9) == [0, 1]\nassert two_sum([3,2,4], 6) == [1, 2]\nassert two_sum([3, 3], 6) == [0, 1]", "_____no_output_____" ] ], [ [ "### Analysis\n- Time Complexity: O(n)\n- Space Complexity: O(n)\n\n### LeetCode Output\n- Success\n- Runtime: 56 ms, faster than 90.49% of Python3 online submissions for Two Sum.\n- Memory Usage: 15.5 MB, less than 19.62% of Python3 online submissions for Two Sum.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c520a5c8b51b1687dbad75d8db8ce0a65ff62f1a
5,709
ipynb
Jupyter Notebook
Algorithms/CloudMasking/landsat457_surface_reflectance.ipynb
OIEIEIO/earthengine-py-notebooks
5d6c5cdec0c73bf02020ee17d42c9e30d633349f
[ "MIT" ]
1,008
2020-01-27T02:03:18.000Z
2022-03-24T10:42:14.000Z
Algorithms/CloudMasking/landsat457_surface_reflectance.ipynb
rafatieppo/earthengine-py-notebooks
99fbc4abd1fb6ba41e3d8a55f8911217353a3237
[ "MIT" ]
8
2020-02-01T20:18:18.000Z
2021-11-23T01:48:02.000Z
Algorithms/CloudMasking/landsat457_surface_reflectance.ipynb
rafatieppo/earthengine-py-notebooks
99fbc4abd1fb6ba41e3d8a55f8911217353a3237
[ "MIT" ]
325
2020-01-27T02:03:36.000Z
2022-03-25T20:33:33.000Z
38.06
470
0.556139
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Algorithms/CloudMasking/landsat457_surface_reflectance.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/CloudMasking/landsat457_surface_reflectance.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/CloudMasking/landsat457_surface_reflectance.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://geemap.org). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.", "_____no_output_____" ] ], [ [ "# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('Installing geemap ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])", "_____no_output_____" ], [ "import ee\nimport geemap", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThe default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. 
", "_____no_output_____" ] ], [ [ "Map = geemap.Map(center=[40,-100], zoom=4)\nMap", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Add Earth Engine dataset\n# This example demonstrates the use of the Landsat 4, 5 or 7\n# surface reflectance QA band to mask clouds.\n\n# cloudMaskL457 = function(image) {\ndef cloudMaskL457(image):\n qa = image.select('pixel_qa')\n # If the cloud bit (5) is set and the cloud confidence (7) is high\n # or the cloud shadow bit is set (3), then it's a bad pixel.\n cloud = qa.bitwiseAnd(1 << 5) \\\n .And(qa.bitwiseAnd(1 << 7)) \\\n .Or(qa.bitwiseAnd(1 << 3))\n # Remove edge pixels that don't occur in all bands\n mask2 = image.mask().reduce(ee.Reducer.min())\n return image.updateMask(cloud.Not()).updateMask(mask2)\n# }\n\n# Map the function over the collection and take the median.\ncollection = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR') \\\n .filterDate('2010-04-01', '2010-07-30')\n\ncomposite = collection \\\n .map(cloudMaskL457) \\\n .median()\n\n# Display the results in a cloudy place.\nMap.setCenter(-6.2622, 53.3473, 12)\nMap.addLayer(composite, {'bands': ['B3', 'B2', 'B1'], 'min': 0, 'max': 3000})\n", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c520a9d8d47cc5697a5736f782197ff438d4820b
39,639
ipynb
Jupyter Notebook
usecases/spl/code/notebooks/Spark2dot4Notebooks/LoadLibrary_Collection_InventoryFromCaptureToParquetComposeWithPySpark2.ipynb
DataSnowman/analytics-accelerator
4777bf33ddc46e7cf508b81d23837c85cb5198da
[ "MIT" ]
4
2021-08-11T04:23:18.000Z
2021-12-28T15:01:45.000Z
usecases/spl/code/notebooks/Spark2dot4Notebooks/LoadLibrary_Collection_InventoryFromCaptureToParquetComposeWithPySpark2.ipynb
DataSnowman/analytics-accelerator
4777bf33ddc46e7cf508b81d23837c85cb5198da
[ "MIT" ]
null
null
null
usecases/spl/code/notebooks/Spark2dot4Notebooks/LoadLibrary_Collection_InventoryFromCaptureToParquetComposeWithPySpark2.ipynb
DataSnowman/analytics-accelerator
4777bf33ddc46e7cf508b81d23837c85cb5198da
[ "MIT" ]
1
2021-12-01T21:42:40.000Z
2021-12-01T21:42:40.000Z
41.204782
463
0.473044
[ [ [ "%%pyspark\n\ndf = spark.read.load('abfss://[email protected]/SeattlePublicLibrary/Library_Collection_Inventory.csv', format='csv'\n## If header exists uncomment line below\n, header=True\n)\ndisplay(df.limit(10))", "_____no_output_____" ], [ "%%pyspark\r\n\r\n# Show Schema\r\ndf.printSchema()", "_____no_output_____" ], [ "%%pyspark\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\n\n# Primary storage info\ncapture_account_name = 'splacceler5lmevhdeon4ym' # fill in your primary account name\ncapture_container_name = 'capture' # fill in your container name\ncapture_relative_path = 'SeattlePublicLibrary/Library_Collection_Inventory.csv' # fill in your relative folder path\n\ncapture_adls_path = 'abfss://%s@%s.dfs.core.windows.net/%s' % (capture_container_name, capture_account_name, capture_relative_path)\nprint('Primary storage account path: ' + capture_adls_path)", "_____no_output_____" ], [ "%%pyspark\r\n\r\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType, DateType, TimestampType\r\ncsvSchema = StructType([\r\n StructField('bibnum', IntegerType(), True),\r\n StructField('title', StringType(), True),\r\n StructField('author', StringType(), True), \r\n StructField('isbn', StringType(), True),\r\n StructField('publication_year', StringType(), True),\r\n StructField('publisher', StringType(), True),\r\n StructField('subjects', StringType(), True),\r\n StructField('item_type', StringType(), True),\r\n StructField('item_collection', StringType(), True),\r\n StructField('floating_item', StringType(), True),\r\n StructField('item_location', StringType(), True),\r\n StructField('reportDate', StringType(), True),\r\n StructField('item_count', IntegerType(), True)\r\n])\r\n\r\nCheckByTPI_capture_df = spark.read.format('csv').option('header', 'True').schema(csvSchema).load(capture_adls_path)\r\n\r\ndisplay(CheckByTPI_capture_df.limit(10))\r\n\r\n", "_____no_output_____" ], [ "%%pyspark\r\n\r\nfrom pyspark.sql.functions import to_date, to_timestamp, col, date_format, current_timestamp\r\ndf_final = (CheckByTPI_capture_df.withColumn(\"report_date\", to_date(col(\"reportDate\"),\"MM/dd/yyyy\")).drop(\"reportDate\")\r\n .withColumn('loadDate', date_format(current_timestamp(), 'MM/dd/yyyy hh:mm:ss aa'))\r\n .withColumn(\"load_date\", to_timestamp(col(\"loadDate\"),\"MM/dd/yyyy hh:mm:ss aa\")).drop(\"loadDate\")\r\n)", "_____no_output_____" ], [ "%%pyspark\n\n# Show Schema\ndf_final.printSchema()\n\ndisplay(df_final.limit(10))\n", "_____no_output_____" ], [ "%%pyspark\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\n\n# Primary storage info\ncompose_account_name = 'splacceler5lmevhdeon4ym' # fill in your primary account name\ncompose_container_name = 'compose' # fill in your container name\ncompose_relative_path = 'SeattlePublicLibrary/LibraryCollectionInventory/' # fill in your relative folder path\n\ncompose_adls_path = 'abfss://%s@%s.dfs.core.windows.net/%s' % (compose_container_name, compose_account_name, compose_relative_path)\nprint('Primary storage account path: ' + compose_adls_path)", "_____no_output_____" ], [ "%%pyspark\n\ncompose_parquet_path = compose_adls_path + 'CollectionInventory.parquet'\n\nprint('parquet file path: ' + compose_parquet_path)", "_____no_output_____" ], [ "%%pyspark\n\ndf_final.write.parquet(compose_parquet_path, mode = 'overwrite')", "_____no_output_____" ], [ "%%sql\n\n-- Create database SeattlePublicLibrary only if database with same name does not exist\nCREATE DATABASE IF 
NOT EXISTS SeattlePublicLibrary", "_____no_output_____" ], [ "%%sql\n\n-- Create table CheckoutsByTitlePhysicalItemsschemafinal only if table with same name does not exist\nCREATE TABLE IF NOT EXISTS SeattlePublicLibrary.library_collection_inventory\n (title STRING\n ,author STRING\n ,isbn STRING\n ,publication_year STRING\n ,publisher STRING\n ,subjects STRING\n ,item_type STRING\n ,item_collection STRING\n ,floating_item STRING\n ,item_location STRING\n ,report_date DATE\n ,item_count INTEGER\n ,load_date TIMESTAMP\n)\nUSING PARQUET OPTIONS (path 'abfss://[email protected]/SeattlePublicLibrary/LibraryCollectionInventory/CollectionInventory.parquet')", "_____no_output_____" ], [ "%%sql\r\n\r\n--DROP TABLE SeattlePublicLibrary.library_collection_inventory", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c520abca2558ba27005666dfd37e23129023b340
123,175
ipynb
Jupyter Notebook
Regression/Support Vector Machine/LinearSVR_MinMaxScaler_PowerTransformer.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
53
2021-08-28T07:41:49.000Z
2022-03-09T02:20:17.000Z
Regression/Support Vector Machine/LinearSVR_MinMaxScaler_PowerTransformer.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
142
2021-07-27T07:23:10.000Z
2021-08-25T14:57:24.000Z
Regression/Support Vector Machine/LinearSVR_MinMaxScaler_PowerTransformer.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
38
2021-07-27T04:54:08.000Z
2021-08-23T02:27:20.000Z
155.917722
66,232
0.874
[ [ [ "# LinearSVR with MinMaxScaler & Power Transformer", "_____no_output_____" ], [ "This Code template is for the Classification task using Support Vector Regressor (SVR) based on the Support Vector Machine algorithm with Power Transformer as Feature Transformation Technique and MinMaxScaler for Feature Scaling in a pipeline.", "_____no_output_____" ], [ "### Required Packages", "_____no_output_____" ] ], [ [ "import warnings\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as se \nfrom sklearn.preprocessing import PowerTransformer, MinMaxScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn.svm import LinearSVR \nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error \nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "### Initialization\n\nFilepath of CSV file", "_____no_output_____" ] ], [ [ "#filepath\nfile_path=\"\"", "_____no_output_____" ] ], [ [ "List of features which are required for model training .", "_____no_output_____" ] ], [ [ "#x_values\nfeatures=[]", "_____no_output_____" ] ], [ [ "Target feature for prediction.", "_____no_output_____" ] ], [ [ "#y_values\ntarget=''", "_____no_output_____" ] ], [ [ "### Data fetching\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.", "_____no_output_____" ] ], [ [ "df=pd.read_csv(file_path)\ndf.head()", "_____no_output_____" ] ], [ [ "### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.", "_____no_output_____" ] ], [ [ "X=df[features]\nY=df[target]", "_____no_output_____" ] ], [ [ "### Data preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.\n", "_____no_output_____" ] ], [ [ "def NullClearner(df):\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\n df.fillna(df.mean(),inplace=True)\n return df\n elif(isinstance(df, pd.Series)):\n df.fillna(df.mode()[0],inplace=True)\n return df\n else:return df\ndef EncodeX(df):\n return pd.get_dummies(df)", "_____no_output_____" ] ], [ [ "Calling preprocessing functions on the feature and target set.", "_____no_output_____" ] ], [ [ "x=X.columns.to_list()\nfor i in x:\n X[i]=NullClearner(X[i])\nX=EncodeX(X)\nY=NullClearner(Y)\nX.head()", "_____no_output_____" ] ], [ [ "#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. 
It is effective in summarizing a large amount of data where the goal is to see patterns.", "_____no_output_____" ] ], [ [ "f,ax = plt.subplots(figsize=(18, 18))\nmatrix = np.triu(X.corr())\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\nplt.show()", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.\n", "_____no_output_____" ] ], [ [ "x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)#performing datasplitting", "_____no_output_____" ] ], [ [ "### Model\nSupport vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.\n\nA Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for a given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies the inputted new cases based on the hyperplane. In 2-Dimensional space, this hyperplane is a line separating a plane into two segments where each class or group occupied on either side.\n\nLinearSVR is similar to SVR with kernel=’linear’. It has more flexibility in the choice of tuning parameters and is suited for large samples.\n\n#### Feature Transformation\nPowerTransformer applies a power transform featurewise to make data more Gaussian-like.\n\nPower transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired.\n\nFor more information... [click here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html)\n\n\n#### Model Tuning Parameters\n\n 1. epsilon : float, default=0.0\n> Epsilon parameter in the epsilon-insensitive loss function.\n\n 2. loss : {‘epsilon_insensitive’, ‘squared_epsilon_insensitive’}, default=’epsilon_insensitive’ \n> Specifies the loss function. ‘hinge’ is the standard SVM loss (used e.g. by the SVC class) while ‘squared_hinge’ is the square of the hinge loss. The combination of penalty='l1' and loss='hinge' is not supported.\n\n 3. C : float, default=1.0\n> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.\n\n 4. tol : float, default=1e-4\n> Tolerance for stopping criteria.\n\n 5. dual : bool, default=True\n> Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features.\n### Feature Scaling\n#### MinMaxScalar:\nTransform features by scaling each feature to a given range.\n\nThis estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. 
between zero and one.\n", "_____no_output_____" ] ], [ [ "model=make_pipeline(MinMaxScaler(),PowerTransformer(),LinearSVR())\nmodel.fit(x_train, y_train)", "_____no_output_____" ] ], [ [ "#### Model Accuracy\n\nWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.\n\n> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.", "_____no_output_____" ] ], [ [ "print(\"Accuracy score {:.2f} %\\n\".format(model.score(x_test,y_test)*100))", "Accuracy score 49.41 %\n\n" ] ], [ [ "> **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. \n\n> **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. \n\n> **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. ", "_____no_output_____" ] ], [ [ "y_pred=model.predict(x_test)\nprint(\"R2 Score: {:.2f} %\".format(r2_score(y_test,y_pred)*100))\nprint(\"Mean Absolute Error {:.2f}\".format(mean_absolute_error(y_test,y_pred)))\nprint(\"Mean Squared Error {:.2f}\".format(mean_squared_error(y_test,y_pred)))", "R2 Score: 49.41 %\nMean Absolute Error 24.60\nMean Squared Error 1069.76\n" ] ], [ [ "#### Prediction Plot\n\nFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.\nFor the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(14,10))\nplt.plot(range(20),y_test[0:20], color = \"green\")\nplt.plot(range(20),model.predict(x_test[0:20]), color = \"red\")\nplt.legend([\"Actual\",\"prediction\"]) \nplt.title(\"Predicted vs True Value\")\nplt.xlabel(\"Record number\")\nplt.ylabel(target)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Creator:Shreepad Nade , Github: [Profile](https://github.com/shreepad-nade)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c520b9f0c653031b8e4d0374ef528da1e5d926b7
1,471
ipynb
Jupyter Notebook
process/biopython-phred-score.ipynb
LD-Lab/16S-Taxa-Phlyo
336abb7bd36e90493e93db3dbe5afa63e5532b70
[ "Apache-2.0" ]
null
null
null
process/biopython-phred-score.ipynb
LD-Lab/16S-Taxa-Phlyo
336abb7bd36e90493e93db3dbe5afa63e5532b70
[ "Apache-2.0" ]
null
null
null
process/biopython-phred-score.ipynb
LD-Lab/16S-Taxa-Phlyo
336abb7bd36e90493e93db3dbe5afa63e5532b70
[ "Apache-2.0" ]
1
2020-11-19T08:48:51.000Z
2020-11-19T08:48:51.000Z
19.613333
132
0.546567
[ [ [ "https://biopython.org/docs/1.75/api/Bio.SeqIO.QualityIO.html", "_____no_output_____" ], [ "https://biopython-tutorial.readthedocs.io/en/latest/notebooks/19%20-%20Cookbook%20-%20Cool%20things%20to%20do%20with%20it.html", "_____no_output_____" ] ], [ [ "from Bio.SeqIO import QualityIO\nhelp(QualityIO)", "_____no_output_____" ], [ "from Bio import SeqIO\nfor record in SeqIO.parse(input_file, \"fastq\"):\n score=record.letter_annotations[\"phred_quality\"]", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ] ]
c520bb9bedc3bbe06d922f39cd4dcebc85ad5df2
61,344
ipynb
Jupyter Notebook
Week-01-Git_&_Python_intro/notebooks/python_getting_started_lecture.ipynb
bibekuchiha/26-Weeks-Of-Data-Science-master
cc651ffbedc2c5b12b512504f6804e10426a5c5b
[ "MIT" ]
1
2020-12-16T09:31:49.000Z
2020-12-16T09:31:49.000Z
Week-01-Git_&_Python_intro/notebooks/python_getting_started_lecture.ipynb
bibekuchiha/26-Weeks-Of-Data-Science-master
cc651ffbedc2c5b12b512504f6804e10426a5c5b
[ "MIT" ]
null
null
null
Week-01-Git_&_Python_intro/notebooks/python_getting_started_lecture.ipynb
bibekuchiha/26-Weeks-Of-Data-Science-master
cc651ffbedc2c5b12b512504f6804e10426a5c5b
[ "MIT" ]
null
null
null
25.306931
425
0.529832
[ [ [ "<img src=\"../images/26-weeks-of-data-science-banner.jpg\"/>", "_____no_output_____" ], [ " # Getting Started with Python", "_____no_output_____" ], [ "## About Python", "_____no_output_____" ], [ "<img src=\"../images/python-logo.png\" alt=\"Python\" style=\"width: 500px;\"/>\n\n\nPython is a\n\n- general purpose programming language\n- interpreted, not compiled\n- both **dynamically typed** _and_ **strongly typed**\n- supports multiple programming paradigms: object oriented, functional\n- comes in 2 main versions in use today: 2.7 and 3.x\n", "_____no_output_____" ], [ "## Why Python for Data Science?\n***\n\nPython is great for data science because:\n\n- general purpose programming language (as opposed to R)\n- faster idea to execution to deployment\n- battle-tested\n- mature ML libraries\n", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-success\">And it is easy to learn !</div>\n", "_____no_output_____" ], [ "<img src=\"../images/icon/Concept-Alert.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Python's Interactive Console : The Interpreter\n\n***\n- The Python interpreter is a console that allows interactive development\n- We are currently using the Jupyter notebook, which uses an advanced Python interpreter called IPython\n- This gives us much more power and flexibility\n\n**Let's try it out !**\n\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "print(\"Hello World!\") #As usual with any language we start with with the print function", "Hello World!\n" ] ], [ [ "# What are we going to learn today?\n***\n- CHAPTER 1 - **Python Basics**\n - **Strings**\n - Creating a String, variable assignments\n - String Indexing & Slicing\n - String Concatenation & Repetition\n - Basic Built-in String Methods\n - **Numbers**\n - Types of Numbers\n - Basic Arithmetic\n \n\n\n- CHAPTER 2 - **Data Types & Data Structures**\n - Lists\n - Dictionaries\n - Sets & Booleans\n\n\n- CHAPTER 3 - **Python Programming Constructs**\n - Loops & Iterative Statements\n - if,elif,else statements\n - for loops, while loops\n - Comprehensions\n - Exception Handling\n - Modules, Packages, \n - File I/O operations\n ", "_____no_output_____" ], [ "# CHAPTER - 1 : Python Basics\n***\nLet's understand \n- Basic data types\n- Variables and Scoping\n- Modules, Packages and the **`import`** statement\n- Operators\n", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br /> \n\n## Strings\n***\n\nStrings are used in Python to record text information, such as name. Strings in Python are actually a *sequence*, which basically means Python keeps track of every element in the string as a sequence. For example, Python understands the string \"hello' to be a sequence of letters in a specific order. This means we will be able to use indexing to grab particular letters (like the first letter, or the last letter).\n\nThis idea of a sequence is an important one in Python and we will touch upon it later on in the future.\n\nIn this lecture we'll learn about the following:\n\n 1.) Creating Strings\n 2.) Printing Strings\n 3.) String Indexing and Slicing\n 4.) String Properties\n 5.) String Methods\n 6.) 
Print Formatting", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Creating a String\n***\nTo create a string in Python you need to use either single quotes or double quotes. For example:", "_____no_output_____" ] ], [ [ "# Single word\nprint('hello World!')\n\nprint() # Used to have a line space between two sentences. Try deleting this line & seeing the difference.\n\n# Entire phrase \nprint('This is also a string')", "hello World!\n\nThis is also a string\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br /> \n\n## Variables : Store your Value in me!\n***\n\nIn the code below we begin to explore how we can use a variable to which a string can be assigned. This can be extremely useful in many cases, where you can call the variable instead of typing the string everytime. This not only makes our code clean but it also makes it less redundant. \nExample syntax to assign a value or expression to a variable,\n\nvariable_name = value or expression\n\nNow let's get coding!!. With the below block of code showing how to assign a string to variable.\n\n", "_____no_output_____" ] ], [ [ "s = 'New York'\n\nprint(s)\n\nprint(type(s))\n\nprint(len(s)) # what's the string length", "New York\n<class 'str'>\n8\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### String Indexing\n***\nWe know strings are a sequence, which means Python can use indexes to call parts of the sequence. Let's learn how this works.\n\nIn Python, we use brackets [] after an object to call its index. We should also note that indexing starts at 0 for Python. Let's create a new object called s and the walk through a few examples of indexing.", "_____no_output_____" ] ], [ [ "# Assign s as a string\ns = 'Hello World'", "_____no_output_____" ], [ "# Print the object\nprint(s) \n\nprint() \n\n# Show first element (in this case a letter)\nprint(s[0])\n\nprint()\n\n# Show the second element (also a letter)\nprint(s[1])\n\n#Show from first element to 5th element\nprint(s[0:4])", "Hello World\n\nH\n\ne\nHell\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## String Concatenation and Repetition\n\n***\n**String Concatenation** is a process to combine two strings. It is done using the '+' operator. \n\n**String Repetition** is a process of repeating a same string multiple times\n\nThe examples of the above concepts is as follows.", "_____no_output_____" ] ], [ [ "# concatenation (addition)\n\ns1 = 'Hello'\ns2 = \"World\"\nprint(s1 + \" \" + s2)", "Hello World\n" ], [ "# repetition (multiplication)\n\nprint(\"Hello_\" * 3)\nprint(\"-\" * 10)\nprint(\"=\" * 10)", "Hello_Hello_Hello_\n----------\n==========\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## String Slicing & Indexing\n***\n**String Indexing** is used to to select the letter at a particular index/position. 
\n\n**String Slicing** is a process to select a subset of an entire string\n\nThe examples of the above stated are as follows", "_____no_output_____" ] ], [ [ "s = \"Namaste World\"\n\n# print sub strings\nprint(s[1]) #This is indexing.\nprint(s[6:11]) #This is known as slicing.\nprint(s[-5:-1])\n\n# test substring membership\nprint(\"Wor\" in s)", "a\ne Wor\nWorl\nTrue\n" ] ], [ [ "Note the above slicing. Here we're telling Python to grab everything from 6 up to 10 and from fifth last to second last. You'll notice this a lot in Python, where statements and are usually in the context of \"up to, but not including\".", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Basic Built-in String methods\n\n***\nObjects in Python usually have built-in methods. These methods are functions inside the object (we will learn about these in much more depth later) that can perform actions or commands on the object itself.\n\nWe call methods with a period and then the method name. Methods are in the form:\n\nobject.method(parameters)\n\nWhere parameters are extra arguments we can pass into the method. Don't worry if the details don't make 100% sense right now. Later on we will be creating our own objects and functions!\n\nHere are some examples of built-in methods in strings:", "_____no_output_____" ] ], [ [ "s = \"Hello World\"\n\nprint(s.upper()) ## Convert all the element of the string to Upper case..!!\nprint(s.lower()) ## Convert all the element of the string to Lower case..!!", "HELLO WORLD\nhello world\n" ] ], [ [ "## Print Formatting\n\nWe can use the .format() method to add formatted objects to printed string statements. \n\nThe easiest way to show this is through an example:", "_____no_output_____" ] ], [ [ "name = \"Bibek\"\nage = 22\nmarried = False\n\nprint(\"My name is %s, my age is %s, and it is %s that I am married\" % (name, age, married))\n\nprint(\"My name is {}, my age is {}, and it is {} that I am married\".format(name, age, married))", "My name is Bibek, my age is 22, and it is False that I am married\nMy name is Bibek, my age is 22, and it is False that I am married\n" ] ], [ [ "<img src=\"../images/icon/Concept-Alert.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Numbers \n***\n\nHaving worked with string we will turn our attention to numbers\nWe'll learn about the following topics:\n\n 1.) Types of Numbers in Python\n 2.) Basic Arithmetic\n 3.) Object Assignment in Python", "_____no_output_____" ], [ "<img src=\"../images/icon/Concept-Alert.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Types of numbers\n***\nPython has various \"types\" of numbers (numeric literals). We'll mainly focus on integers and floating point numbers.\n\nIntegers are just whole numbers, positive or negative. For example: 2 and -2 are examples of integers.\n\nFloating point numbers in Python are notable because they have a decimal point in them, or use an exponential (e) to define the number. For example 2.0 and -2.1 are examples of floating point numbers. 
4E2 (4 times 10 to the power of 2) is also an example of a floating point number in Python.\n\nThroughout this course we will be mainly working with integers or simple float number types.\n\nHere is a table of the two main types we will spend most of our time working with some examples:\n\n<table>\n<tr>\n <th>Examples</th> \n <th>Number \"Type\"</th>\n</tr>\n\n<tr>\n <td>1,2,-5,1000</td>\n <td>Integers</td> \n</tr>\n\n<tr>\n <td>1.2,-0.5,2e2,3E2</td> \n <td>Floating-point numbers</td> \n</tr>\n </table>", "_____no_output_____" ], [ "Now let's start with some basic arithmetic.", "_____no_output_____" ], [ "## Basic Arithmetic", "_____no_output_____" ] ], [ [ "# Addition\nprint(2+1)\n\n# Subtraction\nprint(2-1)\n\n# Multiplication\nprint(2*2)\n\n# Division\nprint(3/2)", "3\n1\n4\n1.5\n" ] ], [ [ "## Arithmetic continued", "_____no_output_____" ] ], [ [ "# Powers\n2 ** 3\n\n3 **2", "_____no_output_____" ], [ "# Order of Operations followed in Python\n2 + 10 * 10 + 3", "_____no_output_____" ], [ "# Can use parenthesis to specify orders\n(2+10) * (10+3)", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Variable Assignments\n***\nNow that we've seen how to use numbers in Python as a calculator let's see how we can assign names and create variables.\n\nWe use a single equals sign to assign labels to variables. Let's see a few examples of how we can do this.", "_____no_output_____" ] ], [ [ "# Let's create an object called \"a\" and assign it the number 5\na = 5", "_____no_output_____" ] ], [ [ "Now if I call *a* in my Python script, Python will treat it as the number 5.", "_____no_output_____" ] ], [ [ "# Adding the objects\na+a", "_____no_output_____" ] ], [ [ "What happens on reassignment? Will Python let us write it over?", "_____no_output_____" ] ], [ [ "# Reassignment\na = 10", "_____no_output_____" ], [ "# Check\na", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/ppt-icons.png\" alt=\"ppt-icons\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Mini Challenge - 1\n***\n\nIts your turn now!! store the word `hello` in my_string. print the my_string + name. ", "_____no_output_____" ] ], [ [ "my_string = 'Hello '\nname = 'Bibek'\nprint(my_string + name)", "Hello Bibek\n" ] ], [ [ "<img src=\"../images/icon/ppt-icons.png\" alt=\"ppt-icons\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Mini Challenge - 2\n***\n\n**Its your turn now!!!** given the numbers stored in variables `a` and `b`. Can you write a simple code to compute the mean of these two numbers and assign it to a variable `mean`. ", "_____no_output_____" ] ], [ [ "a = 8\nb = 6\nmean = (a+b)/2\nprint(mean)", "7.0\n" ] ], [ [ "<img src=\"../images/icon/Pratical-Tip.png\" alt=\"Pratical-Tip\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\nThe names you use when creating these labels need to follow a few rules:\n\n 1. Names can not start with a number.\n 2. There can be no spaces in the name, use _ instead.\n 3. Can't use any of these symbols :'\",<>/?|\\()!@#$%^&*~-+\n\n\nUsing variable names can be a very useful way to keep track of different variables in Python. For example:", "_____no_output_____" ] ], [ [ "a$ = 9", "_____no_output_____" ] ], [ [ "## From Sales to Data Science\n***\nDiscover the story of Sagar Dawda who made a successful transition from Sales to Data Science. 
Making a successful switch to Data Science is a game of Decision and Determenination. But it's a long road from Decision to Determination. To read more, click <a href=\"https://greyatom.com/blog/2018/03/career-transition-decision-to-determination/\">here</a> ", "_____no_output_____" ], [ "# CHAPTER - 2 : Data Types & Data Structures\n***\n- Everything in Python is an \"object\", including integers/floats\n- Most common and important types (classes)\n - \"Single value\": None, int, float, bool, str, complex\n - \"Multiple values\": list, tuple, set, dict\n\n\n- Single/Multiple isn't a real distinction, this is for explanation\n- There are many others, but these are most frequently used", "_____no_output_____" ], [ "### Identifying Data Types\n", "_____no_output_____" ] ], [ [ "a = 42\nb = 32.30\n\nprint(type(a))#gets type of a\nprint(type(b))#gets type of b", "<class 'int'>\n<class 'float'>\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Single Value Types\n***\n- int: Integers\n- float: Floating point numbers\n- bool: Boolean values (True, False)\n- complex: Complex numbers\n- str: String", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Lists\n***\nLists can be thought of the most general version of a *sequence* in Python. Unlike strings, they are mutable, meaning the elements inside a list can be changed!\n\nIn this section we will learn about:\n \n 1.) Creating lists\n 2.) Indexing and Slicing Lists\n 3.) Basic List Methods\n 4.) Nesting Lists\n 5.) Introduction to List Comprehensions\n\nLists are constructed with brackets [] and commas separating every element in the list.\n\nLet's go ahead and see how we can construct lists!", "_____no_output_____" ] ], [ [ "# Assign a list to an variable named my_list\nmy_list = [1,2,3]", "_____no_output_____" ] ], [ [ "We just created a list of integers, but lists can actually hold different object types. For example:", "_____no_output_____" ] ], [ [ "my_list = ['A string',23,100.232,'o']", "_____no_output_____" ] ], [ [ "Just like strings, the len() function will tell you how many items are in the sequence of the list.", "_____no_output_____" ] ], [ [ "len(my_list)", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Adding New Elements to a list\n***\nWe use two special commands to add new elements to a list. Let's make a new list to remind ourselves of how this works:", "_____no_output_____" ] ], [ [ "my_list = ['one','two','three',4,5]", "_____no_output_____" ], [ "# append a value to the end of the list\nl = [1, 2.3, ['a', 'b'], 'New York']\nl.append(3.1)\nprint(l)", "[1, 2.3, ['a', 'b'], 'New York', 3.1]\n" ], [ "# extend a list with another list. \nl = [1, 2, 3]\nl.extend([4, 5, 6])\nprint(l)", "[1, 2, 3, 4, 5, 6]\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Slicing\n***\nSlicing is used to access individual elements or a rage of elements in a list. \n\nPython supports \"slicing\" indexable sequences. The syntax for slicing lists is:\n\n- `list_object[start:end:step]` or\n- `list_object[start:end]`\n\nstart and end are indices (start inclusive, end exclusive). 
All slicing values are optional.", "_____no_output_____" ] ], [ [ "lst = list(range(10)) # create a list containing 10 numbers starting from 0 \nprint(lst)\n\nprint(\"elements from index 4 to 7:\", lst[4:7])\nprint(\"alternate elements, starting at index 0:\", lst[0::2]) # prints elements from index 0 till last index with a step of 2\nprint(\"every third element, starting at index 1:\", lst[1::3]) # prints elements from index 1 till last index with a step of 3\n", "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nelements from index 4 to 7: [4, 5, 6]\nalternate elements, starting at index 0: [0, 2, 4, 6, 8]\nevery third element, starting at index 1: [1, 4, 7]\n" ] ], [ [ "<div class=\"alert alert-block alert-success\">**Other `list` operations**</div>\n\n***\n- **`.append`**: add element to end of list\n- **`.insert`**: insert element at given index\n- **`.extend`**: extend one list with another list", "_____no_output_____" ], [ "# Did you know?\n\n**Did you know that Japanese Anime Naruto is related to Data Science. Find out how**\n\n<img src=\"https://greyatom.com/blog/wp-content/uploads/2017/06/naruto-1-701x321.png\">\n\n\nFind out here https://medium.com/greyatom/naruto-and-data-science-how-data-science-is-an-art-and-data-scientist-an-artist-c5f16a68d670", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n# Dictionaries\n***\nNow we're going to switch gears and learn about *mappings* called *dictionaries* in Python. If you're familiar with other languages you can think of these Dictionaries as hash tables. \n\nThis section will serve as a brief introduction to dictionaries and consist of:\n\n 1.) Constructing a Dictionary\n 2.) Accessing objects from a dictionary\n 3.) Nesting Dictionaries\n 4.) Basic Dictionary Methods\n \nA Python dictionary consists of a key and then an associated value. That value can be almost any Python object.", "_____no_output_____" ], [ "## Constructing a Dictionary\n***\nLet's see how we can construct dictionaries to get a better understanding of how they work!", "_____no_output_____" ] ], [ [ "# Make a dictionary with {} and : to signify a key and a value\nmy_dict = {'key1':'value1','key2':'value2'}", "_____no_output_____" ], [ "# Call values by their key\nmy_dict['key2']", "_____no_output_____" ] ], [ [ "We can effect the values of a key as well. For instance:", "_____no_output_____" ] ], [ [ "my_dict['key1']=123\nmy_dict\n", "_____no_output_____" ], [ "# Subtract 123 from the value\nmy_dict['key1'] = my_dict['key1'] - 123", "_____no_output_____" ], [ "#Check\nmy_dict['key1']", "_____no_output_____" ] ], [ [ "A quick note, Python has a built-in method of doing a self subtraction or addition (or multiplication or division). We could have also used += or -= for the above statement. For example:", "_____no_output_____" ] ], [ [ "# Set the object equal to itself minus 123 \nmy_dict['key1'] -= 123\nmy_dict['key1']", "_____no_output_____" ] ], [ [ "Now its your turn to get hands-on with Dictionary, create a empty dicts. 
Create a new key calle animal and assign a value 'Dog' to it..\n", "_____no_output_____" ] ], [ [ "# Create a new dictionary\nd = {}\n# Create a new key through assignment\nd['animal'] = 'Dog'", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n# Set and Booleans\n***\nThere are two other object types in Python that we should quickly cover. Sets and Booleans. \n\n## Sets\nSets are an unordered collection of *unique* elements. We can construct them by using the set() function. Let's go ahead and make a set to see how it works", "_____no_output_____" ], [ "#### Set Theory\n<img src=\"../images/sets2.png\" width=\"60%\"/>", "_____no_output_____" ] ], [ [ "x = set()\n\n# We add to sets with the add() method\nx.add(1)\n\n#Show\nx", "_____no_output_____" ] ], [ [ "Note the curly brackets. This does not indicate a dictionary! Although you can draw analogies as a set being a dictionary with only keys.\n\nWe know that a set has only unique entries. So what happens when we try to add something that is already in a set?", "_____no_output_____" ] ], [ [ "# Add a different element\nx.add(2)\n\n#Show\nx", "_____no_output_____" ], [ "# Try to add the same element\nx.add(1)\n\n#Show\nx", "_____no_output_____" ] ], [ [ "Notice how it won't place another 1 there. That's because a set is only concerned with unique elements! We can cast a list with multiple repeat elements to a set to get the unique elements. For example:", "_____no_output_____" ] ], [ [ "# Create a list with repeats\nl = [1,1,2,2,3,4,5,6,1,1]", "_____no_output_____" ], [ "# Cast as set to get unique values\nset(l)", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/ppt-icons.png\" alt=\"ppt-icons\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Mini Challenge - 3\n***\nCan you access the last element of a l which is a list and find the last element of that list.", "_____no_output_____" ] ], [ [ "l = [10,20,30,40,50]\nl[-1]", "_____no_output_____" ] ], [ [ "# CHAPTER - 3 : Python Programming Constructs\n***\nWe'll be talking about\n- Looping\n- Conditional Statements\n- Comprehensions", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n## Loops and Iterative Statements", "_____no_output_____" ], [ "## If,elif,else Statements\n***\nif Statements in Python allows us to tell the computer to perform alternative actions based on a certain set of results.\n\nVerbally, we can imagine we are telling the computer:\n\n\"Hey if this case happens, perform some action\"\n\nWe can then expand the idea further with elif and else statements, which allow us to tell the computer:\n\n\"Hey if this case happens, perform some action. Else if another case happens, perform some other action. 
Else-- none of the above cases happened, perform this action\"\n\nLet's go ahead and look at the syntax format for if statements to get a better idea of this:\n\n if case1:\n perform action1\n elif case2:\n perform action2\n else: \n perform action 3", "_____no_output_____" ] ], [ [ "a = 5\nb = 4\n\nif a > b:\n # we are inside the if block\n print(\"a is greater than b\")\nelif b > a:\n # we are inside the elif block\n print(\"b is greater than a\")\nelse:\n # we are inside the else block\n print(\"a and b are equal\")\n\n# Note: Python doesn't have a switch statement", "a is greater than b\n" ] ], [ [ "<img src=\"../images/icon/Warning.png\" alt=\"Warning\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Indentation\n***\nIt is important to keep a good understanding of how indentation works in Python to maintain the structure and order of your code. We will touch on this topic again when we start building out functions!", "_____no_output_____" ], [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n# For Loops\n***\nA **for** loop acts as an iterator in Python, it goes through items that are in a *sequence* or any other iterable item. Objects that we've learned about that we can iterate over include strings,lists,tuples, and even built in iterables for dictionaries, such as the keys or values.\n\nWe've already seen the **for** statement a little bit in past lectures but now lets formalize our understanding.\n\nHere's the general format for a **for** loop in Python:\n\n for item in object:\n statements to do stuff\n\nThe variable name used for the item is completely up to the coder, so use your best judgment for choosing a name that makes sense and you will be able to understand when revisiting your code. This item name can then be referenced inside you loop, for example if you wanted to use if statements to perform checks.\n\nLet's go ahead and work through several example of **for** loops using a variety of data object types.\n", "_____no_output_____" ] ], [ [ "#Simple program to find the even numbers in a list\n\nlist_1 = [2,4,5,6,8,7,9,10] # Initialised the list\n\nfor number in list_1: # Selects one element in list_1 \n if number % 2 == 0: # Checks if it is even. IF even, only then, goes to next step else performs above step and continues iteration\n print(number,end=' ') # prints no if even. end=' ' prints the nos on the same line with a space in between. Try deleting this command & seeing the difference.", "2 4 6 8 10 " ], [ "lst1 = [4, 7, 13, 11, 3, 11, 15]\nlst2 = []\n\nfor index, e in enumerate(lst1):\n if e == 10:\n break\n if e < 10:\n continue\n lst2.append((index, e*e))\nelse:\n print(\"out of loop without using break statement\")\n\nlst2", "out of loop without using break statement\n" ], [ "\n", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n# While loops\n***\nThe **while** statement in Python is one of most general ways to perform iteration. A **while** statement will repeatedly execute a single statement or group of statements as long as the condition is true. 
The reason it is called a 'loop' is because the code statements are looped through over and over again until the condition is no longer met.\n\nThe general format of a while loop is:\n\n while test:\n code statement\n else:\n final code statements\n\nLet’s look at a few simple while loops in action. \n", "_____no_output_____" ] ], [ [ "x = 0\n\nwhile x < 10:\n print ('x is currently: ',x,end=' ') #end=' ' to put print below statement on the same line after thsi statement\n print (' x is still less than 10, adding 1 to x')\n x+=1", "x is currently: 0 x is still less than 10, adding 1 to x\nx is currently: 1 x is still less than 10, adding 1 to x\nx is currently: 2 x is still less than 10, adding 1 to x\nx is currently: 3 x is still less than 10, adding 1 to x\nx is currently: 4 x is still less than 10, adding 1 to x\nx is currently: 5 x is still less than 10, adding 1 to x\nx is currently: 6 x is still less than 10, adding 1 to x\nx is currently: 7 x is still less than 10, adding 1 to x\nx is currently: 8 x is still less than 10, adding 1 to x\nx is currently: 9 x is still less than 10, adding 1 to x\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Comprehensions\n***\n- Python provides syntactic sugar to write small loops to generate lists/sets/tuples/dicts in one line\n- These are called comprehensions, and can greatly increase development speed and readability\n\nSyntax:\n```\n sequence = [expression(element) for element in iterable if condition]\n```\n\nThe brackets used for creating the comprehension define what type of object is created.\n\nUse **[ ]** for lists, **()** for _generators_, **{}** for sets and dicts", "_____no_output_____" ], [ "### `list` Comprehension", "_____no_output_____" ] ], [ [ "names = [\"Ravi\", \"Pooja\", \"Vijay\", \"Kiran\"]\nhello = [\"Hello \" + name for name in names]\nprint(hello)", "['Hello Ravi', 'Hello Pooja', 'Hello Vijay', 'Hello Kiran']\n" ], [ "numbers = [55, 32, 87, 99, 10, 54, 32]\neven = [num for num in numbers if num % 2 == 0]\nprint(even)\n\nodd_squares = [(num, num * num) for num in numbers if num % 2 == 1]\nprint(odd_squares)", "[32, 10, 54, 32]\n[(55, 3025), (87, 7569), (99, 9801)]\n" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Technical-Stuff\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Exception Handling\n***\n#### try and except\n\nThe basic terminology and syntax used to handle errors in Python is the **try** and **except** statements. The code which can cause an exception to occue is put in the *try* block and the handling of the exception is the implemented in the *except* block of code. The syntax form is:\n\n try:\n You do your operations here...\n ...\n except ExceptionI:\n If there is ExceptionI, then execute this block.\n except ExceptionII:\n If there is ExceptionII, then execute this block.\n ...\n else:\n If there is no exception then execute this block. 
\n\nWe can also just check for any exception with just using except: To get a better understanding of all this lets check out an example: We will look at some code that opens and writes a file:", "_____no_output_____" ] ], [ [ "try:\n x = 1 / 0\nexcept ZeroDivisionError:\n print('divided by zero')\n print('executed when exception occurs')\nelse:\n print('executed only when exception does not occur')\nfinally:\n print('finally block, always executed')", "divided by zero\nexecuted when exception occurs\nfinally block, always executed\n" ] ], [ [ "<img src=\"../images/icon/Concept-Alert.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n## Modules, Packages, and `import`\n***\nA module is a collection of functions and variables that have been bundled together in a single file. Module helps us: \n- Used for code organization, packaging and reusability\n- Module: A Python file\n- Package: A folder with an ``__init__.py`` file\n- Namespace is based on file's directory path\n\nModule's are usually organised around a theme. Let's see how to use a module. To access our module we will import it using python's import statement. Math module provides access to the mathematical functions. ", "_____no_output_____" ] ], [ [ "# import the math module\nimport math\n\n# use the log10 function in the math module\nmath.log10(123)", "_____no_output_____" ] ], [ [ "<img src=\"../images/icon/Technical-Stuff.png\" alt=\"Concept-Alert\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br /> \n\n## File I/O : Helps you read your files\n***\n- Python provides a `file` object to read text/binary files.\n- This is similar to the `FileStream` object in other languages.\n- Since a `file` is a resource, it must be closed after use. 
This can be done manually, or using a context manager (**`with`** statement)", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">Create a file in the current directory</div>", "_____no_output_____" ] ], [ [ "with open('myfile.txt', 'w') as f:\n f.write(\"This is my first file!\\n\")\n f.write(\"Second line!\\n\")\n f.write(\"Last line!\\n\")\n\n\n# let's verify if it was really created.\n# For that, let's find out which directory we're working from\nimport os\nprint(os.path.abspath(os.curdir))", "C:\\Users\\Bibek Shah Shankhar\\Documents\\greyatom\\26-Weeks-Of-Data-Science-master\\Week-01-Git_&_Python_intro\\notebooks\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">Read the newly created file</div>", "_____no_output_____" ] ], [ [ "# read the file we just created\nwith open('myfile.txt', 'r') as f:\n for line in f:\n print(line)\n", "This is my first file!\n\nSecond line!\n\nLast line!\n\n" ] ], [ [ "<img src=\"../images/icon/ppt-icons.png\" alt=\"ppt-icons\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Mini Challenge - 4\n***\nCan you compute the square of a number assigned to a variable a using the math module?", "_____no_output_____" ] ], [ [ "import math\nnumber = 9\nsquare_of_number = math.pow(number,2) # pow(power) function in math module takes number and power as arguments.\nprint(square_of_number)", "81.0\n" ] ], [ [ "<img src=\"../images/icon/ppt-icons.png\" alt=\"ppt-icons\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n### Mini Challenge - 5\n***\nCan you create a list of 10 numbers iterate through the list and print the square of each number ?", "_____no_output_____" ] ], [ [ "l = [i for i in range(1,11)]\nfor i in l:\n print(i*i,end=' ')", "1 4 9 16 25 36 49 64 81 100 " ] ], [ [ "# Further Reading\n\n- Official Python Documentation: https://docs.python.org/", "_____no_output_____" ], [ "<img src=\"../images/icon/Recap.png\" alt=\"Recap\" style=\"width: 100px;float:left; margin-right:15px\"/>\n<br />\n\n# In-session Recap Time\n***\n* Python Basics\n * Variables and Scoping\n * Modules, Packages and Imports\n * Data Types & Data Structures\n * Python Programming Constructs\n* Data Types & Data Structures\n * Lists\n * Dictionaries\n * Sets & Booleans\n* Python Prograamming constructs\n * Loops and Conditional Statements\n * Exception Handling\n * File I/O", "_____no_output_____" ], [ "# Thank You\n***\n### Coming up next...\n\n- **Python Functions**: How to write modular functions to enable code reuse\n- **NumPy**: Learn the basis of most numeric computation in Python", "_____no_output_____" ] ] ]
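The `.append`, `.insert` and `.extend` list operations and the basic dictionary methods are listed in the lesson above without a worked example; a minimal sketch of how they behave (all names and values here are illustrative only):

```
# Minimal sketch of the list operations listed above
lst = [1, 2, 3]
lst.append(4)           # add a single element to the end   -> [1, 2, 3, 4]
lst.insert(0, 0)        # insert an element at index 0      -> [0, 1, 2, 3, 4]
lst.extend([5, 6])      # extend one list with another list -> [0, 1, 2, 3, 4, 5, 6]
print(lst)

# Basic dictionary methods and a nested dictionary, as mentioned in the
# Dictionaries introduction above
d = {'animal': 'Dog', 'sounds': {'Dog': 'woof', 'Cat': 'meow'}}
print(d['sounds']['Dog'])    # value inside a nested dictionary
print(list(d.keys()))        # all keys
print(list(d.values()))      # all values
print(list(d.items()))       # (key, value) pairs
```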
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
c520c162c69f5b15de27938f3c4343e9c3958e14
30,781
ipynb
Jupyter Notebook
datasets/Part 6 - AutoEncoders (AE)/ae.ipynb
mazamgo/deeplearningAaZ
9a876bd634a00df4df78c51d6f10e41bc112ead5
[ "CC0-1.0" ]
323
2020-01-04T10:56:35.000Z
2022-03-25T22:26:39.000Z
datasets/Part 6 - AutoEncoders (AE)/ae.ipynb
handielg/deeplearning-az
a9c3560690cf56ab193a1df075bcfeb8a3131ab0
[ "CC0-1.0" ]
null
null
null
datasets/Part 6 - AutoEncoders (AE)/ae.ipynb
handielg/deeplearning-az
a9c3560690cf56ab193a1df075bcfeb8a3131ab0
[ "CC0-1.0" ]
191
2020-01-06T18:16:10.000Z
2022-03-30T19:04:32.000Z
46.287218
1,690
0.52867
[ [ [ "#Instalamos pytorch\n", "_____no_output_____" ] ], [ [ "#pip install torch===1.6.0 torchvision===0.7.0 -f https://download.pytorch.org/whl/torch_stable.html", "_____no_output_____" ] ], [ [ "#Clonamos el repositorio para obtener el dataset", "_____no_output_____" ] ], [ [ "!git clone https://github.com/joanby/deeplearning-az.git", "Cloning into 'deeplearning-az'...\nremote: Enumerating objects: 57, done.\u001b[K\nremote: Counting objects: 100% (57/57), done.\u001b[K\nremote: Compressing objects: 100% (41/41), done.\u001b[K\nremote: Total 10153 (delta 25), reused 39 (delta 16), pack-reused 10096\u001b[K\nReceiving objects: 100% (10153/10153), 236.95 MiB | 36.84 MiB/s, done.\nResolving deltas: 100% (50/50), done.\nChecking out files: 100% (10108/10108), done.\n" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "_____no_output_____" ] ], [ [ "# Importar las librerías", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable", "_____no_output_____" ] ], [ [ "# Importar el dataset\n", "_____no_output_____" ] ], [ [ "movies = pd.read_csv(\"/content/deeplearning-az/datasets/Part 6 - AutoEncoders (AE)/ml-1m/movies.dat\", sep = '::', header = None, engine = 'python', encoding = 'latin-1')\nusers = pd.read_csv(\"/content/deeplearning-az/datasets/Part 6 - AutoEncoders (AE)/ml-1m/users.dat\", sep = '::', header = None, engine = 'python', encoding = 'latin-1')\nratings = pd.read_csv(\"/content/deeplearning-az/datasets/Part 6 - AutoEncoders (AE)/ml-1m/ratings.dat\", sep = '::', header = None, engine = 'python', encoding = 'latin-1')\n", "_____no_output_____" ] ], [ [ "# Preparar el conjunto de entrenamiento y elconjunto de testing", "_____no_output_____" ] ], [ [ "training_set = pd.read_csv(\"/content/deeplearning-az/datasets/Part 6 - AutoEncoders (AE)/ml-100k/u1.base\", sep = \"\\t\", header = None)\ntraining_set = np.array(training_set, dtype = \"int\")\ntest_set = pd.read_csv(\"/content/deeplearning-az/datasets/Part 6 - AutoEncoders (AE)/ml-100k/u1.test\", sep = \"\\t\", header = None)\ntest_set = np.array(test_set, dtype = \"int\")", "_____no_output_____" ] ], [ [ "# Obtener el número de usuarios y de películas", "_____no_output_____" ] ], [ [ "nb_users = int(max(max(training_set[:, 0]), max(test_set[:,0])))\nnb_movies = int(max(max(training_set[:, 1]), max(test_set[:, 1])))", "_____no_output_____" ] ], [ [ "# Convertir los datos en un array X[u,i] con usuarios u en fila y películas i en columna\n", "_____no_output_____" ] ], [ [ "def convert(data):\n new_data = []\n for id_user in range(1, nb_users+1):\n id_movies = data[:, 1][data[:, 0] == id_user]\n id_ratings = data[:, 2][data[:, 0] == id_user]\n ratings = np.zeros(nb_movies)\n ratings[id_movies-1] = id_ratings\n new_data.append(list(ratings))\n return new_data", "_____no_output_____" ], [ "training_set = convert(training_set)\ntest_set = convert(test_set)", "_____no_output_____" ] ], [ [ "# Convertir los datos a tensores de Torch", "_____no_output_____" ] ], [ [ "training_set = torch.FloatTensor(training_set)\ntest_set = torch.FloatTensor(test_set)", "_____no_output_____" ] ], [ [ "# Crear la arquitectura de la Red Neuronal", "_____no_output_____" ] ], [ [ "class SAE(nn.Module):\n def __init__(self, ):\n super(SAE, self).__init__()\n self.fc1 = nn.Linear(nb_movies, 20)\n self.fc2 = nn.Linear(20, 10)\n self.fc3 = nn.Linear(10, 20)\n self.fc4 = 
nn.Linear(20, nb_movies)\n self.activation = nn.Sigmoid()\n def forward(self, x):\n x = self.activation(self.fc1(x))\n x = self.activation(self.fc2(x))\n x = self.activation(self.fc3(x))\n x = self.fc4(x)\n return x", "_____no_output_____" ], [ "sae = SAE()\ncriterion = nn.MSELoss()\noptimizer = optim.RMSprop(sae.parameters(), lr = 0.01, weight_decay = 0.5)", "_____no_output_____" ] ], [ [ "# Entrenar el SAE", "_____no_output_____" ] ], [ [ "nb_epoch = 200\nfor epoch in range(1, nb_epoch+1):\n train_loss = 0\n s = 0.\n for id_user in range(nb_users):\n input = Variable(training_set[id_user]).unsqueeze(0)\n target = input.clone()\n if torch.sum(target.data > 0) > 0:\n output = sae.forward(input)\n target.require_grad = False\n output[target == 0] = 0\n loss = criterion(output, target)\n # la media no es sobre todas las películas, sino sobre las que realmente ha valorado\n mean_corrector = nb_movies/float(torch.sum(target.data > 0)+1e-10) \n loss.backward()\n train_loss += np.sqrt(loss.data*mean_corrector) ## sum(errors) / n_pelis_valoradas\n s += 1.\n optimizer.step()\n print(\"Epoch: \"+str(epoch)+\", Loss: \"+str(train_loss/s))\n", "Epoch: 1, Loss: tensor(1.7711)\nEpoch: 2, Loss: tensor(1.0967)\nEpoch: 3, Loss: tensor(1.0534)\nEpoch: 4, Loss: tensor(1.0385)\nEpoch: 5, Loss: tensor(1.0307)\nEpoch: 6, Loss: tensor(1.0267)\nEpoch: 7, Loss: tensor(1.0237)\nEpoch: 8, Loss: tensor(1.0218)\nEpoch: 9, Loss: tensor(1.0209)\nEpoch: 10, Loss: tensor(1.0196)\nEpoch: 11, Loss: tensor(1.0189)\nEpoch: 12, Loss: tensor(1.0183)\nEpoch: 13, Loss: tensor(1.0179)\nEpoch: 14, Loss: tensor(1.0172)\nEpoch: 15, Loss: tensor(1.0174)\nEpoch: 16, Loss: tensor(1.0169)\nEpoch: 17, Loss: tensor(1.0168)\nEpoch: 18, Loss: tensor(1.0164)\nEpoch: 19, Loss: tensor(1.0165)\nEpoch: 20, Loss: tensor(1.0162)\nEpoch: 21, Loss: tensor(1.0162)\nEpoch: 22, Loss: tensor(1.0159)\nEpoch: 23, Loss: tensor(1.0160)\nEpoch: 24, Loss: tensor(1.0160)\nEpoch: 25, Loss: tensor(1.0159)\nEpoch: 26, Loss: tensor(1.0156)\nEpoch: 27, Loss: tensor(1.0154)\nEpoch: 28, Loss: tensor(1.0150)\nEpoch: 29, Loss: tensor(1.0126)\nEpoch: 30, Loss: tensor(1.0110)\nEpoch: 31, Loss: tensor(1.0095)\nEpoch: 32, Loss: tensor(1.0086)\nEpoch: 33, Loss: tensor(1.0052)\nEpoch: 34, Loss: tensor(1.0052)\nEpoch: 35, Loss: tensor(1.0013)\nEpoch: 36, Loss: tensor(0.9998)\nEpoch: 37, Loss: tensor(0.9959)\nEpoch: 38, Loss: tensor(0.9956)\nEpoch: 39, Loss: tensor(0.9939)\nEpoch: 40, Loss: tensor(0.9936)\nEpoch: 41, Loss: tensor(0.9902)\nEpoch: 42, Loss: tensor(0.9897)\nEpoch: 43, Loss: tensor(0.9847)\nEpoch: 44, Loss: tensor(0.9847)\nEpoch: 45, Loss: tensor(0.9861)\nEpoch: 46, Loss: tensor(0.9861)\nEpoch: 47, Loss: tensor(0.9834)\nEpoch: 48, Loss: tensor(0.9864)\nEpoch: 49, Loss: tensor(0.9829)\nEpoch: 50, Loss: tensor(0.9835)\nEpoch: 51, Loss: tensor(0.9800)\nEpoch: 52, Loss: tensor(0.9848)\nEpoch: 53, Loss: tensor(0.9829)\nEpoch: 54, Loss: tensor(0.9824)\nEpoch: 55, Loss: tensor(0.9747)\nEpoch: 56, Loss: tensor(0.9707)\nEpoch: 57, Loss: tensor(0.9680)\nEpoch: 58, Loss: tensor(0.9715)\nEpoch: 59, Loss: tensor(0.9688)\nEpoch: 60, Loss: tensor(0.9671)\nEpoch: 61, Loss: tensor(0.9624)\nEpoch: 62, Loss: tensor(0.9646)\nEpoch: 63, Loss: tensor(0.9666)\nEpoch: 64, Loss: tensor(0.9665)\nEpoch: 65, Loss: tensor(0.9606)\nEpoch: 66, Loss: tensor(0.9623)\nEpoch: 67, Loss: tensor(0.9579)\nEpoch: 68, Loss: tensor(0.9579)\nEpoch: 69, Loss: tensor(0.9536)\nEpoch: 70, Loss: tensor(0.9554)\nEpoch: 71, Loss: tensor(0.9566)\nEpoch: 72, Loss: tensor(0.9552)\nEpoch: 73, Loss: 
tensor(0.9504)\nEpoch: 74, Loss: tensor(0.9541)\nEpoch: 75, Loss: tensor(0.9506)\nEpoch: 76, Loss: tensor(0.9505)\nEpoch: 77, Loss: tensor(0.9481)\nEpoch: 78, Loss: tensor(0.9497)\nEpoch: 79, Loss: tensor(0.9461)\nEpoch: 80, Loss: tensor(0.9466)\nEpoch: 81, Loss: tensor(0.9443)\nEpoch: 82, Loss: tensor(0.9464)\nEpoch: 83, Loss: tensor(0.9437)\nEpoch: 84, Loss: tensor(0.9446)\nEpoch: 85, Loss: tensor(0.9418)\nEpoch: 86, Loss: tensor(0.9433)\nEpoch: 87, Loss: tensor(0.9412)\nEpoch: 88, Loss: tensor(0.9417)\nEpoch: 89, Loss: tensor(0.9411)\nEpoch: 90, Loss: tensor(0.9419)\nEpoch: 91, Loss: tensor(0.9389)\nEpoch: 92, Loss: tensor(0.9392)\nEpoch: 93, Loss: tensor(0.9373)\nEpoch: 94, Loss: tensor(0.9386)\nEpoch: 95, Loss: tensor(0.9363)\nEpoch: 96, Loss: tensor(0.9380)\nEpoch: 97, Loss: tensor(0.9351)\nEpoch: 98, Loss: tensor(0.9368)\nEpoch: 99, Loss: tensor(0.9348)\nEpoch: 100, Loss: tensor(0.9363)\nEpoch: 101, Loss: tensor(0.9347)\nEpoch: 102, Loss: tensor(0.9356)\nEpoch: 103, Loss: tensor(0.9339)\nEpoch: 104, Loss: tensor(0.9352)\nEpoch: 105, Loss: tensor(0.9325)\nEpoch: 106, Loss: tensor(0.9340)\nEpoch: 107, Loss: tensor(0.9319)\nEpoch: 108, Loss: tensor(0.9332)\nEpoch: 109, Loss: tensor(0.9312)\nEpoch: 110, Loss: tensor(0.9324)\nEpoch: 111, Loss: tensor(0.9309)\nEpoch: 112, Loss: tensor(0.9320)\nEpoch: 113, Loss: tensor(0.9303)\nEpoch: 114, Loss: tensor(0.9314)\nEpoch: 115, Loss: tensor(0.9296)\nEpoch: 116, Loss: tensor(0.9300)\nEpoch: 117, Loss: tensor(0.9288)\nEpoch: 118, Loss: tensor(0.9297)\nEpoch: 119, Loss: tensor(0.9283)\nEpoch: 120, Loss: tensor(0.9293)\nEpoch: 121, Loss: tensor(0.9278)\nEpoch: 122, Loss: tensor(0.9286)\nEpoch: 123, Loss: tensor(0.9272)\nEpoch: 124, Loss: tensor(0.9286)\nEpoch: 125, Loss: tensor(0.9265)\nEpoch: 126, Loss: tensor(0.9273)\nEpoch: 127, Loss: tensor(0.9264)\nEpoch: 128, Loss: tensor(0.9271)\nEpoch: 129, Loss: tensor(0.9258)\nEpoch: 130, Loss: tensor(0.9264)\nEpoch: 131, Loss: tensor(0.9254)\nEpoch: 132, Loss: tensor(0.9258)\nEpoch: 133, Loss: tensor(0.9244)\nEpoch: 134, Loss: tensor(0.9252)\nEpoch: 135, Loss: tensor(0.9247)\nEpoch: 136, Loss: tensor(0.9253)\nEpoch: 137, Loss: tensor(0.9236)\nEpoch: 138, Loss: tensor(0.9244)\nEpoch: 139, Loss: tensor(0.9230)\nEpoch: 140, Loss: tensor(0.9227)\nEpoch: 141, Loss: tensor(0.9225)\nEpoch: 142, Loss: tensor(0.9225)\nEpoch: 143, Loss: tensor(0.9217)\nEpoch: 144, Loss: tensor(0.9217)\nEpoch: 145, Loss: tensor(0.9214)\nEpoch: 146, Loss: tensor(0.9222)\nEpoch: 147, Loss: tensor(0.9212)\nEpoch: 148, Loss: tensor(0.9206)\nEpoch: 149, Loss: tensor(0.9206)\nEpoch: 150, Loss: tensor(0.9212)\nEpoch: 151, Loss: tensor(0.9208)\nEpoch: 152, Loss: tensor(0.9211)\nEpoch: 153, Loss: tensor(0.9202)\nEpoch: 154, Loss: tensor(0.9202)\nEpoch: 155, Loss: tensor(0.9195)\nEpoch: 156, Loss: tensor(0.9195)\nEpoch: 157, Loss: tensor(0.9192)\nEpoch: 158, Loss: tensor(0.9192)\nEpoch: 159, Loss: tensor(0.9184)\nEpoch: 160, Loss: tensor(0.9190)\nEpoch: 161, Loss: tensor(0.9187)\nEpoch: 162, Loss: tensor(0.9182)\nEpoch: 163, Loss: tensor(0.9178)\nEpoch: 164, Loss: tensor(0.9180)\nEpoch: 165, Loss: tensor(0.9178)\nEpoch: 166, Loss: tensor(0.9177)\nEpoch: 167, Loss: tensor(0.9175)\nEpoch: 168, Loss: tensor(0.9174)\nEpoch: 169, Loss: tensor(0.9169)\nEpoch: 170, Loss: tensor(0.9174)\nEpoch: 171, Loss: tensor(0.9165)\nEpoch: 172, Loss: tensor(0.9167)\nEpoch: 173, Loss: tensor(0.9161)\nEpoch: 174, Loss: tensor(0.9168)\nEpoch: 175, Loss: tensor(0.9160)\nEpoch: 176, Loss: tensor(0.9164)\nEpoch: 177, Loss: tensor(0.9159)\nEpoch: 178, Loss: 
tensor(0.9160)\nEpoch: 179, Loss: tensor(0.9158)\nEpoch: 180, Loss: tensor(0.9158)\nEpoch: 181, Loss: tensor(0.9152)\nEpoch: 182, Loss: tensor(0.9155)\nEpoch: 183, Loss: tensor(0.9150)\nEpoch: 184, Loss: tensor(0.9152)\nEpoch: 185, Loss: tensor(0.9148)\nEpoch: 186, Loss: tensor(0.9150)\nEpoch: 187, Loss: tensor(0.9147)\nEpoch: 188, Loss: tensor(0.9142)\nEpoch: 189, Loss: tensor(0.9148)\nEpoch: 190, Loss: tensor(0.9150)\nEpoch: 191, Loss: tensor(0.9143)\nEpoch: 192, Loss: tensor(0.9143)\nEpoch: 193, Loss: tensor(0.9138)\nEpoch: 194, Loss: tensor(0.9143)\nEpoch: 195, Loss: tensor(0.9135)\nEpoch: 196, Loss: tensor(0.9138)\nEpoch: 197, Loss: tensor(0.9132)\nEpoch: 198, Loss: tensor(0.9136)\nEpoch: 199, Loss: tensor(0.9128)\nEpoch: 200, Loss: tensor(0.9132)\n" ] ], [ [ "# Evaluar el conjunto de test en nuestro SAE", "_____no_output_____" ] ], [ [ "test_loss = 0\ns = 0.\nfor id_user in range(nb_users):\n input = Variable(training_set[id_user]).unsqueeze(0)\n target = Variable(test_set[id_user]).unsqueeze(0)\n if torch.sum(target.data > 0) > 0:\n output = sae.forward(input)\n target.require_grad = False\n output[target == 0] = 0\n loss = criterion(output, target)\n # la media no es sobre todas las películas, sino sobre las que realmente ha valorado\n mean_corrector = nb_movies/float(torch.sum(target.data > 0)+1e-10) \n test_loss += np.sqrt(loss.data*mean_corrector) ## sum(errors) / n_pelis_valoradas\n s += 1.", "_____no_output_____" ], [ "print(\"Test Loss: \"+str(test_loss/s))", "Test Loss: tensor(0.9549)\n" ] ] ]
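The cells above train and score the stacked autoencoder but never show it being queried. A sketch of how the trained model could be used to rank unseen movies for one user, assuming the `sae` and `training_set` objects defined above are in scope; the user index and the top-10 cutoff are arbitrary choices for illustration:

```
import torch
from torch.autograd import Variable

user_id = 0                                          # arbitrary user, for illustration only
user_input = Variable(training_set[user_id]).unsqueeze(0)
with torch.no_grad():                                # no gradients needed at inference time
    predicted = sae(user_input).squeeze(0)

unseen = training_set[user_id] == 0                  # movies this user has not rated yet
scores = predicted.clone()
scores[~unseen] = float('-inf')                      # ignore movies the user already rated
top_scores, top_columns = torch.topk(scores, 10)
# column j of the ratings matrix was filled from movie ID j + 1 in convert() above
print("Top recommended movie IDs:", (top_columns + 1).tolist())
```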
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
c520cfd71ea8d92d40ab526768043ac22c5e951d
4,036
ipynb
Jupyter Notebook
makeNP_array.ipynb
tejasvx/kaggle
85982ee7d401a43348e248c58cf3c8f958032af2
[ "Apache-2.0" ]
3
2018-04-14T19:03:41.000Z
2018-10-18T05:10:02.000Z
makeNP_array.ipynb
tejasvx/kaggle
85982ee7d401a43348e248c58cf3c8f958032af2
[ "Apache-2.0" ]
null
null
null
makeNP_array.ipynb
tejasvx/kaggle
85982ee7d401a43348e248c58cf3c8f958032af2
[ "Apache-2.0" ]
null
null
null
44.844444
923
0.601586
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport librosa\nimport librosa.display\nimport os\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\npath = \"../input/train/audio/\"\nimport matplotlib.pyplot as plt\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n#from subprocess import check_output\n#print(check_output([\"ls\", \"../input/train/audio\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.", "_____no_output_____" ], [ "def Labels(path):\n label = os.listdir(path)\n labelIndex = np.arange(0, len(label))\n print (label, labelIndex)\n return label , labelIndex", "_____no_output_____" ], [ "def wav2mfcc(path, maxPad=44):\n wave, sr = librosa.load(path)\n mfcc = librosa.feature.mfcc(wave, sr)\n #plt.figure(figsize=(10, 4))\n #librosa.display.specshow(mfcc, x_axis='time')\n padWidth = (maxPad - mfcc.shape[1])\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, padWidth)), mode='constant')\n librosa.display.waveplot(mfcc, x_axis='time')\n return mfcc", "_____no_output_____" ], [ "def MakeNPArray(path, maxPad=44):\n labels, tejasv = Labels(path)\n for label in labels:\n mfccList = []\n wavfiles = [path + label + '/' + wavfile for wavfile in os.listdir(path + '/' + label)]\n for wavfile in wavfiles:\n mfcc = wav2mfcc(wavfile, maxPad=maxPad)\n mfccList.append(mfcc)\n np.save(label + '.npy', mfccList)", "_____no_output_____" ], [ "MakeNPArray(path)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
c520d414f1731a304c79fbc1d0a07e81c116b08c
149,828
ipynb
Jupyter Notebook
code/.ipynb_checkpoints/UE17CS303-ASSIGNMENT-026_110_139_208-checkpoint.ipynb
abhijeetmurthy/Random-Forest-from-Scratch
52499aad2196280eb8acf336fcb2875905a7661e
[ "MIT" ]
null
null
null
code/.ipynb_checkpoints/UE17CS303-ASSIGNMENT-026_110_139_208-checkpoint.ipynb
abhijeetmurthy/Random-Forest-from-Scratch
52499aad2196280eb8acf336fcb2875905a7661e
[ "MIT" ]
null
null
null
code/.ipynb_checkpoints/UE17CS303-ASSIGNMENT-026_110_139_208-checkpoint.ipynb
abhijeetmurthy/Random-Forest-from-Scratch
52499aad2196280eb8acf336fcb2875905a7661e
[ "MIT" ]
null
null
null
265.182301
43,056
0.906419
[ [ [ "import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ndf=pd.read_csv(\"phl_hec_all_confirmed.csv\") ;\n# df.head()\nsns.heatmap(df.isnull(),yticklabels=False,cbar=False)\ndf.drop(['P. Name KOI'],axis=1,inplace=True)\ndf.drop(['P. Min Mass (EU)'],axis=1,inplace=True)\ndf.drop(['P. Max Mass (EU)'],axis=1,inplace=True)\ndf['P. Zone Class']=df['P. Zone Class'].fillna(df['P. Zone Class'].mode()[0])\ndf['P. Mass Class']=df['P. Mass Class'].fillna(df['P. Mass Class'].mode()[0])\ndf['P. Composition Class']=df['P. Composition Class'].fillna(df['P. Composition Class'].mode()[0])\ndf['P. Habitable Class']=df['P. Habitable Class'].fillna(df['P. Habitable Class'].mode()[0])\ndf['P. Atmosphere Class']=df['P. Atmosphere Class'].fillna(df['P. Atmosphere Class'].mode()[0])\ndf['P. Teq Min (K)']=df['P. Teq Min (K)'].fillna(df['P. Teq Min (K)'].mean())\ndf['P. Teq Mean (K)']=df['P. Teq Mean (K)'].fillna(df['P. Teq Mean (K)'].mean())\ndf['P. Teq Max (K)']=df['P. Teq Max (K)'].fillna(df['P. Teq Max (K)'].mean())\ndf['P. Mass (EU)']=df['P. Mass (EU)'].fillna(df['P. Mass (EU)'].mean())\ndf['P. Radius (EU)']=df['P. Radius (EU)'].fillna(df['P. Radius (EU)'].mean())\ndf['P. Density (EU)']=df['P. Density (EU)'].fillna(df['P. Density (EU)'].mean())\ndf['P. Gravity (EU)']=df['P. Gravity (EU)'].fillna(df['P. Gravity (EU)'].mean())\ndf['P. Esc Vel (EU)']=df['P. Esc Vel (EU)'].fillna(df['P. Esc Vel (EU)'].mean())\ndf['P. Teq Min (K)']=df['P. Teq Min (K)'].fillna(df['P. Teq Min (K)'].mean())\ndf['P. Teq Mean (K)']=df['P. Teq Mean (K)'].fillna(df['P. Teq Mean (K)'].mean())\ndf['P. Teq Max (K)']=df['P. Teq Max (K)'].fillna(df['P. Teq Max (K)'].mean())\ndf['P. Ts Min (K)']=df['P. Ts Min (K)'].fillna(df['P. Ts Min (K)'].mean())\ndf['P. Ts Mean (K)']=df['P. Ts Mean (K)'].fillna(df['P. Ts Mean (K)'].mean())\ndf['P. Ts Max (K)']=df['P. Ts Max (K)'].fillna(df['P. Ts Max (K)'].mean())\ndf['P. Surf Press (EU)']=df['P. Surf Press (EU)'].fillna(df['P. Surf Press (EU)'].mean())\ndf['P. Mag']=df['P. Mag'].fillna(df['P. Mag'].mean())\ndf['P. Appar Size (deg)']=df['P. Appar Size (deg)'].fillna(df['P. Appar Size (deg)'].mean())\ndf['P. Period (days)']=df['P. Period (days)'].fillna(df['P. Period (days)'].mean())\ndf['P. Sem Major Axis (AU)']=df['P. Sem Major Axis (AU)'].fillna(df['P. Sem Major Axis (AU)'].mean())\ndf['P. Eccentricity']=df['P. Eccentricity'].fillna(df['P. Eccentricity'].mean())\ndf['P. Mean Distance (AU)']=df['P. Mean Distance (AU)'].fillna(df['P. Mean Distance (AU)'].mean())\ndf['P. Omega (deg)']=df['P. Omega (deg)'].fillna(df['P. Omega (deg)'].mean())\ndf['P. Name Kepler'] = df['P. Name Kepler'].fillna(df['P. Name'])\ndf.drop(['P. Inclination (deg)'],axis=1,inplace=True)\ndf.drop(['S. Name HD'],axis=1,inplace=True)\ndf.drop(['S. Name HIP'],axis=1,inplace=True)\ndf['S. Type']=df['S. Type'].fillna(df['S. Type'].mode()[0])\ndf['S. Mass (SU)']=df['S. Mass (SU)'].fillna(df['S. Mass (SU)'].mean())\ndf['S. Radius (SU)']=df['S. Radius (SU)'].fillna(df['S. Radius (SU)'].mean())\ndf['S. Teff (K)']=df['S. Teff (K)'].fillna(df['S. Teff (K)'].mean())\ndf['S. Luminosity (SU)']=df['S. Luminosity (SU)'].fillna(df['S. Luminosity (SU)'].mean())\ndf['S. [Fe/H]']=df['S. [Fe/H]'].fillna(df['S. [Fe/H]'].mean())\ndf['S. Age (Gyrs)']=df['S. Age (Gyrs)'].fillna(df['S. Age (Gyrs)'].mean())\ndf['S. Appar Mag']=df['S. Appar Mag'].fillna(df['S. Appar Mag'].mean())\ndf['S. Distance (pc)']=df['S. Distance (pc)'].fillna(df['S. Distance (pc)'].mean())\ndf['S. Mag from Planet']=df['S. 
Mag from Planet'].fillna(df['S. Mag from Planet'].mean())\ndf['S. Size from Planet (deg)']=df['S. Size from Planet (deg)'].fillna(df['S. Size from Planet (deg)'].mean())\ndf['S. Hab Zone Min (AU)']=df['S. Hab Zone Min (AU)'].fillna(df['S. Hab Zone Min (AU)'].mean())\ndf['S. Hab Zone Max (AU)']=df['S. Hab Zone Max (AU)'].fillna(df['S. Hab Zone Max (AU)'].mean())\ndf['P. HZD']=df['P. HZD'].fillna(df['P. HZD'].mean())\ndf['P. HZC']=df['P. HZC'].fillna(df['P. HZC'].mean())\ndf['P. HZA']=df['P. HZA'].fillna(df['P. HZA'].mean())\ndf['P. HZI']=df['P. HZI'].fillna(df['P. HZI'].mean())\ndf['P. SPH']=df['P. SPH'].fillna(df['P. SPH'].mean())\ndf['P. ESI']=df['P. ESI'].fillna(df['P. ESI'].mean())\ndf['P. Disc. Year'] = df['P. Disc. Year'].fillna(df['Unnamed: 68'])\ndf.drop(['Unnamed: 68'],axis=1,inplace=True)\ndset=df", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\n%matplotlib inline\nfrom random import seed\nfrom random import randrange #returns random numbers from a given range\nfrom math import sqrt\nimport random\nimport matplotlib.pyplot as plt\nrandom.seed(43)\n# from decision_tree_functions import decision_tree_algorithm, decision_tree_predictions\n# from helper_functions import train_test_split, calculate_accuracy", "_____no_output_____" ] ], [ [ "# Load and Prepare Data", "_____no_output_____" ], [ "#### Format of the data\n- last column of the data frame must contain the label and it must also be called \"label\"\n- there should be no missing values in the data frame", "_____no_output_____" ] ], [ [ "# dset=pd.read_csv(\"Cleaned_data1.csv\") ;", "_____no_output_____" ], [ "from sklearn import preprocessing \ndf=dset.apply(preprocessing.LabelEncoder().fit_transform);\ndf['label']=df['P. Habitable Class']\ndf.drop(\"P. Habitable Class\",axis=1,inplace=True)\nX=df\ny=df[\"label\"]", "_____no_output_____" ] ], [ [ "# Random Forest", "_____no_output_____" ] ], [ [ "import random\n\n\nclass Node:\n def __init__(self, data):\n\n # all the data that is held by this node\n self.data = data\n\n # left child node\n self.left = None\n\n # right child node\n self.right = None\n\n # category if the current node is a leaf node\n self.category = None\n\n # a tuple: (row, column), representing the point where we split the data\n # into the left/right node\n self.split_point = None\n\n\ndef build_model(train_data, n_trees, max_depth, min_size, n_features, n_sample_rate):\n trees = []\n for i in range(n_trees):\n random.shuffle(train_data)\n n_samples = int(len(train_data) * n_sample_rate)\n tree = build_tree(train_data[: n_samples], 1, max_depth, min_size, n_features)\n trees.append(tree)\n return trees\n\n\ndef predict_with_single_tree(tree, row):\n if tree.category is not None:\n return tree.category\n x, y = tree.split_point\n split_value = tree.data[x][y]\n if row[y] <= split_value:\n return predict_with_single_tree(tree.left, row)\n else:\n return predict_with_single_tree(tree.right, row)\n\n\ndef predict(trees, row):\n prediction = []\n for tree in trees:\n prediction.append(predict_with_single_tree(tree, row))\n return max(set(prediction), key=prediction.count)\n\n\ndef get_most_common_category(data):\n categories = [row[-1] for row in data]\n return max(set(categories), key=categories.count)\n\n\ndef build_tree(train_data, depth, max_depth, min_size, n_features):\n root = Node(train_data)\n x, y = get_split_point(train_data, n_features)\n left_group, right_group = split(train_data, x, y)\n if len(left_group) == 0 or len(right_group) == 0 or depth >= max_depth:\n root.category = 
get_most_common_category(left_group + right_group)\n else:\n root.split_point = (x, y)\n if len(left_group) < min_size:\n root.left = Node(left_group)\n root.left.category = get_most_common_category(left_group)\n else:\n root.left = build_tree(left_group, depth + 1, max_depth, min_size, n_features)\n\n if len(right_group) < min_size:\n root.right = Node(right_group)\n root.right.category = get_most_common_category(right_group)\n else:\n root.right = build_tree(right_group, depth + 1, max_depth, min_size, n_features)\n return root\n\n\ndef get_features(n_selected_features, n_total_features):\n features = [i for i in range(n_total_features)]\n random.shuffle(features)\n return features[:n_selected_features]\n\n\ndef get_categories(data):\n return set([row[-1] for row in data])\n\n\ndef get_split_point(data, n_features):\n n_total_features = len(data[0]) - 1\n features = get_features(n_features, n_total_features)\n categories = get_categories(data)\n x, y, gini_index = None, None, None\n for index in range(len(data)):\n for feature in features:\n left, right = split(data, index, feature)\n current_gini_index = get_gini_index(left, right, categories)\n if gini_index is None or current_gini_index < gini_index:\n x, y, gini_index = index, feature, current_gini_index\n return x, y\n\n\ndef get_gini_index(left, right, categories):\n gini_index = 0\n for group in left, right:\n if len(group) == 0:\n continue\n score = 0\n for category in categories:\n p = [row[-1] for row in group].count(category) / len(group)\n score += p * p\n gini_index += (1 - score) * (len(group) / len(left + right))\n return gini_index\n\n\ndef split(data, x, y):\n split_value = data[x][y]\n left, right = [], []\n for row in data:\n if row[y] <= split_value:\n left.append(row)\n else:\n right.append(row)\n return left, right", "_____no_output_____" ], [ "class CrossValidationSplitter:\n def __init__(self, data, k_fold):\n self.data = data\n self.k_fold = k_fold\n self.n_iteration = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.n_iteration >= self.k_fold:\n raise StopIteration\n self.n_iteration += 1\n return self.__load_data()\n\n def __load_data(self):\n n_train_data = (1 / self.k_fold) * len(self.data)\n data_copy = self.data[:]\n train_data = []\n while len(train_data) < n_train_data:\n train_data.append(self.__pop_random_row(data_copy))\n test_data = data_copy\n return train_data, test_data\n\n def __pop_random_row(self, data):\n random.shuffle(data)\n return data[0]\n\n\ndef split_data(data, rate):\n random.shuffle(data)\n n_train_data = int(len(data) * rate)\n return data[: n_train_data], data[n_train_data:]\n\n\ndef calculate_accuracy(model, validate_data):\n n_total = 0\n n_correct = 0\n predicted_categories = [predict(model, row[:-1]) for row in validate_data]\n correct_categories = [row[-1] for row in validate_data]\n for predicted_category, correct_category in zip(predicted_categories, correct_categories):\n n_total += 1\n if predicted_category == correct_category:\n n_correct += 1\n return n_correct / n_total\n\n\ndata = df.values.tolist()\ntrain_data_all, test_data = split_data(data, 0.9)\n\nfor n_tree in [1, 3, 10]:\n accuracies = []\n accuracies_test=[]\n cross_validation_splitter = CrossValidationSplitter(train_data_all, 100)\n model = None\n for train_data, validate_data in cross_validation_splitter:\n n_features = int(sqrt(len(train_data[0]) - 1))\n model = build_model(\n train_data=train_data,\n n_trees=n_tree,\n max_depth=5,\n min_size=1,\n n_features=n_features,\n 
n_sample_rate=0.9\n )\n a2=calculate_accuracy(model,test_data)\n a1=calculate_accuracy(model, validate_data)\n accuracies.append(a1)\n accuracies_test.append(a2)\n# print(a1)\n ax = plt.axes()\n ax.plot(accuracies,label=\"Training Score\")\n ax.plot(accuracies_test,label=\"Cross-validation Score\")\n ax.set(xlim=(0, 100), ylim=(.85,1),\n xlabel='CrossValidationSet', ylabel='Accuracy',label='CrossValidation Accuracy')\n# plt.show()\n plt.axhline(y=sum(accuracies)/len(accuracies), label='Mean Accuracy', linestyle='--', color='red')\n ax.legend();\n plt.show()\n print(\"Average cross validation accuracy for {} trees: {}\".format(n_tree, np.mean(accuracies)))\n print(\"Test accuracy for {} trees: {}\".format(n_tree, calculate_accuracy(model, test_data)))\n", "_____no_output_____" ], [ "# # Model (can also use single decision tree)\nfrom sklearn.ensemble import RandomForestClassifier\nmodel = RandomForestClassifier(n_estimators=10)\n# import matplotlib.pyplot as plt\nmodel.fit(X, y)\n# estimator = model.estimators_[5]\n# col = X.columns\n# y1 = estimator.feature_importances_\n# maxElement = np.amax(y1)\n# y1=np.delete(y1,maxElement)\n# l1=[]\n# cols=[]\n# for i in range(len(y1)):\n# if(y1[i]!=0):\n# l1.append(i)\n# for i in range(len(l1)):\n# cols.append(col[l1[i]])\n# print(cols)\n# print(l1)\n# print(y1)\n# fig, ax = plt.subplots() \n# width = 0.4 # the width of the bars \n# ind = np.arange(len(l1))# the x locations for the groups\n# print(ind)\n# ax.barh(ind, l1, width, color=\"green\")\n# ax.set_yticks(ind+width/10)\n# ax.set_yticklabels(cols, minor=False)\n# plt.title('Feature importance in RandomForest Classifier')\n# plt.xlabel('Relative importance')\n# plt.ylabel('feature') \n# plt.figure(figsize=(5,5))\n# fig.set_size_inches(6.5, 4.5, forward=True)", "_____no_output_____" ], [ "from sklearn.model_selection import validation_curve\nparam_range = np.arange(1, 100, 1)\n\n# Calculate accuracy on training and test set using range of parameter values\ntrain_scores, test_scores = validation_curve(RandomForestClassifier(), \n X, \n y, \n param_name=\"n_estimators\", \n param_range=param_range,\n cv=3, \n scoring=\"accuracy\")", "_____no_output_____" ], [ "train_mean = np.mean(train_scores, axis=1)\ntest_mean = np.mean(test_scores, axis=1)\nplt.plot(param_range, train_mean, label=\"Training score\", color=\"black\")\nplt.plot(param_range, test_mean, label=\"Cross-validation score\")\nplt.title(\"Validation Curve With Random Forest\")\nplt.xlabel(\"Number Of Trees\")\nplt.ylabel(\"Accuracy Score\")\nplt.tight_layout()\nplt.legend(loc=\"best\")\nplt.show()", "_____no_output_____" ] ] ]
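The from-scratch forest above is only exercised through the exoplanet dataframe and the cross-validation loop. A quick sanity check on a tiny hand-made dataset makes the expected row layout explicit (feature values first, class label last); it assumes the `build_model` and `predict` functions defined above are in scope, and the toy values below are made up purely for illustration:

```
toy_data = [
    [2.7, 2.5, 0], [1.4, 2.3, 0], [3.3, 4.4, 0], [1.3, 1.8, 0], [3.0, 3.0, 0],
    [7.6, 2.7, 1], [8.6, 0.2, 1], [7.9, 3.5, 1], [9.0, 3.3, 1], [6.9, 1.7, 1],
]
toy_forest = build_model(
    train_data=toy_data,
    n_trees=5,
    max_depth=3,
    min_size=1,
    n_features=1,        # int(sqrt(2)) -> one candidate feature per split
    n_sample_rate=0.9,
)
# predict() takes a feature-only row, exactly as calculate_accuracy() passes row[:-1]
for row in [[1.5, 2.0], [8.0, 3.0]]:
    print(row, "->", predict(toy_forest, row))
```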
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
c520e1a0bca6f803fdc9220f88c0815ff7eaf8df
30,506
ipynb
Jupyter Notebook
_episodes/04-pipefilter.ipynb
peakBreaker/shell-novice
2a56389e64fab63a6811ada011a80dbf195436fc
[ "CC-BY-4.0" ]
null
null
null
_episodes/04-pipefilter.ipynb
peakBreaker/shell-novice
2a56389e64fab63a6811ada011a80dbf195436fc
[ "CC-BY-4.0" ]
null
null
null
_episodes/04-pipefilter.ipynb
peakBreaker/shell-novice
2a56389e64fab63a6811ada011a80dbf195436fc
[ "CC-BY-4.0" ]
null
null
null
28.298701
118
0.534813
[ [ [ "---\n\n---", "_____no_output_____" ] ], [ [ "\n```\n```\n\n---\ntitle: \"Pipes and Filters\"\nteaching: 25\nexercises: 10\nquestions:\n- \"How can I combine existing commands to do new things?\"\nobjectives:\n- \"Redirect a command's output to a file.\"\n- \"Process a file instead of keyboard input using redirection.\"\n- \"Construct command pipelines with two or more stages.\"\n- \"Explain what usually happens if a program or pipeline isn't given any input to process.\"\n- \"Explain Unix's 'small pieces, loosely joined' philosophy.\"\nkeypoints:\n- \"`wc` counts lines, words, and characters in its inputs.\"\n- \"`cat` displays the contents of its inputs.\"\n- \"`sort` sorts its inputs.\"\n- \"`head` displays the first 10 lines of its input.\"\n- \"`tail` displays the last 10 lines of its input.\"\n- \"`command > [file]` redirects a command's output to a file (overwriting any existing content).\"\n- \"`command >> [file]` appends a command's output to a file.\"\n- \"`[first] | [second]` is a pipeline: the output of the first command is used as the input to the second.\"\n- \"The best way to use the shell is to use pipes to combine simple single-purpose programs (filters).\"\n---\nNow that we know a few basic commands,\nwe can finally look at the shell's most powerful feature:\nthe ease with which it lets us combine existing programs in new ways.\nWe'll start with the directory called `shell-lesson-data/molecules`\nthat contains six files describing some simple organic molecules.\nThe `.pdb` extension indicates that these files are in Protein Data Bank format,\na simple text format that specifies the type and position of each atom in the molecule.", "_____no_output_____" ] ], [ [ "%%bash\n$ ls molecules", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\ncubane.pdb ethane.pdb methane.pdb\noctane.pdb pentane.pdb propane.pdb\n```\n\n{: .output}\nLet's go into that directory with `cd` and run an example command `wc cubane.pdb`:", "_____no_output_____" ] ], [ [ "%%bash\n$ cd molecules\n$ wc cubane.pdb", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n20 156 1158 cubane.pdb\n```\n\n{: .output}\n`wc` is the 'word count' command:\nit counts the number of lines, words, and characters in files (from left to right, in that order).\nIf we run the command `wc *.pdb`, the `*` in `*.pdb` matches zero or more characters,\nso the shell turns `*.pdb` into a list of all `.pdb` files in the current directory:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc *.pdb", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n20 156 1158 cubane.pdb\n12 84 622 ethane.pdb\n9 57 422 methane.pdb\n30 246 1828 octane.pdb\n21 165 1226 pentane.pdb\n15 111 825 propane.pdb\n107 819 6081 total\n```\n\n{: .output}\nNote that `wc *.pdb` also shows the total number of all lines in the last line of the output.\nIf we run `wc -l` instead of just `wc`,\nthe output shows only the number of lines per file:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc -l *.pdb", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n20 cubane.pdb\n12 ethane.pdb\n9 methane.pdb\n30 octane.pdb\n21 pentane.pdb\n15 propane.pdb\n107 total\n```\n\n{: .output}\nThe `-m` and `-w` options can also be used with the `wc` command, to show\nonly the number of characters or the number of words in the files.\n> ## Why Isn't It Doing Anything?\n>\n> What happens if a command is supposed to process a file, but we\n> don't give it a filename? 
For example, what if we type:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ wc -l", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n>\n> but don't type `*.pdb` (or anything else) after the command?\n> Since it doesn't have any filenames, `wc` assumes it is supposed to\n> process input given at the command prompt, so it just sits there and waits for us to give\n> it some data interactively. From the outside, though, all we see is it\n> sitting there: the command doesn't appear to do anything.\n>\n> If you make this kind of mistake, you can escape out of this state by holding down\n> the control key (<kbd>Ctrl</kbd>) and typing the letter <kbd>C</kbd> once and\n> letting go of the <kbd>Ctrl</kbd> key.\n> <kbd>Ctrl</kbd>+<kbd>C</kbd>\n{: .callout}\n## Capturing output from commands\nWhich of these files contains the fewest lines?\nIt's an easy question to answer when there are only six files,\nbut what if there were 6000?\nOur first step toward a solution is to run the command:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc -l *.pdb > lengths.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n{: .language-bash}\nThe greater than symbol, `>`, tells the shell to **redirect** the command's output\nto a file instead of printing it to the screen. (This is why there is no screen output:\neverything that `wc` would have printed has gone into the\nfile `lengths.txt` instead.) The shell will create\nthe file if it doesn't exist. If the file exists, it will be\nsilently overwritten, which may lead to data loss and thus requires\nsome caution.\n`ls lengths.txt` confirms that the file exists:", "_____no_output_____" ] ], [ [ "%%bash\n$ ls lengths.txt", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\nlengths.txt\n```\n\n{: .output}\nWe can now send the content of `lengths.txt` to the screen using `cat lengths.txt`.\nThe `cat` command gets its name from 'concatenate' i.e. join together,\nand it prints the contents of files one after another.\nThere's only one file in this case,\nso `cat` just shows us what it contains:", "_____no_output_____" ] ], [ [ "%%bash\n$ cat lengths.txt", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n20 cubane.pdb\n12 ethane.pdb\n9 methane.pdb\n30 octane.pdb\n21 pentane.pdb\n15 propane.pdb\n107 total\n```\n\n{: .output}\n> ## Output Page by Page\n>\n> We'll continue to use `cat` in this lesson, for convenience and consistency,\n> but it has the disadvantage that it always dumps the whole file onto your screen.\n> More useful in practice is the command `less`,\n> which you use with `less lengths.txt`.\n> This displays a screenful of the file, and then stops.\n> You can go forward one screenful by pressing the spacebar,\n> or back one by pressing `b`. 
Press `q` to quit.\n{: .callout}\n## Filtering output\nNext we'll use the `sort` command to sort the contents of the `lengths.txt` file.\nBut first we'll use an exercise to learn a little about the sort command:\n> ## What Does `sort -n` Do?\n>\n> The file [`shell-lesson-data/numbers.txt`](../shell-lesson-data/numbers.txt)\n> contains the following lines:\n>\n\n```\n> 10\n> 2\n> 19\n> 22\n> 6\n```\n\n> {: .source}\n>\n> If we run `sort` on this file, the output is:\n>\n\n```\n> 10\n> 19\n> 2\n> 22\n> 6\n```\n\n> {: .output}\n>\n> If we run `sort -n` on the same file, we get this instead:\n>\n\n```\n> 2\n> 6\n> 10\n> 19\n> 22\n```\n\n> {: .output}\n>\n> Explain why `-n` has this effect.\n>\n> > ## Solution\n> > The `-n` option specifies a numerical rather than an alphanumerical sort.\n> {: .solution}\n{: .challenge}\nWe will also use the `-n` option to specify that the sort is\nnumerical instead of alphanumerical.\nThis does *not* change the file;\ninstead, it sends the sorted result to the screen:", "_____no_output_____" ] ], [ [ "%%bash\n$ sort -n lengths.txt", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n9 methane.pdb\n12 ethane.pdb\n15 propane.pdb\n20 cubane.pdb\n21 pentane.pdb\n30 octane.pdb\n107 total\n```\n\n{: .output}\nWe can put the sorted list of lines in another temporary file called `sorted-lengths.txt`\nby putting `> sorted-lengths.txt` after the command,\njust as we used `> lengths.txt` to put the output of `wc` into `lengths.txt`.\nOnce we've done that,\nwe can run another command called `head` to get the first few lines in `sorted-lengths.txt`:", "_____no_output_____" ] ], [ [ "%%bash\n$ sort -n lengths.txt > sorted-lengths.txt\n$ head -n 1 sorted-lengths.txt", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n9 methane.pdb\n```\n\n{: .output}\nUsing `-n 1` with `head` tells it that\nwe only want the first line of the file;\n`-n 20` would get the first 20,\nand so on.\nSince `sorted-lengths.txt` contains the lengths of our files ordered from least to greatest,\nthe output of `head` must be the file with the fewest lines.\n> ## Redirecting to the same file\n>\n> It's a very bad idea to try redirecting\n> the output of a command that operates on a file\n> to the same file. 
For example:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ sort -n lengths.txt > lengths.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n>\n> Doing something like this may give you\n> incorrect results and/or delete\n> the contents of `lengths.txt`.\n{: .callout}\n> ## What Does `>>` Mean?\n>\n> We have seen the use of `>`, but there is a similar operator `>>`\n> which works slightly differently.\n> We'll learn about the differences between these two operators by printing some strings.\n> We can use the `echo` command to print strings e.g.\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ echo The echo command prints text", "_____no_output_____" ] ], [ [ "> {: .language-bash}\n\n```\n> The echo command prints text\n```\n\n> {: .output}\n>\n> Now test the commands below to reveal the difference between the two operators:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ echo hello > testfile01.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n>\n> and:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ echo hello >> testfile02.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n>\n> Hint: Try executing each command twice in a row and then examining the output files.\n>\n> > ## Solution\n> > In the first example with `>`, the string 'hello' is written to `testfile01.txt`,\n> > but the file gets overwritten each time we run the command.\n> >\n> > We see from the second example that the `>>` operator also writes 'hello' to a file\n> > (in this case`testfile02.txt`),\n> > but appends the string to the file if it already exists\n> > (i.e. when we run it for the second time).\n> {: .solution}\n{: .challenge}\n> ## Appending Data\n>\n> We have already met the `head` command, which prints lines from the start of a file.\n> `tail` is similar, but prints lines from the end of a file instead.\n>\n> Consider the file `shell-lesson-data/data/animals.txt`.\n> After these commands, select the answer that\n> corresponds to the file `animals-subset.txt`:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ head -n 3 animals.txt > animals-subset.txt\n> $ tail -n 2 animals.txt >> animals-subset.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n>\n> 1. The first three lines of `animals.txt`\n> 2. The last two lines of `animals.txt`\n> 3. The first three lines and the last two lines of `animals.txt`\n> 4. 
The second and third lines of `animals.txt`\n>\n> > ## Solution\n> > Option 3 is correct.\n> > For option 1 to be correct we would only run the `head` command.\n> > For option 2 to be correct we would only run the `tail` command.\n> > For option 4 to be correct we would have to pipe the output of `head` into `tail -n 2`\n> > by doing `head -n 3 animals.txt | tail -n 2 > animals-subset.txt`\n> {: .solution}\n{: .challenge}\n## Passing output to another command\nIn our example of finding the file with the fewest lines,\nwe are using two intermediate files `lengths.txt` and `sorted-lengths.txt` to store output.\nThis is a confusing way to work because\neven once you understand what `wc`, `sort`, and `head` do,\nthose intermediate files make it hard to follow what's going on.\nWe can make it easier to understand by running `sort` and `head` together:", "_____no_output_____" ] ], [ [ "%%bash\n$ sort -n lengths.txt | head -n 1", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n9 methane.pdb\n```\n\n{: .output}\nThe vertical bar, `|`, between the two commands is called a **pipe**.\nIt tells the shell that we want to use\nthe output of the command on the left\nas the input to the command on the right.\nThis has removed the need for the `sorted-lengths.txt` file.\n## Combining multiple commands\nNothing prevents us from chaining pipes consecutively.\nWe can for example send the output of `wc` directly to `sort`,\nand then the resulting output to `head`.\nThis removes the need for any intermediate files.\nWe'll start by using a pipe to send the output of `wc` to `sort`:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc -l *.pdb | sort -n", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n9 methane.pdb\n12 ethane.pdb\n15 propane.pdb\n20 cubane.pdb\n21 pentane.pdb\n30 octane.pdb\n107 total\n```\n\n{: .output}\nWe can then send that output through another pipe, to `head`, so that the full pipeline becomes:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc -l *.pdb | sort -n | head -n 1", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n9 methane.pdb\n```\n\n{: .output}\nThis is exactly like a mathematician nesting functions like *log(3x)*\nand saying 'the log of three times *x*'.\nIn our case,\nthe calculation is 'head of sort of line count of `*.pdb`'.\nThe redirection and pipes used in the last few commands are illustrated below:\n![Redirects and Pipes of different commands: \"wc -l *.pdb\" will direct the\noutput to the shell. \"wc -l *.pdb > lengths\" will direct output to the file\n\"lengths\". \"wc -l *.pdb | sort -n | head -n 1\" will build a pipeline where the\noutput of the \"wc\" command is the input to the \"sort\" command, the output of\nthe \"sort\" command is the input to the \"head\" command and the output of the\n\"head\" command is directed to the shell](../fig/redirects-and-pipes.svg)\n> ## Piping Commands Together\n>\n> In our current directory, we want to find the 3 files which have the least number of\n> lines. Which command listed below would work?\n>\n> 1. `wc -l * > sort -n > head -n 3`\n> 2. `wc -l * | sort -n | head -n 1-3`\n> 3. `wc -l * | head -n 3 | sort -n`\n> 4. 
`wc -l * | sort -n | head -n 3`\n>\n> > ## Solution\n> > Option 4 is the solution.\n> > The pipe character `|` is used to connect the output from one command to\n> > the input of another.\n> > `>` is used to redirect standard output to a file.\n> > Try it in the `shell-lesson-data/molecules` directory!\n> {: .solution}\n{: .challenge}\n## Tools designed to work together\nThis idea of linking programs together is why Unix has been so successful.\nInstead of creating enormous programs that try to do many different things,\nUnix programmers focus on creating lots of simple tools that each do one job well,\nand that work well with each other.\nThis programming model is called 'pipes and filters'.\nWe've already seen pipes;\na **filter** is a program like `wc` or `sort`\nthat transforms a stream of input into a stream of output.\nAlmost all of the standard Unix tools can work this way:\nunless told to do otherwise,\nthey read from standard input,\ndo something with what they've read,\nand write to standard output.\nThe key is that any program that reads lines of text from standard input\nand writes lines of text to standard output\ncan be combined with every other program that behaves this way as well.\nYou can *and should* write your programs this way\nso that you and other people can put those programs into pipes to multiply their power.\n> ## Pipe Reading Comprehension\n>\n> A file called `animals.txt` (in the `shell-lesson-data/data` folder) contains the following data:\n>\n\n```\n> 2012-11-05,deer\n> 2012-11-05,rabbit\n> 2012-11-05,raccoon\n> 2012-11-06,rabbit\n> 2012-11-06,deer\n> 2012-11-06,fox\n> 2012-11-07,rabbit\n> 2012-11-07,bear\n```\n\n> {: .source}\n>\n> What text passes through each of the pipes and the final redirect in the pipeline below?\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ cat animals.txt | head -n 5 | tail -n 3 | sort -r > final.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n> Hint: build the pipeline up one command at a time to test your understanding\n> > ## Solution\n> > The `head` command extracts the first 5 lines from `animals.txt`.\n> > Then, the last 3 lines are extracted from the previous 5 by using the `tail` command.\n> > With the `sort -r` command those 3 lines are sorted in reverse order and finally,\n> > the output is redirected to a file `final.txt`.\n> > The content of this file can be checked by executing `cat final.txt`.\n> > The file should contain the following lines:\n> > ```\n> > 2012-11-06,rabbit\n> > 2012-11-06,deer\n> > 2012-11-05,raccoon\n> > ```\n> > {: .source}\n> {: .solution}\n{: .challenge}\n> ## Pipe Construction\n>\n> For the file `animals.txt` from the previous exercise, consider the following command:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> $ cut -d , -f 2 animals.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .language-bash}\n>\n> The `cut` command is used to remove or 'cut out' certain sections of each line in the file,\n> and `cut` expects the lines to be separated into columns by a <kbd>Tab</kbd> character.\n> A character used in this way is a called a **delimiter**.\n> In the example above we use the `-d` option to specify the comma as our delimiter character.\n> We have also used the `-f` option to specify that we want to extract the second field (column).\n> This gives the following output:\n>", "_____no_output_____" ] ], [ [ "%%bash\n> deer\n> rabbit\n> raccoon\n> rabbit\n> deer\n> fox\n> rabbit\n> bear", "_____no_output_____" ] ], [ [ "```\n```\n\n> {: .output}\n>\n> The `uniq` 
command filters out adjacent matching lines in a file.\n> How could you extend this pipeline (using `uniq` and another command) to find\n> out what animals the file contains (without any duplicates in their\n> names)?\n>\n> > ## Solution\n> > ```\n> > $ cut -d , -f 2 animals.txt | sort | uniq\n> > ```\n> > {: .language-bash}\n> {: .solution}\n{: .challenge}\n> ## Which Pipe?\n>\n> The file `animals.txt` contains 8 lines of data formatted as follows:\n>\n\n```\n> 2012-11-05,deer\n> 2012-11-05,rabbit\n> 2012-11-05,raccoon\n> 2012-11-06,rabbit\n> ...\n```\n\n> {: .output}\n>\n> The `uniq` command has a `-c` option which gives a count of the\n> number of times a line occurs in its input. Assuming your current\n> directory is `shell-lesson-data/data/`, what command would you use to produce\n> a table that shows the total count of each type of animal in the file?\n>\n> 1. `sort animals.txt | uniq -c`\n> 2. `sort -t, -k2,2 animals.txt | uniq -c`\n> 3. `cut -d, -f 2 animals.txt | uniq -c`\n> 4. `cut -d, -f 2 animals.txt | sort | uniq -c`\n> 5. `cut -d, -f 2 animals.txt | sort | uniq -c | wc -l`\n>\n> > ## Solution\n> > Option 4. is the correct answer.\n> > If you have difficulty understanding why, try running the commands, or sub-sections of\n> > the pipelines (make sure you are in the `shell-lesson-data/data` directory).\n> {: .solution}\n{: .challenge}\n## Nelle's Pipeline: Checking Files\nNelle has run her samples through the assay machines\nand created 17 files in the `north-pacific-gyre/2012-07-03` directory described earlier.\nAs a quick check, starting from her home directory, Nelle types:", "_____no_output_____" ] ], [ [ "%%bash\n$ cd north-pacific-gyre/2012-07-03\n$ wc -l *.txt", "_____no_output_____" ] ], [ [ "```\n```\n\n{: .language-bash}\nThe output is 18 lines that look like this:\n\n```\n300 NENE01729A.txt\n300 NENE01729B.txt\n300 NENE01736A.txt\n300 NENE01751A.txt\n300 NENE01751B.txt\n300 NENE01812A.txt\n... ...\n```\n\n{: .output}\nNow she types this:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc -l *.txt | sort -n | head -n 5", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n240 NENE02018B.txt\n300 NENE01729A.txt\n300 NENE01729B.txt\n300 NENE01736A.txt\n300 NENE01751A.txt\n```\n\n{: .output}\nWhoops: one of the files is 60 lines shorter than the others.\nWhen she goes back and checks it,\nshe sees that she did that assay at 8:00 on a Monday morning --- someone\nwas probably in using the machine on the weekend,\nand she forgot to reset it.\nBefore re-running that sample,\nshe checks to see if any files have too much data:", "_____no_output_____" ] ], [ [ "%%bash\n$ wc -l *.txt | sort -n | tail -n 5", "_____no_output_____" ] ], [ [ "{: .language-bash}\n\n```\n300 NENE02040B.txt\n300 NENE02040Z.txt\n300 NENE02043A.txt\n300 NENE02043B.txt\n5040 total\n```\n\n{: .output}\nThose numbers look good --- but what's that 'Z' doing there in the third-to-last line?\nAll of her samples should be marked 'A' or 'B';\nby convention,\nher lab uses 'Z' to indicate samples with missing information.\nTo find others like it, she does this:", "_____no_output_____" ] ], [ [ "%%bash\n$ ls *Z.txt", "_____no_output_____" ] ], [ [ "{: .language-bash}\n", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c520e5f3c8f2b7c573640a7b9b135f84fa1013db
58,504
ipynb
Jupyter Notebook
docs/contents/user/tools/basic/info.ipynb
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
docs/contents/user/tools/basic/info.ipynb
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
docs/contents/user/tools/basic/info.ipynb
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
40.016416
329
0.521332
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import molsysmt as msm", "_____no_output_____" ] ], [ [ "# Info\n\n*Printing out summary information of a molecular system*\n\n", "_____no_output_____" ], [ "There is in MolSysMT a method to print out a brief overview of a molecular system and its elements. The output of this method can be a `pandas.DataFrame` or a `string`. Lets load a molecular system to illustrate with some simple examples how it works:", "_____no_output_____" ] ], [ [ "molecular_system = msm.convert('pdb_id:1tcd', to_form='molsysmt.MolSys')", "/home/diego/projects/MolSysMT/molsysmt/item/mmtf_MMTFDecoder/to_molsysmt_Topology.py:34: UserWarning: The structure in the PDB has biological assemblies. There are geometrical transformations proposed in the structure. See the following issue in the source code repository: https://github.com/uibcdf/MolSysMT/issues/33\n warnings.warn(warning_message)\n" ] ], [ [ "## As a DataFrame", "_____no_output_____" ], [ "### Summary information on atoms\n\nThe method `molsysmt.info()` can be applied over any element of the molecular system. Lets see an example where the summary information is shown for a set of atoms when the input argument `output='dataframe'`:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='atom', indices=[9,10,11,12], output='dataframe')", "_____no_output_____" ], [ "output = msm.info(molecular_system, element='atom', indices=[9,10,11,12], output='dataframe')\noutput.data.to_dict()", "_____no_output_____" ] ], [ [ "The method can also take a selection input argument:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='atom', selection='group_index==6')", "_____no_output_____" ] ], [ [ "Notice that the default option for `output` is 'dataframe'.", "_____no_output_____" ], [ "### Summary information on groups\n\nLets see an example where the summary information is shown for a set of groups:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='group', indices=[20,21,22,23])", "_____no_output_____" ] ], [ [ "### Summary information on components\n\nFind here now an example on how the method `molsysmt.info()` works over components:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='component', selection='molecule_type!=\"water\"')", "_____no_output_____" ] ], [ [ "### Summary information on chains\n\nIf the summary information on all chains in the molecular system needs to be printed out:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='chain')", "_____no_output_____" ] ], [ [ "### Summary information on molecules\n\nThe following is an example on how the method works when the elementted element is 'molecule':", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='molecule', selection='molecule_type!=\"water\"')", "_____no_output_____" ] ], [ [ "### Summary information on entities\n\nIf the elementted element is 'entity' the method prints out the next summary information:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='entity')", "_____no_output_____" ] ], [ [ "### Summary information on a molecular system\n\nAt last, a summary information can be shown on the whole molecular system as follows:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system)", "_____no_output_____" ], [ "topology, structures = msm.convert(molecular_system, to_form=['molsysmt.Topology','molsysmt.Structures'])", "_____no_output_____" ], [ "msm.info(topology)", "_____no_output_____" ], [ 
"msm.info(structures)", "_____no_output_____" ], [ "msm.info([topology, structures])", "_____no_output_____" ] ], [ [ "## As a string\n\nThe method `molsysmt.info()` can also return a string, short or long, with key information to identify the elementted element.", "_____no_output_____" ], [ "### Summary information on atoms\n\nIf we only need to get a short string encoding the main attributes of an atom, the input argument `output` should take the value 'short_string':", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='atom', indices=10, output='short_string')", "_____no_output_____" ] ], [ [ "The string is nothing but the atom name, the atom id and the atom index with '-' between the name and the id, and '@' between the id and the index. The input argument `indices` accepts also a list of indices:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='atom', indices=[10,11,12,13], output='short_string')", "_____no_output_____" ] ], [ [ "The long version of the string includes the short string of the group, chain and molecule the atom belongs to; with the character '/' in between:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='atom', indices=10, output='long_string')", "_____no_output_____" ] ], [ [ "### Summary information on groups\n\nThe short string corresponding to a group is composed of its name, id and index. The characters used as separators are the same as with atoms: '-' between name and id, and '@' between id and index.", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='group', indices=0, output='short_string')", "_____no_output_____" ] ], [ [ "The long version of the string includes the short string for the chain and molecule the group belongs to:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='group', indices=3, output='long_string')", "_____no_output_____" ] ], [ [ "### Summary information on components", "_____no_output_____" ], [ "The short string with the summary information of a component is its index only:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='component', indices=2, output='short_string')", "_____no_output_____" ] ], [ [ "The long version of the string includes the chain and molecule the component belongs to with the character '/' as separator.", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='component', indices=2, output='long_string')", "_____no_output_____" ] ], [ [ "### Summary information on chains", "_____no_output_____" ], [ "Just like with atoms and groups, the short version of the chain string is made up of the sequence of atributes: name, id and index. 
The character '-' is in between the chain name and the chain id, and '@' precedes the chain index:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='chain', indices=2, output='short_string')", "_____no_output_____" ] ], [ [ "The long version of the string in this case is the same as the short one:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='chain', indices=2, output='long_string')", "_____no_output_____" ] ], [ [ "### Summary information on molecules\n\nMolecules have no relevant id attributes, thats why in this case the short string is the molecule name followed by the character '@' and the molecule index:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='molecule', indices=0, output='short_string')", "_____no_output_____" ] ], [ [ "As well as with chains, the short and long strings are equivalent here:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='molecule', indices=0, output='long_string')", "_____no_output_____" ] ], [ [ "### Summary information on entities", "_____no_output_____" ], [ "The significant attributes for entities are only two. In this case the string takes the same coding as before, with the character '@' between the name and the index.", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='entity', indices=0, output='short_string')", "_____no_output_____" ] ], [ [ "The long string is equal to the short string when the element is an entity:", "_____no_output_____" ] ], [ [ "msm.info(molecular_system, element='entity', indices=0, output='long_string')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c520f42f904545a5185367c963e713736388234f
3,462
ipynb
Jupyter Notebook
project/src/main/python/filesyncData/Get_Data.ipynb
daifengqi/big-data-hft
013747ca3c2ca984eeac723fd5d8f8e3458b840c
[ "MIT" ]
1
2022-03-07T09:32:40.000Z
2022-03-07T09:32:40.000Z
project/src/main/python/filesyncData/Get_Data.ipynb
daifengqi/big-data-hft
013747ca3c2ca984eeac723fd5d8f8e3458b840c
[ "MIT" ]
null
null
null
project/src/main/python/filesyncData/Get_Data.ipynb
daifengqi/big-data-hft
013747ca3c2ca984eeac723fd5d8f8e3458b840c
[ "MIT" ]
1
2022-03-03T16:22:37.000Z
2022-03-03T16:22:37.000Z
27.919355
186
0.574235
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\n#from WindPy import *\nfrom datetime import datetime,timedelta\nimport statsmodels.api as sm\nfrom datetime import datetime,timedelta \nimport seaborn as sns\nimport xlwings as xw\nimport time\nimport warnings\nimport pyodbc\nimport re\nimport calendar\n%matplotlib inline\nwarnings.filterwarnings('ignore')\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n#w.start()", "_____no_output_____" ], [ "def list_to_str(my_list):\n if len(my_list)==0 or 'Nan' in my_list:\n print(\"You Input List Is Null\")\n elif len(my_list)==1:\n my_list = str(tuple(my_list))\n my_list = my_list[0:len(my_list)-2] + \")\"\n else:\n my_list = str(tuple(my_list))\n return my_list \n\ndef datetime_to_str(dt):\n return datetime.strftime(dt, '%Y%m%d') ", "_____no_output_____" ], [ "con = pyodbc.connect(\"driver={SQL Server};Server=v-wind;DataBase=wind_quant;UID=ywpublic;PWD=1qazXSW@\")\npath = \"select S_INFO_WINDCODE,TRADE_DT,S_DQ_ADJOPEN,S_DQ_ADJCLOSE,S_DQ_ADJLOW,S_DQ_ADJHIGH,S_DQ_VOLUME from AShareEODPrices where TRADE_DT>= 20181131 and TRADE_DT<=20190430\" \nresult = pd.read_sql(path, con)", "_____no_output_____" ], [ "for column_name in result.columns[2:]:\n df = result[['S_INFO_WINDCODE','TRADE_DT',column_name]]\n df = df.pivot(columns='S_INFO_WINDCODE',index='TRADE_DT',values=column_name)\n df.to_csv('{}.csv'.format(column_name))", "_____no_output_____" ], [ "path = \"select S_INFO_WINDCODE,TRADE_DT,S_VAL_MV,S_DQ_MV from AShareEODDerivativeIndicator where TRADE_DT>= 20181131 and TRADE_DT<=20190430\" \nresult = pd.read_sql(path, con)\nfor column_name in result.columns[2:]:\n df = result[['S_INFO_WINDCODE','TRADE_DT',column_name]]\n df = df.pivot(columns='S_INFO_WINDCODE',index='TRADE_DT',values=column_name)\n df.to_csv('{}.csv'.format(column_name))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
c52106e07ed85aff61c1d3696bfc1c8c66e0f8c0
152,764
ipynb
Jupyter Notebook
code/hyperparameters_tuning.ipynb
MartinsAlex/Group-Project-DM-ML-2021
f832f03e83e58d08f2f05151137c7d282d8614a3
[ "MIT" ]
null
null
null
code/hyperparameters_tuning.ipynb
MartinsAlex/Group-Project-DM-ML-2021
f832f03e83e58d08f2f05151137c7d282d8614a3
[ "MIT" ]
null
null
null
code/hyperparameters_tuning.ipynb
MartinsAlex/Group-Project-DM-ML-2021
f832f03e83e58d08f2f05151137c7d282d8614a3
[ "MIT" ]
1
2021-12-28T18:33:38.000Z
2021-12-28T18:33:38.000Z
80.870302
40,988
0.74549
[ [ [ "# [Detecting the difficulty level of French texts](https://www.kaggle.com/c/detecting-the-difficulty-level-of-french-texts/overview/evaluation)\n## Hyper parameters tuning\n---\nIn this notebook, we will use cross-validation to find the best parameters of the models that showed the most promising result in first approach.", "_____no_output_____" ] ], [ [ "# Download the french language model\n!python -m spacy download fr_core_news_md", "_____no_output_____" ], [ "import pandas as pd\nimport spacy\nfrom spacy import displacy\nimport string\nimport numpy as np\nimport string\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.base import TransformerMixin\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, LabelEncoder\nfrom spacy.lang.en.stop_words import STOP_WORDS\nfrom spacy.lang.en import English\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.utils.multiclass import unique_labels\nnp.random.state = 0\n\ndef evaluate(y_true, pred):\n \"\"\"\n Calculate the models performance metrics. \n Since it is a multi-class classification, we take the weighted average \n for the metrics that are calculated for each class.\n\n \"\"\"\n\n report = {\n 'accuracy':accuracy_score(y_true, pred),\n 'recall':recall_score(y_true, pred, average='weighted'),\n 'precision':precision_score(y_true, pred, average='weighted'),\n 'f1_score':f1_score(y_true, pred, average='weighted')\n }\n\n return report\n\ndef plot_confusion_matrix(y_true, pred):\n \n \"\"\"\n A function to plot the models confusion matrix.\n \"\"\"\n\n cf_matrix = confusion_matrix(y_test, pred)\n\n fig, ax = plt.subplots(1,1, figsize=(9, 6))\n\n sns.heatmap(cf_matrix, ax=ax, annot=True, \n annot_kws={\"size\": 16}, fmt='g')\n\n ax.set_xticklabels(y_true.iloc[:6])\n ax.set_yticklabels(y_true.iloc[:6])\n\n ax.set_ylabel(\"Actual\")\n ax.set_xlabel(\"Predicted\")\n ax.set_title(\"Confusion matrix\")\n\n\nsp = spacy.load('fr_core_news_md')\n\n# Import stopwords from spacy french language\nstop_words = spacy.lang.fr.stop_words.STOP_WORDS\n# Import punctations characters\npunctuations = string.punctuation", "_____no_output_____" ], [ "df = pd.read_csv(\"https://raw.githubusercontent.com/LaCrazyTomato/Group-Project-DM-ML-2021/main/data/training_data.csv\")\n\ndf.head()", "_____no_output_____" ] ], [ [ "This time we will optimize the Tokenizer, with the aim of reducing the dimensionality and improve accuracy. 
To do this, we will use the PorterStemmer combined with the WordNetLemmatizer of nltk, in order to keep only the root of the words.\n\nWe kept stop-words, number and punctations because we believed their number of occurences can be predictors of a sentence complexity (and this looks like to be the case because accuracy on testing set reduce when we keep them).", "_____no_output_____" ] ], [ [ "import re\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer, SnowballStemmer, WordNetLemmatizer\nnltk.download('punkt')\n\n# Define cleaning function\ndef nltk_tokenizer(doc):\n \n # Lowercase\n doc = doc.lower()\n \n # Tokenize and remove white spaces (strip)\n doc = word_tokenize(doc)\n doc = [word.lower().strip() for word in doc]\n \n stemmer = PorterStemmer()\n doc = [stemmer.stem(word) for word in doc]\n \n lemma = WordNetLemmatizer()\n doc = [lemma.lemmatize(word) for word in doc]\n \n return doc\n\n\nprint(nltk_tokenizer(df.loc[2, 'sentence']))", "[nltk_data] Downloading package punkt to\n[nltk_data] C:\\Users\\Alex\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n" ], [ "tfidf_vectorizer = TfidfVectorizer(tokenizer=nltk_tokenizer)", "_____no_output_____" ], [ "X = df['sentence']\ny = df['difficulty']\n\n# Train test split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)\n\nX_train", "_____no_output_____" ] ], [ [ "# Models tuning\nWe will user [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) from sklearn, in order to find the best hyperparameters. What is good with this module is that, in addition to the parameters of the classifier, it allows us to optimize the parameters of all the preprocessor included in the pipeline (e.g: vectorizer, scaler, ...).\n\n## 1. LogisticRegression\n### 1.1 Remainder : accuracy from first approach -> 46.56 %", "_____no_output_____" ] ], [ [ "\nLRCV_model = LogisticRegression()\n\npipe = Pipeline([('vectorizer', tfidf_vectorizer),\n ('classifier', LRCV_model)])\n\npipe.fit(X_train, y_train)\n\n\npred = pipe.predict(X_test)\n\nevaluate(y_test, pred)\n", "_____no_output_____" ] ], [ [ "With optimized text cleaning, we managed to **increase accuracy by approximately 160 basis point.**", "_____no_output_____" ], [ "### 1.2 Tuning", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_classification\nfrom sklearn.model_selection import HalvingGridSearchCV, GridSearchCV\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\ntfidf_vector = TfidfVectorizer()\n\n\nLR_model = LogisticRegression(random_state=0)\n\npipe = Pipeline([('vectorizer', tfidf_vector),\n ('classifier', LR_model)])\n\n# We define here all the parameters we want the CV to do combination with.\nparam_grid = {'classifier__solver': ['lbfgs'],\n 'classifier__penalty': ['l2', 'none'],\n 'classifier__max_iter': [10_000],\n 'vectorizer__tokenizer':[nltk_tokenizer],\n 'vectorizer__ngram_range':[(1,3), (1,4), (1,5), (1,6)],\n 'vectorizer__analyzer':['word', 'char'],\n 'vectorizer__norm':['l1', 'l2'],\n 'vectorizer__max_df':[0.7, 0.8, 0.9, 1.0],\n 'vectorizer__min_df':[0, 1, 2],\n }\n\ngrid_search_params = dict(estimator=pipe,\n param_grid=param_grid,\n verbose=10)\n\nLR_cross_validation = GridSearchCV(**grid_search_params)\n\nLR_cross_validation\n\n", "_____no_output_____" ] ], [ [ "All the parameters combination gives **384 models possible**. 
For each of these, the cross-validation will do 5 split, which gives us a total number of fits of 1920.\n\nIt took a lot of time.. So we saved it in a csv file.", "_____no_output_____" ] ], [ [ "#%%time\n#LR_cross_validation.fit(X, y)\n#results = pd.DataFrame(LR_cross_validation.cv_results_)\n#results.to_csv(\"LR_cross_validation.csv\")", "_____no_output_____" ], [ "results = pd.read_csv(\"LR_cross_validation.csv\")\nresults.head()", "_____no_output_____" ], [ "sns.histplot(results.mean_test_score, kde=True)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1,2, figsize=(12, 4))\n\nsns.scatterplot(data=results, x=\"param_classifier__penalty\", \n y=\"mean_test_score\", \n hue=\"param_vectorizer__analyzer\",\n ax=ax[0])\n\nsns.scatterplot(data=results, x=\"param_vectorizer__ngram_range\", \n y=\"mean_test_score\", \n hue=\"param_vectorizer__norm\",\n ax=ax[1])\n", "_____no_output_____" ], [ "best = results[results.rank_test_score==1]\n\ndisplay(best)\ndict(best.params)", "_____no_output_____" ] ], [ [ "### 1.2 Testing accuracy with best parameters found", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_classification\nfrom sklearn.model_selection import GridSearchCV\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\n\nvectorizer = TfidfVectorizer(tokenizer=nltk_tokenizer, \n ngram_range=(1, 6),\n analyzer='char',\n min_df=2,\n max_df=0.7,\n norm='l2')\n\n\nmodel = LogisticRegression(max_iter=10_000,\n penalty='l2',\n solver='lbfgs')\n\npipe = Pipeline([('vectorizer', vectorizer), \n ('classifier', model)])\n\n\npipe.fit(X_train, y_train)\n\npipe.score(X_test, y_test)\n", "_____no_output_____" ] ], [ [ "We improved accuracy on testing set by 220 basis point.", "_____no_output_____" ], [ "## 2. Random Forest\n### 2.1 Remainder : accuracy from first approach -> 39.79 %\n\nFor the vectorizer, we will keep best parameters previously.", "_____no_output_____" ] ], [ [ "from sklearn.datasets import make_classification\nfrom sklearn.model_selection import GridSearchCV\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\n\nvectorizer = TfidfVectorizer(tokenizer=nltk_tokenizer, \n ngram_range=(1, 6),\n analyzer='char',\n min_df=2,\n max_df=0.7,\n norm='l2')\n\nrandom_forest_model = RandomForestClassifier(random_state=0)\n\npipe = Pipeline([('vectorizer', vectorizer),\n ('classifier', random_forest_model)])\n\n# We define here all the parameters we want the CV to do combination with.\nparam_grid = {'classifier__criterion': ['entropy', 'gini'],\n 'classifier__min_samples_split': [2, 4, 6],\n 'classifier__max_features': [\"auto\", \"sqrt\", \"log2\"],\n }\n\ngrid_search_params = dict(estimator=pipe,\n param_grid=param_grid,\n verbose=10)\n\nrandom_forest_cross_validation = GridSearchCV(**grid_search_params)\n\nrandom_forest_cross_validation\n\n", "_____no_output_____" ], [ "#%%time\n#random_forest_cross_validation.fit(X, y)\n#results = pd.DataFrame(random_forest_cross_validation.cv_results_)\n#results.to_csv(\"random_forest_cross_validation.csv\")", "_____no_output_____" ], [ "results = pd.read_csv(\"random_forest_cross_validation.csv\")", "_____no_output_____" ], [ "best = results[results.rank_test_score==1]\n\ndisplay(best)\ndict(best.params)", "_____no_output_____" ], [ "sns.histplot(results.mean_test_score, kde=True)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1,2, figsize=(12, 4))\n\nsns.scatterplot(data=results, x=\"param_classifier__min_samples_split\", \n 
y=\"mean_test_score\", \n hue=\"param_classifier__criterion\",\n ax=ax[0])\n\nsns.scatterplot(data=results, x=\"param_classifier__max_features\", \n y=\"mean_test_score\", \n hue=\"param_classifier__criterion\",\n ax=ax[1])\n\n", "_____no_output_____" ] ], [ [ "### 2.2 Testing accuracy with best parameters found", "_____no_output_____" ] ], [ [ "vectorizer = TfidfVectorizer(tokenizer=nltk_tokenizer, \n ngram_range=(1, 6),\n analyzer='char',\n min_df=2,\n max_df=0.7,\n norm='l2')\n\nmodel = RandomForestClassifier(criterion='gini',\n max_features='auto',\n min_samples_split=2)\n\npipe = Pipeline([('vectorizer', vectorizer), \n ('classifier', model)])\n\n\npipe.fit(X_train, y_train)\n\npipe.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "## 3. Ridge classifier\n### 3.1 Remainder : accuracy from first approach -> 46.77 %", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import RidgeClassifier\n\n\nvectorizer = TfidfVectorizer(tokenizer=nltk_tokenizer, \n ngram_range=(1, 6),\n analyzer='char',\n min_df=2,\n max_df=0.7,\n norm='l2')\n\nridge_model = RidgeClassifier(random_state=0)\n\nridge_pipe = Pipeline([('vectorizer', vectorizer),\n ('classifier', ridge_model)])\n\n\nparam_grid = {'classifier__alpha': [0.8, 1.0, 1.2],\n 'classifier__max_iter': [10_000],\n 'classifier__max_iter': [10_000],\n 'classifier__solver':['auto', 'sparse_cg', 'sag']\n }\n\ngrid_search_params = dict(estimator=ridge_pipe,\n param_grid=param_grid,\n verbose=10)\n\nridge_cross_validation = GridSearchCV(**grid_search_params)\n\nridge_cross_validation\n", "_____no_output_____" ], [ "#%%time\n#ridge_cross_validation.fit(X, y)\n#results = pd.DataFrame(ridge_cross_validation.cv_results_)\n#results.to_csv(\"ridge_cross_validation.csv\")", "_____no_output_____" ], [ "results = pd.read_csv(\"ridge_cross_validation.csv\")", "_____no_output_____" ], [ "best = results[results.rank_test_score==1]\n\ndisplay(best)\ndict(best.params)", "_____no_output_____" ] ], [ [ "### 3.2 Testing accuracy with best parameters found", "_____no_output_____" ] ], [ [ "model = RidgeClassifier(random_state=0, \n max_iter=10_000, \n alpha=1.2,\n solver='auto')\n\npipe = Pipeline([('vectorizer', vectorizer), \n ('classifier', model)])\n\n\npipe.fit(X_train, y_train)\n\npipe.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "## 4. 
Perceptron classifier\n### 4.1 Remainder : accuracy from first approach -> 41.04 %", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Perceptron\n\n\n\nvectorizer = TfidfVectorizer(tokenizer=nltk_tokenizer, \n ngram_range=(1, 6),\n analyzer='char',\n min_df=2,\n max_df=0.7,\n norm='l2')\n\nperceptron_model = Perceptron(random_state=0)\n\nperceptron_pipe = Pipeline([('vectorizer', vectorizer),\n ('classifier', perceptron_model)])\n\nparam_grid = {'classifier__alpha': [0.0001, 0.0003, 0.005],\n }\n\ngrid_search_params = dict(estimator=perceptron_pipe,\n param_grid=param_grid,\n verbose=10)\n\nperceptron_cross_validation = GridSearchCV(**grid_search_params)\n\nperceptron_cross_validation", "_____no_output_____" ], [ "#%%time\n#perceptron_cross_validation.fit(X, y)\n#results = pd.DataFrame(perceptron_cross_validation.cv_results_)\n#results.to_csv(\"perceptron_cross_validation.csv\")", "_____no_output_____" ], [ "results = pd.read_csv(\"perceptron_cross_validation.csv\")", "_____no_output_____" ], [ "best = results[results.rank_test_score==1]\n\ndisplay(best)\ndict(best.params)", "_____no_output_____" ] ], [ [ "### 4.2 Testing accuracy with best parameters found\nAn alpha of 0.0001 works well and this is the default parameter. Therefore, we wont specify any parameter for the classifier and use default ones.", "_____no_output_____" ] ], [ [ "model = Perceptron()\n\npipe = Pipeline([('vectorizer', vectorizer), \n ('classifier', model)])\n\n\npipe.fit(X_train, y_train)\n\npipe.score(X_test, y_test)", "_____no_output_____" ] ], [ [ "Now that we have our models with optimal parameters, we will implement technics (such as pca, scaling, stacking..) and/or additionnal features to improve the accuracy.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c5212825eaa4b41c3de356756cc3194dd0d6915d
907,818
ipynb
Jupyter Notebook
5. Web-scraping/web_scraping_1.ipynb
djamaludinn/kross3-6
8d840e2911e8318b52441a05a7bd735f35640bb9
[ "MIT" ]
null
null
null
5. Web-scraping/web_scraping_1.ipynb
djamaludinn/kross3-6
8d840e2911e8318b52441a05a7bd735f35640bb9
[ "MIT" ]
null
null
null
5. Web-scraping/web_scraping_1.ipynb
djamaludinn/kross3-6
8d840e2911e8318b52441a05a7bd735f35640bb9
[ "MIT" ]
null
null
null
64.47571
280,493
0.498926
[ [ [ "#### Основы программирования в Python для социальных наук\n\n## Web-scraping таблиц. Подготовка к самостоятельной\n\nСеминар 7\n\n*Автор: Татьяна Рогович, НИУ ВШЭ*\n", "_____no_output_____" ], [ "Этот блокнот поможет вам разобраться, как подходить к самостоятельной работе. Один из пунктов - это скрейпинг таблицы из википедии. Посмотрим на примере, как это делать. Вы знаете из онлайн-курса как пользоваться библиотеками для доступа к сайтам по ссылкам и библиотекой BS для поиска тегов. Сегодня посмотрим пример, как сохранить таблицу из вики.", "_____no_output_____" ], [ "**Задание 1.** \n*5 баллов* \n\n1. На странице в wiki https://en.wikipedia.org/wiki/List_of_nuclear_weapons_tests нужно найти таблицу под названием \"Worldwide nuclear test with a yield of 1.4 Mt TNT equivalent and more\". \n2. С помощью поиска по тегам, нужно сохранить из таблицы следующие колонки: 'Date (GMT)', 'Yield (megatons)', 'Country'. Каждая колонка таблицы должна быть сохранена в отдельную переменную, внутри которой лежит список, где первое значение - название колонки. Например, для колонки 'Date (GMT)' список будет выглядеть так: \n['Date (GMT)', 'October 31, 1952', ...остальные значения..., 'November 17, 1976']\n3. Выведите эти три списка командой \nprint(Dates) \nprint(Yield) \nprint(Country)", "_____no_output_____" ] ], [ [ "# Ваше решение. При необходимости создавайте новые ячейки под этой с помощью знака +", "_____no_output_____" ] ], [ [ "**Задание 2.** \n*5 баллов (каждыый шаг 1 балл)* \n\n1. Напишите функцию, которая берет аргументом название страны и возвращает (return) среднюю мощность взрыва для этой страны (нужно сложить все значения из колонки 'Yield (megatons)', которым соответствует страна, например, США, и раделить на количество этих значений). Для подсчета используйте списки, которые вы извлекли в Задании 1. \n2. Из списка Country оставьте только уникальные значения для стран и запустите вашу функцию в цикле для каждого значения Country. Внутри цикла сделайте следующий вывод \"{название страны}: средняя мощность взрыва {средняя мощность} мегатон\"\n3. Отдельно сохраните в переменную и выведите среднюю мощность взрыва (Yield (megatons) для бомб, которые тестировались в USA.\n4. Отдельно сохраните в переменную и выведите среднюю мощность взрыва (Yield (megatons) для бомб, которые тестировались в Soviet Union.\n5. Сравните эти значения и выведите название страны, для которой средняя мощность взрыва выше.\n\n\nЗадание, выполненное без использования автоматически собранных данных, не засчитывается (например, если вы скопировали все значения из таблицы вручную и нашли их среднее).", "_____no_output_____" ] ], [ [ "# Ваше решение. При необходимости создавайте новые ячейки под этой с помощью знака +\n# Функция", "_____no_output_____" ], [ "# Цикл по странам", "_____no_output_____" ], [ "# Значение для США", "_____no_output_____" ], [ "# Значение для СССР", "_____no_output_____" ], [ "# Сравнение значений", "_____no_output_____" ] ], [ [ "# Пример решения Задания 1. ", "_____no_output_____" ], [ "Сначала мы импортируем библиотеку `requests`. 
It lets us send HTTP/1.1 requests simply and conveniently, without tedious manual work.", "_____no_output_____" ] ], [ [ "import requests", "_____no_output_____" ] ], [ [ "Now we need to specify the address of the page we are going to scrape and save it into the variable `website_url`.\n`requests.get(url).text` will request the site and return the page's `HTML` code.", "_____no_output_____" ] ], [ [ "website_url = requests.get('https://en.wikipedia.org/wiki/List_of_nuclear_weapons_tests').text\nwebsite_url", "_____no_output_____" ] ], [ [ "As we can see, the whole code comes back as one block of text that is inconvenient to read and parse. So we will create a `BeautifulSoup` object with the `BeautifulSoup` function, after importing the library itself. `Beautiful Soup` is a library for parsing `HTML` and `XML` documents. It builds a tree out of the `HTML` code, which is very useful for scraping. The `prettify()` function lets us view the code in a more readable form, including a breakdown by tags.", "_____no_output_____" ] ], [ [ "from bs4 import BeautifulSoup\n\nsoup = BeautifulSoup(website_url,'lxml')\n\nprint(soup.prettify())", "<!DOCTYPE html>\n<html class=\"client-nojs\" dir=\"ltr\" lang=\"en\">\n <head>\n  <meta charset=\"utf-8\"/>\n  <title>\n   List of nuclear weapons tests - Wikipedia\n  </title>\n  [... lengthy prettified HTML of the Wikipedia page, truncated ...]" ] ] ]
lists\"],\"wgPageContentLanguage\":\"en\",\"wgPageContentModel\":\"wikitext\",\"wgRelevantPageName\":\"List_of_nuclear_weapons_tests\",\"wgRelevantArticleId\":2189647,\"wgIsProbablyEditable\":true,\"wgRelevantPageIsProbablyEditable\":true,\"wgRestrictionEdit\":[],\"wgRestrictionMove\":[],\"wgFlaggedRevsParams\":{\"tags\":{\"status\":{\"levels\":-1}}},\"wgMediaViewerOnClick\":true,\"wgMediaViewerEnabledByDefault\":true,\"wgPopupsFlags\":10,\"wgVisualEditor\":{\"pageLanguageCode\":\"en\",\"pageLanguageDir\":\"ltr\",\"pageVariantFallbacks\":\"en\"},\"wgMFDisplayWikibaseDescriptions\":{\"search\":true,\"nearby\":true,\"watchlist\":true,\"tagline\":false},\"wgWMESchemaEditAttemptStepOversample\":false,\"wgWMEPageLength\":70000,\"wgNoticeProject\":\"wikipedia\",\n\"wgULSCurrentAutonym\":\"English\",\"wgEditSubmitButtonLabelPublish\":true,\"wgCentralAuthMobileDomain\":false,\"wgULSPosition\":\"interlanguage\",\"wgULSisCompactLinksEnabled\":true,\"wgWikibaseItemId\":\"Q1863664\",\"wgGENewcomerTasksGuidanceEnabled\":true,\"wgGEAskQuestionEnabled\":false,\"wgGELinkRecommendationsFrontendEnabled\":false};RLSTATE={\"ext.globalCssJs.user.styles\":\"ready\",\"site.styles\":\"ready\",\"user.styles\":\"ready\",\"ext.globalCssJs.user\":\"ready\",\"user\":\"ready\",\"user.options\":\"loading\",\"ext.cite.styles\":\"ready\",\"skins.vector.styles.legacy\":\"ready\",\"jquery.tablesorter.styles\":\"ready\",\"jquery.makeCollapsible.styles\":\"ready\",\"ext.visualEditor.desktopArticleTarget.noscript\":\"ready\",\"ext.wikimediaBadges\":\"ready\",\"ext.uls.interlanguage\":\"ready\",\"wikibase.client.init\":\"ready\"};RLPAGEMODULES=[\"ext.cite.ux-enhancements\",\"ext.scribunto.logs\",\"site\",\"mediawiki.page.ready\",\"jquery.tablesorter\",\"jquery.makeCollapsible\",\"mediawiki.toc\",\"skins.vector.legacy.js\",\"ext.gadget.ReferenceTooltips\",\n\"ext.gadget.charinsert\",\"ext.gadget.extra-toolbar-buttons\",\"ext.gadget.refToolbar\",\"ext.gadget.switcher\",\"mmv.head\",\"mmv.bootstrap.autostart\",\"ext.popups\",\"ext.visualEditor.desktopArticleTarget.init\",\"ext.visualEditor.targetLoader\",\"ext.eventLogging\",\"ext.wikimediaEvents\",\"ext.navigationTiming\",\"ext.cx.eventlogging.campaigns\",\"ext.centralNotice.geoIP\",\"ext.centralNotice.startUp\",\"ext.centralauth.centralautologin\",\"ext.uls.compactlinks\",\"ext.uls.interface\",\"ext.growthExperiments.SuggestedEditSession\"];\n </script>\n <script>\n (RLQ=window.RLQ||[]).push(function(){mw.loader.implement(\"user.options@1hzgi\",function($,jQuery,require,module){/*@nomin*/mw.user.tokens.set({\"patrolToken\":\"+\\\\\",\"watchToken\":\"+\\\\\",\"csrfToken\":\"+\\\\\"});\n});});\n </script>\n <link href=\"/w/load.php?lang=en&amp;modules=ext.cite.styles%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediaBadges%7Cjquery.makeCollapsible.styles%7Cjquery.tablesorter.styles%7Cskins.vector.styles.legacy%7Cwikibase.client.init&amp;only=styles&amp;skin=vector\" rel=\"stylesheet\"/>\n <script async=\"\" src=\"/w/load.php?lang=en&amp;modules=startup&amp;only=scripts&amp;raw=1&amp;skin=vector\">\n </script>\n <meta content=\"\" name=\"ResourceLoaderDynamicStyles\"/>\n <link href=\"/w/load.php?lang=en&amp;modules=site.styles&amp;only=styles&amp;skin=vector\" rel=\"stylesheet\"/>\n <meta content=\"MediaWiki 1.38.0-wmf.9\" name=\"generator\"/>\n <meta content=\"origin\" name=\"referrer\"/>\n <meta content=\"origin-when-crossorigin\" name=\"referrer\"/>\n <meta content=\"origin-when-cross-origin\" name=\"referrer\"/>\n <meta 
content=\"telephone=no\" name=\"format-detection\"/>\n <meta content=\"https://upload.wikimedia.org/wikipedia/commons/6/6a/Little_boy.jpg\" property=\"og:image\"/>\n <meta content=\"1200\" property=\"og:image:width\"/>\n <meta content=\"789\" property=\"og:image:height\"/>\n <meta content=\"https://upload.wikimedia.org/wikipedia/commons/6/6a/Little_boy.jpg\" property=\"og:image\"/>\n <meta content=\"800\" property=\"og:image:width\"/>\n <meta content=\"526\" property=\"og:image:height\"/>\n <meta content=\"https://upload.wikimedia.org/wikipedia/commons/thumb/6/6a/Little_boy.jpg/640px-Little_boy.jpg\" property=\"og:image\"/>\n <meta content=\"640\" property=\"og:image:width\"/>\n <meta content=\"421\" property=\"og:image:height\"/>\n <meta content=\"List of nuclear weapons tests - Wikipedia\" property=\"og:title\"/>\n <meta content=\"website\" property=\"og:type\"/>\n <link href=\"//upload.wikimedia.org\" rel=\"preconnect\"/>\n <link href=\"//en.m.wikipedia.org/wiki/List_of_nuclear_weapons_tests\" media=\"only screen and (max-width: 720px)\" rel=\"alternate\"/>\n <link href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=edit\" rel=\"alternate\" title=\"Edit this page\" type=\"application/x-wiki\"/>\n <link href=\"/static/apple-touch/wikipedia.png\" rel=\"apple-touch-icon\"/>\n <link href=\"/static/favicon/wikipedia.ico\" rel=\"shortcut icon\"/>\n <link href=\"/w/opensearch_desc.php\" rel=\"search\" title=\"Wikipedia (en)\" type=\"application/opensearchdescription+xml\"/>\n <link href=\"//en.wikipedia.org/w/api.php?action=rsd\" rel=\"EditURI\" type=\"application/rsd+xml\"/>\n <link href=\"https://creativecommons.org/licenses/by-sa/3.0/\" rel=\"license\"/>\n <link href=\"https://en.wikipedia.org/wiki/List_of_nuclear_weapons_tests\" rel=\"canonical\"/>\n <link href=\"//meta.wikimedia.org\" rel=\"dns-prefetch\"/>\n <link href=\"//login.wikimedia.org\" rel=\"dns-prefetch\"/>\n </head>\n <body class=\"mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-List_of_nuclear_weapons_tests rootpage-List_of_nuclear_weapons_tests skin-vector action-view skin-vector-legacy\">\n <div class=\"noprint\" id=\"mw-page-base\">\n </div>\n <div class=\"noprint\" id=\"mw-head-base\">\n </div>\n <div class=\"mw-body\" id=\"content\" role=\"main\">\n <a id=\"top\">\n </a>\n <div id=\"siteNotice\">\n <!-- CentralNotice -->\n </div>\n <div class=\"mw-indicators\">\n </div>\n <h1 class=\"firstHeading\" id=\"firstHeading\">\n List of nuclear weapons tests\n </h1>\n <div class=\"vector-body\" id=\"bodyContent\">\n <div class=\"noprint\" id=\"siteSub\">\n From Wikipedia, the free encyclopedia\n </div>\n <div id=\"contentSub\">\n </div>\n <div id=\"contentSub2\">\n </div>\n <div id=\"jump-to-nav\">\n </div>\n <a class=\"mw-jump-link\" href=\"#mw-head\">\n Jump to navigation\n </a>\n <a class=\"mw-jump-link\" href=\"#searchInput\">\n Jump to search\n </a>\n <div class=\"mw-body-content mw-content-ltr\" dir=\"ltr\" id=\"mw-content-text\" lang=\"en\">\n <div class=\"mw-parser-output\">\n <div class=\"shortdescription nomobile noexcerpt noprint searchaux\" style=\"display:none\">\n Wikipedia list article\n </div>\n <style data-mw-deduplicate=\"TemplateStyles:r1045330069\">\n .mw-parser-output .sidebar{width:22em;float:right;clear:right;margin:0.5em 0 1em 1em;background:#f8f9fa;border:1px solid #aaa;padding:0.2em;text-align:center;line-height:1.4em;font-size:88%;border-collapse:collapse;display:table}body.skin-minerva .mw-parser-output 
.sidebar{display:table!important;float:right!important;margin:0.5em 0 1em 1em!important}.mw-parser-output .sidebar-subgroup{width:100%;margin:0;border-spacing:0}.mw-parser-output .sidebar-left{float:left;clear:left;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-none{float:none;clear:both;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-outer-title{padding:0 0.4em 0.2em;font-size:125%;line-height:1.2em;font-weight:bold}.mw-parser-output .sidebar-top-image{padding:0.4em}.mw-parser-output .sidebar-top-caption,.mw-parser-output .sidebar-pretitle-with-top-image,.mw-parser-output .sidebar-caption{padding:0.2em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-pretitle{padding:0.4em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-title,.mw-parser-output .sidebar-title-with-pretitle{padding:0.2em 0.8em;font-size:145%;line-height:1.2em}.mw-parser-output .sidebar-title-with-pretitle{padding:0.1em 0.4em}.mw-parser-output .sidebar-image{padding:0.2em 0.4em 0.4em}.mw-parser-output .sidebar-heading{padding:0.1em 0.4em}.mw-parser-output .sidebar-content{padding:0 0.5em 0.4em}.mw-parser-output .sidebar-content-with-subgroup{padding:0.1em 0.4em 0.2em}.mw-parser-output .sidebar-above,.mw-parser-output .sidebar-below{padding:0.3em 0.8em;font-weight:bold}.mw-parser-output .sidebar-collapse .sidebar-above,.mw-parser-output .sidebar-collapse .sidebar-below{border-top:1px solid #aaa;border-bottom:1px solid #aaa}.mw-parser-output .sidebar-navbar{text-align:right;font-size:115%;padding:0 0.4em 0.4em}.mw-parser-output .sidebar-list-title{padding:0 0.4em;text-align:left;font-weight:bold;line-height:1.6em;font-size:105%}.mw-parser-output .sidebar-list-title-c{padding:0 0.4em;text-align:center;margin:0 3.3em}@media(max-width:720px){body.mediawiki .mw-parser-output .sidebar{width:100%!important;clear:both;float:none!important;margin-left:0!important;margin-right:0!important}}\n </style>\n <table class=\"sidebar nomobile nowraplinks hlist\" style=\"width:;\">\n <tbody>\n <tr>\n <th class=\"sidebar-title\">\n <a href=\"/wiki/Nuclear_weapon\" title=\"Nuclear weapon\">\n Nuclear weapons\n </a>\n </th>\n </tr>\n <tr>\n <td class=\"sidebar-image\">\n <a class=\"image\" href=\"/wiki/File:Little_boy.jpg\">\n <img alt=\"Photograph of a mock-up of the Little Boy nuclear weapon dropped on Hiroshima, Japan, in August 1945.\" class=\"thumbborder\" data-file-height=\"462\" data-file-width=\"703\" decoding=\"async\" height=\"112\" src=\"//upload.wikimedia.org/wikipedia/commons/thumb/6/6a/Little_boy.jpg/170px-Little_boy.jpg\" srcset=\"//upload.wikimedia.org/wikipedia/commons/thumb/6/6a/Little_boy.jpg/255px-Little_boy.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6a/Little_boy.jpg/340px-Little_boy.jpg 2x\" width=\"170\"/>\n </a>\n </td>\n </tr>\n <tr>\n <th class=\"sidebar-heading\" style=\"background:#eee;\">\n Background\n </th>\n </tr>\n <tr>\n <td class=\"sidebar-content\">\n <ul>\n <li>\n <a href=\"/wiki/History_of_nuclear_weapons\" title=\"History of nuclear weapons\">\n History\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_warfare\" title=\"Nuclear warfare\">\n Warfare\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_weapon_design\" title=\"Nuclear weapon design\">\n Design\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_weapons_testing\" title=\"Nuclear weapons testing\">\n Testing\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_weapons_delivery\" title=\"Nuclear weapons delivery\">\n Delivery\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_weapon_yield\" title=\"Nuclear weapon yield\">\n Yield\n 
</a>\n </li>\n </ul>\n <ul>\n <li>\n <a href=\"/wiki/Effects_of_nuclear_explosions\" title=\"Effects of nuclear explosions\">\n Effects\n </a>\n and\n <a href=\"/wiki/List_of_projected_death_tolls_from_nuclear_attacks_on_cities\" title=\"List of projected death tolls from nuclear attacks on cities\">\n estimated megadeaths\n </a>\n of\n <a href=\"/wiki/Nuclear_explosion\" title=\"Nuclear explosion\">\n explosions\n </a>\n </li>\n </ul>\n <ul>\n <li>\n <a href=\"/wiki/Nuclear_winter\" title=\"Nuclear winter\">\n Winter\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_labor_issues\" title=\"Nuclear labor issues\">\n Workers\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_ethics\" title=\"Nuclear ethics\">\n Ethics\n </a>\n </li>\n </ul>\n <ul>\n <li>\n <a href=\"/wiki/List_of_nuclear_weapons\" title=\"List of nuclear weapons\">\n Arsenals\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_arms_race\" title=\"Nuclear arms race\">\n Arms race\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_espionage\" title=\"Nuclear espionage\">\n Espionage\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_proliferation\" title=\"Nuclear proliferation\">\n Proliferation\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_disarmament\" title=\"Nuclear disarmament\">\n Disarmament\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_terrorism\" title=\"Nuclear terrorism\">\n Terrorism\n </a>\n </li>\n <li>\n <a href=\"/wiki/Anti-nuclear_movement\" title=\"Anti-nuclear movement\">\n Opposition\n </a>\n </li>\n </ul>\n </td>\n </tr>\n <tr>\n <th class=\"sidebar-heading\" style=\"background:#eee;\">\n <a href=\"/wiki/List_of_states_with_nuclear_weapons\" title=\"List of states with nuclear weapons\">\n Nuclear-armed states\n </a>\n </th>\n </tr>\n <tr>\n <td class=\"sidebar-content\">\n <dl>\n <dd>\n <i>\n <a href=\"/wiki/Treaty_on_the_Non-Proliferation_of_Nuclear_Weapons\" title=\"Treaty on the Non-Proliferation of Nuclear Weapons\">\n NPT\n </a>\n recognized\n </i>\n <br/>\n <a href=\"/wiki/United_States_and_weapons_of_mass_destruction\" title=\"United States and weapons of mass destruction\">\n United States\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/Russia_and_weapons_of_mass_destruction\" title=\"Russia and weapons of mass destruction\">\n Russia\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/Nuclear_weapons_of_the_United_Kingdom\" title=\"Nuclear weapons of the United Kingdom\">\n United Kingdom\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/France_and_weapons_of_mass_destruction\" title=\"France and weapons of mass destruction\">\n France\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/China_and_weapons_of_mass_destruction\" title=\"China and weapons of mass destruction\">\n China\n </a>\n </dd>\n </dl>\n <dl>\n <dd>\n <i>\n Others\n </i>\n <br/>\n <a href=\"/wiki/India_and_weapons_of_mass_destruction\" title=\"India and weapons of mass destruction\">\n India\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/Nuclear_weapons_and_Israel\" title=\"Nuclear weapons and Israel\">\n Israel\n </a>\n <span style=\"font-size:90%;\">\n (undeclared)\n </span>\n </dd>\n <dd>\n <a href=\"/wiki/Pakistan_and_weapons_of_mass_destruction\" title=\"Pakistan and weapons of mass destruction\">\n Pakistan\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/North_Korea_and_weapons_of_mass_destruction\" title=\"North Korea and weapons of mass destruction\">\n North Korea\n </a>\n </dd>\n </dl>\n <dl>\n <dd>\n <i>\n Former\n </i>\n <br/>\n <a href=\"/wiki/South_Africa_and_weapons_of_mass_destruction\" title=\"South Africa and weapons of mass destruction\">\n South Africa\n </a>\n </dd>\n 
<dd>\n <a href=\"/wiki/List_of_states_with_nuclear_weapons#Former_Soviet_Republics\" title=\"List of states with nuclear weapons\">\n Belarus\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/List_of_states_with_nuclear_weapons#Former_Soviet_Republics\" title=\"List of states with nuclear weapons\">\n Kazakhstan\n </a>\n </dd>\n <dd>\n <a href=\"/wiki/Nuclear_weapons_and_Ukraine\" title=\"Nuclear weapons and Ukraine\">\n Ukraine\n </a>\n </dd>\n </dl>\n </td>\n </tr>\n <tr>\n <td class=\"sidebar-navbar\">\n <style data-mw-deduplicate=\"TemplateStyles:r1054937957\">\n .mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:\"[ \"}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:\" ]\"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}.mw-parser-output .infobox .navbar{font-size:100%}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}\n </style>\n <div class=\"navbar plainlinks hlist navbar-mini\">\n <ul>\n <li class=\"nv-view\">\n <a href=\"/wiki/Template:Nuclear_weapons\" title=\"Template:Nuclear weapons\">\n <abbr title=\"View this template\">\n v\n </abbr>\n </a>\n </li>\n <li class=\"nv-talk\">\n <a href=\"/wiki/Template_talk:Nuclear_weapons\" title=\"Template talk:Nuclear weapons\">\n <abbr title=\"Discuss this template\">\n t\n </abbr>\n </a>\n </li>\n <li class=\"nv-edit\">\n <a class=\"external text\" href=\"https://en.wikipedia.org/w/index.php?title=Template:Nuclear_weapons&amp;action=edit\">\n <abbr title=\"Edit this template\">\n e\n </abbr>\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n </tbody>\n </table>\n <div class=\"thumb tright\">\n <div class=\"thumbinner\" style=\"width:222px;\">\n <a class=\"image\" href=\"/wiki/File:Radiation_warning_symbol.svg\">\n <img alt=\"\" class=\"thumbimage\" data-file-height=\"600\" data-file-width=\"600\" decoding=\"async\" height=\"220\" src=\"//upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Radiation_warning_symbol.svg/220px-Radiation_warning_symbol.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Radiation_warning_symbol.svg/330px-Radiation_warning_symbol.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Radiation_warning_symbol.svg/440px-Radiation_warning_symbol.svg.png 2x\" width=\"220\"/>\n </a>\n <div class=\"thumbcaption\">\n <div class=\"magnify\">\n <a class=\"internal\" href=\"/wiki/File:Radiation_warning_symbol.svg\" title=\"Enlarge\">\n </a>\n </div>\n The radiation warning symbol (\n <i>\n trefoil\n </i>\n ).\n </div>\n </div>\n </div>\n <p>\n <a href=\"/wiki/Nuclear_weapons_testing\" title=\"Nuclear weapons testing\">\n Nuclear weapons testing\n </a>\n is the act of experimentally and deliberately firing one or more nuclear devices in a controlled manner pursuant to a military, scientific or technological goal. 
This has been done on test sites on land or waters owned, controlled or leased from the owners by one of the eight nuclear nations: the United States, the Soviet Union, the United Kingdom, France, China, India, Pakistan and North Korea, or has been done on or over ocean sites far from territorial waters. There have been 2,121 tests conducted since the first in July 1945, involving 2,476 nuclear devices. As of 1993, worldwide, 520 atmospheric nuclear explosions (including 8 underwater) had been conducted with a total yield of 545 megatons (Mt): 217 Mt from pure fission and 328 Mt from bombs using fusion, while the estimated number of underground nuclear tests conducted from 1957 to 1992 is 1,352 explosions with a total yield of 90 Mt.[1]

Very few unknown tests are suspected at this time, the Vela Incident being the most prominent. Israel is the only country suspected of having nuclear weapons but not known to have ever tested any.

The following are considered nuclear tests:

- Single nuclear devices fired in deep horizontal tunnels (drifts) or in vertical shafts, in shallow shafts ("cratering"), underwater, on barges or vessels on the water, on land, in towers, carried by balloons, shot from cannons, dropped from airplanes with or without parachutes, or shot on rockets into a ballistic trajectory, into the high atmosphere or into near space. Since 1963 the great majority have been underground, owing to the Partial Test Ban Treaty.
- Salvo tests, in which several devices are fired simultaneously, as defined by international treaties (a short illustrative sketch of this rule follows these lists):

> In conformity with treaties between the United States and the Soviet Union, ... For nuclear weapon tests, a salvo is defined as two or more underground nuclear explosions conducted at a test site within an area delineated by a circle having a diameter of two kilometers and conducted within a total period of time of 0.1 second.[2]

- The two nuclear bombs dropped in combat over Japan in 1945. While the primary purpose of these two detonations was military rather than experimental, observations were made, and the tables would be incomplete without them.
- Nuclear safety tests, in which the nuclear yield was intended to be zero and which failed to some extent if any nuclear yield was detected. There have been such failures, so they are included in the lists along with the successes.
- Fizzles, in which the expected yield was not reached.
- Tests intended but not completed because of vehicle or other support failures that destroyed the device.
- Tests that were emplaced but could not be fired for various reasons; usually the devices were ultimately destroyed by later conventional or nuclear explosions.

Not included as nuclear tests:

- Misfires which were corrected and later fired as intended.
- Hydro-nuclear or subcritical testing, in which the amount of normal fuel material for a nuclear device is below that necessary to sustain a chain reaction. The line here is finely drawn but, among other things, subcritical testing is not prohibited by the Comprehensive Nuclear Test Ban Treaty, while safety tests are.[3][4]
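The salvo definition quoted above is a precise quantitative rule: two or more underground explosions inside a circle of two kilometers diameter, fired within 0.1 second. Purely as an illustration, and not as any official algorithm, it can be read as a small predicate over shot positions and firing times; the data representation below, and the simplification of "within a 2 km circle" to a maximum pairwise separation of 2 km, are our own assumptions.

```python
from itertools import combinations
from math import dist

def looks_like_salvo(shots, max_separation_km=2.0, max_window_s=0.1):
    """Illustrative reading of the salvo definition quoted above.

    `shots` is a list of (x_km, y_km, t_s) tuples for underground detonations.
    The 2 km circle is approximated as a maximum pairwise separation of 2 km.
    """
    if len(shots) < 2:                                  # a salvo needs two or more explosions
        return False
    times = [t for _, _, t in shots]
    if max(times) - min(times) > max_window_s:          # all fired within 0.1 s
        return False
    # all shots within roughly 2 km of each other
    return all(dist(a[:2], b[:2]) <= max_separation_km
               for a, b in combinations(shots, 2))

# Two shots 1.5 km apart, fired 50 ms apart, would count as a single salvo test.
print(looks_like_salvo([(0.0, 0.0, 0.000), (1.5, 0.0, 0.050)]))  # True
```

Counting such a salvo as a single test is what note [a] of the table below refers to.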
## Tests by country

The table in this section summarizes all worldwide nuclear testing (including the two bombs dropped in combat, which were not tests). The country names link to summary articles for each country, which may in turn be used to drill down to test series articles containing details on every known nuclear explosion and test. The notes attached to various table cells detail how the numbers in them are arrived at.

**Worldwide nuclear testing totals by country**

| Country | Tests [a] | Devices fired [b] | Devices with unknown yields [c] | Peaceful use tests [d] | Non-PTBT tests [e] | Yield range (kilotons) | Total yield (kilotons) | Percentage by tests | Percentage by yield |
|---|---|---|---|---|---|---|---|---|---|
| USA [2][5] | 1,032 [f] | 1,132 | 12 | 27 [g] (Operation Plowshare) | 231 | 0 to 15,000 | 196,514 [h] | 48.7% | 36.3% |
| USSR [2][6] | 727 [i] | 981 | 248 | 156 [j] (Nuclear Explosions for the National Economy) | 229 | 0 to 50,000 | 296,837 | 34.4% | 54.9% |
| UK [2] | 88 [k] | 88 | 31 | 0 | 21 | 0 to 3,000 | 9,282 | 4.15% | 1.72% |
| France [2] | 217 [l] | 217 | 0 | 4 [m] | 57 | 0 to 2,600 | 13,567 | 10.2% | 2.51% |
| China [2] | 47 [n] | 48 | 7 | 0 | 23 | 0 to 4,000 | 24,409 | 2.22% | 4.51% |
| India [2] | 4 | 6 | 0 | 1 [o] | 0 | 0 to 60 | 70 | 0.141% | 0.013% |
| Pakistan [2] | 2 | 6 [p] | 0 | 0 | 0 | 1 to 32 | 51 | 0.107% | 0.0094% |
| North Korea [2] | 6 | 6 | 0 | 0 | 0 | 1 to 250 | 197.8 | 0.283% | 0.036% |
| Total | 2,121 | 2,476 | 294 | 188 | 604 | 0 to 50,000 | 540,849 | | |

Table notes:

- [a] Including salvo tests counted as a single test.
- [b] Detonations include zero-yield detonations in safety tests and failed full-yield tests, but not those in the accident category listed above.
- [c] The number of detonations for which the yield is unknown.
- [d] As declared by the nation testing; some may have been dual use.
- [e] Tests which violate the PTBT: atmospheric, surface, barge, space, and underwater tests.
- [f] Including five tests in which the devices were destroyed before detonation by rocket failures, and the combat bombs dropped on Japan in World War II.
- [g] Includes both application tests and research tests at NTS.
- [h] When a test yield reads "< number kt" (like "< 20 kt"), this total scores the yield as half the stated maximum, i.e. 10 kt in this example (illustrated in the sketch below).
- [i] Includes the test device left behind in Semipalatinsk and 11 apparent failures not in the official list, but included in the list in the following reference.[7]
- [j] 124 application tests and 32 research tests which helped design better PNE charges.
- [k] Includes the 43 Vixen tests, which were safety tests.
- [l] Including 5 Pollen plutonium dispersal tests at Adrar Tikertine near In Ekker, and two possible safety tests in 1978, listed in the following reference.[8]
- [m] Four of the tests at In Ekker were the focus of attention at APEX (Application pacifique des expérimentations nucléaires), which gave the tests different names, causing some confusion.
- [n] Includes one test destroyed before detonation by a failed parachute, and two which are unlisted in most sources but are listed in the following reference.[6]
- [o] Indira Gandhi, in her capacity as India's Minister of Atomic Energy at the time, declared the Smiling Buddha test to have been a test for the peaceful uses of atomic power.
- [p] There is some uncertainty as to exactly how many bombs were exploded in each of Pakistan's tests; it could be as low as three altogether or as high as six.
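Two bookkeeping conventions behind the table are easy to miss: note [h]'s rule that a yield reported only as "< N kt" is scored as half the stated maximum, and the percentage columns, which are each country's share of the worldwide totals. The following minimal Python sketch illustrates both; the function name and layout are our own and are not part of any source.

```python
# Illustrative sketch of two conventions used in the totals table above.

def scored_yield_kt(yield_str: str) -> float:
    """Yield value used for totals, per note [h]:
    a '< N kt' entry is scored as half the stated maximum."""
    s = yield_str.replace("kt", "").strip()
    if s.startswith("<"):
        return float(s.lstrip("<").strip()) / 2.0
    return float(s)

assert scored_yield_kt("< 20 kt") == 10.0   # the example given in note [h]

# Percentage columns: each country's share of the worldwide totals
# (2,121 tests and 540,849 kt), shown here for the United States row.
us_tests, us_yield_kt = 1_032, 196_514
total_tests, total_yield_kt = 2_121, 540_849

print(f"Percentage by tests: {100 * us_tests / total_tests:.1f}%")        # ~48.7%
print(f"Percentage by yield: {100 * us_yield_kt / total_yield_kt:.1f}%")  # ~36.3%
```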
<a href=\"/wiki/Socorro,_New_Mexico\" title=\"Socorro, New Mexico\">\n Socorro, New Mexico\n </a>\n , was the first-ever test of a nuclear weapon (yield of around 20 kilotons).\n </li>\n <li>\n The\n <i>\n <a href=\"/wiki/Operation_Crossroads\" title=\"Operation Crossroads\">\n Operation Crossroads\n </a>\n </i>\n series in July 1946, at\n <a href=\"/wiki/Bikini_Atoll\" title=\"Bikini Atoll\">\n Bikini Atoll\n </a>\n in the\n <a href=\"/wiki/Marshall_Islands\" title=\"Marshall Islands\">\n Marshall Islands\n </a>\n , was the first postwar test series and one of the largest military operations in U.S. history.\n </li>\n <li>\n The\n <i>\n <a href=\"/wiki/Operation_Greenhouse\" title=\"Operation Greenhouse\">\n Operation Greenhouse\n </a>\n </i>\n shots of May 1951, at\n <a href=\"/wiki/Enewetak_Atoll\" title=\"Enewetak Atoll\">\n Enewetak Atoll\n </a>\n in the Marshall Islands, included the first\n <a href=\"/wiki/Boosted_fission_weapon\" title=\"Boosted fission weapon\">\n boosted fission weapon\n </a>\n test (named\n <i>\n Item\n </i>\n ) and a scientific test (named\n <i>\n George\n </i>\n ) which proved the feasibility of thermonuclear weapons.\n </li>\n <li>\n The\n <i>\n <a href=\"/wiki/Ivy_Mike\" title=\"Ivy Mike\">\n Ivy Mike\n </a>\n </i>\n shot of 1 November 1952, at\n <a href=\"/wiki/Enewetak_Atoll\" title=\"Enewetak Atoll\">\n Enewetak Atoll\n </a>\n , was the first full test of a\n <a class=\"mw-redirect\" href=\"/wiki/Teller-Ulam_design\" title=\"Teller-Ulam design\">\n Teller-Ulam design\n </a>\n \"staged\" hydrogen bomb, with a yield of 10 megatons. This was not a deployable weapon. With its full\n <a class=\"mw-redirect\" href=\"/wiki/Cryogenic\" title=\"Cryogenic\">\n cryogenic\n </a>\n equipment it weighed about 82 tons.\n <sup class=\"noprint Inline-Template Template-Fact\" style=\"white-space:nowrap;\">\n [\n <i>\n <a href=\"/wiki/Wikipedia:Citation_needed\" title=\"Wikipedia:Citation needed\">\n <span title=\"This claim needs references to reliable sources. (September 2015)\">\n citation needed\n </span>\n </a>\n </i>\n ]\n </sup>\n </li>\n <li>\n The\n <i>\n <a href=\"/wiki/Castle_Bravo\" title=\"Castle Bravo\">\n Castle Bravo\n </a>\n </i>\n shot of 1 March 1954, at\n <a href=\"/wiki/Bikini_Atoll\" title=\"Bikini Atoll\">\n Bikini Atoll\n </a>\n , was the first test of a deployable (solid fuel) thermonuclear weapon, and also (accidentally)\n <sup class=\"noprint Inline-Template Template-Fact\" style=\"white-space:nowrap;\">\n [\n <i>\n <a href=\"/wiki/Wikipedia:Citation_needed\" title=\"Wikipedia:Citation needed\">\n <span title=\"This claim needs references to reliable sources. (September 2015)\">\n citation needed\n </span>\n </a>\n </i>\n ]\n </sup>\n the largest weapon ever tested by the United States (15 megatons). It was also the single largest U.S. radiological accident in connection with nuclear testing.\n <sup class=\"noprint Inline-Template Template-Fact\" style=\"white-space:nowrap;\">\n [\n <i>\n <a href=\"/wiki/Wikipedia:Citation_needed\" title=\"Wikipedia:Citation needed\">\n <span title=\"This claim needs references to reliable sources. 
(September 2015)\">\n citation needed\n </span>\n </a>\n </i>\n ]\n </sup>\n The unanticipated yield, and a change in the weather, resulted in\n <a href=\"/wiki/Nuclear_fallout\" title=\"Nuclear fallout\">\n nuclear fallout\n </a>\n spreading eastward onto the inhabited\n <a href=\"/wiki/Rongelap_Atoll\" title=\"Rongelap Atoll\">\n Rongelap\n </a>\n and\n <a href=\"/wiki/Rongerik_Atoll\" title=\"Rongerik Atoll\">\n Rongerik\n </a>\n atolls, which were soon evacuated.\n <sup class=\"noprint Inline-Template Template-Fact\" style=\"white-space:nowrap;\">\n [\n <i>\n <a href=\"/wiki/Wikipedia:Citation_needed\" title=\"Wikipedia:Citation needed\">\n <span title=\"This claim needs references to reliable sources. (September 2015)\">\n citation needed\n </span>\n </a>\n </i>\n ]\n </sup>\n Many of the Marshall Islands natives have since suffered from\n <a class=\"mw-redirect\" href=\"/wiki/Congenital_disorder\" title=\"Congenital disorder\">\n birth defects\n </a>\n and have received some compensation from the\n <a href=\"/wiki/Federal_government_of_the_United_States\" title=\"Federal government of the United States\">\n federal government of the United States\n </a>\n .\n <sup class=\"noprint Inline-Template Template-Fact\" style=\"white-space:nowrap;\">\n [\n <i>\n <a href=\"/wiki/Wikipedia:Citation_needed\" title=\"Wikipedia:Citation needed\">\n <span title=\"This claim needs references to reliable sources. (September 2015)\">\n citation needed\n </span>\n </a>\n </i>\n ]\n </sup>\n A\n <a href=\"/wiki/Japan\" title=\"Japan\">\n Japanese\n </a>\n fishing boat, the\n <i>\n <a href=\"/wiki/Daigo_Fukury%C5%AB_Maru\" title=\"Daigo Fukuryū Maru\">\n Daigo Fukuryū Maru\n </a>\n </i>\n , also came into contact with the fallout, which caused many of the crew to grow ill; one eventually died. The crew's exposure was referenced in the film\n <i>\n Godzilla\n </i>\n as a criticism of American nuclear tests in the Pacific.\n <sup class=\"noprint Inline-Template Template-Fact\" style=\"white-space:nowrap;\">\n [\n <i>\n <a href=\"/wiki/Wikipedia:Citation_needed\" title=\"Wikipedia:Citation needed\">\n <span title=\"This claim needs references to reliable sources. (September 2015)\">\n citation needed\n </span>\n </a>\n </i>\n ]\n </sup>\n </li>\n <li>\n The\n <i>\n <a class=\"mw-redirect\" href=\"/wiki/Operation_Plumbob\" title=\"Operation Plumbob\">\n Operation Plumbob\n </a>\n </i>\n series of May - October 1957 is considered the biggest, longest, and most controversial test series that occurred within the continental United States. 
Rainier Mesa, Frenchman Flat, and Yucca Flat were all used for the 29 different atmospheric explosions.\n <sup class=\"reference\" id=\"cite_ref-26\">\n <a href=\"#cite_note-26\">\n [10]\n </a>\n </sup>\n </li>\n <li>\n Shot\n <i>\n Argus I\n </i>\n of\n <i>\n <a href=\"/wiki/Operation_Argus\" title=\"Operation Argus\">\n Operation Argus\n </a>\n </i>\n , on 27 August 1958, was the first detonation of a nuclear weapon in\n <a href=\"/wiki/Outer_space\" title=\"Outer space\">\n outer space\n </a>\n when a 1.7-kiloton warhead was detonated at 200 kilometers altitude over the South\n <a href=\"/wiki/Atlantic_Ocean\" title=\"Atlantic Ocean\">\n Atlantic Ocean\n </a>\n during a series of\n <a href=\"/wiki/High-altitude_nuclear_explosion\" title=\"High-altitude nuclear explosion\">\n high-altitude nuclear explosions\n </a>\n .\n </li>\n <li>\n Shot\n <i>\n Frigate Bird\n </i>\n of\n <i>\n <a class=\"mw-redirect\" href=\"/wiki/Operation_Dominic_I_and_II\" title=\"Operation Dominic I and II\">\n Operation Dominic\n </a>\n </i>\n on 6 May 1962, was the only U.S. test of an operational\n <a href=\"/wiki/Ballistic_missile\" title=\"Ballistic missile\">\n ballistic missile\n </a>\n with a live nuclear warhead (yield of 600 kilotons), at\n <a href=\"/wiki/Johnston_Atoll\" title=\"Johnston Atoll\">\n Johnston Atoll\n </a>\n in the Pacific. In general, missile systems were tested without live warheads and warheads were tested separately for safety concerns. In the early 1960s there were mounting questions about how the systems would behave under combat conditions (when they were \"mated\", in military parlance), and this test was meant to dispel these concerns. However, the warhead had to be somewhat modified before its use, and the missile was only a\n <a href=\"/wiki/Submarine-launched_ballistic_missile\" title=\"Submarine-launched ballistic missile\">\n SLBM\n </a>\n (and not an\n <a href=\"/wiki/Intercontinental_ballistic_missile\" title=\"Intercontinental ballistic missile\">\n ICBM\n </a>\n ), so by itself, it did not satisfy all concerns.\n <sup class=\"reference\" id=\"cite_ref-27\">\n <a href=\"#cite_note-27\">\n [11]\n </a>\n </sup>\n </li>\n <li>\n Shot\n <i>\n <a href=\"/wiki/Sedan_(nuclear_test)\" title=\"Sedan (nuclear test)\">\n Sedan\n </a>\n </i>\n of\n <i>\n <a href=\"/wiki/Operation_Storax\" title=\"Operation Storax\">\n Operation Storax\n </a>\n </i>\n on 6 July 1962 (yield of 104 kilotons), was an attempt at showing the feasibility of using nuclear weapons for civilian, peaceful purposes as part of\n <a class=\"mw-redirect\" href=\"/wiki/Operation_Plowshare\" title=\"Operation Plowshare\">\n Operation Plowshare\n </a>\n . In this instance, a 1280-feet-in-diameter and 320-feet-deep\n <a href=\"/wiki/Explosion_crater\" title=\"Explosion crater\">\n explosion crater\n </a>\n , morphologically similar to an\n <a href=\"/wiki/Impact_crater\" title=\"Impact crater\">\n impact crater\n </a>\n , was created at the Nevada Test Site.\n </li>\n <li>\n Shot\n <i>\n Divider\n </i>\n of\n <a href=\"/wiki/Operation_Julin\" title=\"Operation Julin\">\n Operation Julin\n </a>\n on 23 September 1992, at the Nevada Test Site, was the last U.S. nuclear test. 
Described as a \"test to ensure safety of deterrent forces\", the series was interrupted by the beginning of negotiations over the\n <a href=\"/wiki/Comprehensive_Nuclear-Test-Ban_Treaty\" title=\"Comprehensive Nuclear-Test-Ban Treaty\">\n Comprehensive Nuclear-Test-Ban Treaty\n </a>\n .\n <sup class=\"reference\" id=\"cite_ref-28\">\n <a href=\"#cite_note-28\">\n [12]\n </a>\n </sup>\n </li>\n </ul>\n <h3>\n <span class=\"mw-headline\" id=\"Soviet_Union\">\n Soviet Union\n </span>\n <span class=\"mw-editsection\">\n <span class=\"mw-editsection-bracket\">\n [\n </span>\n <a href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=edit&amp;section=4\" title=\"Edit section: Soviet Union\">\n edit\n </a>\n <span class=\"mw-editsection-bracket\">\n ]\n </span>\n </span>\n </h3>\n <div class=\"thumb tright\">\n <div class=\"thumbinner\" style=\"width:302px;\">\n <a class=\"image\" href=\"/wiki/File:Wfm_sts_overview.png\">\n <img alt=\"\" class=\"thumbimage\" data-file-height=\"504\" data-file-width=\"1000\" decoding=\"async\" height=\"151\" src=\"//upload.wikimedia.org/wikipedia/commons/thumb/c/cb/Wfm_sts_overview.png/300px-Wfm_sts_overview.png\" srcset=\"//upload.wikimedia.org/wikipedia/commons/thumb/c/cb/Wfm_sts_overview.png/450px-Wfm_sts_overview.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/c/cb/Wfm_sts_overview.png/600px-Wfm_sts_overview.png 2x\" width=\"300\"/>\n </a>\n <div class=\"thumbcaption\">\n <div class=\"magnify\">\n <a class=\"internal\" href=\"/wiki/File:Wfm_sts_overview.png\" title=\"Enlarge\">\n </a>\n </div>\n The 18,000 km\n <sup>\n 2\n </sup>\n expanse of the Semipalatinsk Test Site (indicated in red), attached to\n <a href=\"/wiki/Kurchatov,_Kazakhstan\" title=\"Kurchatov, Kazakhstan\">\n Kurchatov\n </a>\n (along the\n <a class=\"mw-redirect\" href=\"/wiki/Irtysh_river\" title=\"Irtysh river\">\n Irtysh river\n </a>\n ), and near\n <a href=\"/wiki/Semey\" title=\"Semey\">\n Semey\n </a>\n , as well as\n <a class=\"mw-redirect\" href=\"/wiki/Karagandy\" title=\"Karagandy\">\n Karagandy\n </a>\n , and\n <a class=\"mw-redirect\" href=\"/wiki/Astana\" title=\"Astana\">\n Astana\n </a>\n . 
*Main article: Soviet Union's nuclear testing series. See also: Soviet atomic bomb project*

After the fall of the USSR, the American government (as a member of the international consortium "International Science and Technology Center") hired a number of top scientists in Sarov (also known as Arzamas-16, the Soviet equivalent of Los Alamos and thus sometimes called "Los Arzamas") to draft a number of documents about the history of the Soviet atomic program.[14] One of those documents was the definitive list of Soviet nuclear tests.[6] Most of the tests have no code names, unlike the American tests, so they are known by their test numbers from this document. Some list compilers have detected discrepancies in that list: one device was abandoned in its cove in a tunnel in Semipalatinsk when the Soviets abandoned Kazakhstan,[15] and one list[16] includes 13 other tests which apparently failed to provide any yield. The source for the latter was the well-respected *Russian Strategic Nuclear Forces*,[17] which confirms 11 of the 13; those 11 are in the Wikipedia lists.

The Soviet Union conducted 715 nuclear tests (by the official count)[18] between 1949 and 1990, including 219 atmospheric, underwater, and space tests. Most of them took place at the Semipalatinsk Test Site in Kazakhstan and the Northern Test Site at Novaya Zemlya. Additional industrial tests were conducted at various locations in Russia and Kazakhstan, while a small number of tests were conducted in Ukraine, Uzbekistan, and Turkmenistan.

In addition, a large-scale military exercise was conducted by the Soviet army to explore the possibility of defensive and offensive warfare operations on a nuclear battlefield. The exercise, code-named "Snezhok" (Snowball), involved the detonation of a nuclear bomb twice as powerful as the one used at Nagasaki, with approximately 45,000 soldiers moving through the epicenter immediately after the blast.[19] The exercise was conducted on September 14, 1954, under the command of Marshal Georgy Zhukov, to the north of Totskoye village in Orenburg Oblast, Russia.[citation needed]

Some significant Soviet tests include:

- *Operation First Lightning / RDS-1* (known as Joe 1 in the West), August 29, 1949: the first Soviet nuclear test.
- *RDS-6s* (known as Joe 4 in the West), August 12, 1953: the first Soviet thermonuclear test, using a sloyka ("layer cake") design. The design proved to be unscalable to megaton yields, but it was air-deployable.
- *RDS-37*, November 22, 1955: the first Soviet multi-megaton, "true" hydrogen bomb test, using Andrei Sakharov's "third idea", essentially a re-invention of the Teller-Ulam design.
- *Tsar Bomba*, October 30, 1961: the largest nuclear weapon ever detonated, with a design yield of 100 Mt, de-rated to 50 Mt for the test drop.
- *Chagan*, January 15, 1965: a large cratering experiment, part of the Nuclear Explosions for the National Economy program, which created an artificial lake.[citation needed]

The last Soviet test took place on October 24, 1990. After the dissolution of the USSR in 1992, Russia inherited the USSR's nuclear stockpile, while Kazakhstan inherited the Semipalatinsk nuclear test area, as well as the Baikonur Cosmodrome, the Sary Shagan missile/radar test area and three ballistic missile fields. Semipalatinsk included at least the one unexploded device, later blown up with conventional explosives by a combined US/Kazakh team. No testing has occurred in the former territory of the USSR since its dissolution.[citation needed]
### United Kingdom

*Main article: United Kingdom's nuclear testing series*

The United Kingdom has conducted 45 tests: 12 in Australian territory, comprising 3 in the Montebello Islands of Western Australia and 9 in mainland South Australia (7 at Maralinga and 2 at Emu Field); 9 in the Line Islands of the central Pacific (3 at Malden Island and 6 at Kiritimati/Christmas Island); and 24 in the United States as part of joint test series. Often excluded from British totals are the 31 safety tests of Operation Vixen at Maralinga. British test series include:

- *Operation Hurricane*, October 3, 1952 (first British atomic bomb)
- *Operation Totem*, 1953
- *Operation Mosaic*, 1956
- *Operation Buffalo*, 1956
- *Operation Antler*, 1957
- *Operation Grapple*, 1957–1958 (included the first British hydrogen bomb, *Grapple X/Round C*)

The last test was *Julin Bristol*, November 26, 1991, in a vertical shaft.

Atmospheric trials involving nuclear material but only conventional explosions:[20]

- *Operation Kittens*, 1953–1961 (initiator tests using conventional explosive)
- *Operation Rats*, 1956–1960 (conventional explosions to study dispersal of uranium)
- *Operation Tims*, 1955–1963 (conventional explosions for tamper and plutonium compression trials)
- *Operation Vixen*, 1959–1963 (effects of accidental fire or explosion on nuclear weapons)

### France

*Main articles: France's nuclear testing series and France and nuclear weapons*

France conducted 210 nuclear tests between February 13, 1960 and January 27, 1996.[21]
France conducted 210 nuclear tests between February 13, 1960 and January 27, 1996. [21] Four were tested at Reggane, French Algeria, 13 at In Ekker, Algeria, and the rest at Moruroa and Fangataufa Atolls in French Polynesia. Often skipped in lists are the 5 safety tests at Adrar Tikertine in Algeria. [8]

- Operation Gerboise bleue, February 13, 1960 (first atomic bomb) and three more: Reggane, Algeria; in the atmosphere; the final test is reputed to have been intended more to prevent the weapon from falling into the hands of generals rebelling against French colonial rule than for testing purposes. [22]
- Operation Agathe, November 7, 1961 and 12 more: In Ekker, Algeria; underground
- Operation Aldébaran, July 2, 1966 and 45 more: Moruroa and Fangataufa; in the atmosphere
  - Canopus, first hydrogen bomb: August 28, 1968 (Fangataufa)
- Operation Achille, June 5, 1975 and 146 more: Moruroa and Fangataufa; underground
  - Operation Xouthos, last test: January 27, 1996 (Fangataufa)

China

Main article: List of Chinese nuclear tests

The foremost list of Chinese tests, compiled by the Federation of American Scientists, [23] skips over two Chinese tests listed by others.
The People's Republic of China conducted 45 tests (23 atmospheric and 22 underground), all at the Lop Nur Nuclear Weapons Test Base in Malan, Xinjiang.

- 596, first test – October 16, 1964
- Film of the 1966 tests is now available (at time 09:00), [24] along with another test later in the same film.
- Test No. 6, first hydrogen bomb test – June 17, 1967
- CHIC-16, 200 kt–1 Mt atmospheric test – June 17, 1974 [25]
- #21, largest hydrogen bomb tested by China (4 megatons) – November 17, 1976
- #29, last atmospheric test – October 16, 1980; this was also the last atmospheric nuclear test by any country [26]
- #45, last test – July 29, 1996, underground [27]

India

Main article: List of Indian nuclear tests

India announced it had conducted a test of a single device in 1974 near Pakistan's eastern border under the codename Operation Smiling Buddha. After 24 years, India publicly announced five further nuclear tests on May 11 and May 13, 1998. The official number of Indian nuclear tests is six, conducted under two different code names and at different times.
- May 18, 1974: Operation Smiling Buddha (type: implosion, plutonium, underground). One underground test in a horizontal shaft around 107 m long under the long-established Indian Army Pokhran Test Range (IA-PTR) in the Thar Desert, near Pakistan's eastern border. The India Meteorological Department and the Atomic Energy Commission of India announced the yield of the weapon as 12 kt. Other Western sources claimed the yield to be around 2–12 kt. The claim was dismissed by the Bulletin of the Atomic Scientists, and the yield was later reported to be 8 kt. [28]
- May 11, 1998: Operation Shakti (type: implosion, 3 uranium and 2 plutonium devices, all underground). The Atomic Energy Commission of India (AEC) and the Defence Research and Development Organisation (DRDO) simultaneously conducted a test of three nuclear devices at the Indian Army Pokhran Test Range (IA-PTR) on May 11, 1998. Two days later, on May 13, the AEC and DRDO carried out a test of two further nuclear devices, detonated simultaneously. During this operation, the AEC claimed to have tested a three-stage thermonuclear device (Teller–Ulam design), but the yield of the tests was significantly lower than expected from thermonuclear devices. The yields remain questioned by Western and Indian scholars, with estimates of about 45 kt, described as a scaled-down version of a 200 kt design.

Pakistan

Main article: Pakistan's nuclear testing series
Pakistan conducted 6 official tests, under 2 different code names, in the final week of May 1998. From 1983 to 1994, around 24 nuclear cold tests were carried out by Pakistan; these remained unannounced and classified until 2000. In May 1998, Pakistan responded publicly by testing 6 nuclear devices. [29]

- March 11, 1983: Kirana-I (type: implosion, non-fissioned (plutonium), underground). The 24 underground cold tests of nuclear devices were performed near the Sargodha Air Force Base. [30]
- May 28, 1998: Chagai-I (type: implosion, HEU, underground). One underground horizontal-shaft tunnel test (inside a granite mountain) of boosted fission devices at Koh Kambaran in the Ras Koh Hills in Chagai District of Balochistan Province. [29][31] The announced yield of the five devices was a total of 40–45 kilotonnes, with the largest having a yield of approximately 30–45 kilotonnes. An independent assessment, however, put the test yield at no more than 12 kt and the maximum yield of a single device at only 9 kt, as opposed to the 35 kt claimed by Pakistani authorities. [32] According to The Bulletin of the Atomic Scientists, the maximum yield was only 2–10 kt as opposed to the claim of 35 kt, and the total yield of all tests was no more than 8–15 kt. [33]
- May 30, 1998: Chagai-II (type: implosion, plutonium device, underground). One underground vertical-shaft tunnel test of a miniaturized fission device with an announced yield of approximately 18–20 kilotonnes, carried out in the Kharan Desert in Kharan District, Balochistan Province. [31] An independent assessment put the figure for this test at only 4–6 kt, [32] and some Western seismologists put it at a mere 2 kt. [33]

North Korea

Main article: North Korea's nuclear testing series

On October 9, 2006, North Korea announced it had conducted a nuclear test in North Hamgyong Province on the northeast coast at 10:36 AM (11:30 AEST). A 3.58 magnitude earthquake was reported in South Korea, and a 4.2 magnitude tremor was detected 240 miles north of P'yongyang. The low estimates of the test's yield, potentially less than a kiloton, led to speculation as to whether it was a fizzle (unsuccessful test) or not a genuine nuclear test at all.
On May 25, 2009, North Korea announced it had conducted a second nuclear test. A tremor, with magnitude reports ranging from 4.7 to 5.3, was detected at Mantapsan, 233 miles northeast of P'yongyang and within a few kilometers of the 2006 test location. While yield estimates remain uncertain, with reports ranging from 3 to 20 kilotons, the stronger tremor indicates a significantly larger yield than the 2006 test.

On 12 February 2013, North Korean state media announced it had conducted an underground nuclear test, its third in seven years. A tremor that exhibited a nuclear bomb signature, with an initial magnitude of 4.9 (later revised to 5.1), was detected by both the Comprehensive Nuclear-Test-Ban Treaty Organization Preparatory Commission (CTBTO) [34] and the United States Geological Survey (USGS). [35] The tremor occurred at 11:57 local time (02:57 UTC), and the USGS said the hypocenter of the event was only one kilometer deep. South Korea's defense ministry said the reading indicated a blast of six to seven kilotons. [36][37][38][39] However, some experts estimate the yield to be up to 15 kt, since the test site's geology is not well understood. [40] In comparison, the atomic (fission) bombs dropped by the Enola Gay on Hiroshima (Little Boy, a "gun-type" atomic bomb) and on Nagasaki by Bockscar (Fat Man, an "implosion-type" atomic bomb) had blast yields equivalent to 15 and 21 kilotons of TNT, respectively.
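The competing yield figures quoted above are all inferred from seismic body-wave magnitude, and that conversion depends heavily on assumptions about the test site's geology. A minimal sketch of the inference is shown below, using the generic empirical relation mb = a + b·log10(Y); the hard-rock constants a = 4.45 and b = 0.75 and the function name are assumptions chosen here purely for illustration and do not come from this article, which is exactly why published estimates for the same event range from roughly 6–7 kt to 15 kt.

```python
def yield_from_magnitude(mb: float, a: float = 4.45, b: float = 0.75) -> float:
    """Estimate explosive yield Y (kilotons) from seismic body-wave magnitude mb.

    Inverts the generic empirical relation mb = a + b * log10(Y).
    The default constants are assumed hard-rock values used only for
    illustration; the appropriate constants for a specific test site
    are uncertain, which is why yield estimates for the same magnitude
    differ widely.
    """
    return 10 ** ((mb - a) / b)


if __name__ == "__main__":
    for mb in (4.9, 5.1):
        print(f"mb = {mb}: roughly {yield_from_magnitude(mb):.1f} kt")
    # With these assumed constants, mb = 5.1 corresponds to roughly 7 kt,
    # in line with the 6-7 kt figure quoted above; different geological
    # assumptions push the same magnitude toward the higher estimates.
```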
On January 5, 2015, North Korean TV news anchors announced that they had successfully tested a "miniaturized atomic bomb", about 5 miles from the Punggye-ri nuclear site where a test was conducted in 2013.

On January 6, 2016, North Korea announced that it had conducted a successful test of a hydrogen bomb. The seismic event, at a magnitude of 5.1, occurred 19 kilometers (12 miles) east-northeast of Sungjibaegam. [41]

On September 9, 2016, North Korea announced another successful nuclear weapon test at the Punggye-ri Test Site. This is the first warhead the state claims to be able to mount on a missile or long-range rocket of the kind previously tested in June 2016. [42] Estimates of the explosive yield range from 20 to 30 kt; the test coincided with a 5.3 magnitude earthquake in the region. [43]

On September 3, 2017, North Korea successfully detonated its first weapon self-designated as a hydrogen bomb. [44] Initial yield estimates place it at 100 kt. Reports indicate that the test blast caused a magnitude 6.3 earthquake [45] and possibly resulted in a cave-in at the test site. [46]

Alleged tests
There have been a number of significant alleged, disputed, or unacknowledged accounts of countries testing nuclear explosives. Their status is either not certain or entirely disputed by most mainstream experts.

China

On April 15, 2020, U.S. officials said China may have conducted low-yield nuclear weapon tests at its Lop Nur test site. [47]

Israel

Israel was alleged by a Bundeswehr report to have made an underground test in 1963. [48][full citation needed] Historian Taysir Nashif reported a zero-yield implosion test in 1966. [49][full citation needed] Scientists from Israel participated in the earliest French nuclear tests before de Gaulle cut off further cooperation. [50]

North Korea

Main article: Ryanggang explosion
On September 9, 2004, South Korean media reported that there had been a large explosion at the Chinese/North Korean border. This explosion left a crater visible by satellite and precipitated a large (2-mile diameter) mushroom cloud. The United States and South Korea quickly downplayed this, explaining it away as a forest fire that had nothing to do with the DPRK's nuclear weapons program.

Main article: List of nuclear weapons tests of North Korea

North Korea has conducted six nuclear tests, in 2006, 2009, 2013, twice in 2016, and 2017. The 3 September 2017 test, like the January 2016 test, is claimed to be a hydrogen bomb (but may only be a boosted fission weapon rather than an actual staged Teller–Ulam thermonuclear weapon).

Pakistan

Because Pakistan's nuclear program was conducted under extreme secrecy, it raised concerns in the Soviet Union and India, which suspected that after the 1974 test it was inevitable that Pakistan would further develop its program. The pro-Soviet newspaper The Patriot reported in 1983 that "Pakistan has exploded a nuclear device in the range of 20 to 50 kilotons". [51] This was widely dismissed by Western diplomats, who pointed out that The Patriot had previously engaged in spreading disinformation on several occasions. In 1983, India and the Soviet Union both investigated the alleged secret tests but, due to a lack of any scientific data, the claims were widely dismissed. [52]
In their book The Nuclear Express, authors Thomas Reed and Danny Stillman also allege that the People's Republic of China allowed Pakistan to detonate a nuclear weapon at its Lop Nur test site in 1990, eight years before Pakistan held its first official weapons test. [53]

However, senior scientist Abdul Qadeer Khan strongly rejected the claim in May 1998. [54] According to Khan, because of the sensitivity involved, no country allows another country to use its test site to detonate devices. [54] Such an agreement has existed only between the United States and the United Kingdom since the 1958 US–UK Mutual Defence Agreement, which among other things allows Britain access to the American Nevada National Security Site for testing. [55] Dr. Samar Mubarakmand, another senior scientist, also confirmed Dr. Khan's statement and acknowledged that cold tests were carried out, under the codename Kirana-I, at a test site built by the Corps of Engineers under the guidance of the PAEC. [56] Additionally, the UK conducted nuclear tests in Australia in the 1950s.

Russia
The Yekaterinburg Fireball of November 14, 2014, is alleged by some [57] to have been a nuclear test in space, which would not have been detected by the CTBTO because the CTBTO has no autonomous means of monitoring nuclear tests in space (i.e., satellites) and thus relies on information that member states agree to provide. The fireball happened a few days before a conference in Yekaterinburg on the theme of air/missile defense. [58] The claim, however, is disputed, as the Russian Ministry of Emergency Situations stated that it was an "on-ground" explosion. [59] The Siberian Times, a local newspaper, noted that "the light was not accompanied by any sound". [59]

Vela Incident

The Vela Incident was an unidentified double flash of light detected by a partly functional, decommissioned American Vela satellite on September 22, 1979, in the Indian Ocean (near the Prince Edward Islands off Antarctica); the other sensors that could have recorded proof of a nuclear test were not functioning on this satellite. It is possible that the flash was produced by a nuclear device.
If the flash was indeed a nuclear test, a popular theory, favored in the diary of then-sitting American President Jimmy Carter, is that it resulted from a covert joint South African and Israeli nuclear test of an advanced, highly miniaturized Israeli artillery-shell-sized device that was unintentionally detectable by the satellite's optical sensor due to a break in the cloud cover of a typhoon. [60] Analysis of the South African nuclear program later showed that only six crude, heavy designs, each weighing well over 340 kg, had been built when South Africa finally declared and disarmed its nuclear arsenal. [61] The 1986 Vanunu leaks, analyzed by nuclear weapon miniaturization pioneer Ted Taylor, revealed very sophisticated miniaturized Israeli designs among the evidence presented. [62] Also suspected were France testing a neutron bomb near its Kerguelen Islands territory, [63] the Soviet Union making a prohibited atmospheric test, [64][65] and India or Pakistan conducting initial proof-of-concept tests of early weaponized nuclear bombs. [66]

Tests of live warheads on rockets
[Image: The Frigate Bird explosion seen through the periscope of USS Carbonero (SS-337).]

Missiles and nuclear warheads have usually been tested separately because testing them together is considered highly dangerous; they are certainly the most extreme type of live fire exercise. The only US live test of an operational missile was the following:

- Frigate Bird: on May 6, 1962, a UGM-27 Polaris A-2 missile with a live 600 kt W47 warhead was launched from the USS Ethan Allen; it flew 1,800 km (1,100 mi), re-entered the atmosphere, and detonated at an altitude of 3.4 km (2.1 mi) over the South Pacific.

Other live tests with the nuclear explosive delivered by rocket by the USA include:

- The July 19, 1957 test Plumbbob/John fired a small-yield nuclear weapon on an AIR-2 Genie air-to-air rocket from a jet fighter.
- On August 1, 1958, a Redstone rocket launched nuclear test Teak, which detonated at an altitude of 77.8 km (48.3 mi). On August 12, 1958, Redstone #CC51 launched nuclear test Orange to a detonation altitude of 43 km (27 mi). Both were part of Operation Hardtack I and had a yield of 3.75 Mt.
- Operation Argus: three tests above the South Atlantic Ocean, August 27, August 30, and September 6, 1958.
- On July 9, 1962, a Thor missile launched a Mk 4 reentry vehicle containing a W49 thermonuclear warhead to an altitude of 248 miles (400 km). The warhead detonated with a yield of 1.45 Mt. This was the Starfish Prime event of nuclear test operation Dominic-Fishbowl.
- Also in the Dominic-Fishbowl series in 1962: Checkmate, Bluegill, Kingfish and Tightrope.

The USA also conducted two live weapons tests involving nuclear artillery:

- Test of the M65 atomic cannon using the W9 artillery shell during the Upshot-Knothole Grable test on May 25, 1953.
- Test of the Davy Crockett recoilless gun during the Little Feller I test on July 17, 1962.

The USA also conducted one live weapons test involving a missile-launched nuclear depth charge:

- Test of the RUR-5 ASROC during the Dominic-Swordfish test on May 11, 1962.

The Soviet Union tested nuclear explosives on rockets as part of its development of a localized anti-ballistic missile system in the 1960s.
Some of the Soviet nuclear tests with warheads delivered by rocket include:

- Baikal (USSR Test #25, February 2, 1956, at Aralsk) – one test, with an R-5M rocket launched from Kapustin Yar.
- ZUR-215 (#34, January 19, 1957, at Kapustin Yar) – one test, with a rocket launched from Kapustin Yar.
- (#82 and #83, early November 1958) – two tests, conducted after the declared cease-fire for test moratorium negotiations, from Kapustin Yar.
- Groza (#88, September 6, 1961, at Kapustin Yar) – one test, with a rocket launched from Kapustin Yar.
- Grom (#115, October 6, 1961, at Kapustin Yar) – one test, with a rocket launched from Kapustin Yar.
- Volga (#106 and #108, September 20–22, 1961, at Novaya Zemlya) – two tests, with R-11M rockets launched from Rogachevo.
- Roza (#94 and #99, September 12–16, 1961, at Novaya Zemlya) – two tests, with R-12 rockets launched from Vorkuta.
- Raduga (#121, October 20, 1961, at Novaya Zemlya) – one test, with an R-13 rocket launch.
- Tyulpan (#164, September 8, 1962, at Novaya Zemlya) – one test, with an R-14 rocket launched from Chita.
- Operation K (1961 and 1962, at Sary-Shagan) – five tests, at high altitude, with rockets launched from Kapustin Yar.

The Soviet Union also conducted three live nuclear torpedo tests:

- Test of the T-5 torpedo on September 21, 1955, at Novaya Zemlya.
- Test of the T-5 torpedo on October 10, 1957, at Novaya Zemlya.
- Test of the T-5 torpedo on October 23, 1961, at Novaya Zemlya.
The People's Republic of China conducted CHIC-4 with a Dongfeng-2 rocket launch on October 27, 1966. The warhead exploded with a yield of 12 kt.

Most powerful tests

The following list contains all known nuclear tests conducted with a yield of 1.4 Mt TNT equivalent or more.

Date (GMT) | Yield (megatons) | Deployment | Country | Test Site | Name or Number
--- | --- | --- | --- | --- | ---
October 30, 1961 | 50 | parachute air drop | Soviet Union | Novaya Zemlya | Tsar Bomba, Test #130
December 24, 1962 | 24.2 | missile warhead | Soviet Union | Novaya Zemlya | Test #219
August 5, 1962 | 21.1 | air drop | Soviet Union | Novaya Zemlya | Test #147
September 27, 1962 | 20.0 | air drop | Soviet Union | Novaya Zemlya | Test #174
September 25, 1962 | 19.1 | air drop | Soviet Union | Novaya Zemlya | Test #173
March 1, 1954 | 15 | ground | USA | Bikini Atoll | Castle Bravo
May 5, 1954 | 13.5 | barge | USA | Bikini Atoll | Castle Yankee
October 23, 1961 | 12.5 | air drop | Soviet Union | Novaya Zemlya | Test #123
March 26, 1954 | 11.0 | barge | USA | Bikini Atoll | Castle Romeo
October 31, 1952 | 10.4 | ground | USA | Enewetak Atoll | Ivy Mike
August 25, 1962 | 10.0 | air drop | Soviet Union | Novaya Zemlya | Test #158
September 19, 1962 | 10.0 | air drop | Soviet Union | Novaya Zemlya | Test #168
July 11, 1958 | 9.3 | barge | USA | Bikini Atoll | Poplar
June 28, 1958 | 8.9 | barge | USA | Enewetak Atoll | Oak
October 30, 1962 | 8.3 | air drop | USA | Johnston Atoll | Housatonic
October 22, 1962 | 8.2 | air drop | Soviet Union | Novaya Zemlya | Test #183
June 27, 1962 | 7.7 | air drop | USA | Kiritimati | Bighorn
April 25, 1954 | 6.9 | barge | USA | Bikini Atoll | Castle Union
July 20, 1956 | 5.0 | barge | USA | Bikini Atoll | Tewa
October 31, 1961 | 5.0 | air drop | Soviet Union | Novaya Zemlya | Test #131
November 6, 1971 | 4.8 | underground shaft | USA | Amchitka | Cannikin
July 10, 1956 | 4.5 | barge | USA | Bikini Atoll | Navajo
August 27, 1962 | 4.2 | air drop | Soviet Union | Novaya Zemlya | Test #160
October 6, 1961 | 4.0 | air drop | Soviet Union | Novaya Zemlya | Test #114
October 27, 1973 | 4.0 | underground shaft | Soviet Union | Novaya Zemlya | Test #392
November 17, 1976 | 4.0 | air drop | China | Lop Nur | Test (21)
July 11, 1962 | 3.9 | parachuted | USA | Kiritimati | Pamlico
May 20, 1956 | 3.8 | free air drop | USA | Bikini Atoll | Cherokee
August 1, 1958 | 3.8 | high alt rocket | USA | Johnston Atoll | Teak
August 12, 1958 | 3.8 | high alt rocket | USA | Johnston Atoll | Orange
September 12, 1973 | 3.8 | tunnel | Soviet Union | Novaya Zemlya | Test #385 - 1
May 27, 1956 | 3.5 | dry surface | USA | Bikini Atoll | Zuni
October 14, 1970 | 3.4 | air drop | China | Lop Nur | CHIC-11
September 16, 1962 | 3.3 | air drop | Soviet Union | Novaya Zemlya | Test #166
June 17, 1967 | 3.3 | parachuted | China | Lop Nur | CHIC-6
September 15, 1962 | 3.1 | air drop | Soviet Union | Novaya Zemlya | Test #165
December 25, 1962 | 3.1 | air drop | Soviet Union | Novaya Zemlya | Test #220
April 28, 1958 | 3.0 | air drop | UK | Kiritimati | Grapple Y
October 4, 1961 | 3.0 | air drop | Soviet Union | Novaya Zemlya | Test #113
June 10, 1962 | 3.0 | free air drop | USA | Kiritimati | Yeso
December 27, 1968 | 3.0 | air drop | China | Lop Nur | CHIC-8
September 29, 1969 | 3.0 | air drop | China | Lop Nur | CHIC-10
June 27, 1973 | 3.0 | air drop | China | Lop Nur | Test (15)
October 6, 1957 | 2.9 | air drop | Soviet Union | Novaya Zemlya | Test #47
October 18, 1958 | 2.9 | air drop | Soviet Union | Novaya Zemlya | Test #73
October 22, 1958 | 2.8 | air drop | Soviet Union | Novaya Zemlya | Test #78
August 20, 1962 | 2.8 | air drop | Soviet Union | Novaya Zemlya | Test #152
September 10, 1961 | 2.7 | air drop | Soviet Union | Novaya Zemlya | 90 Vozduj
August 24, 1968 | 2.6 | balloon | France | Fangataufa | Canopus
September 27, 1971 | 2.5 | tunnel | Soviet Union | Novaya Zemlya | Test #345 - 1
September 21, 1962 | 2.4 | air drop | Soviet Union | Novaya Zemlya | Test #169
November 2, 1974 | 2.3 | underground shaft | Soviet Union | Novaya Zemlya | Test #411
October 14, 1970 | 2.2 | tunnel | Soviet Union | Novaya Zemlya | Test #327 - 1
July 26, 1958 | 2.0 | barge | USA | Enewetak Atoll | Pine
July 8, 1956 | 1.9 | barge | USA | Enewetak Atoll | Apache
September 8, 1962 | 1.9 | high alt rocket | Soviet Union | Novaya Zemlya | 164 Tyulpan
March 26, 1970 | 1.9 | underground shaft | USA | Nevada | Handley
November 8, 1957 | 1.8 | air drop | UK | Kiritimati | Grapple X
May 13, 1954 | 1.7 | barge | USA | Enewetak Atoll | Nectar
November 22, 1955 | 1.6 | air drop | Soviet Union | Semipalatinsk | 24 Binarnaya
September 24, 1957 | 1.6 | air drop | Soviet Union | Novaya Zemlya | Test #45
August 22, 1962 | 1.6 | air drop | Soviet Union | Novaya Zemlya | Test #154
October 18, 1962 | 1.6 | parachuted | USA | Johnston Atoll | Chama
February 27, 1958 | 1.5 | air drop | Soviet Union | Novaya Zemlya | Test #54
June 14, 1958 | 1.5 | barge | USA | Enewetak Atoll | Walnut
October 12, 1958 | 1.5 | air drop | Soviet Union | Novaya Zemlya | Test #71
October 15, 1958 | 1.5 | air drop | Soviet Union | Novaya Zemlya | Test #72
</td>\n </tr>\n <tr>\n <td>\n September 20, 1961\n </td>\n <td>\n 1.5\n </td>\n <td>\n high alt rocket\n </td>\n <td>\n Soviet Union\n </td>\n <td>\n Novaya Zemlya\n </td>\n <td>\n 106 Volga1\n </td>\n </tr>\n <tr>\n <td>\n October 20, 1961\n </td>\n <td>\n 1.5\n </td>\n <td>\n high alt rocket\n </td>\n <td>\n Soviet Union\n </td>\n <td>\n Novaya Zemlya\n </td>\n <td>\n <a href=\"/wiki/Raduga_(nuclear_test)\" title=\"Raduga (nuclear test)\">\n 121 Raduga\n </a>\n </td>\n </tr>\n <tr>\n <td>\n November 4, 1961\n </td>\n <td>\n 1.5\n </td>\n <td>\n air drop\n </td>\n <td>\n Soviet Union\n </td>\n <td>\n Novaya Zemlya\n </td>\n <td>\n Test #140\n </td>\n </tr>\n <tr>\n <td>\n May 11, 1958\n </td>\n <td>\n 1.4\n </td>\n <td>\n barge\n </td>\n <td>\n USA\n </td>\n <td>\n Bikini Atoll\n </td>\n <td>\n Fir\n </td>\n </tr>\n <tr>\n <td>\n May 12, 1958\n </td>\n <td>\n 1.4\n </td>\n <td>\n dry surface\n </td>\n <td>\n USA\n </td>\n <td>\n Enewetak Atoll\n </td>\n <td>\n Koa\n </td>\n </tr>\n <tr>\n <td>\n July 9, 1962\n </td>\n <td>\n 1.4\n </td>\n <td>\n space rocket\n </td>\n <td>\n USA\n </td>\n <td>\n Johnston Atoll\n </td>\n <td>\n <a href=\"/wiki/Starfish_Prime\" title=\"Starfish Prime\">\n Starfish Prime\n </a>\n </td>\n </tr>\n <tr>\n <td>\n September 18, 1962\n </td>\n <td>\n 1.4\n </td>\n <td>\n air drop\n </td>\n <td>\n Soviet Union\n </td>\n <td>\n Novaya Zemlya\n </td>\n <td>\n Test #167\n </td>\n </tr>\n </tbody>\n </table>\n <h2>\n <span class=\"mw-headline\" id=\"See_also\">\n See also\n </span>\n <span class=\"mw-editsection\">\n <span class=\"mw-editsection-bracket\">\n [\n </span>\n <a href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=edit&amp;section=20\" title=\"Edit section: See also\">\n edit\n </a>\n <span class=\"mw-editsection-bracket\">\n ]\n </span>\n </span>\n </h2>\n <style data-mw-deduplicate=\"TemplateStyles:r998391716\">\n .mw-parser-output .div-col{margin-top:0.3em;column-width:30em}.mw-parser-output .div-col-small{font-size:90%}.mw-parser-output .div-col-rules{column-rule:1px solid #aaa}.mw-parser-output .div-col dl,.mw-parser-output .div-col ol,.mw-parser-output .div-col ul{margin-top:0}.mw-parser-output .div-col li,.mw-parser-output .div-col dd{page-break-inside:avoid;break-inside:avoid-column}\n </style>\n <div class=\"div-col\" style=\"column-width: 30em;\">\n <ul>\n <li>\n <a href=\"/wiki/Andrei_Sakharov\" title=\"Andrei Sakharov\">\n Andrei Sakharov\n </a>\n </li>\n <li>\n <a href=\"/wiki/Edward_Teller\" title=\"Edward Teller\">\n Edward Teller\n </a>\n </li>\n <li>\n <a href=\"/wiki/High_explosive_nuclear_effects_testing\" title=\"High explosive nuclear effects testing\">\n High explosive nuclear effects testing\n </a>\n </li>\n <li>\n <a href=\"/wiki/Historical_nuclear_weapons_stockpiles_and_nuclear_tests_by_country\" title=\"Historical nuclear weapons stockpiles and nuclear tests by country\">\n Historical nuclear weapons stockpiles and nuclear tests by country\n </a>\n </li>\n <li>\n <a href=\"/wiki/International_Day_against_Nuclear_Tests\" title=\"International Day against Nuclear Tests\">\n International Day against Nuclear Tests\n </a>\n </li>\n <li>\n <a href=\"/wiki/J._Robert_Oppenheimer\" title=\"J. Robert Oppenheimer\">\n J. 
Robert Oppenheimer\n </a>\n </li>\n <li>\n <a href=\"/wiki/Largest_artificial_non-nuclear_explosions\" title=\"Largest artificial non-nuclear explosions\">\n Largest artificial non-nuclear explosions\n </a>\n </li>\n <li>\n <a class=\"mw-redirect\" href=\"/wiki/List_of_nuclear_weapon_test_locations\" title=\"List of nuclear weapon test locations\">\n List of nuclear weapon test locations\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_weapons_tests_of_China\" title=\"List of nuclear weapons tests of China\">\n List of nuclear weapons tests of China\n </a>\n </li>\n <li>\n <a href=\"/wiki/Lists_of_nuclear_disasters_and_radioactive_incidents\" title=\"Lists of nuclear disasters and radioactive incidents\">\n Lists of nuclear disasters and radioactive incidents\n </a>\n </li>\n <li>\n <a href=\"/wiki/Novaya_Zemlya\" title=\"Novaya Zemlya\">\n Novaya Zemlya\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_fallout\" title=\"Nuclear fallout\">\n Nuclear fallout\n </a>\n </li>\n <li>\n <a class=\"mw-redirect\" href=\"/wiki/Nuclear_Test_Ban\" title=\"Nuclear Test Ban\">\n Nuclear Test Ban\n </a>\n </li>\n <li>\n <a href=\"/wiki/Soviet_atomic_bomb_project\" title=\"Soviet atomic bomb project\">\n Soviet atomic bomb project\n </a>\n </li>\n <li>\n <a href=\"/wiki/Stanislaw_Ulam\" title=\"Stanislaw Ulam\">\n Stanislaw Ulam\n </a>\n </li>\n </ul>\n </div>\n <h2>\n <span class=\"mw-headline\" id=\"References\">\n References\n </span>\n <span class=\"mw-editsection\">\n <span class=\"mw-editsection-bracket\">\n [\n </span>\n <a href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=edit&amp;section=21\" title=\"Edit section: References\">\n edit\n </a>\n <span class=\"mw-editsection-bracket\">\n ]\n </span>\n </span>\n </h2>\n <link href=\"mw-data:TemplateStyles:r1011085734\" rel=\"mw-deduplicated-inline-style\"/>\n <div class=\"reflist\">\n <div class=\"mw-references-wrap mw-references-columns\">\n <ol class=\"references\">\n <li id=\"cite_note-1\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-1\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <style data-mw-deduplicate=\"TemplateStyles:r999302996\">\n .mw-parser-output cite.citation{font-style:inherit}.mw-parser-output .citation q{quotes:\"\\\"\"\"\\\"\"\"'\"\"'\"}.mw-parser-output .id-lock-free a,.mw-parser-output .citation .cs1-lock-free a{background:linear-gradient(transparent,transparent),url(\"//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg\")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited a,.mw-parser-output .id-lock-registration a,.mw-parser-output .citation .cs1-lock-limited a,.mw-parser-output .citation .cs1-lock-registration a{background:linear-gradient(transparent,transparent),url(\"//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg\")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription a,.mw-parser-output .citation .cs1-lock-subscription a{background:linear-gradient(transparent,transparent),url(\"//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg\")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-subscription,.mw-parser-output .cs1-registration{color:#555}.mw-parser-output .cs1-subscription span,.mw-parser-output .cs1-registration span{border-bottom:1px dotted;cursor:help}.mw-parser-output .cs1-ws-icon a{background:linear-gradient(transparent,transparent),url(\"//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg\")right 0.1em center/12px no-repeat}.mw-parser-output 
Retrieved\n <span class=\"nowrap\">\n 16 April\n </span>\n 2020\n </span>\n .\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Reuters&amp;rft.atitle=China+may+have+conducted+low-level+nuclear+test+blasts%2C+U.S.+says&amp;rft.issue=15+April+2020&amp;rft.aulast=Landay&amp;rft.aufirst=Jonathan&amp;rft_id=https%3A%2F%2Fwww.reuters.com%2Farticle%2Fus-usa-china-nuclear%2Fchina-may-have-conducted-low-level-nuclear-test-blasts-us-says-idUSKCN21X386&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-64\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-64\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n June 1976, West Germany army publication 'Wehrtechnik'\n </span>\n </li>\n <li id=\"cite_note-65\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-65\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <i>\n Nuclear Weapons in the Middle East: Dimensions and Responsibilities, by Taysir Nashif\n </i>\n </span>\n </li>\n <li id=\"cite_note-66\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-66\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation web cs1\">\n <a class=\"external text\" href=\"https://fas.org/nuke/guide/israel/nuke/\" rel=\"nofollow\">\n \"Nuclear Weapons - Israel\"\n </a>\n .\n <i>\n fas.org\n </i>\n .\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=fas.org&amp;rft.atitle=Nuclear+Weapons+-+Israel&amp;rft_id=https%3A%2F%2Ffas.org%2Fnuke%2Fguide%2Fisrael%2Fnuke%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-67\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-67\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation web cs1\">\n <a class=\"external text\" href=\"https://web.archive.org/web/20100414084207/http://www.nti.org/e_research/profiles/Pakistan/Nuclear/chronology_1983.html\" rel=\"nofollow\">\n \"NTI: 1983 in Pakistan\"\n </a>\n . Archived from\n <a class=\"external text\" href=\"http://www.nti.org/e_research/profiles/Pakistan/Nuclear/chronology_1983.html\" rel=\"nofollow\">\n the original\n </a>\n on April 14, 2010.\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=NTI%3A+1983+in+Pakistan&amp;rft_id=http%3A%2F%2Fwww.nti.org%2Fe_research%2Fprofiles%2FPakistan%2FNuclear%2Fchronology_1983.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-68\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-68\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n —S.G. 
Roy, \"India Investigates Reported Nuclear Test,\" United Press International, 25 June 1983, International; in Lexis-Nexis Academic Universe, 25 June 1983,\n <a class=\"external free\" href=\"http://web.lexis-nexis.com\" rel=\"nofollow\">\n http://web.lexis-nexis.com\n </a>\n ; \"Pakistan Adamantly Rejects Accusation it Tested Bomb,\" Washington Post, 26 June 1983, First Section, World News, A24; in Lexis-Nexis Academic Universe, 25 June 1983,\n <a class=\"external free\" href=\"http://web.lexis-nexis.com\" rel=\"nofollow\">\n http://web.lexis-nexis.com\n </a>\n .\n </span>\n </li>\n <li id=\"cite_note-HiddenTravelsBroad-69\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-HiddenTravelsBroad_69-0\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n William Broad,\n <a class=\"external text\" href=\"https://www.nytimes.com/2008/12/09/science/09bomb.html\" rel=\"nofollow\">\n \"Hidden Travels of the Atomic Bomb\"\n </a>\n ,\n <i>\n New York Times\n </i>\n (8 December 2008).\n </span>\n </li>\n <li id=\"cite_note-Jang_Media_Group,_Co.-70\">\n <span class=\"mw-cite-backlink\">\n ^\n <a href=\"#cite_ref-Jang_Media_Group,_Co._70-0\">\n <sup>\n <i>\n <b>\n a\n </b>\n </i>\n </sup>\n </a>\n <a href=\"#cite_ref-Jang_Media_Group,_Co._70-1\">\n <sup>\n <i>\n <b>\n b\n </b>\n </i>\n </sup>\n </a>\n </span>\n <span class=\"reference-text\">\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation web cs1\" id=\"CITEREFKhan1998\">\n <a href=\"/wiki/Kamran_Khan_(journalist)\" title=\"Kamran Khan (journalist)\">\n Khan, Kamran\n </a>\n (May 30, 1998).\n <a class=\"external text\" href=\"http://nuclearweaponarchive.org/Pakistan/KhanInterview.html\" rel=\"nofollow\">\n \"Interview with Abdul Qadeer Khan\"\n </a>\n .\n <i>\n Kamran Khan, director of the News Intelligence Unit of \"\n <a href=\"/wiki/The_News_International\" title=\"The News International\">\n The News International\n </a>\n \"\n </i>\n . Jang Media Group, Co\n <span class=\"reference-accessdate\">\n . 
Retrieved\n <span class=\"nowrap\">\n May 30,\n </span>\n 2011\n </span>\n .\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Kamran+Khan%2C+director+of+the+News+Intelligence+Unit+of+%22The+News+International%22&amp;rft.atitle=Interview+with+Abdul+Qadeer+Khan&amp;rft.date=1998-05-30&amp;rft.aulast=Khan&amp;rft.aufirst=Kamran&amp;rft_id=http%3A%2F%2Fnuclearweaponarchive.org%2FPakistan%2FKhanInterview.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-71\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-71\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <a class=\"external free\" href=\"http://www.ford.utexas.edu/library/document/nsdmnssm/nsdm276a.htm\" rel=\"nofollow\">\n http://www.ford.utexas.edu/library/document/nsdmnssm/nsdm276a.htm\n </a>\n </span>\n </li>\n <li id=\"cite_note-72\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-72\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation web cs1\" id=\"CITEREFMir2004\">\n <a href=\"/wiki/Hamid_Mir\" title=\"Hamid Mir\">\n Mir, Hamid\n </a>\n (May 3, 2004).\n <a class=\"external text\" href=\"http://oraclesyndicate.twoday.net/stories/4167731/\" rel=\"nofollow\">\n \"Interview of Dr. Samar Mubarak — Head of Pakistan Missile Program\"\n </a>\n .\n <i>\n Hamid Mir, director of the Political Intelligence Directorate of \"\n <a href=\"/wiki/The_News_International\" title=\"The News International\">\n The News International\n </a>\n \"\n </i>\n . Geo Television Network\n <span class=\"reference-accessdate\">\n . 
Retrieved\n <span class=\"nowrap\">\n May 13,\n </span>\n 2011\n </span>\n .\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Hamid+Mir%2C+director+of+the+Political+Intelligence+Directorate+of+%22The+News+International%22&amp;rft.atitle=Interview+of+Dr.+Samar+Mubarak+%E2%80%94+Head+of+Pakistan+Missile+Program&amp;rft.date=2004-05-03&amp;rft.aulast=Mir&amp;rft.aufirst=Hamid&amp;rft_id=http%3A%2F%2Foraclesyndicate.twoday.net%2Fstories%2F4167731%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-73\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-73\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n The flashlight in Siberia, November 14, 2014, a nuclear test in space, Association Pyrophor, August 16, 2015, available at\n <a class=\"external free\" href=\"https://assopyrophor.org/2015/08/16/the-flash-light-in-siberia-nov-14-2014-a-nuclear-test-in-space-le-flash-en-siberie-du-14112014-un-test-nucleaire-dans-lespace/\" rel=\"nofollow\">\n https://assopyrophor.org/2015/08/16/the-flash-light-in-siberia-nov-14-2014-a-nuclear-test-in-space-le-flash-en-siberie-du-14112014-un-test-nucleaire-dans-lespace/\n </a>\n , last retrieved 09/10/2016\n </span>\n </li>\n <li id=\"cite_note-74\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-74\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n CSTO Getting Serious About Joint Air Defense System, Joshua Kucera, November 20, 2014,\n <a class=\"external free\" href=\"http://www.eurasianet.org/node/71041\" rel=\"nofollow\">\n http://www.eurasianet.org/node/71041\n </a>\n </span>\n </li>\n <li id=\"cite_note-siberiantimes.com-75\">\n <span class=\"mw-cite-backlink\">\n ^\n <a href=\"#cite_ref-siberiantimes.com_75-0\">\n <sup>\n <i>\n <b>\n a\n </b>\n </i>\n </sup>\n </a>\n <a href=\"#cite_ref-siberiantimes.com_75-1\">\n <sup>\n <i>\n <b>\n b\n </b>\n </i>\n </sup>\n </a>\n </span>\n <span class=\"reference-text\">\n New evidence that fire in the sky was caused by military, The Siberian Times, November 23, 2014,\n <a class=\"external free\" href=\"http://siberiantimes.com/other/others/news/n0027-new-evidence-that-fire-in-the-sky-was-caused-by-military/\" rel=\"nofollow\">\n http://siberiantimes.com/other/others/news/n0027-new-evidence-that-fire-in-the-sky-was-caused-by-military/\n </a>\n </span>\n </li>\n <li id=\"cite_note-76\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-76\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n *\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation book cs1\" id=\"CITEREFHersh1991\">\n Hersh, Seymour (1991).\n <i>\n The Samson Option: Israel's Nuclear Arsenal and American Foreign Policy\n </i>\n . 
Random House.\n <a class=\"mw-redirect\" href=\"/wiki/ISBN_(identifier)\" title=\"ISBN (identifier)\">\n ISBN\n </a>\n <a href=\"/wiki/Special:BookSources/978-0-394-57006-8\" title=\"Special:BookSources/978-0-394-57006-8\">\n <bdi>\n 978-0-394-57006-8\n </bdi>\n </a>\n .\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+Samson+Option%3A+Israel%27s+Nuclear+Arsenal+and+American+Foreign+Policy&amp;rft.pub=Random+House&amp;rft.date=1991&amp;rft.isbn=978-0-394-57006-8&amp;rft.aulast=Hersh&amp;rft.aufirst=Seymour&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n , page 271\n </span>\n </li>\n <li id=\"cite_note-77\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-77\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation web cs1\" id=\"CITEREFEngelbrecht\">\n Engelbrecht, Leon.\n <a class=\"external text\" href=\"http://www.defenceweb.co.za/index.php?option=com_content&amp;view=article&amp;id=5942:book-review-how-sa-built-six-atom-bombs-&amp;catid=57:Book+Reviews&amp;Itemid=141\" rel=\"nofollow\">\n \"Book Review: How SA built six atom bombs - defenceWeb\"\n </a>\n .\n <i>\n www.defenceweb.co.za\n </i>\n .\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.defenceweb.co.za&amp;rft.atitle=Book+Review%3A+How+SA+built+six+atom+bombs+-+defenceWeb&amp;rft.aulast=Engelbrecht&amp;rft.aufirst=Leon&amp;rft_id=http%3A%2F%2Fwww.defenceweb.co.za%2Findex.php%3Foption%3Dcom_content%26view%3Darticle%26id%3D5942%3Abook-review-how-sa-built-six-atom-bombs-%26catid%3D57%3ABook%2BReviews%26Itemid%3D141&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-78\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-78\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n <link href=\"mw-data:TemplateStyles:r999302996\" rel=\"mw-deduplicated-inline-style\"/>\n <cite class=\"citation web cs1\">\n <a class=\"external text\" href=\"https://web.archive.org/web/20141003212249/http://www.wisconsinproject.org/countries/israel/israel-aims.html\" rel=\"nofollow\">\n \"Israel Aims to Improve Missile Accuracy\"\n </a>\n . 3 October 2014. Archived from\n <a class=\"external text\" href=\"http://www.wisconsinproject.org/countries/israel/israel-aims.html\" rel=\"nofollow\">\n the original\n </a>\n on 3 October 2014.\n </cite>\n <span class=\"Z3988\" title=\"ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Israel+Aims+to+Improve+Missile+Accuracy&amp;rft.date=2014-10-03&amp;rft_id=http%3A%2F%2Fwww.wisconsinproject.org%2Fcountries%2Fisrael%2Fisrael-aims.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AList+of+nuclear+weapons+tests\">\n </span>\n </span>\n </li>\n <li id=\"cite_note-79\">\n <span class=\"mw-cite-backlink\">\n <b>\n <a href=\"#cite_ref-79\">\n ^\n </a>\n </b>\n </span>\n <span class=\"reference-text\">\n Richelson, Jeffrey T. (2007). Spying on the Bomb: American Nuclear Intelligence from Nazi Germany to Iran and North Korea. W. W. 
href=\"/wiki/Stellarator\" title=\"Stellarator\">\n Stellarator\n </a>\n </li>\n <li>\n <a href=\"/wiki/Tokamak\" title=\"Tokamak\">\n Tokamak\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n <a href=\"/wiki/Inertial_confinement_fusion\" title=\"Inertial confinement fusion\">\n Inertial\n </a>\n </th>\n <td class=\"navbox-list-with-group navbox-list navbox-even hlist\" style=\"width:100%;padding:0\">\n <div style=\"padding:0 0.25em\">\n <ul>\n <li>\n <a href=\"/wiki/Bubble_fusion\" title=\"Bubble fusion\">\n Bubble\n <span style=\"font-size:85%;\">\n (acoustic)\n </span>\n </a>\n </li>\n <li>\n <a href=\"/wiki/Fusor\" title=\"Fusor\">\n Fusor\n </a>\n <ul>\n <li>\n <a href=\"/wiki/Inertial_electrostatic_confinement\" title=\"Inertial electrostatic confinement\">\n electrostatic\n </a>\n </li>\n </ul>\n </li>\n <li>\n <a href=\"/wiki/Inertial_confinement_fusion\" title=\"Inertial confinement fusion\">\n Laser-driven\n </a>\n </li>\n <li>\n <a href=\"/wiki/Magnetized_target_fusion\" title=\"Magnetized target fusion\">\n Magnetized-target\n </a>\n </li>\n <li>\n <a href=\"/wiki/Z-pinch\" title=\"Z-pinch\">\n Z-pinch\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n Other\n </th>\n <td class=\"navbox-list-with-group navbox-list navbox-odd hlist\" style=\"width:100%;padding:0\">\n <div style=\"padding:0 0.25em\">\n <ul>\n <li>\n <a href=\"/wiki/Dense_plasma_focus\" title=\"Dense plasma focus\">\n Dense plasma focus\n </a>\n </li>\n <li>\n <a href=\"/wiki/Migma\" title=\"Migma\">\n Migma\n </a>\n </li>\n <li>\n <a href=\"/wiki/Muon-catalyzed_fusion\" title=\"Muon-catalyzed fusion\">\n Muon-catalyzed\n </a>\n </li>\n <li>\n <a href=\"/wiki/Polywell\" title=\"Polywell\">\n Polywell\n </a>\n </li>\n <li>\n <a href=\"/wiki/Pyroelectric_fusion\" title=\"Pyroelectric fusion\">\n Pyroelectric\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n </tbody>\n </table>\n <div>\n </div>\n </td>\n </tr>\n </tbody>\n </table>\n <div>\n </div>\n </td>\n </tr>\n <tr>\n <td class=\"navbox-abovebelow\" colspan=\"2\">\n <div>\n <ul>\n <li>\n <b>\n <a class=\"image\" href=\"/wiki/File:Radioactive.svg\">\n <img alt=\"Radioactive.svg\" class=\"noviewer\" data-file-height=\"446\" data-file-width=\"512\" decoding=\"async\" height=\"28\" src=\"//upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Radioactive.svg/32px-Radioactive.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Radioactive.svg/48px-Radioactive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Radioactive.svg/64px-Radioactive.svg.png 2x\" width=\"32\"/>\n </a>\n <a href=\"/wiki/Portal:Nuclear_technology\" title=\"Portal:Nuclear technology\">\n Nuclear technology portal\n </a>\n </b>\n </li>\n <li>\n <img alt=\"Category\" class=\"noviewer\" data-file-height=\"185\" data-file-width=\"180\" decoding=\"async\" height=\"16\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x\" title=\"Category\" width=\"16\"/>\n <b>\n <a href=\"/wiki/Category:Nuclear_technology\" title=\"Category:Nuclear technology\">\n Category\n </a>\n </b>\n </li>\n <li>\n <img alt=\"Commons page\" class=\"noviewer\" 
data-file-height=\"1376\" data-file-width=\"1024\" decoding=\"async\" height=\"16\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/12px-Commons-logo.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/18px-Commons-logo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/24px-Commons-logo.svg.png 2x\" title=\"Commons page\" width=\"12\"/>\n <b>\n <a class=\"extiw\" href=\"https://commons.wikimedia.org/wiki/Category:Nuclear_technology\" title=\"commons:Category:Nuclear technology\">\n Commons\n </a>\n </b>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n </tbody>\n </table>\n </div>\n <div class=\"navbox-styles nomobile\">\n <link href=\"mw-data:TemplateStyles:r1057682214\" rel=\"mw-deduplicated-inline-style\"/>\n </div>\n <div aria-labelledby=\"Lists_of_nuclear_disasters_and_radioactive_incidents\" class=\"navbox\" role=\"navigation\" style=\"padding:3px\">\n <table class=\"nowraplinks mw-collapsible autocollapse navbox-inner\" style=\"border-spacing:0;background:transparent;color:inherit\">\n <tbody>\n <tr>\n <th class=\"navbox-title\" colspan=\"2\" scope=\"col\">\n <link href=\"mw-data:TemplateStyles:r1054937957\" rel=\"mw-deduplicated-inline-style\"/>\n <div class=\"navbar plainlinks hlist navbar-mini\">\n <ul>\n <li class=\"nv-view\">\n <a href=\"/wiki/Template:Nuclear_and_radiation_accidents_and_incidents\" title=\"Template:Nuclear and radiation accidents and incidents\">\n <abbr style=\";;background:none transparent;border:none;box-shadow:none;padding:0;\" title=\"View this template\">\n v\n </abbr>\n </a>\n </li>\n <li class=\"nv-talk\">\n <a href=\"/wiki/Template_talk:Nuclear_and_radiation_accidents_and_incidents\" title=\"Template talk:Nuclear and radiation accidents and incidents\">\n <abbr style=\";;background:none transparent;border:none;box-shadow:none;padding:0;\" title=\"Discuss this template\">\n t\n </abbr>\n </a>\n </li>\n <li class=\"nv-edit\">\n <a class=\"external text\" href=\"https://en.wikipedia.org/w/index.php?title=Template:Nuclear_and_radiation_accidents_and_incidents&amp;action=edit\">\n <abbr style=\";;background:none transparent;border:none;box-shadow:none;padding:0;\" title=\"Edit this template\">\n e\n </abbr>\n </a>\n </li>\n </ul>\n </div>\n <div id=\"Lists_of_nuclear_disasters_and_radioactive_incidents\" style=\"font-size:114%;margin:0 4em\">\n <a href=\"/wiki/Lists_of_nuclear_disasters_and_radioactive_incidents\" title=\"Lists of nuclear disasters and radioactive incidents\">\n Lists of nuclear disasters and radioactive incidents\n </a>\n </div>\n </th>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n Main\n <br/>\n accident\n <br/>\n lists\n </th>\n <td class=\"navbox-list-with-group navbox-list navbox-odd hlist\" style=\"width:100%;padding:0\">\n <div style=\"padding:0 0.25em\">\n <ul>\n <li>\n <a href=\"/wiki/Vulnerability_of_nuclear_plants_to_attack\" title=\"Vulnerability of nuclear plants to attack\">\n Vulnerability of nuclear plants to attack\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_Chernobyl-related_articles\" title=\"List of Chernobyl-related articles\">\n Chernobyl-related articles\n </a>\n </li>\n <li>\n <a href=\"/wiki/Crimes_involving_radioactive_substances\" title=\"Crimes involving radioactive substances\">\n Crimes involving radioactive substances\n </a>\n </li>\n <li>\n <a href=\"/wiki/Criticality_accident#Incidents\" title=\"Criticality accident\">\n Criticality accidents and incidents\n </a>\n </li>\n <li>\n <a 
href=\"/wiki/Nuclear_meltdown#Nuclear_meltdown_events\" title=\"Nuclear meltdown\">\n Nuclear meltdown accidents\n </a>\n </li>\n <li>\n <a href=\"/wiki/Template:Milestone_nuclear_explosions\" title=\"Template:Milestone nuclear explosions\">\n List of milestone nuclear explosions\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_military_nuclear_accidents\" title=\"List of military nuclear accidents\">\n Military nuclear accidents\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_and_radiation_accidents_and_incidents\" title=\"Nuclear and radiation accidents and incidents\">\n Nuclear and radiation accidents and incidents\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_and_radiation_accidents_by_death_toll\" title=\"List of nuclear and radiation accidents by death toll\">\n Nuclear and radiation accidents by death toll\n </a>\n </li>\n <li>\n <a class=\"mw-selflink selflink\">\n Nuclear weapons tests\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_sunken_nuclear_submarines\" title=\"List of sunken nuclear submarines\">\n Sunken nuclear submarines\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_orphan_source_incidents\" title=\"List of orphan source incidents\">\n List of orphan source incidents\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n Lists by\n <br/>\n country\n </th>\n <td class=\"navbox-list-with-group navbox-list navbox-even hlist\" style=\"width:100%;padding:0\">\n <div style=\"padding:0 0.25em\">\n <ul>\n <li>\n <a href=\"/wiki/List_of_cancelled_nuclear_reactors_in_the_United_States\" title=\"List of cancelled nuclear reactors in the United States\">\n Cancelled nuclear reactors in the United States\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_inquiries_into_uranium_mining_in_Australia\" title=\"List of inquiries into uranium mining in Australia\">\n Inquiries into uranium mining in Australia\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_and_radiation_fatalities_by_country\" title=\"List of nuclear and radiation fatalities by country\">\n Nuclear and radiation fatalities by country\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_power_accidents_by_country\" title=\"List of nuclear power accidents by country\">\n Nuclear power accidents by country\n </a>\n </li>\n <li>\n <a class=\"mw-redirect\" href=\"/wiki/List_of_nuclear_reactors\" title=\"List of nuclear reactors\">\n Nuclear reactors by country\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_test_sites\" title=\"List of nuclear test sites\">\n Nuclear test sites\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_weapons_tests_of_the_Soviet_Union\" title=\"List of nuclear weapons tests of the Soviet Union\">\n Nuclear weapons tests of the Soviet Union\n </a>\n </li>\n <li>\n <a class=\"mw-redirect\" href=\"/wiki/List_of_nuclear_weapons_tests_of_the_United_States\" title=\"List of nuclear weapons tests of the United States\">\n Nuclear weapons tests of the United States\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n Individual\n <br/>\n accidents\n <br/>\n and sites\n </th>\n <td class=\"navbox-list-with-group navbox-list navbox-odd hlist\" style=\"width:100%;padding:0\">\n <div style=\"padding:0 0.25em\">\n <ul>\n <li>\n 2019\n <a href=\"/wiki/Nyonoksa_radiation_accident\" title=\"Nyonoksa radiation accident\">\n Nyonoksa radiation accident\n </a>\n </li>\n <li>\n 2011\n <a href=\"/wiki/Fukushima_Daiichi_nuclear_disaster\" title=\"Fukushima 
Daiichi nuclear disaster\">\n Fukushima Daiichi nuclear disaster\n </a>\n </li>\n <li>\n 2001\n <a href=\"/wiki/Instituto_Oncol%C3%B3gico_Nacional#Accident\" title=\"Instituto Oncológico Nacional\">\n Instituto Oncológico Nacional#Accident\n </a>\n </li>\n <li>\n <a href=\"/wiki/1996_San_Juan_de_Dios_radiotherapy_accident\" title=\"1996 San Juan de Dios radiotherapy accident\">\n 1996 San Juan de Dios radiotherapy accident\n </a>\n </li>\n <li>\n <a class=\"mw-redirect\" href=\"/wiki/1990_Clinic_of_Zaragoza_radiotherapy_accident\" title=\"1990 Clinic of Zaragoza radiotherapy accident\">\n 1990 Clinic of Zaragoza radiotherapy accident\n </a>\n </li>\n <li>\n 1987\n <a href=\"/wiki/Goi%C3%A2nia_accident\" title=\"Goiânia accident\">\n Goiânia accident\n </a>\n </li>\n <li>\n 1986\n <a href=\"/wiki/Chernobyl_disaster\" title=\"Chernobyl disaster\">\n Chernobyl disaster\n </a>\n and\n <a href=\"/wiki/Effects_of_the_Chernobyl_disaster\" title=\"Effects of the Chernobyl disaster\">\n Effects of the Chernobyl disaster\n </a>\n </li>\n <li>\n 1985\n <a href=\"/wiki/Soviet_submarine_K-431\" title=\"Soviet submarine K-431\">\n Chazhma Bay nuclear accident\n </a>\n </li>\n <li>\n 1982\n <a href=\"/wiki/Andreev_Bay_nuclear_accident\" title=\"Andreev Bay nuclear accident\">\n Andreev Bay nuclear accident\n </a>\n </li>\n <li>\n 1980\n <a href=\"/wiki/Kramatorsk_radiological_accident\" title=\"Kramatorsk radiological accident\">\n Kramatorsk radiological accident\n </a>\n </li>\n <li>\n 1979\n <a href=\"/wiki/Three_Mile_Island_accident\" title=\"Three Mile Island accident\">\n Three Mile Island accident\n </a>\n and\n <a href=\"/wiki/Three_Mile_Island_accident_health_effects\" title=\"Three Mile Island accident health effects\">\n Three Mile Island accident health effects\n </a>\n </li>\n <li>\n 1969\n <a href=\"/wiki/Lucens_reactor\" title=\"Lucens reactor\">\n Lucens reactor\n </a>\n </li>\n <li>\n 1962\n <a href=\"/wiki/Johnston_Atoll#National_nuclear_weapon_test_site_1958-1963#Failures\" title=\"Johnston Atoll\">\n Thor missile launch failures at Johnston Atoll\n </a>\n under\n <a href=\"/wiki/Operation_Fishbowl\" title=\"Operation Fishbowl\">\n Operation Fishbowl\n </a>\n </li>\n <li>\n 1962\n <a href=\"/wiki/Cuban_Missile_Crisis\" title=\"Cuban Missile Crisis\">\n Cuban Missile Crisis\n </a>\n </li>\n <li>\n 1961\n <a href=\"/wiki/Soviet_submarine_K-19#Nuclear_accident\" title=\"Soviet submarine K-19\">\n K-19 nuclear accident\n </a>\n </li>\n <li>\n 1961\n <a href=\"/wiki/SL-1\" title=\"SL-1\">\n SL-1\n </a>\n nuclear meltdown\n </li>\n <li>\n 1957\n <a href=\"/wiki/Kyshtym_disaster\" title=\"Kyshtym disaster\">\n Kyshtym disaster\n </a>\n </li>\n <li>\n 1957\n <a href=\"/wiki/Windscale_fire\" title=\"Windscale fire\">\n Windscale fire\n </a>\n </li>\n <li>\n 1957\n <a href=\"/wiki/Operation_Plumbbob\" title=\"Operation Plumbbob\">\n Operation Plumbbob\n </a>\n </li>\n <li>\n 1954\n <a href=\"/wiki/Totskoye_nuclear_exercise\" title=\"Totskoye nuclear exercise\">\n Totskoye nuclear exercise\n </a>\n </li>\n <li>\n <a href=\"/wiki/Bikini_Atoll\" title=\"Bikini Atoll\">\n Bikini Atoll\n </a>\n </li>\n <li>\n <a href=\"/wiki/Hanford_Site\" title=\"Hanford Site\">\n Hanford Site\n </a>\n </li>\n <li>\n <a href=\"/wiki/Radioactive_contamination_from_the_Rocky_Flats_Plant\" title=\"Radioactive contamination from the Rocky Flats Plant\">\n Rocky Flats Plant\n </a>\n </li>\n <li>\n 1945\n <a href=\"/wiki/Atomic_bombings_of_Hiroshima_and_Nagasaki\" title=\"Atomic bombings of Hiroshima and Nagasaki\">\n 
Atomic bombings of Hiroshima and Nagasaki\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <th class=\"navbox-group\" scope=\"row\" style=\"width:1%\">\n Related\n <br/>\n topics\n </th>\n <td class=\"navbox-list-with-group navbox-list navbox-even hlist\" style=\"width:100%;padding:0\">\n <div style=\"padding:0 0.25em\">\n <ul>\n <li>\n <a href=\"/wiki/List_of_books_about_nuclear_issues\" title=\"List of books about nuclear issues\">\n Books about nuclear issues\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_films_about_nuclear_issues\" title=\"List of films about nuclear issues\">\n Films about nuclear issues\n </a>\n </li>\n <li>\n <a href=\"/wiki/Anti-war_movement\" title=\"Anti-war movement\">\n Anti-war movement\n </a>\n </li>\n <li>\n <a href=\"/wiki/Bikini_Atoll\" title=\"Bikini Atoll\">\n Bikini Atoll\n </a>\n </li>\n <li>\n <i>\n <a href=\"/wiki/Bulletin_of_the_Atomic_Scientists\" title=\"Bulletin of the Atomic Scientists\">\n Bulletin of the Atomic Scientists\n </a>\n </i>\n </li>\n <li>\n <a href=\"/wiki/France_and_weapons_of_mass_destruction\" title=\"France and weapons of mass destruction\">\n France and weapons of mass destruction\n </a>\n </li>\n <li>\n <a href=\"/wiki/History_of_the_anti-nuclear_movement\" title=\"History of the anti-nuclear movement\">\n History of the anti-nuclear movement\n </a>\n </li>\n <li>\n <a href=\"/wiki/International_Day_against_Nuclear_Tests\" title=\"International Day against Nuclear Tests\">\n International Day against Nuclear Tests\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_nuclear_close_calls\" title=\"List of nuclear close calls\">\n Nuclear close calls\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear-Free_Future_Award\" title=\"Nuclear-Free Future Award\">\n Nuclear-Free Future Award\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear-free_zone\" title=\"Nuclear-free zone\">\n Nuclear-free zone\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_power_debate\" title=\"Nuclear power debate\">\n Nuclear power debate\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_power_phase-out\" title=\"Nuclear power phase-out\">\n Nuclear power phase-out\n </a>\n </li>\n <li>\n <a href=\"/wiki/Nuclear_weapons_debate\" title=\"Nuclear weapons debate\">\n Nuclear weapons debate\n </a>\n </li>\n <li>\n <a href=\"/wiki/List_of_peace_activists\" title=\"List of peace activists\">\n Peace activists\n </a>\n </li>\n <li>\n <a href=\"/wiki/Peace_movement\" title=\"Peace movement\">\n Peace movement\n </a>\n </li>\n <li>\n <a href=\"/wiki/Peace_camp\" title=\"Peace camp\">\n Peace camp\n </a>\n </li>\n <li>\n <a href=\"/wiki/Russell%E2%80%93Einstein_Manifesto\" title=\"Russell–Einstein Manifesto\">\n Russell–Einstein Manifesto\n </a>\n </li>\n <li>\n <a href=\"/wiki/Smiling_Sun\" title=\"Smiling Sun\">\n Smiling Sun\n </a>\n </li>\n </ul>\n </div>\n </td>\n </tr>\n <tr>\n <td class=\"navbox-abovebelow\" colspan=\"2\">\n <div>\n <b>\n <a class=\"image\" href=\"/wiki/File:Radioactive.svg\">\n <img alt=\"Radioactive.svg\" class=\"noviewer\" data-file-height=\"446\" data-file-width=\"512\" decoding=\"async\" height=\"28\" src=\"//upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Radioactive.svg/32px-Radioactive.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Radioactive.svg/48px-Radioactive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/b5/Radioactive.svg/64px-Radioactive.svg.png 2x\" width=\"32\"/>\n </a>\n <a href=\"/wiki/Portal:Nuclear_technology\" title=\"Portal:Nuclear technology\">\n Nuclear technology portal\n </a>\n 
</b>\n <img alt=\"Category\" class=\"noviewer\" data-file-height=\"185\" data-file-width=\"180\" decoding=\"async\" height=\"16\" src=\"//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png\" srcset=\"//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x\" title=\"Category\" width=\"16\"/>\n <a href=\"/wiki/Category:Radiation_accidents_and_incidents\" title=\"Category:Radiation accidents and incidents\">\n Category\n </a>\n </div>\n </td>\n </tr>\n </tbody>\n </table>\n </div>\n <!-- \nNewPP limit report\nParsed by mw1375\nCached time: 20211205173546\nCache expiry: 1814400\nReduced expiry: false\nComplications: [vary‐revision‐sha1]\nCPU time usage: 1.509 seconds\nReal time usage: 1.732 seconds\nPreprocessor visited node count: 7235/1000000\nPost‐expand include size: 286909/2097152 bytes\nTemplate argument size: 12235/2097152 bytes\nHighest expansion depth: 16/40\nExpensive parser function count: 4/500\nUnstrip recursion depth: 1/20\nUnstrip post‐expand size: 197664/5000000 bytes\nLua time usage: 0.802/10.000 seconds\nLua memory usage: 26885154/52428800 bytes\nNumber of Wikibase entities loaded: 0/400\n-->\n <!--\nTransclusion expansion time report (%,ms,calls,template)\n100.00% 1408.742 1 -total\n 47.26% 665.755 2 Template:Reflist\n 13.07% 184.063 1 Template:Lang\n 10.85% 152.885 34 Template:Cite_web\n 9.84% 138.690 1 Template:Nuclear_technology\n 9.56% 134.608 1 Template:Navbox_with_collapsible_groups\n 9.42% 132.719 9 Template:Citation_needed\n 8.95% 126.069 11 Template:Fix\n 8.90% 125.400 4 Template:Cite_book\n 6.35% 89.524 1 Template:Nuclear_weapons\n-->\n <!-- Saved in parser cache with key enwiki:pcache:idhash:2189647-0!canonical and timestamp 20211205173544 and revision id 1058798754. 
Serialized with JSON.\n -->\n </div>\n <noscript>\n <img alt=\"\" height=\"1\" src=\"//en.wikipedia.org/wiki/Special:CentralAutoLogin/start?type=1x1\" style=\"border: none; position: absolute;\" title=\"\" width=\"1\"/>\n </noscript>\n <div class=\"printfooter\">\n Retrieved from \"\n <a dir=\"ltr\" href=\"https://en.wikipedia.org/w/index.php?title=List_of_nuclear_weapons_tests&amp;oldid=1058798754\">\n https://en.wikipedia.org/w/index.php?title=List_of_nuclear_weapons_tests&amp;oldid=1058798754\n </a>\n \"\n </div>\n </div>\n <div class=\"catlinks\" data-mw=\"interface\" id=\"catlinks\">\n <div class=\"mw-normal-catlinks\" id=\"mw-normal-catlinks\">\n <a href=\"/wiki/Help:Category\" title=\"Help:Category\">\n Categories\n </a>\n :\n <ul>\n <li>\n <a href=\"/wiki/Category:Nuclear_weapons_testing\" title=\"Category:Nuclear weapons testing\">\n Nuclear weapons testing\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:History-related_lists\" title=\"Category:History-related lists\">\n History-related lists\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Nuclear_technology-related_lists\" title=\"Category:Nuclear technology-related lists\">\n Nuclear technology-related lists\n </a>\n </li>\n </ul>\n </div>\n <div class=\"mw-hidden-catlinks mw-hidden-cats-hidden\" id=\"mw-hidden-catlinks\">\n Hidden categories:\n <ul>\n <li>\n <a href=\"/wiki/Category:CS1_maint:_archived_copy_as_title\" title=\"Category:CS1 maint: archived copy as title\">\n CS1 maint: archived copy as title\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Articles_containing_Russian-language_text\" title=\"Category:Articles containing Russian-language text\">\n Articles containing Russian-language text\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Webarchive_template_wayback_links\" title=\"Category:Webarchive template wayback links\">\n Webarchive template wayback links\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Articles_with_short_description\" title=\"Category:Articles with short description\">\n Articles with short description\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Short_description_is_different_from_Wikidata\" title=\"Category:Short description is different from Wikidata\">\n Short description is different from Wikidata\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:All_articles_with_unsourced_statements\" title=\"Category:All articles with unsourced statements\">\n All articles with unsourced statements\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Articles_with_unsourced_statements_from_September_2015\" title=\"Category:Articles with unsourced statements from September 2015\">\n Articles with unsourced statements from September 2015\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Articles_with_unsourced_statements_from_March_2019\" title=\"Category:Articles with unsourced statements from March 2019\">\n Articles with unsourced statements from March 2019\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:All_articles_with_incomplete_citations\" title=\"Category:All articles with incomplete citations\">\n All articles with incomplete citations\n </a>\n </li>\n <li>\n <a href=\"/wiki/Category:Articles_with_incomplete_citations_from_October_2018\" title=\"Category:Articles with incomplete citations from October 2018\">\n Articles with incomplete citations from October 2018\n </a>\n </li>\n </ul>\n </div>\n </div>\n </div>\n </div>\n <div id=\"mw-data-after-content\">\n <div class=\"read-more-container\">\n </div>\n </div>\n <div id=\"mw-navigation\">\n <h2>\n Navigation menu\n </h2>\n <div 
id=\"mw-head\">\n <nav aria-labelledby=\"p-personal-label\" class=\"mw-portlet mw-portlet-personal vector-user-menu-legacy vector-menu\" id=\"p-personal\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-personal-label\">\n <span>\n Personal tools\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"mw-list-item\" id=\"pt-anonuserpage\">\n <span>\n Not logged in\n </span>\n </li>\n <li class=\"mw-list-item\" id=\"pt-anontalk\">\n <a accesskey=\"n\" href=\"/wiki/Special:MyTalk\" title=\"Discussion about edits from this IP address [n]\">\n <span>\n Talk\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"pt-anoncontribs\">\n <a accesskey=\"y\" href=\"/wiki/Special:MyContributions\" title=\"A list of edits made from this IP address [y]\">\n <span>\n Contributions\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"pt-createaccount\">\n <a href=\"/w/index.php?title=Special:CreateAccount&amp;returnto=List+of+nuclear+weapons+tests\" title=\"You are encouraged to create an account and log in; however, it is not mandatory\">\n <span>\n Create account\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"pt-login\">\n <a accesskey=\"o\" href=\"/w/index.php?title=Special:UserLogin&amp;returnto=List+of+nuclear+weapons+tests\" title=\"You're encouraged to log in; however, it's not mandatory. [o]\">\n <span>\n Log in\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <div id=\"left-navigation\">\n <nav aria-labelledby=\"p-namespaces-label\" class=\"mw-portlet mw-portlet-namespaces vector-menu vector-menu-tabs\" id=\"p-namespaces\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-namespaces-label\">\n <span>\n Namespaces\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"selected mw-list-item\" id=\"ca-nstab-main\">\n <a accesskey=\"c\" href=\"/wiki/List_of_nuclear_weapons_tests\" title=\"View the content page [c]\">\n <span>\n Article\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"ca-talk\">\n <a accesskey=\"t\" href=\"/wiki/Talk:List_of_nuclear_weapons_tests\" rel=\"discussion\" title=\"Discuss improvements to the content page [t]\">\n <span>\n Talk\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-variants-label\" class=\"mw-portlet mw-portlet-variants emptyPortlet vector-menu-dropdown-noicon vector-menu vector-menu-dropdown\" id=\"p-variants\" role=\"navigation\">\n <input aria-haspopup=\"true\" aria-labelledby=\"p-variants-label\" class=\"vector-menu-checkbox\" data-event-name=\"ui.dropdown-p-variants\" id=\"p-variants-checkbox\" role=\"button\" type=\"checkbox\"/>\n <h3 aria-label=\"Change language variant\" class=\"vector-menu-heading\" id=\"p-variants-label\">\n <span>\n Variants\n </span>\n <span class=\"vector-menu-checkbox-expanded\">\n expanded\n </span>\n <span class=\"vector-menu-checkbox-collapsed\">\n collapsed\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n </ul>\n </div>\n </nav>\n </div>\n <div id=\"right-navigation\">\n <nav aria-labelledby=\"p-views-label\" class=\"mw-portlet mw-portlet-views vector-menu vector-menu-tabs\" id=\"p-views\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-views-label\">\n <span>\n Views\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"selected mw-list-item\" 
id=\"ca-view\">\n <a href=\"/wiki/List_of_nuclear_weapons_tests\">\n <span>\n Read\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"ca-edit\">\n <a accesskey=\"e\" href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=edit\" title=\"Edit this page [e]\">\n <span>\n Edit\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"ca-history\">\n <a accesskey=\"h\" href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=history\" title=\"Past revisions of this page [h]\">\n <span>\n View history\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-cactions-label\" class=\"mw-portlet mw-portlet-cactions emptyPortlet vector-menu-dropdown-noicon vector-menu vector-menu-dropdown\" id=\"p-cactions\" role=\"navigation\" title=\"More options\">\n <input aria-haspopup=\"true\" aria-labelledby=\"p-cactions-label\" class=\"vector-menu-checkbox\" data-event-name=\"ui.dropdown-p-cactions\" id=\"p-cactions-checkbox\" role=\"button\" type=\"checkbox\"/>\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-cactions-label\">\n <span>\n More\n </span>\n <span class=\"vector-menu-checkbox-expanded\">\n expanded\n </span>\n <span class=\"vector-menu-checkbox-collapsed\">\n collapsed\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n </ul>\n </div>\n </nav>\n <div class=\"vector-search-box\" id=\"p-search\" role=\"search\">\n <div>\n <h3>\n <label for=\"searchInput\">\n Search\n </label>\n </h3>\n <form action=\"/w/index.php\" class=\"vector-search-box-form\" id=\"searchform\">\n <div class=\"vector-search-box-inner\" data-search-loc=\"header-navigation\" id=\"simpleSearch\">\n <input accesskey=\"f\" aria-label=\"Search Wikipedia\" autocapitalize=\"sentences\" class=\"vector-search-box-input\" id=\"searchInput\" name=\"search\" placeholder=\"Search Wikipedia\" title=\"Search Wikipedia [f]\" type=\"search\"/>\n <input name=\"title\" type=\"hidden\" value=\"Special:Search\"/>\n <input class=\"searchButton mw-fallbackSearchButton\" id=\"mw-searchButton\" name=\"fulltext\" title=\"Search Wikipedia for this text\" type=\"submit\" value=\"Search\"/>\n <input class=\"searchButton\" id=\"searchButton\" name=\"go\" title=\"Go to a page with this exact name if it exists\" type=\"submit\" value=\"Go\"/>\n </div>\n </form>\n </div>\n </div>\n </div>\n </div>\n <div id=\"mw-panel\">\n <div id=\"p-logo\" role=\"banner\">\n <a class=\"mw-wiki-logo\" href=\"/wiki/Main_Page\" title=\"Visit the main page\">\n </a>\n </div>\n <nav aria-labelledby=\"p-navigation-label\" class=\"mw-portlet mw-portlet-navigation vector-menu vector-menu-portal portal\" id=\"p-navigation\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-navigation-label\">\n <span>\n Navigation\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"mw-list-item\" id=\"n-mainpage-description\">\n <a accesskey=\"z\" href=\"/wiki/Main_Page\" title=\"Visit the main page [z]\">\n <span>\n Main page\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-contents\">\n <a href=\"/wiki/Wikipedia:Contents\" title=\"Guides to browsing Wikipedia\">\n <span>\n Contents\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-currentevents\">\n <a href=\"/wiki/Portal:Current_events\" title=\"Articles related to current events\">\n <span>\n Current events\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-randompage\">\n <a accesskey=\"x\" 
href=\"/wiki/Special:Random\" title=\"Visit a randomly selected article [x]\">\n <span>\n Random article\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-aboutsite\">\n <a href=\"/wiki/Wikipedia:About\" title=\"Learn about Wikipedia and how it works\">\n <span>\n About Wikipedia\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-contactpage\">\n <a href=\"//en.wikipedia.org/wiki/Wikipedia:Contact_us\" title=\"How to contact Wikipedia\">\n <span>\n Contact us\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-sitesupport\">\n <a href=\"https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&amp;utm_medium=sidebar&amp;utm_campaign=C13_en.wikipedia.org&amp;uselang=en\" title=\"Support us by donating to the Wikimedia Foundation\">\n <span>\n Donate\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-interaction-label\" class=\"mw-portlet mw-portlet-interaction vector-menu vector-menu-portal portal\" id=\"p-interaction\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-interaction-label\">\n <span>\n Contribute\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"mw-list-item\" id=\"n-help\">\n <a href=\"/wiki/Help:Contents\" title=\"Guidance on how to use and edit Wikipedia\">\n <span>\n Help\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-introduction\">\n <a href=\"/wiki/Help:Introduction\" title=\"Learn how to edit Wikipedia\">\n <span>\n Learn to edit\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-portal\">\n <a href=\"/wiki/Wikipedia:Community_portal\" title=\"The hub for editors\">\n <span>\n Community portal\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-recentchanges\">\n <a accesskey=\"r\" href=\"/wiki/Special:RecentChanges\" title=\"A list of recent changes to Wikipedia [r]\">\n <span>\n Recent changes\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"n-upload\">\n <a href=\"/wiki/Wikipedia:File_Upload_Wizard\" title=\"Add images or other media for use on Wikipedia\">\n <span>\n Upload file\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-tb-label\" class=\"mw-portlet mw-portlet-tb vector-menu vector-menu-portal portal\" id=\"p-tb\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-tb-label\">\n <span>\n Tools\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"mw-list-item\" id=\"t-whatlinkshere\">\n <a accesskey=\"j\" href=\"/wiki/Special:WhatLinksHere/List_of_nuclear_weapons_tests\" title=\"List of all English Wikipedia pages containing links to this page [j]\">\n <span>\n What links here\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-recentchangeslinked\">\n <a accesskey=\"k\" href=\"/wiki/Special:RecentChangesLinked/List_of_nuclear_weapons_tests\" rel=\"nofollow\" title=\"Recent changes in pages linked from this page [k]\">\n <span>\n Related changes\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-upload\">\n <a accesskey=\"u\" href=\"/wiki/Wikipedia:File_Upload_Wizard\" title=\"Upload files [u]\">\n <span>\n Upload file\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-specialpages\">\n <a accesskey=\"q\" href=\"/wiki/Special:SpecialPages\" title=\"A list of all special pages [q]\">\n <span>\n Special pages\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-permalink\">\n <a 
href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;oldid=1058798754\" title=\"Permanent link to this revision of this page\">\n <span>\n Permanent link\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-info\">\n <a href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;action=info\" title=\"More information about this page\">\n <span>\n Page information\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-cite\">\n <a href=\"/w/index.php?title=Special:CiteThisPage&amp;page=List_of_nuclear_weapons_tests&amp;id=1058798754&amp;wpFormIdentifier=titleform\" title=\"Information on how to cite this page\">\n <span>\n Cite this page\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-wikibase\">\n <a accesskey=\"g\" href=\"https://www.wikidata.org/wiki/Special:EntityPage/Q1863664\" title=\"Structured data on this page hosted by Wikidata [g]\">\n <span>\n Wikidata item\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-coll-print_export-label\" class=\"mw-portlet mw-portlet-coll-print_export vector-menu vector-menu-portal portal\" id=\"p-coll-print_export\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-coll-print_export-label\">\n <span>\n Print/export\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"mw-list-item\" id=\"coll-download-as-rl\">\n <a href=\"/w/index.php?title=Special:DownloadAsPdf&amp;page=List_of_nuclear_weapons_tests&amp;action=show-download-screen\" title=\"Download this page as a PDF file\">\n <span>\n Download as PDF\n </span>\n </a>\n </li>\n <li class=\"mw-list-item\" id=\"t-print\">\n <a accesskey=\"p\" href=\"/w/index.php?title=List_of_nuclear_weapons_tests&amp;printable=yes\" title=\"Printable version of this page [p]\">\n <span>\n Printable version\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-wikibase-otherprojects-label\" class=\"mw-portlet mw-portlet-wikibase-otherprojects vector-menu vector-menu-portal portal\" id=\"p-wikibase-otherprojects\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-wikibase-otherprojects-label\">\n <span>\n In other projects\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"wb-otherproject-link wb-otherproject-commons mw-list-item\">\n <a href=\"https://commons.wikimedia.org/wiki/Category:Nuclear_weapon_tests\" hreflang=\"en\">\n <span>\n Wikimedia Commons\n </span>\n </a>\n </li>\n </ul>\n </div>\n </nav>\n <nav aria-labelledby=\"p-lang-label\" class=\"mw-portlet mw-portlet-lang vector-menu vector-menu-portal portal\" id=\"p-lang\" role=\"navigation\">\n <h3 aria-label=\"\" class=\"vector-menu-heading\" id=\"p-lang-label\">\n <span>\n Languages\n </span>\n </h3>\n <div class=\"vector-menu-content\">\n <ul class=\"vector-menu-content-list\">\n <li class=\"interlanguage-link interwiki-de mw-list-item\">\n <a class=\"interlanguage-link-target\" href=\"https://de.wikipedia.org/wiki/Liste_von_Kernwaffentests\" hreflang=\"de\" lang=\"de\" title=\"Liste von Kernwaffentests – German\">\n <span>\n Deutsch\n </span>\n </a>\n </li>\n <li class=\"interlanguage-link interwiki-es mw-list-item\">\n <a class=\"interlanguage-link-target\" href=\"https://es.wikipedia.org/wiki/Anexo:Ensayos_nucleares\" hreflang=\"es\" lang=\"es\" title=\"Anexo:Ensayos nucleares – Spanish\">\n <span>\n Español\n </span>\n </a>\n </li>\n <li class=\"interlanguage-link interwiki-fr 
mw-list-item\">\n <a class=\"interlanguage-link-target\" href=\"https://fr.wikipedia.org/wiki/Liste_d%27essais_nucl%C3%A9aires\" hreflang=\"fr\" lang=\"fr\" title=\"Liste d'essais nucléaires – French\">\n <span>\n Français\n </span>\n </a>\n </li>\n <li class=\"interlanguage-link interwiki-ja mw-list-item\">\n <a class=\"interlanguage-link-target\" href=\"https://ja.wikipedia.org/wiki/%E6%A0%B8%E5%AE%9F%E9%A8%93%E3%81%AE%E4%B8%80%E8%A6%A7\" hreflang=\"ja\" lang=\"ja\" title=\"核実験の一覧 – Japanese\">\n <span>\n 日本語\n </span>\n </a>\n </li>\n <li class=\"interlanguage-link interwiki-no mw-list-item\">\n <a class=\"interlanguage-link-target\" href=\"https://no.wikipedia.org/wiki/Liste_over_pr%C3%B8vesprengninger\" hreflang=\"nb\" lang=\"nb\" title=\"Liste over prøvesprengninger – Norwegian Bokmål\">\n <span>\n Norsk bokmål\n </span>\n </a>\n </li>\n <li class=\"interlanguage-link interwiki-pl mw-list-item\">\n <a class=\"interlanguage-link-target\" href=\"https://pl.wikipedia.org/wiki/Lista_wybuch%C3%B3w_j%C4%85drowych\" hreflang=\"pl\" lang=\"pl\" title=\"Lista wybuchów jądrowych – Polish\">\n <span>\n Polski\n </span>\n </a>\n </li>\n </ul>\n <div class=\"after-portlet after-portlet-lang\">\n <span class=\"wb-langlinks-edit wb-langlinks-link\">\n <a class=\"wbc-editpage\" href=\"https://www.wikidata.org/wiki/Special:EntityPage/Q1863664#sitelinks-wikipedia\" title=\"Edit interlanguage links\">\n Edit links\n </a>\n </span>\n </div>\n </div>\n </nav>\n </div>\n </div>\n <footer class=\"mw-footer\" id=\"footer\" role=\"contentinfo\">\n <ul id=\"footer-info\">\n <li id=\"footer-info-lastmod\">\n This page was last edited on 5 December 2021, at 17:35\n <span class=\"anonymous-show\">\n (UTC)\n </span>\n .\n </li>\n <li id=\"footer-info-copyright\">\n Text is available under the\n <a href=\"//en.wikipedia.org/wiki/Wikipedia:Text_of_Creative_Commons_Attribution-ShareAlike_3.0_Unported_License\" rel=\"license\">\n Creative Commons Attribution-ShareAlike License\n </a>\n <a href=\"//creativecommons.org/licenses/by-sa/3.0/\" rel=\"license\" style=\"display:none;\">\n </a>\n ;\nadditional terms may apply. By using this site, you agree to the\n <a href=\"//foundation.wikimedia.org/wiki/Terms_of_Use\">\n Terms of Use\n </a>\n and\n <a href=\"//foundation.wikimedia.org/wiki/Privacy_policy\">\n Privacy Policy\n </a>\n . 
Wikipedia® is a registered trademark of the\n <a href=\"//www.wikimediafoundation.org/\">\n Wikimedia Foundation, Inc.\n </a>\n , a non-profit organization.\n </li>\n </ul>\n <ul id=\"footer-places\">\n <li id=\"footer-places-privacy\">\n <a class=\"extiw\" href=\"https://foundation.wikimedia.org/wiki/Privacy_policy\" title=\"wmf:Privacy policy\">\n Privacy policy\n </a>\n </li>\n <li id=\"footer-places-about\">\n <a href=\"/wiki/Wikipedia:About\" title=\"Wikipedia:About\">\n About Wikipedia\n </a>\n </li>\n <li id=\"footer-places-disclaimer\">\n <a href=\"/wiki/Wikipedia:General_disclaimer\" title=\"Wikipedia:General disclaimer\">\n Disclaimers\n </a>\n </li>\n <li id=\"footer-places-contact\">\n <a href=\"//en.wikipedia.org/wiki/Wikipedia:Contact_us\">\n Contact Wikipedia\n </a>\n </li>\n <li id=\"footer-places-mobileview\">\n <a class=\"noprint stopMobileRedirectToggle\" href=\"//en.m.wikipedia.org/w/index.php?title=List_of_nuclear_weapons_tests&amp;mobileaction=toggle_view_mobile\">\n Mobile view\n </a>\n </li>\n <li id=\"footer-places-developers\">\n <a href=\"https://www.mediawiki.org/wiki/Special:MyLanguage/How_to_contribute\">\n Developers\n </a>\n </li>\n <li id=\"footer-places-statslink\">\n <a href=\"https://stats.wikimedia.org/#/en.wikipedia.org\">\n Statistics\n </a>\n </li>\n <li id=\"footer-places-cookiestatement\">\n <a href=\"https://foundation.wikimedia.org/wiki/Cookie_statement\">\n Cookie statement\n </a>\n </li>\n </ul>\n <ul class=\"noprint\" id=\"footer-icons\">\n <li id=\"footer-copyrightico\">\n <a href=\"https://wikimediafoundation.org/\">\n <img alt=\"Wikimedia Foundation\" height=\"31\" loading=\"lazy\" src=\"/static/images/footer/wikimedia-button.png\" srcset=\"/static/images/footer/wikimedia-button-1.5x.png 1.5x, /static/images/footer/wikimedia-button-2x.png 2x\" width=\"88\"/>\n </a>\n </li>\n <li id=\"footer-poweredbyico\">\n <a href=\"https://www.mediawiki.org/\">\n <img alt=\"Powered by MediaWiki\" height=\"31\" loading=\"lazy\" src=\"/static/images/footer/poweredby_mediawiki_88x31.png\" srcset=\"/static/images/footer/poweredby_mediawiki_132x47.png 1.5x, /static/images/footer/poweredby_mediawiki_176x62.png 2x\" width=\"88\"/>\n </a>\n </li>\n </ul>\n </footer>\n <script>\n (RLQ=window.RLQ||[]).push(function(){mw.config.set({\"wgPageParseReport\":{\"limitreport\":{\"cputime\":\"1.509\",\"walltime\":\"1.732\",\"ppvisitednodes\":{\"value\":7235,\"limit\":1000000},\"postexpandincludesize\":{\"value\":286909,\"limit\":2097152},\"templateargumentsize\":{\"value\":12235,\"limit\":2097152},\"expansiondepth\":{\"value\":16,\"limit\":40},\"expensivefunctioncount\":{\"value\":4,\"limit\":500},\"unstrip-depth\":{\"value\":1,\"limit\":20},\"unstrip-size\":{\"value\":197664,\"limit\":5000000},\"entityaccesscount\":{\"value\":0,\"limit\":400},\"timingprofile\":[\"100.00% 1408.742 1 -total\",\" 47.26% 665.755 2 Template:Reflist\",\" 13.07% 184.063 1 Template:Lang\",\" 10.85% 152.885 34 Template:Cite_web\",\" 9.84% 138.690 1 Template:Nuclear_technology\",\" 9.56% 134.608 1 Template:Navbox_with_collapsible_groups\",\" 9.42% 132.719 9 Template:Citation_needed\",\" 8.95% 126.069 11 Template:Fix\",\" 8.90% 125.400 4 Template:Cite_book\",\" 6.35% 89.524 1 Template:Nuclear_weapons\"]},\"scribunto\":{\"limitreport-timeusage\":{\"value\":\"0.802\",\"limit\":\"10.000\"},\"limitreport-memusage\":{\"value\":26885154,\"limit\":52428800},\"limitreport-logs\":\"table#1 {\\n [\\\"size\\\"] = \\\"small\\\",\\n}\\ntable#1 {\\n [\\\"size\\\"] = 
\\\"small\\\",\\n}\\n\"},\"cachereport\":{\"origin\":\"mw1375\",\"timestamp\":\"20211205173546\",\"ttl\":1814400,\"transientcontent\":false}}});});\n </script>\n <script type=\"application/ld+json\">\n {\"@context\":\"https:\\/\\/schema.org\",\"@type\":\"Article\",\"name\":\"List of nuclear weapons tests\",\"url\":\"https:\\/\\/en.wikipedia.org\\/wiki\\/List_of_nuclear_weapons_tests\",\"sameAs\":\"http:\\/\\/www.wikidata.org\\/entity\\/Q1863664\",\"mainEntity\":\"http:\\/\\/www.wikidata.org\\/entity\\/Q1863664\",\"author\":{\"@type\":\"Organization\",\"name\":\"Contributors to Wikimedia projects\"},\"publisher\":{\"@type\":\"Organization\",\"name\":\"Wikimedia Foundation, Inc.\",\"logo\":{\"@type\":\"ImageObject\",\"url\":\"https:\\/\\/www.wikimedia.org\\/static\\/images\\/wmf-hor-googpub.png\"}},\"datePublished\":\"2005-07-07T21:30:23Z\",\"dateModified\":\"2021-12-05T17:35:33Z\",\"image\":\"https:\\/\\/upload.wikimedia.org\\/wikipedia\\/commons\\/6\\/6a\\/Little_boy.jpg\",\"headline\":\"Wikimedia list article\"}\n </script>\n <script>\n (RLQ=window.RLQ||[]).push(function(){mw.config.set({\"wgBackendResponseTime\":192,\"wgHostname\":\"mw1416\"});});\n </script>\n </body>\n</html>\n" ] ], [ [ "Если внимательно изучить код `HTML` искомой таблицы, то можно обнаружить что вся таблица находится в классе `Wikitable Sortable`. (Для включения отображения кода сайта в вашем браузере можно нажать правкой кнопкой мыши на таблицы и выбрать пункт *Исследовать элемент*).", "_____no_output_____" ], [ "![title](table.png)", "_____no_output_____" ], [ "Поэтому первой задачей будет найти класс *wikitable sortable* в коде `HTML`. Это можно сделать с помощью функции `find_all`, указав в качестве аргументов, что мы ищем тэг `table` с классом `wikitable sortable`.", "_____no_output_____" ] ], [ [ "My_table = soup.find_all('table',{'class':'wikitable sortable'})\nMy_table", "_____no_output_____" ] ], [ [ "Но как вы могли заметить, то на страницы есть две таблицы, которые принадлежат этому классу. Функция `find_all` вернет все найденные объекты в виде списка. Поэтому проверим второй найденный элемент.", "_____no_output_____" ] ], [ [ "My_table[1]", "_____no_output_____" ] ], [ [ "Все верно, это наша искомая таблица. Если дальше изучить содержимое таблицы, то станет понятно что внутри тега `th` находится заголовок таблицы, а внутри `td` строки таблицы. А оба этих тега находятся внутри тегов `tr` что является по факту строкой таблицы. Давайте извлечем все строки таблицы также используя функцию `find_all`.", "_____no_output_____" ] ], [ [ "rows = My_table[1].find_all('tr')\nrows", "_____no_output_____" ] ], [ [ "Давайте внимательно изучим содержимое одной строки, вытащим все `td`. Отобразим вторую строчку:", "_____no_output_____" ] ], [ [ "rows[1].find_all('td')", "_____no_output_____" ] ], [ [ "Мы видим нужные нам данные между тегов `<td><\\td>`, а также ссылки с тегом `<a>` и даже смешанные ячейки с обоими этими вариантами. Давайте сначала извлечем просто данные. 
[ [ "The only thing we need to handle separately is the first row, which stores the column headers (the table header).", "_____no_output_____" ] ],
[ [ "rows[0].find_all('th')[0].get_text()", "_____no_output_____" ] ],
[ [ "Great, let's just get rid of the newline character.", "_____no_output_____" ] ],
[ [ "rows[0].find_all('th')[0].get_text().strip()", "_____no_output_____" ] ],
[ [ "In general it is a good idea to always use the `strip()` method to remove such characters (if there is nothing to strip, it will not raise an error).", "_____no_output_____" ], [ "Now let's extract all the dates. We create a `Dates` list to store them and iterate over all the elements:", "_____no_output_____" ] ],
[ [ "Dates = []\n\nDates.append(rows[0].find_all('th')[0].get_text().strip()) # add the column header separately\n\nfor row in rows[1:]: # start from the second table row, because row 0 was already handled above\n    r = row.find_all('td') # find all the td tags in this table row\n    Dates.append(r[0].get_text().strip()) # save the value to our list\n\nDates", "_____no_output_____" ] ],
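[ [ "Since the very same loop is about to be repeated for two more columns, it can be worth wrapping it in a small helper. This is only an optional sketch (the `extract_column` name and its `cast` argument are assumptions, not part of the original assignment); it relies on `rows` still holding the parsed table rows.", "_____no_output_____" ] ],
[ [ "# optional helper: pull a single column out of the parsed rows\ndef extract_column(rows, index, cast=str):\n    values = [rows[0].find_all('th')[index].get_text().strip()] # the column header lives in the first row\n    for row in rows[1:]: # the data rows\n        values.append(cast(row.find_all('td')[index].get_text().strip()))\n    return values\n\n# extract_column(rows, 0) would rebuild the dates, extract_column(rows, 1, float) the yields", "_____no_output_____" ] ],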
[ [ "OK! The next columns we need are the explosion yield and the country. Let's work out where to look for them.", "_____no_output_____" ] ],
[ [ "rows[0]", "_____no_output_____" ] ],
[ [ "We can see that Yield is the second column and Country is the fourth, i.e. indices 1 and 3. Let's collect them into separate lists using the same scheme as for the dates. But first let's check that we counted the positions correctly.", "_____no_output_____" ] ],
[ [ "rows[0].find_all('th')[1]", "_____no_output_____" ], [ "rows[0].find_all('th')[3]", "_____no_output_____" ] ],
[ [ "Looks right. The only thing is, let's not forget to store the numbers as float.", "_____no_output_____" ] ],
[ [ "Yield = []\n\nYield.append(rows[0].find_all('th')[1].get_text().strip()) # add the column header separately\n\nfor row in rows[1:]: # start from the second table row, because row 0 was already handled above\n    r = row.find_all('td') # find all the td tags in this table row\n    Yield.append(float(r[1].get_text().strip())) # convert to float and save the value to our list\n\nYield", "_____no_output_____" ], [ "Country = []\n\nCountry.append(rows[0].find_all('th')[3].get_text().strip()) # add the column header separately\n\nfor row in rows[1:]: # start from the second table row, because row 0 was already handled above\n    r = row.find_all('td') # find all the td tags in this table row\n    Country.append(r[3].get_text().strip()) # save the value to our list (no float conversion needed here)\n\nCountry", "_____no_output_____" ], [ "print(Dates)\nprint(Yield)\nprint(Country)", "['Date (GMT)', 'October 30, 1961', 'December 24, 1962', 'August 5, 1962', 'September 27, 1962', 'September 25, 1962', 'March 1, 1954', 'May 5, 1954', 'October 23, 1961', 'March 26, 1954', 'October 31, 1952', 'August 25, 1962', 'September 19, 1962', 'July 11, 1958', 'June 28, 1958', 'October 30, 1962', 'October 22, 1962', 'June 27, 1962', 'April 25, 1954', 'July 20, 1956', 'October 31, 1961', 'November 6, 1971', 'July 10, 1956', 'August 27, 1962', 'October 6, 1961', 'October 27, 1973', 'November 17, 1976', 'July 11, 1962', 'May 20, 1956', 'August 1, 1958', 'August 12, 1958', 'September 12, 1973', 'May 27, 1956', 'October 14, 1970', 'September 16, 1962', 'June 17, 1967', 'September 15, 1962', 'December 25, 1962', 'April 28, 1958', 'October 4, 1961', 'June 10, 1962', 'December 27, 1968', 'September 29, 1969', 'June 27, 1973', 'October 6, 1957', 'October 18, 1958', 'October 22, 1958', 'August 20, 1962', 'September 10, 1961', 'August 24, 1968', 'September 27, 1971', 'September 21, 1962', 'November 2, 1974', 'October 14, 1970', 'July 26, 1958', 'July 8, 1956', 'September 8, 1962', 'March 26, 1970', 'November 8, 1957', 'May 13, 1954', 'November 22, 1955', 'September 24, 1957', 'August 22, 1962', 'October 18, 1962', 'February 27, 1958', 'June 14, 1958', 'October 12, 1958', 'October 15, 1958', 'September 20, 1961', 'October 20, 1961', 'November 4, 1961', 'May 11, 1958', 'May 12, 1958', 'July 9, 1962', 'September 18, 1962']\n['Yield (megatons)', 50.0, 24.2, 21.1, 20.0, 19.1, 15.0, 13.5, 12.5, 11.0, 10.4, 10.0, 10.0, 9.3, 8.9, 8.3, 8.2, 7.7, 6.9, 5.0, 5.0, 4.8, 4.5, 4.2, 4.0, 4.0, 4.0, 3.9, 3.8, 3.8, 3.8, 3.8, 3.5, 3.4, 3.3, 3.3, 3.1, 3.1, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.9, 2.9, 2.8, 2.8, 2.7, 2.6, 2.5, 2.4, 2.3, 2.2, 2.0, 1.9, 1.9, 1.9, 1.8, 1.7, 1.6, 1.6, 1.6, 1.6, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.4, 1.4, 1.4, 1.4]\n['Country', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'USA', 'USA', 'Soviet Union', 'USA', 'USA', 'Soviet Union', 'Soviet Union', 'USA', 'USA', 'USA', 'Soviet Union', 'USA', 'USA', 'USA', 'Soviet Union', 'USA', 'USA', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'China', 'USA', 'USA', 'USA', 'USA', 'Soviet Union', 'USA', 'China', 'Soviet Union', 'China', 'Soviet Union', 'Soviet Union', 'UK', 'Soviet Union', 'USA', 'China', 'China', 'China', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'France', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'USA', 'USA', 'Soviet Union', 'USA', 'UK', 'USA', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'USA', 'Soviet Union', 'USA', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'USA', 'USA', 'USA', 'Soviet Union']\n" ] ],
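[ [ "A closing aside that is not part of the original exercise: once the three lists exist, they can be combined into a single table, which makes further analysis easier. The sketch below assumes the `pandas` library is available in the environment; `pandas.read_html` is also worth knowing about, since it can often parse wiki tables like this one in a single call.", "_____no_output_____" ] ],
[ [ "# optional: combine the extracted columns into a DataFrame (assumes pandas is installed)\nimport pandas as pd\n\ntests = pd.DataFrame({\n    Dates[0]: Dates[1:],     # 'Date (GMT)'\n    Yield[0]: Yield[1:],     # 'Yield (megatons)'\n    Country[0]: Country[1:]  # 'Country'\n})\ntests.head()", "_____no_output_____" ] ],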
Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'Soviet Union', 'USA', 'USA', 'USA', 'Soviet Union']\n" ] ], [ [ "# Пример решения задания 2", "_____no_output_____" ], [ "1. Напишите функцию, которая берет аргументом название страны и возвращает (return) среднюю мощность взрыва для этой страны (нужно сложить все значения из колонки 'Yield (megatons)', которым соответствует страна, например, США, и раделить на количество этих значений). Для подсчета используйте списки, которые вы извлекли в Задании 1. \n2. Из списка Country оставьте только уникальные значения для стран и запустите вашу функцию в цикле для каждого значения Country. Внутри цикла сделайте следующий вывод \"{название страны}: средняя мощность взрыва {средняя мощность} мегатон\"\n3. Отдельно сохраните в переменную и выведите среднюю мощность взрыва (Yield (megatons) для бомб, которые тестировались в USA.\n4. Отдельно сохраните в переменную и выведите среднюю мощность взрыва (Yield (megatons) для бомб, которые тестировались в Soviet Union.\n5. Сравните эти значения и выведите название страны, для которой средняя мощность взрыва выше.", "_____no_output_____" ] ], [ [ "# 1\ndef average_yield(country):\n yield_sum = 0 # создаем счетчитк, в который будем приплюсовывать мощность каждого испытания в заданной стране\n yield_count = 0 # создаем счетчик, в котором будем хранить количество испытаний\n for idx in range(len(Country)): # запускаем цикл для всех значений индексов списка Country\n if Country[idx] == country: # проверяем, равно ли значение в списке Country стране, для которой вызвана функция\n yield_sum += Yield[idx] # если да, то добавляем мощность взрыва под этим же индексом\n yield_count += 1 # считаем это исптание\n return yield_sum / yield_count # после окончания работы цикла возвращаем среднюю мощность", "_____no_output_____" ], [ "# 2\nfor country in set(Country[1:]): # чтобы оставить только уникальные значения - делаем множество из списка + с помощью среза избавляемся от от заголовка колонки под индексом [0]\n print(country, ': средняя мощность взрыва', average_yield(country), 'мегатон')", "Soviet Union : средняя мощность взрыва 6.557894736842106 мегатон\nFrance : средняя мощность взрыва 2.6 мегатон\nUSA : средняя мощность взрыва 5.255555555555556 мегатон\nUK : средняя мощность взрыва 2.4 мегатон\nChina : средняя мощность взрыва 3.283333333333333 мегатон\n" ], [ "# 3, 4\nyield_ussr = average_yield('Soviet Union')\nyield_usa = average_yield('USA')\n\nprint(yield_ussr, yield_usa)\n\n", "6.557894736842106 5.255555555555556\n" ], [ "# 5\nif yield_ussr > yield_usa:\n print('Soviet Union')\nelse:\n print('USA')", "Soviet Union\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
c5212b6021ca41c7c34a20ee21104f717ab32e75
173,284
ipynb
Jupyter Notebook
docs/beta/notebooks/00_Index.ipynb
leonbett/debuggingbook
ae1fa940c306160429232fbc93a7a7f14b44efb7
[ "MIT" ]
null
null
null
docs/beta/notebooks/00_Index.ipynb
leonbett/debuggingbook
ae1fa940c306160429232fbc93a7a7f14b44efb7
[ "MIT" ]
null
null
null
docs/beta/notebooks/00_Index.ipynb
leonbett/debuggingbook
ae1fa940c306160429232fbc93a7a7f14b44efb7
[ "MIT" ]
null
null
null
160.151571
5,821
0.766874
[ [ [ "# Index", "_____no_output_____" ], [ "## A - E", "_____no_output_____" ], [ "### A\n\n* a chapter with the same name in The Fuzzing Book &mdash; [Mining Function Specifications](DynamicInvariants.ipynb#Mining-Function-Specifications)\n* A Python implementation of grammar-based input reduction &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* a similar chapter in \"The Fuzzing Book\" &mdash; [Reducing Failure-Inducing Inputs](DeltaDebugger.ipynb#Reducing-Failure-Inducing-Inputs)\n* abstract failure-inducing inputs &mdash; [Generalizing Failure Circumstances (A Failing Program)](DDSetDebugger.ipynb#A-Failing-Program)\n* abstract syntax tree &mdash; [Tracing Executions (Exercise 2: Syntax-Based Instrumentation)](Tracer.ipynb#Exercise-2:-Syntax-Based-Instrumentation), [Reducing Failure-Inducing Inputs (Reducing Syntax Trees)](DeltaDebugger.ipynb#Reducing-Syntax-Trees)\n* _actual_ `<assert.h>` header file &mdash; [Asserting Expectations (Assertion Diagnostics)](Assertions.ipynb#Assertion-Diagnostics)\n* Address Sanitizer](http://clang.llvm.org/docs/AddressSanitizer.html) discussed in this chapter was developed at Google; the [paper by Serebryany &mdash; [Asserting Expectations (Background)](Assertions.ipynb#Background)\n* `add_call()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `add_collector()` &mdash; [Statistical Debugging (A Base Class for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging)\n* `add_conditions()` &mdash; [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions)\n* `add_definition()` &mdash; [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `add_dependencies()` &mdash; [Tracking Failure Origins (Setting Variables)](Slicer.ipynb#Setting-Variables)\n* `add_hierarchy()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `add_items_to_ignore()` &mdash; [Statistical Debugging (Error Prevention)](StatisticalDebugger.ipynb#Error-Prevention)\n* `add_statements()` &mdash; [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements)\n* `add_to()` &mdash; [Tracking Failure Origins (Calls and Augmented Assign)](Slicer.ipynb#Calls-and-Augmented-Assign), [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* `advance()` &mdash; [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion)\n* `after_collection()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `allocate()` &mdash; [Asserting Expectations (Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `all_calls()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `all_conditions()` &mdash; [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions)\n* `all_events()` &mdash; [Statistical Debugging (A Base Class 
for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging)\n* `all_fail_events()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `all_functions()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `all_invariants()` &mdash; [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants)\n* `all_metrics()` &mdash; [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Other Metrics)](PerformanceDebugger.ipynb#Other-Metrics)\n* `all_pass_events()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `all_statements()` &mdash; [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements)\n* `all_statements_and_functions()` &mdash; [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements)\n* `all_vars()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `AMBER` &mdash; [Asserting Expectations (Consider Leaving Some Assertions On)](Assertions.ipynb#Consider-Leaving-Some-Assertions-On)\n* `annotate_arg()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types)\n* `annotate_function_ast_with_invariants()` &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `annotate_function_ast_with_types()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Mined Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Mined-Types)\n* `annotate_function_with_invariants()` &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `annotate_function_with_types()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Mined Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Mined-Types)\n* `annotate_invariants()` &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `annotate_types()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Mined Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Mined-Types)\n* `arg()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* `args()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events)\n* `argstring()` &mdash; [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events)\n* `args_test()` &mdash; [Tracking Failure Origins (Excursion: Tracking Parameters)](Slicer.ipynb#Excursion:-Tracking-Parameters)\n* 
`assert_flow()` &mdash; [Tracking Failure Origins (Exercise 5: Flow Assertions)](Slicer.ipynb#Exercise-5:-Flow-Assertions)\n* `assign_command()` &mdash; [How Debuggers Work (Exercise 1: Changing State)](Debugger.ipynb#Exercise-1:-Changing-State)\n* `assign_test()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* AST &mdash; [Tracing Executions (Exercise 2: Syntax-Based Instrumentation)](Tracer.ipynb#Exercise-2:-Syntax-Based-Instrumentation), [Reducing Failure-Inducing Inputs (Reducing Syntax Trees)](DeltaDebugger.ipynb#Reducing-Syntax-Trees)\n* asyncio &mdash; [Repairing Code Automatically (Exercise 5: Parallel Repair)](Repairer.ipynb#Exercise-5:-Parallel-Repair)\n* `augment()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* AutoFix &mdash; [Repairing Code Automatically (Background)](Repairer.ipynb#Background)\n* Automata theory &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `A_Class` class &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy), [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n", "_____no_output_____" ], [ "### B\n\n* `backward_slice()` &mdash; [Tracking Failure Origins (Slices)](Slicer.ipynb#Slices)\n* `BAD_ATTR_INPUT` &mdash; [Generalizing Failure Circumstances (Testing for Generalization)](DDSetDebugger.ipynb#Testing-for-Generalization)\n* `BAD_INPUTS` &mdash; [Generalizing Failure Circumstances (A Failing Program)](DDSetDebugger.ipynb#A-Failing-Program)\n* `bad_input_tree_generalizer()` &mdash; [Generalizing Failure Circumstances (Testing for Generalization)](DDSetDebugger.ipynb#Testing-for-Generalization)\n* `bad_input_tree_mutator()` &mdash; [Generalizing Failure Circumstances (Referencing Subtrees)](DDSetDebugger.ipynb#Referencing-Subtrees)\n* `BAD_INPUT` &mdash; [Generalizing Failure Circumstances (A Failing Program)](DDSetDebugger.ipynb#A-Failing-Program), [Generalizing Failure Circumstances (Parsing)](DDSetDebugger.ipynb#Parsing), [Generalizing Failure Circumstances (Referencing Subtrees)](DDSetDebugger.ipynb#Referencing-Subtrees), [Generalizing Failure Circumstances (Testing for Generalization)](DDSetDebugger.ipynb#Testing-for-Generalization), [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths), [Generalizing Failure Circumstances (Generalizing Arguments)](DDSetDebugger.ipynb#Generalizing-Arguments), [Generalizing Failure Circumstances (Exercise 1: Generalization and Specialization)](DDSetDebugger.ipynb#Exercise-1:-Generalization-and-Specialization)\n* `bar()` &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* `bar` class &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n* `BetterTime` class &mdash; [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion)\n* binary reduction of dependency graphs &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* bisecting &mdash; [Isolating Failure-Inducing Changes (Manual Bisecting)](ChangeDebugger.ipynb#Manual-Bisecting)\n* `BLACK` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `BLOCK_LIST_START` &mdash; [Asserting Expectations 
(Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C)\n* blog post &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* `break_command()` &mdash; [How Debuggers Work (Setting Breakpoints)](Debugger.ipynb#Setting-Breakpoints)\n* `brightness()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `BROWSER` &mdash; [Tracking Bugs (Excursion: Remote Control with Selenium)](Tracking.ipynb#Excursion:-Remote-Control-with-Selenium), [Tracking Bugs (Excursion: Remote Control with Selenium)](Tracking.ipynb#Excursion:-Remote-Control-with-Selenium)\n* bug &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure), [Introduction to Debugging (Lessons Learned)](Intro_Debugging.ipynb#Lessons-Learned)\n* `buggy_square_root_with_postcondition()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* Bugzilla bug database &mdash; [Tracking Bugs (Background)](Tracking.ipynb#Background)\n* `B_Class` class &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n", "_____no_output_____" ], [ "### C\n\n* C-Reduce &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* `CachingCallReducer` class &mdash; [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching)\n* call stack &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `call()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Repeating a Call)](DeltaDebugger.ipynb#Repeating-a-Call)\n* `CallCollector` class &mdash; [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Repeating a Call)](DeltaDebugger.ipynb#Repeating-a-Call), [Reducing Failure-Inducing Inputs (Repeating a Call)](DeltaDebugger.ipynb#Repeating-a-Call)\n* `callee()` &mdash; [Inspecting Call Stacks (Synopsis)](StackInspector.ipynb#Synopsis)\n* `caller()` &mdash; [Inspecting Call Stacks (Synopsis)](StackInspector.ipynb#Synopsis)\n* `caller_frame()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `caller_function()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `caller_globals()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `caller_locals()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `caller_location()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `CallReducer` class &mdash; [Reducing Failure-Inducing Inputs (Testing, 
Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching), [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching)\n* `calls()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `calls_in_our_with_block()` &mdash; [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `CallTracer` class &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `call_generator()` &mdash; [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns)\n* `call_test()` &mdash; [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* `cancel()` &mdash; [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts)\n* `can_cross()` &mdash; [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `can_generalize()` &mdash; [Generalizing Failure Circumstances (Testing for Generalization)](DDSetDebugger.ipynb#Testing-for-Generalization)\n* cause-effect chain &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure)\n* `ChangeCounter` class &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes)\n* `ChangeDebugger` class &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Isolating 
Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `changed_elems()` &mdash; [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements)\n* `changed_elems_by_mapping()` &mdash; [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements)\n* `changed_vars()` &mdash; [Tracing Executions (Tracing Variable Changes)](Tracer.ipynb#Tracing-Variable-Changes)\n* chapter on delta debugging](DeltaDebugger.ipynb), the [chapter on tracing](Tracer.ipynb), and excessively in the [chapter on slicing](Slicer.ipynb). The [official Python `ast` reference](http://docs.python.org/3/library/ast) is complete, but a bit brief; the documentation [\"Green Tree Snakes - the missing Python AST docs\" &mdash; [Repairing Code Automatically (Random Code Mutations)](Repairer.ipynb#Random-Code-Mutations)\n* \"Checking a Large Routine\" &mdash; [Asserting Expectations (Background)](Assertions.ipynb#Background)\n* `check_location()` &mdash; [Tracking Failure Origins (Reading Variables)](Slicer.ipynb#Reading-Variables), [Tracking Failure Origins (Checking Locations)](Slicer.ipynb#Checking-Locations)\n* `check_reproducibility()` &mdash; [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments)\n* `check_time()` &mdash; [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts)\n* `choose_bool_op()` &mdash; [Repairing Code Automatically (Mutating Conditions)](Repairer.ipynb#Mutating-Conditions)\n* `choose_condition()` &mdash; [Repairing Code Automatically (Mutating Conditions)](Repairer.ipynb#Mutating-Conditions)\n* `choose_op()` &mdash; [Repairing Code Automatically (Choosing a Mutation Method)](Repairer.ipynb#Choosing-a-Mutation-Method)\n* `choose_statement()` &mdash; [Repairing Code Automatically (Swapping Statements)](Repairer.ipynb#Swapping-Statements)\n* `classifier()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `ClassifyingDebugger` class &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers), [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `CLASS_COLOR` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names), [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `CLASS_FONT` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names), [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `class_hierarchy()` &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* 
`class_items()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `_class_items()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `class_methods()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `class_methods_string()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `class_set()` &mdash; [Class Diagrams (Getting a Class Tree)](ClassDiagram.ipynb#Getting-a-Class-Tree)\n* `class_tree()` &mdash; [Class Diagrams (Getting a Class Tree)](ClassDiagram.ipynb#Getting-a-Class-Tree)\n* `class_vars()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `class_vars_string()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `clear_read()` &mdash; [Tracking Failure Origins (Checking Locations)](Slicer.ipynb#Checking-Locations)\n* `clock()` &mdash; [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* code snippet from StackOverflow &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `code()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum)\n* `_code()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `code_with_coverage()` &mdash; [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage)\n* `collect()` &mdash; [Reducing Failure-Inducing Inputs (Traversing Syntax Trees)](DeltaDebugger.ipynb#Traversing-Syntax-Trees), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (A Base Class for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging), [Statistical Debugging (Other Events besides Coverage)](StatisticalDebugger.ipynb#Other-Events-besides-Coverage), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Other Metrics)](PerformanceDebugger.ipynb#Other-Metrics)\n* `collectors_without_event()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `collectors_with_event()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `Collector` class &mdash; [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Error Prevention)](StatisticalDebugger.ipynb#Error-Prevention), [Statistical Debugging (Error Prevention)](StatisticalDebugger.ipynb#Error-Prevention)\n* `collect_fail()` &mdash; [Statistical Debugging (Collecting Passing and Failing 
Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `collect_pass()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `color()` &mdash; [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `commands()` &mdash; [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute())\n* `command_method()` &mdash; [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute())\n* commit hash &mdash; [Isolating Failure-Inducing Changes (Accessing Versions)](ChangeDebugger.ipynb#Accessing-Versions)\n* compiler testing &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* `compile_and_run()` &mdash; [Reducing Failure-Inducing Inputs (Reducing Program Code)](DeltaDebugger.ipynb#Reducing-Program-Code)\n* `compile_and_test_ast()` &mdash; [Reducing Failure-Inducing Inputs (Reducing Trees)](DeltaDebugger.ipynb#Reducing-Trees)\n* `compile_and_test_html_markup()` &mdash; [Reducing Failure-Inducing Inputs (Reducing Code Lines)](DeltaDebugger.ipynb#Reducing-Code-Lines)\n* `compile_and_test_html_markup_simple()` &mdash; [Reducing Failure-Inducing Inputs (Reducing Program Code)](DeltaDebugger.ipynb#Reducing-Program-Code)\n* `condition()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `ConditionalTracer` class &mdash; [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing)\n* `ConditionMutator` class &mdash; [Repairing Code Automatically (Mutating Conditions)](Repairer.ipynb#Mutating-Conditions), [Repairing Code Automatically (Mutating Conditions)](Repairer.ipynb#Mutating-Conditions)\n* `ConditionVisitor` class &mdash; [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions)\n* `CONDITION` &mdash; [How Debuggers Work (Watchpoints (\"watch\"))](Debugger.ipynb#Watchpoints-(\"watch\"))\n* context-free grammars &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `continue_command()` &mdash; [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction)\n* `ContinuousSpectrumDebugger` class &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* contract &mdash; [Mining Function Specifications (Beyond Generic Failures)](DynamicInvariants.ipynb#Beyond-Generic-Failures)\n* 
`copy_and_reduce()` &mdash; [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes)\n* `coverage()` &mdash; [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `CoverageCollector` class &mdash; [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage)\n* `covered_functions()` &mdash; [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `create_function()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* crossover &mdash; [Repairing Code Automatically (Crossover)](Repairer.ipynb#Crossover)\n* `crossover()` &mdash; [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `CrossoverError` class &mdash; [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `CrossoverOperator` class &mdash; [Repairing Code Automatically (Crossing Statement Lists)](Repairer.ipynb#Crossing-Statement-Lists), [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs), [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs), [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs), [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `crossover_attr()` &mdash; [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `crossover_branches()` &mdash; [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `cross_bodies()` &mdash; [Repairing Code Automatically (Crossing Statement Lists)](Repairer.ipynb#Crossing-Statement-Lists)\n* `current_repo()` &mdash; [Where the Bugs are (Mining with PyDriller)](ChangeCounter.ipynb#Mining-with-PyDriller)\n* `C_Class` class &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n", "_____no_output_____" ], [ "### D\n\n* DAIKON dynamic invariant detector](https://plse.cs.washington.edu/daikon/) can be considered the mother of function specification miners. Continuously maintained and extended for more than 20 years, it mines likely invariants in the style of this chapter for a variety of languages, including C, C++, C#, Eiffel, F#, Java, Perl, and Visual Basic. On top of the functionality discussed above, it holds a rich catalog of patterns for likely invariants, supports data invariants, can eliminate invariants that are implied by others, and determines statistical confidence to disregard unlikely invariants. 
The corresponding paper \\cite{Ernst2001} is one of the seminal and most-cited papers of Software Engineering. A multitude of works have been published based on DAIKON and detecting invariants; see this [curated list &mdash; [Mining Function Specifications (Background)](DynamicInvariants.ipynb#Background)\n* Daniel Lemire puts it &mdash; [Asserting Expectations (Checking Memory Usage with Memory Sanitizer)](Assertions.ipynb#Checking-Memory-Usage-with-Memory-Sanitizer)\n* data flow &mdash; [Tracking Failure Origins (Data Dependencies)](Slicer.ipynb#Data-Dependencies)\n* `DataTrackerTester` class &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses)\n* `DataTracker` class &mdash; [Tracking Failure Origins (A Data Tracker)](Slicer.ipynb#A-Data-Tracker), [Tracking Failure Origins (A Data Tracker)](Slicer.ipynb#A-Data-Tracker), [Tracking Failure Origins (A Data Tracker)](Slicer.ipynb#A-Data-Tracker), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Tracking Parameters)](Slicer.ipynb#Excursion:-Tracking-Parameters)\n* `DATA_TRACKER` &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses)\n* `dd()` &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* `ddmin()` &mdash; [Reducing Failure-Inducing Inputs (Delta Debugging)](DeltaDebugger.ipynb#Delta-Debugging)\n* `DDSetDebugger` class &mdash; [Generalizing Failure Circumstances (Constructor)](DDSetDebugger.ipynb#Constructor), [Generalizing Failure Circumstances (Generalizing Arguments)](DDSetDebugger.ipynb#Generalizing-Arguments), [Generalizing Failure Circumstances (Generalizing Arguments)](DDSetDebugger.ipynb#Generalizing-Arguments), [Generalizing Failure Circumstances (Fuzzing)](DDSetDebugger.ipynb#Fuzzing)\n* Debugger API](https://developer.mozilla.org/en-US/docs/Tools/Debugger-API) and Google's [chrome.debugger API &mdash; [Tracing Executions (High-Level Debugging Interfaces)](Tracer.ipynb#High-Level-Debugging-Interfaces)\n* debuggers &mdash; [How Debuggers Work (Debuggers)](Debugger.ipynb#Debuggers)\n* `Debugger` class &mdash; [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute()), 
[How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute()), [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute()), [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute()), [How Debuggers Work (Printing Values)](Debugger.ipynb#Printing-Values), [How Debuggers Work (Printing Values)](Debugger.ipynb#Printing-Values), [How Debuggers Work (Listing Source Code)](Debugger.ipynb#Listing-Source-Code), [How Debuggers Work (Setting Breakpoints)](Debugger.ipynb#Setting-Breakpoints), [How Debuggers Work (Deleting Breakpoints)](Debugger.ipynb#Deleting-Breakpoints), [How Debuggers Work (Listings with Benefits)](Debugger.ipynb#Listings-with-Benefits), [How Debuggers Work (Quitting)](Debugger.ipynb#Quitting), [How Debuggers Work (Exercise 1: Changing State)](Debugger.ipynb#Exercise-1:-Changing-State)\n* debugging information &mdash; [Tracing Executions (Low-Level Debugging Interfaces)](Tracer.ipynb#Low-Level-Debugging-Interfaces)\n* `debuggingbook_change_counter()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* `DEBUGGINGBOOK_REMOTE_REPO` &mdash; [Where the Bugs are (Mining with PyDriller)](ChangeCounter.ipynb#Mining-with-PyDriller)\n* `DEBUGGINGBOOK_REPO` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* Decision trees &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `decorator()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions), [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* `default_functions()` &mdash; [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions)\n* `default_items_to_instrument()` &mdash; [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `defined_in()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `DefinitionVisitor` class &mdash; [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `delete()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Repairing Code Automatically (Deleting Statements)](Repairer.ipynb#Deleting-Statements)\n* `delete_command()` &mdash; [How Debuggers Work (Deleting Breakpoints)](Debugger.ipynb#Deleting-Breakpoints)\n* `DELIMITERS` &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n* `DeltaDebugger` class &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging), [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments), [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments), [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments), 
[Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API), [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API), [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API), [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API)\n* `demo()` &mdash; [Tracking Failure Origins (Synopsis)](Slicer.ipynb#Synopsis)\n* `demo4()` &mdash; [Tracking Failure Origins (Exercise 5: Flow Assertions)](Slicer.ipynb#Exercise-5:-Flow-Assertions)\n* `dependencies()` &mdash; [Tracking Failure Origins (Setting Variables)](Slicer.ipynb#Setting-Variables), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* `Dependencies` class &mdash; [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies), [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies), [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (Slices)](Slicer.ipynb#Slices), [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Tracking Failure Origins (Excursion: Diagnostics)](Slicer.ipynb#Excursion:-Diagnostics)\n* `DependencyTrackerTester` class &mdash; [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* `DependencyTracker` class &mdash; [Tracking Failure Origins (Tracking Dependencies)](Slicer.ipynb#Tracking-Dependencies), [Tracking Failure Origins (Reading Variables)](Slicer.ipynb#Reading-Variables), [Tracking Failure Origins (Checking Locations)](Slicer.ipynb#Checking-Locations), [Tracking Failure Origins (Checking Locations)](Slicer.ipynb#Checking-Locations), [Tracking Failure Origins (Setting Variables)](Slicer.ipynb#Setting-Variables), [Tracking Failure Origins (Excursion: Control Dependencies)](Slicer.ipynb#Excursion:-Control-Dependencies), [Tracking Failure Origins (Excursion: Control Dependencies)](Slicer.ipynb#Excursion:-Control-Dependencies), [Tracking Failure Origins (Excursion: Control Dependencies)](Slicer.ipynb#Excursion:-Control-Dependencies), [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns), [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns), [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns), [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments), [Tracking Failure 
Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* derivation tree &mdash; [Generalizing Failure Circumstances (Derivation Trees)](DDSetDebugger.ipynb#Derivation-Trees)\n* description of red-black trees on Wikipedia &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* diagnosis &mdash; [Introduction to Debugging (Fixing the Bug)](Intro_Debugging.ipynb#Fixing-the-Bug)\n* `diff()` &mdash; [Isolating Failure-Inducing Changes (Computing and Applying Patches)](ChangeDebugger.ipynb#Computing-and-Applying-Patches)\n* diff-match-patch library &mdash; [Isolating Failure-Inducing Changes (Synopsis)](ChangeDebugger.ipynb#Synopsis), [Isolating Failure-Inducing Changes (Computing and Applying Patches)](ChangeDebugger.ipynb#Computing-and-Applying-Patches), [Isolating Failure-Inducing Changes (Programmatic Interface)](ChangeDebugger.ipynb#Programmatic-Interface)\n* `DifferenceDebugger` class &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs), [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `DiscreteSpectrumDebugger` class &mdash; [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum)\n* `display_class_hierarchy()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `display_class_node()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `display_class_trees()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `display_legend()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `display_tree()` &mdash; [Generalizing Failure Circumstances (Derivation Trees)](DDSetDebugger.ipynb#Derivation-Trees)\n* `display_versions()` &mdash; [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `docstring()` &mdash; [Class Diagrams (Getting Docs)](ClassDiagram.ipynb#Getting-Docs)\n* `doc_class_methods()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `DOC_INDENT` &mdash; [Class Diagrams (Getting Docs)](ClassDiagram.ipynb#Getting-Docs)\n* Donald J. 
Knuth &mdash; [Debugging Performance Issues (Improving Performance)](PerformanceDebugger.ipynb#Improving-Performance)\n* `do_report()` &mdash; [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events)\n* `draw_dependencies()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `draw_edge()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `drop_shadow()` &mdash; [Tracking Bugs (Excursion: Screenshots with Drop Shadows)](Tracking.ipynb#Excursion:-Screenshots-with-Drop-Shadows)\n* `dump_tree()` &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses)\n* `DynamicMemory` class &mdash; [Asserting Expectations (Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C), [Asserting Expectations (Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C)\n* `D_Class` class &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy), [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n", "_____no_output_____" ], [ "### E\n\n* easyplotly &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes)\n* `elapsed_time()` &mdash; [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* `elem_mapping()` &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n* `elem_size()` &mdash; [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements)\n* _Elitism_ &mdash; [Repairing Code Automatically (Exercise 2: Elitism)](Repairer.ipynb#Exercise-2:-Elitism)\n* `EmbeddedInvariantAnnotator` class &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `EmbeddedInvariantTransformer` class &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `empty()` &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* `__enter__()` &mdash; [Tracing Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class), [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions), [Asserting Expectations (Task 2 – Global Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency), [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Control Dependencies)](Slicer.ipynb#Excursion:-Control-Dependencies), [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Isolating Failure-Inducing 
Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs), [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Error Handling (Catching Errors)](ExpectError.ipynb#Catching-Errors), [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts), [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* entire books about fuzzing &mdash; [Reducing Failure-Inducing Inputs (Why Reducing?)](DeltaDebugger.ipynb#Why-Reducing?)\n* `equalNumberOfBlackNodesOnSubtrees()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* error &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure)\n* `escape()` &mdash; [Class Diagrams (Getting Docs)](ClassDiagram.ipynb#Getting-Docs)\n* `escape_doc()` &mdash; [Class Diagrams (Getting Docs)](ClassDiagram.ipynb#Getting-Docs)\n* `eval_in_context()` &mdash; [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing)\n* `events()` &mdash; [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (Other Events besides Coverage)](StatisticalDebugger.ipynb#Other-Events-besides-Coverage)\n* `events_changed()` &mdash; [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events)\n* `EventTracer` class &mdash; [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events), [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events), [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events)\n* `event_fraction()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `event_str()` &mdash; [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `event_table()` &mdash; [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `event_table_text()` &mdash; [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `evolve()` &mdash; [Repairing Code Automatically (Evolving)](Repairer.ipynb#Evolving)\n* `evolve_middle()` &mdash; [Repairing Code Automatically (Evolution)](Repairer.ipynb#Evolution)\n* `exception()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events)\n* `execute()` &mdash; [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute()), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* execution log &mdash; [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs)\n* `execution_diagram()` &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure)\n* `__exit__()` &mdash; [Tracing 
Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class), [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions), [Asserting Expectations (Task 2 – Global Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency), [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Control Dependencies)](Slicer.ipynb#Excursion:-Control-Dependencies), [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Statistical Debugging (Error Prevention)](StatisticalDebugger.ipynb#Error-Prevention), [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs), [Error Handling (Catching Errors)](ExpectError.ipynb#Catching-Errors), [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts), [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* `expand_criteria()` &mdash; [Tracking Failure Origins (Slices)](Slicer.ipynb#Slices)\n* `ExpectError` class &mdash; [Error Handling (Catching Errors)](ExpectError.ipynb#Catching-Errors)\n* `ExpectTimeout` class &mdash; [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts)\n", "_____no_output_____" ], [ "## F - J", "_____no_output_____" ], [ "### F\n\n* `f()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations), [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements)\n* `fail()` &mdash; [Tracing Executions (Exercise 1: Exception Handling)](Tracer.ipynb#Exercise-1:-Exception-Handling)\n* `failed_fraction()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* failure &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure)\n* `FailureNotReproducedError` class &mdash; [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments), [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests)\n* `fail_collectors()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `FAIL_COLOR` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `fail_source()` &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `fail_test()` &mdash; [Error Handling (Catching Errors)](ExpectError.ipynb#Catching-Errors)\n* `FAIL_VALUE` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `FAIL` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Reducing Failure-Inducing Inputs (Delta Debugging)](DeltaDebugger.ipynb#Delta-Debugging), [Isolating Failure-Inducing Changes (Leveraging Version 
Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories), [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `FALSE_TREE` &mdash; [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes)\n* fault &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure)\n* `features()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `feature_names()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `filter()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* `find_paths()` &mdash; [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths)\n* `FineChangeCounter` class &mdash; [Where the Bugs are (Putting it all Together)](ChangeCounter.ipynb#Putting-it-all-Together)\n* `FineFixCounter` class &mdash; [Where the Bugs are (Exercise 1: Fine-Grained Fixes)](ChangeCounter.ipynb#Exercise-1:-Fine-Grained-Fixes)\n* finite state machines &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* First actual case of bug being found &mdash; [Introduction to Debugging (Debugging Aftermath)](Intro_Debugging.ipynb#Debugging-Aftermath)\n* `fitness()` &mdash; [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `fitness_key()` &mdash; [Repairing Code Automatically (Evolving)](Repairer.ipynb#Evolving)\n* `FixCounter` class &mdash; [Where the Bugs are (Counting Past Fixes)](ChangeCounter.ipynb#Counting-Past-Fixes), [Where the Bugs are (Counting Past Fixes)](ChangeCounter.ipynb#Counting-Past-Fixes)\n* `FONT_NAME` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `foo()` &mdash; [Tracing Executions (Exercise 2: Syntax-Based Instrumentation)](Tracer.ipynb#Exercise-2:-Syntax-Based-Instrumentation), [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations), [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations), [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements), [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy), [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy), [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* formal languages &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `format_call()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Repeating a Call)](DeltaDebugger.ipynb#Repeating-a-Call)\n* `format_exception()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Repeating a Call)](DeltaDebugger.ipynb#Repeating-a-Call)\n* `format_node()` &mdash; [Repairing Code Automatically (Choosing 
Suspicious Statements to Mutate)](Repairer.ipynb#Choosing-Suspicious-Statements-to-Mutate), [Repairing Code Automatically (Helpers)](Repairer.ipynb#Helpers)\n* `format_var()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `free()` &mdash; [Asserting Expectations (Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `from_set()` &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* full-fledged implementation of DDSET](https://rahul.gopinath.org/post/2020/07/15/ddset/) with plenty of details and experiments is available as a Jupyter Notebook. Our implementation follows the [simplified implementation of DDSET, as described by Gopinath &mdash; [Generalizing Failure Circumstances (Background)](DDSetDebugger.ipynb#Background)\n* `fun()` &mdash; [Asserting Expectations (Assertion Diagnostics)](Assertions.ipynb#Assertion-Diagnostics)\n* `funcs_in_our_with_block()` &mdash; [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `function()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `functions_with_invariants()` &mdash; [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations)\n* `function_with_invariants()` &mdash; [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `function_with_invariants_ast()` &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `FUNCTION` &mdash; [How Debuggers Work (Named breakpoints (\"break\"))](Debugger.ipynb#Named-breakpoints-(\"break\"))\n* `fun_1()` &mdash; [Tracking Failure Origins (End of Excursion)](Slicer.ipynb#End-of-Excursion)\n* `fun_2()` &mdash; [Tracking Failure Origins (End of Excursion)](Slicer.ipynb#End-of-Excursion)\n* `fuzz()` &mdash; [Reducing Failure-Inducing Inputs (Why Reducing?)](DeltaDebugger.ipynb#Why-Reducing?), [Generalizing Failure Circumstances (Fuzzing)](DDSetDebugger.ipynb#Fuzzing)\n* fuzzingbook &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `fuzz_args()` &mdash; [Generalizing Failure Circumstances (Fuzzing)](DDSetDebugger.ipynb#Fuzzing)\n* `fuzz_tree()` &mdash; [Generalizing Failure Circumstances (Fuzzing with Patterns)](DDSetDebugger.ipynb#Fuzzing-with-Patterns)\n", "_____no_output_____" ], [ "### G\n\n* GDB, the GNU debugger](https://www.gnu.org/software/gdb/), whose interface in turn goes back to earlier command-line debuggers such as [dbx &mdash; [How Debuggers Work (Background)](Debugger.ipynb#Background)\n* `gen()` &mdash; [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* 
`generalizable_paths()` &mdash; [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths)\n* `generalize()` &mdash; [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths), [Generalizing Failure Circumstances (Generalizing Arguments)](DDSetDebugger.ipynb#Generalizing-Arguments)\n* `generalize_path()` &mdash; [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths)\n* `generic_test()` &mdash; [Reducing Failure-Inducing Inputs (Delta Debugging)](DeltaDebugger.ipynb#Delta-Debugging)\n* `generic_visit()` &mdash; [Reducing Failure-Inducing Inputs (Traversing Syntax Trees)](DeltaDebugger.ipynb#Traversing-Syntax-Trees), [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements), [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions)\n* GenProg &mdash; [Repairing Code Automatically (Background)](Repairer.ipynb#Background)\n* `get()` &mdash; [Tracking Failure Origins (A Data Tracker)](Slicer.ipynb#A-Data-Tracker), [Tracking Failure Origins (Reading Variables)](Slicer.ipynb#Reading-Variables)\n* `__getitem__()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions), [Asserting Expectations (Task 2 – Global Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency)\n* `getsource()` &mdash; [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions)\n* `get_arguments()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `get_output()` &mdash; [Isolating Failure-Inducing Changes (Accessing Versions)](ChangeDebugger.ipynb#Accessing-Versions)\n* `get_subtree()` &mdash; [Generalizing Failure Circumstances (Referencing Subtrees)](DDSetDebugger.ipynb#Referencing-Subtrees)\n* grammars &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `graph()` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `graph_attr()` &mdash; [Generalizing Failure Circumstances (Derivation Trees)](DDSetDebugger.ipynb#Derivation-Trees)\n* `GREEN` &mdash; [Asserting Expectations (Consider Leaving Some Assertions On)](Assertions.ipynb#Consider-Leaving-Some-Assertions-On)\n", "_____no_output_____" ], [ "### H\n\n* hardware breakpoints &mdash; [Tracing Executions (Tracing Binary Executables)](Tracer.ipynb#Tracing-Binary-Executables)\n* hardware watchpoints &mdash; [Tracing Executions (Tracing Binary Executables)](Tracer.ipynb#Tracing-Binary-Executables)\n* HDD &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* `HEADLESS` &mdash; [Tracking Bugs (Excursion: Remote Control with Selenium)](Tracking.ipynb#Excursion:-Remote-Control-with-Selenium)\n* `hello()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `help_command()` &mdash; [How Debuggers Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute()), [How Debuggers 
Work (Excursion: Implementing execute())](Debugger.ipynb#Excursion:-Implementing-execute())\n* Hierarchical Delta Debugging &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* `HitCollector` class &mdash; [Debugging Performance Issues (Other Metrics)](PerformanceDebugger.ipynb#Other-Metrics)\n* `hours()` &mdash; [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Excursion: Checked Getters and Setters in Python)](Assertions.ipynb#Excursion:-Checked-Getters-and-Setters-in-Python), [Asserting Expectations (Excursion: Checked Getters and Setters in Python)](Assertions.ipynb#Excursion:-Checked-Getters-and-Setters-in-Python)\n* `hue()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Statistical Debugging (The Ochiai Metric)](StatisticalDebugger.ipynb#The-Ochiai-Metric)\n", "_____no_output_____" ], [ "### I\n\n* `id()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events)\n* `ignore_location_change()` &mdash; [Tracking Failure Origins (Checking Locations)](Slicer.ipynb#Checking-Locations)\n* `ignore_next_location_change()` &mdash; [Tracking Failure Origins (Checking Locations)](Slicer.ipynb#Checking-Locations)\n* `include()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Counting Past Fixes)](ChangeCounter.ipynb#Counting-Past-Fixes)\n* infection &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure), [Introduction to Debugging (Lessons Learned)](Intro_Debugging.ipynb#Lessons-Learned)\n* `init()` &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching)\n* `initial_population()` &mdash; [Repairing Code Automatically (Repairing)](Repairer.ipynb#Repairing)\n* `__init__()` &mdash; [Tracing Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class), [Tracing Executions (Tracing Variable Changes)](Tracer.ipynb#Tracing-Variable-Changes), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory), [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions), [Asserting Expectations (Task 2 – Global 
Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency), [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies), [Tracking Failure Origins (A Data Tracker)](Slicer.ipynb#A-Data-Tracker), [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values), [Tracking Failure Origins (Tracking Dependencies)](Slicer.ipynb#Tracking-Dependencies), [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching), [Reducing Failure-Inducing Inputs (Traversing Syntax Trees)](DeltaDebugger.ipynb#Traversing-Syntax-Trees), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Coverage)](StatisticalDebugger.ipynb#Collecting-Coverage), [Statistical Debugging (A Base Class for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging), [Statistical Debugging (Other Events besides Coverage)](StatisticalDebugger.ipynb#Other-Events-besides-Coverage), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types), [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions), [Generalizing Failure Circumstances (Mutating the Tree)](DDSetDebugger.ipynb#Mutating-the-Tree), [Generalizing Failure Circumstances (Generalizing Trees)](DDSetDebugger.ipynb#Generalizing-Trees), [Generalizing Failure Circumstances (Constructor)](DDSetDebugger.ipynb#Constructor), [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent), [Debugging Performance Issues (Other Metrics)](PerformanceDebugger.ipynb#Other-Metrics), [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements), [Repairing Code 
Automatically (Mutating Statements)](Repairer.ipynb#Mutating-Statements), [Repairing Code Automatically (Crossing Statement Lists)](Repairer.ipynb#Crossing-Statement-Lists), [Repairing Code Automatically (Excursion: Implementing Repairer)](Repairer.ipynb#Excursion:-Implementing-Repairer), [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions), [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions), [Repairing Code Automatically (Mutating Conditions)](Repairer.ipynb#Mutating-Conditions), [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Error Handling (Catching Errors)](ExpectError.ipynb#Catching-Errors), [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts), [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* `InjectPass` class &mdash; [Tracing Executions (Exercise 2: Syntax-Based Instrumentation)](Tracer.ipynb#Exercise-2:-Syntax-Based-Instrumentation)\n* input grammar in fuzzingbook format &mdash; [Generalizing Failure Circumstances (Synopsis)](DDSetDebugger.ipynb#Synopsis), [Generalizing Failure Circumstances (Synopsis)](DDSetDebugger.ipynb#Synopsis)\n* `insert()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Repairing Code Automatically (Inserting Statements)](Repairer.ipynb#Inserting-Statements)\n* `insert_assertions()` &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `insert_tracer()` &mdash; [Tracing Executions (Efficient Tracing)](Tracer.ipynb#Efficient-Tracing)\n* `instantiate_prop()` &mdash; [Mining Function Specifications (Instantiating Properties)](DynamicInvariants.ipynb#Instantiating-Properties)\n* `instantiate_prop_ast()` &mdash; [Mining Function Specifications (Instantiating Properties)](DynamicInvariants.ipynb#Instantiating-Properties)\n* `instrument()` &mdash; [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* `Instrumenter` class &mdash; [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class)\n* `instrument_call()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments)\n* `interaction_loop()` &mdash; [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction)\n* `InvariantAnnotator` class &mdash; [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Exercise 3: Verbose Invariant 
Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers), [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* invariants &mdash; [Mining Function Specifications (Mining Invariants)](DynamicInvariants.ipynb#Mining-Invariants)\n* `invariants()` &mdash; [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants)\n* `InvariantTracer` class &mdash; [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants), [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants)\n* `INVARIANT_PROPERTIES` &mdash; [Mining Function Specifications (Defining Properties)](DynamicInvariants.ipynb#Defining-Properties), [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants)\n* `in_generator()` &mdash; [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns)\n* ISO 8601 format &mdash; [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs)\n* `is_abstract()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `is_internal()` &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses)\n* `is_internal_error()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `is_overloaded()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `is_public()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `is_reducible()` &mdash; [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments)\n* `is_test()` &mdash; [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions)\n* `is_var()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* its documentation in the Python reference &mdash; [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs)\n", "_____no_output_____" ], [ "### J\n\n* J-Reduce &mdash; [Reducing Failure-Inducing Inputs (Background)](DeltaDebugger.ipynb#Background)\n* Java Debug Interface](https://docs.oracle.com/javase/8/docs/jdk/api/jpda/jdi/) (JDI) is a _high-level interface_ for implementing a debugger (or tracer) on top of Java. 
[This introduction to JDI &mdash; [Tracing Executions (High-Level Debugging Interfaces)](Tracer.ipynb#High-Level-Debugging-Interfaces)\n* `JSON` &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n* `just_x()` &mdash; [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n", "_____no_output_____" ], [ "## L - P", "_____no_output_____" ], [ "### L\n\n* `label()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* language specifications &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `LeftmostNameVisitor` class &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `leftmost_name()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `LINENO` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `LINE` &mdash; [How Debuggers Work (Execute until line (\"until\"))](Debugger.ipynb#Execute-until-line-(\"until\")), [How Debuggers Work (Execute until line (\"until\"))](Debugger.ipynb#Execute-until-line-(\"until\"))\n* `list_command()` &mdash; [How Debuggers Work (Listing Source Code)](Debugger.ipynb#Listing-Source-Code), [How Debuggers Work (Listings with Benefits)](Debugger.ipynb#Listings-with-Benefits)\n* `list_error()` &mdash; [Reducing Failure-Inducing Inputs (Reducing other Collections)](DeltaDebugger.ipynb#Reducing-other-Collections)\n* `list_length()` &mdash; [Mining Function Specifications (A Recursive Function)](DynamicInvariants.ipynb#A-Recursive-Function)\n* `LoadVisitor` class &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `load_names()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `log()` &mdash; [Tracing Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class)\n* `log_tree()` &mdash; [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions)\n* `long_running_test()` &mdash; [Error Handling (Catching Timeouts)](ExpectError.ipynb#Catching-Timeouts)\n", "_____no_output_____" ], [ "### M\n\n* magic &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n* `main()` &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations), [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements)\n* `make_call()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments)\n* `make_data_tracker()` &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* `make_get_data()` &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses)\n* `make_graph()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `make_set_data()` &mdash; [Tracking Failure Origins (Excursion: Tracking 
Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `make_test()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* `make_with()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* malfunction &mdash; [Introduction to Debugging (From Defect to Failure)](Intro_Debugging.ipynb#From-Defect-to-Failure), [Introduction to Debugging (Lessons Learned)](Intro_Debugging.ipynb#Lessons-Learned)\n* `ManagedMemory` class &mdash; [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `map()` &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes)\n* `map_colorscale()` &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes)\n* `map_hoverinfo()` &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Counting Past Fixes)](ChangeCounter.ipynb#Counting-Past-Fixes)\n* `map_node_color()` &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes)\n* `map_node_sizes()` &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes)\n* `map_node_text()` &mdash; [Where the Bugs are (Visualizing Past Changes)](ChangeCounter.ipynb#Visualizing-Past-Changes), [Where the Bugs are (Counting Past Fixes)](ChangeCounter.ipynb#Counting-Past-Fixes)\n* `mark_tracker()` &mdash; [Tracking Bugs (Managing Issues)](Tracking.ipynb#Managing-Issues)\n* Master Mind Board Game &mdash; [Introduction to Debugging (Keep a Log)](Intro_Debugging.ipynb#Keep-a-Log)\n* `maximum()` &mdash; [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `max_args()` &mdash; [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API)\n* `Memory` class &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator)\n* `metavars()` &mdash; [Mining Function Specifications (Extracting Meta-Variables)](DynamicInvariants.ipynb#Extracting-Meta-Variables)\n* `METHOD_COLOR` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `METHOD_FONT` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `method_string()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `metric()` &mdash; [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues 
(Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent), [Debugging Performance Issues (Other Metrics)](PerformanceDebugger.ipynb#Other-Metrics)\n* `MetricCollector` class &mdash; [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent)\n* `MetricDebugger` class &mdash; [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `middle()` &mdash; [Tracking Failure Origins (Dependencies)](Slicer.ipynb#Dependencies), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `middle_checked()` &mdash; [Statistical Debugging (Exercise 1: A Postcondition for Middle)](StatisticalDebugger.ipynb#Exercise-1:-A-Postcondition-for-Middle)\n* `middle_deps()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `middle_failing_testcase()` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites)\n* `MIDDLE_FAILING_TESTCASES` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites)\n* `middle_fitness()` &mdash; [Repairing Code Automatically (Fitness)](Repairer.ipynb#Fitness)\n* `middle_fixed()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `middle_passing_testcase()` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites)\n* `MIDDLE_PASSING_TESTCASES` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites)\n* `MIDDLE_POPULATION` &mdash; [Repairing Code Automatically (Population)](Repairer.ipynb#Population), [Repairing Code Automatically (Evolution)](Repairer.ipynb#Evolution)\n* `middle_sort_of_fixed()` &mdash; [Repairing Code Automatically (Validated Repairs)](Repairer.ipynb#Validated-Repairs)\n* `middle_test()` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites), [Generalizing Failure Circumstances (Middle)](DDSetDebugger.ipynb#Middle), [Repairing Code Automatically (A Test Suite)](Repairer.ipynb#A-Test-Suite)\n* `middle_testcase()` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites)\n* `MIDDLE_TESTS` &mdash; [Statistical Debugging (Using Large Test Suites)](StatisticalDebugger.ipynb#Using-Large-Test-Suites)\n* `middle_tree()` &mdash; [Repairing Code Automatically (Random Code Mutations)](Repairer.ipynb#Random-Code-Mutations)\n* `mine()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* `mine_commit()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* `minutes()` &mdash; [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs)\n* `min_args()` &mdash; [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API)\n* `min_arg_diff()` &mdash; [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API)\n* `min_patches()` &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* MonkeyType &mdash; [Mining Function Specifications 
(Background)](DynamicInvariants.ipynb#Background)\n* Mozilla _Bugzilla_ issue tracker &mdash; [Tracking Bugs (Reporting an Issue)](Tracking.ipynb#Reporting-an-Issue)\n* `mul_with()` &mdash; [Tracking Failure Origins (Calls and Augmented Assign)](Slicer.ipynb#Calls-and-Augmented-Assign)\n* `mutate()` &mdash; [Generalizing Failure Circumstances (Mutating the Tree)](DDSetDebugger.ipynb#Mutating-the-Tree), [Repairing Code Automatically (All Together)](Repairer.ipynb#All-Together)\n* mutation testing &mdash; [Tracking Failure Origins (Assessing Test Quality)](Slicer.ipynb#Assessing-Test-Quality)\n* `myeval()` &mdash; [Reducing Failure-Inducing Inputs (Synopsis)](DeltaDebugger.ipynb#Synopsis)\n* `MyInt` class &mdash; [Tracking Failure Origins (Wrapping Data Objects)](Slicer.ipynb#Wrapping-Data-Objects)\n* Mypy &mdash; [Mining Function Specifications (Static Type Checking)](DynamicInvariants.ipynb#Static-Type-Checking)\n* `mystery()` &mdash; [Reducing Failure-Inducing Inputs (Why Reducing?)](DeltaDebugger.ipynb#Why-Reducing?)\n* `MyTime` class &mdash; [Asserting Expectations (Excursion: Checked Getters and Setters in Python)](Assertions.ipynb#Excursion:-Checked-Getters-and-Setters-in-Python)\n* `my_condition()` &mdash; [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* `my_own_assert()` &mdash; [Asserting Expectations (Assertions)](Assertions.ipynb#Assertions)\n* `my_postcondition()` &mdash; [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* `my_precondition()` &mdash; [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* `my_square_root()` &mdash; [Asserting Expectations (Synopsis)](Assertions.ipynb#Synopsis)\n", "_____no_output_____" ], [ "### N\n\n* `NAME` &mdash; [How Debuggers Work (Synopsis)](Debugger.ipynb#Synopsis), [How Debuggers Work (Printing Values)](Debugger.ipynb#Printing-Values), [How Debuggers Work (Synopsis)](Debugger.ipynb#Synopsis), [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `NDEBUG` &mdash; [Asserting Expectations (Assertion Diagnostics)](Assertions.ipynb#Assertion-Diagnostics), [Asserting Expectations (Assertion Diagnostics)](Assertions.ipynb#Assertion-Diagnostics)\n* Newton–Raphson method &mdash; [Asserting Expectations (Checking Results)](Assertions.ipynb#Checking-Results)\n* `new_issue()` &mdash; [Tracking Bugs (Excursion: Adding Some More Issue Reports)](Tracking.ipynb#Excursion:-Adding-Some-More-Issue-Reports)\n* `new_tree()` &mdash; [Generalizing Failure Circumstances (Creating new Subtrees)](DDSetDebugger.ipynb#Creating-new-Subtrees)\n* `__new__()` &mdash; [Tracking Failure Origins (Wrapping Data Objects)](Slicer.ipynb#Wrapping-Data-Objects)\n* `NoCallError` class &mdash; [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call)\n* `NodeCollector` class &mdash; [Reducing Failure-Inducing Inputs (Traversing Syntax Trees)](DeltaDebugger.ipynb#Traversing-Syntax-Trees)\n* `NodeMarker` class &mdash; [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes)\n* `NodeReducer` class &mdash; [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes), [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes), [Reducing 
Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes), [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes), [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes)\n* `node_attr()` &mdash; [Generalizing Failure Circumstances (Derivation Trees)](DDSetDebugger.ipynb#Derivation-Trees)\n* `NODE_COLOR` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies)\n* `NODE_MAX_LENGTH` &mdash; [Repairing Code Automatically (Helpers)](Repairer.ipynb#Helpers)\n* `node_suspiciousness()` &mdash; [Repairing Code Automatically (Choosing Suspicious Statements to Mutate)](Repairer.ipynb#Choosing-Suspicious-Statements-to-Mutate)\n* `node_to_be_mutated()` &mdash; [Repairing Code Automatically (Choosing Suspicious Statements to Mutate)](Repairer.ipynb#Choosing-Suspicious-Statements-to-Mutate)\n* `NotFailingError` class &mdash; [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments)\n* `NotPassingError` class &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `NUM_WORKERS` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n", "_____no_output_____" ], [ "### O\n\n* `OchiaiDebugger` class &mdash; [Statistical Debugging (The Ochiai Metric)](StatisticalDebugger.ipynb#The-Ochiai-Metric)\n* official Python `ast` reference &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements), [Repairing Code Automatically (Mutating Statements)](Repairer.ipynb#Mutating-Statements)\n* official Python `ast` reference](http://docs.python.org/3/library/ast) for a list of nodes (and some ideas on what to replace them by). 
The documentation [\"Green Tree Snakes - the missing Python AST docs\" &mdash; [Reducing Failure-Inducing Inputs (Exercise 1: Advanced Syntactic Code Reduction)](DeltaDebugger.ipynb#Exercise-1:-Advanced-Syntactic-Code-Reduction)\n* official Python `ast` reference](http://docs.python.org/3/library/ast) is complete, but a bit brief; the documentation [\"Green Tree Snakes - the missing Python AST docs\" &mdash; [Tracing Executions (Exercise 2: Syntax-Based Instrumentation)](Tracer.ipynb#Exercise-2:-Syntax-Based-Instrumentation), [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Reducing Failure-Inducing Inputs (Traversing Syntax Trees)](DeltaDebugger.ipynb#Traversing-Syntax-Trees)\n* `only_fail_events()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `only_pass_events()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* oracles &mdash; [Mining Function Specifications (Checking Specifications)](DynamicInvariants.ipynb#Checking-Specifications)\n* `our_frame()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `our_with_block()` &mdash; [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `overloaded_class_methods()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* overview at Livable Software](https://livablesoftware.com/tools-mine-analyze-github-git-software-data/) gives a great overview of platforms and tools for mining development data. 
One of the most ambitious ones is [GrimoireLab &mdash; [Where the Bugs are (Background)](ChangeCounter.ipynb#Background)\n", "_____no_output_____" ], [ "### P\n\n* `p1()` &mdash; [Repairing Code Automatically (Crossing Statement Lists)](Repairer.ipynb#Crossing-Statement-Lists), [Repairing Code Automatically (Crossover in Action)](Repairer.ipynb#Crossover-in-Action)\n* `p2()` &mdash; [Repairing Code Automatically (Crossing Statement Lists)](Repairer.ipynb#Crossing-Statement-Lists), [Repairing Code Automatically (Crossover in Action)](Repairer.ipynb#Crossover-in-Action)\n* `param()` &mdash; [Tracking Failure Origins (Excursion: Tracking Parameters)](Slicer.ipynb#Excursion:-Tracking-Parameters), [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* `params()` &mdash; [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations)\n* `parentsAreConsistent()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* parse tree &mdash; [Generalizing Failure Circumstances (Parsing)](DDSetDebugger.ipynb#Parsing)\n* `parse()` &mdash; [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions)\n* `parse_type()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types)\n* `passed_fraction()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `password_checker()` &mdash; [Tracking Failure Origins (Verifying Information Flows)](Slicer.ipynb#Verifying-Information-Flows)\n* `pass_collectors()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `PASS_COLOR` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `pass_source()` &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `PASS_TREE` &mdash; [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes)\n* `PASS_VALUE` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `PASS` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Reducing Failure-Inducing Inputs (Delta Debugging)](DeltaDebugger.ipynb#Delta-Debugging), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories), [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `patch()` &mdash; [Isolating Failure-Inducing Changes (Computing and Applying Patches)](ChangeDebugger.ipynb#Computing-and-Applying-Patches)\n* `patches()` &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `patch_string()` &mdash; [Isolating Failure-Inducing Changes (Computing and Applying Patches)](ChangeDebugger.ipynb#Computing-and-Applying-Patches)\n* `percentage()` &mdash; [Statistical Debugging (Discrete 
Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum)\n* `PerformanceDebugger` class &mdash; [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `PerformanceTracer` class &mdash; [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler), [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler), [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler)\n* `POPULATION_SIZE` &mdash; [Repairing Code Automatically (Population)](Repairer.ipynb#Population), [Repairing Code Automatically (Exercise 1: Automated Repair Parameters)](Repairer.ipynb#Exercise-1:-Automated-Repair-Parameters)\n* `postcondition()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `postconditions()` &mdash; [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `precondition()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `preconditions()` &mdash; [Mining Function Specifications (Converting Mined Invariants to Annotations)](DynamicInvariants.ipynb#Converting-Mined-Invariants-to-Annotations), [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `PreconditionTransformer` class &mdash; [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions)\n* `pretty_invariants()` &mdash; [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants)\n* `primes_generator()` &mdash; [Generalizing Failure Circumstances (Referencing Subtrees)](DDSetDebugger.ipynb#Referencing-Subtrees)\n* `print_command()` &mdash; [How Debuggers Work (Printing Values)](Debugger.ipynb#Printing-Values), [How Debuggers Work (Printing Values)](Debugger.ipynb#Printing-Values)\n* `print_debugger_status()` &mdash; [Tracing Executions (Tracing Variable Changes)](Tracer.ipynb#Tracing-Variable-Changes), [Tracing Executions (Exercise 1: Exception Handling)](Tracer.ipynb#Exercise-1:-Exception-Handling)\n* `print_patch()` &mdash; [Isolating Failure-Inducing Changes (Computing and Applying Patches)](ChangeDebugger.ipynb#Computing-and-Applying-Patches)\n* `print_sum()` &mdash; [Mining Function Specifications (Sum of two Numbers)](DynamicInvariants.ipynb#Sum-of-two-Numbers)\n* `process_args()` &mdash; [Reducing Failure-Inducing Inputs (Processing Multiple Arguments)](DeltaDebugger.ipynb#Processing-Multiple-Arguments)\n* profile or cProfile &mdash; [Debugging Performance Issues (Measuring Performance)](PerformanceDebugger.ipynb#Measuring-Performance)\n* profiling &mdash; [Debugging Performance Issues 
(Measuring Performance)](PerformanceDebugger.ipynb#Measuring-Performance)\n* profiling](https://en.wikipedia.org/wiki/Profiling_(computer_programming)) and [performance analysis tools &mdash; [Debugging Performance Issues (Background)](PerformanceDebugger.ipynb#Background)\n* program-repair.org &mdash; [Repairing Code Automatically (Background)](Repairer.ipynb#Background)\n* `PROJECT` &mdash; [Isolating Failure-Inducing Changes (Create a Working Directory)](ChangeDebugger.ipynb#Create-a-Working-Directory)\n* `prop_function()` &mdash; [Mining Function Specifications (Evaluating Properties)](DynamicInvariants.ipynb#Evaluating-Properties)\n* `prop_function_text()` &mdash; [Mining Function Specifications (Evaluating Properties)](DynamicInvariants.ipynb#Evaluating-Properties)\n* ptrace() &mdash; [Tracing Executions (Low-Level Debugging Interfaces)](Tracer.ipynb#Low-Level-Debugging-Interfaces)\n* `public_class_methods()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* PyAnnotate &mdash; [Mining Function Specifications (Background)](DynamicInvariants.ipynb#Background)\n* PyDriller &mdash; [Debugging Performance Issues (Tracing Execution Profiles)](PerformanceDebugger.ipynb#Tracing-Execution-Profiles), [Where the Bugs are (Synopsis)](ChangeCounter.ipynb#Synopsis), [Where the Bugs are (Mining with PyDriller)](ChangeCounter.ipynb#Mining-with-PyDriller), [Where the Bugs are (Synopsis)](ChangeCounter.ipynb#Synopsis)\n* Python abstract syntax tree &mdash; [Repairing Code Automatically (Synopsis)](Repairer.ipynb#Synopsis), [Repairing Code Automatically (Synopsis)](Repairer.ipynb#Synopsis)\n* Python tutorial &mdash; [Introduction to Debugging (Understanding Python Programs)](Intro_Debugging.ipynb#Understanding-Python-Programs)\n", "_____no_output_____" ], [ "## Q - U", "_____no_output_____" ], [ "### Q\n\n* `quit_command()` &mdash; [How Debuggers Work (Quitting)](Debugger.ipynb#Quitting)\n* `quux()` &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* `qux()` &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* `qux` class &mdash; [Where the Bugs are (Determining Changed Elements)](ChangeCounter.ipynb#Determining-Changed-Elements)\n", "_____no_output_____" ], [ "### R\n\n* `random_html()` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `random_id()` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `random_plain()` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `random_string()` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `random_string_noquotes()` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `rank()` &mdash; [Statistical Debugging (Ranking Lines by Suspiciousness)](StatisticalDebugger.ipynb#Ranking-Lines-by-Suspiciousness)\n* `RankingDebugger` class &mdash; [Statistical Debugging (Ranking Lines by Suspiciousness)](StatisticalDebugger.ipynb#Ranking-Lines-by-Suspiciousness)\n* `read()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting 
Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* record+replay \"rr\" debugger &mdash; [How Debuggers Work (Background)](Debugger.ipynb#Background)\n* red-black search tree &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* Red-Black Tree &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `RedBlackNode` class &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `RedBlackTree` class &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `redNodesHaveOnlyBlackChildren()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `reduce()` &mdash; [Repairing Code Automatically (Simplifying)](Repairer.ipynb#Simplifying)\n* `reduce_arg()` &mdash; [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching)\n* `RED` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Consider Leaving Some Assertions On)](Assertions.ipynb#Consider-Leaving-Some-Assertions-On)\n* Regular expressions &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `remove_first_char()` &mdash; [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers), [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* `REMOVE_HTML_FAILING_TESTCASES` &mdash; [Repairing Code Automatically (Removing HTML Markup)](Repairer.ipynb#Removing-HTML-Markup), [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `remove_html_markup()` &mdash; [Introduction to Debugging (Your Task: Remove HTML Markup)](Intro_Debugging.ipynb#Your-Task:-Remove-HTML-Markup), [Introduction to Debugging (A First Fix)](Intro_Debugging.ipynb#A-First-Fix), [Introduction to Debugging (Fixing the Code)](Intro_Debugging.ipynb#Fixing-the-Code), [Introduction to Debugging (Add Assertions)](Intro_Debugging.ipynb#Add-Assertions), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [Asserting Expectations (Partial Checks)](Assertions.ipynb#Partial-Checks), [Asserting Expectations (Partial Checks)](Assertions.ipynb#Partial-Checks), [Isolating Failure-Inducing Changes (Initialize Git)](ChangeDebugger.ipynb#Initialize-Git), [Isolating Failure-Inducing Changes (Initialize Git)](ChangeDebugger.ipynb#Initialize-Git), [Isolating Failure-Inducing Changes (Excursion: Adding More Revisions)](ChangeDebugger.ipynb#Excursion:-Adding-More-Revisions), [Isolating Failure-Inducing Changes (Excursion: Adding More Revisions)](ChangeDebugger.ipynb#Excursion:-Adding-More-Revisions), [Isolating Failure-Inducing Changes (Excursion: Adding More 
Revisions)](ChangeDebugger.ipynb#Excursion:-Adding-More-Revisions), [Isolating Failure-Inducing Changes (Excursion: Adding More Revisions)](ChangeDebugger.ipynb#Excursion:-Adding-More-Revisions), [Isolating Failure-Inducing Changes (Excursion: Adding More Revisions)](ChangeDebugger.ipynb#Excursion:-Adding-More-Revisions), [Isolating Failure-Inducing Changes (End of Excursion)](ChangeDebugger.ipynb#End-of-Excursion), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Generalizing Failure Circumstances (A Failing Program)](DDSetDebugger.ipynb#A-Failing-Program), [Repairing Code Automatically (Removing HTML Markup)](Repairer.ipynb#Removing-HTML-Markup)\n* `remove_html_markup_ampersand()` &mdash; [Debugging Performance Issues (Integrating with Delta Debugging)](PerformanceDebugger.ipynb#Integrating-with-Delta-Debugging)\n* `remove_html_markup_deps()` &mdash; [Tracking Failure Origins (Slices)](Slicer.ipynb#Slices)\n* `remove_html_markup_fixed()` &mdash; [Introduction to Debugging (Use the Most Obvious Fix)](Intro_Debugging.ipynb#Use-the-Most-Obvious-Fix)\n* `remove_html_markup_test()` &mdash; [Repairing Code Automatically (Removing HTML Markup)](Repairer.ipynb#Removing-HTML-Markup)\n* `remove_html_markup_traced()` &mdash; [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs)\n* `remove_html_markup_tree()` &mdash; [Repairing Code Automatically (Removing HTML Markup)](Repairer.ipynb#Removing-HTML-Markup)\n* `remove_html_markup_without_quotes()` &mdash; [Introduction to Debugging (Debugging into Existence)](Intro_Debugging.ipynb#Debugging-into-Existence)\n* `remove_html_markup_with_print()` &mdash; [Introduction to Debugging (Printf Debugging)](Intro_Debugging.ipynb#Printf-Debugging)\n* `remove_html_markup_with_proper_quotes()` &mdash; [Introduction to Debugging (Part 3: Fix the Problem)](Intro_Debugging.ipynb#Part-3:-Fix-the-Problem)\n* `remove_html_markup_with_quote_assert()` &mdash; [Introduction to Debugging (Refuting a Hypothesis)](Intro_Debugging.ipynb#Refuting-a-Hypothesis)\n* `remove_html_markup_with_tag_assert()` &mdash; [Introduction to Debugging (Refining a Hypothesis)](Intro_Debugging.ipynb#Refining-a-Hypothesis)\n* `REMOVE_HTML_PASSING_TESTCASES` &mdash; [Repairing Code Automatically (Removing HTML Markup)](Repairer.ipynb#Removing-HTML-Markup), [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `remove_html_test()` &mdash; [Debugging Performance Issues (Integrating with Delta Debugging)](PerformanceDebugger.ipynb#Integrating-with-Delta-Debugging)\n* `remove_html_testcase()` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `REMOVE_HTML_TESTS` &mdash; [Repairing Code Automatically (Excursion: Creating HTML Test Cases)](Repairer.ipynb#Excursion:-Creating-HTML-Test-Cases)\n* `repair()` &mdash; [Repairing Code Automatically (Repairing)](Repairer.ipynb#Repairing)\n* `Repairer` class &mdash; [Repairing Code Automatically (Excursion: Implementing Repairer)](Repairer.ipynb#Excursion:-Implementing-Repairer), [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions), [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions), [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions), [Repairing Code Automatically (Helper Functions)](Repairer.ipynb#Helper-Functions), [Repairing Code Automatically 
(Running Tests)](Repairer.ipynb#Running-Tests), [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests), [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests), [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions), [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions), [Repairing Code Automatically (Repairing)](Repairer.ipynb#Repairing), [Repairing Code Automatically (Evolving)](Repairer.ipynb#Evolving), [Repairing Code Automatically (Evolving)](Repairer.ipynb#Evolving), [Repairing Code Automatically (Simplifying)](Repairer.ipynb#Simplifying), [Repairing Code Automatically (Simplifying)](Repairer.ipynb#Simplifying)\n* `repOK()` &mdash; [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures), [Asserting Expectations (Task 2 – Global Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency)\n* `repr_dependencies()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `repr_deps()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `_repr_html_()` &mdash; [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum)\n* `_repr_markdown_()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `_repr_svg_()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* `repr_var()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies)\n* `__repr__()` &mdash; [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Tracking Failure Origins (Wrapping Data Objects)](Slicer.ipynb#Wrapping-Data-Objects), [Reducing Failure-Inducing Inputs (Public API)](DeltaDebugger.ipynb#Public-API), [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Ranking Lines by Suspiciousness)](StatisticalDebugger.ipynb#Ranking-Lines-by-Suspiciousness), [Generalizing Failure Circumstances (Generalizing Arguments)](DDSetDebugger.ipynb#Generalizing-Arguments)\n* `reset()` &mdash; [Reducing Failure-Inducing Inputs (Testing, Logging, and 
Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `reset_timer()` &mdash; [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent)\n* `restore()` &mdash; [Tracking Failure Origins (An Instrumenter Base Class)](Slicer.ipynb#An-Instrumenter-Base-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* `ret()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns), [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* `return_value()` &mdash; [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values)\n* `RETURN_VALUE` &mdash; [Mining Function Specifications (Extracting Invariants)](DynamicInvariants.ipynb#Extracting-Invariants)\n* `ret_generator()` &mdash; [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns)\n* `RE_SPACE` &mdash; [Repairing Code Automatically (Choosing a Mutation Method)](Repairer.ipynb#Choosing-a-Mutation-Method)\n* `rootHasNoParent()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `rootIsBlack()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* rubber duck debugging &mdash; [Introduction to Debugging (Rubberducking)](Intro_Debugging.ipynb#Rubberducking)\n* Rubber duck debugging &mdash; [Introduction to Debugging (Rubberducking)](Intro_Debugging.ipynb#Rubberducking)\n* rubberducking &mdash; [Introduction to Debugging (Rubberducking)](Intro_Debugging.ipynb#Rubberducking)\n* `run()` &mdash; [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching)\n* `run_redmine()` &mdash; [Tracking Bugs (Excursion: Starting Redmine)](Tracking.ipynb#Excursion:-Starting-Redmine)\n* `run_tests()` &mdash; [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests)\n* `run_test_set()` &mdash; [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests)\n* `rxdelim()` &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n", "_____no_output_____" ], [ "### S\n\n* `samples()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* scalene &mdash; [Debugging Performance Issues (Measuring Performance)](PerformanceDebugger.ipynb#Measuring-Performance)\n* Scalene &mdash; [Debugging Performance Issues (Sampling Execution Profiles)](PerformanceDebugger.ipynb#Sampling-Execution-Profiles), [Debugging Performance Issues (Background)](PerformanceDebugger.ipynb#Background)\n* scientific method &mdash; [Introduction to Debugging (The Scientific Method)](Intro_Debugging.ipynb#The-Scientific-Method)\n* `screenshot()` &mdash; [Tracking Bugs (Excursion: Screenshots with Drop Shadows)](Tracking.ipynb#Excursion:-Screenshots-with-Drop-Shadows)\n* `search_frame()` &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `search_func()` &mdash; [Inspecting Call Stacks 
(Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `search_superclasses()` &mdash; [Class Diagrams (Getting Methods and Variables)](ClassDiagram.ipynb#Getting-Methods-and-Variables)\n* `second()` &mdash; [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* `seconds()` &mdash; [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs)\n* `seconds_since_midnight()` &mdash; [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion)\n* `SECRET_HASH_DIGEST` &mdash; [Tracking Failure Origins (Verifying Information Flows)](Slicer.ipynb#Verifying-Information-Flows)\n* Selenium](https://www.seleniumhq.org) is a framework for testing Web applications by _automating interaction in the browser_. Selenium provides an API that allows one to launch a Web browser, query the state of the user interface, and interact with individual user interface elements. The Selenium API is available in a number of languages; we use the [Selenium API for Python &mdash; [Tracking Bugs (Excursion: Remote Control with Selenium)](Tracking.ipynb#Excursion:-Remote-Control-with-Selenium)\n* SemFix &mdash; [Repairing Code Automatically (Background)](Repairer.ipynb#Background)\n* `set()` &mdash; [Tracking Failure Origins (A Data Tracker)](Slicer.ipynb#A-Data-Tracker), [Tracking Failure Origins (Setting Variables)](Slicer.ipynb#Setting-Variables)\n* `__setitem__()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions), [Asserting Expectations (Task 1 – Local Consistency)](Assertions.ipynb#Task-1-–-Local-Consistency), [Asserting Expectations (Task 2 – Global Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency)\n* `set_hours()` &mdash; [Asserting Expectations (Invariant Checkers)](Assertions.ipynb#Invariant-Checkers), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion)\n* `SGML` &mdash; [Where the Bugs are (Mapping Elements to Locations)](ChangeCounter.ipynb#Mapping-Elements-to-Locations)\n* `ShadowStorage` class &mdash; [Asserting Expectations (Task 2 – Global Consistency)](Assertions.ipynb#Task-2-–-Global-Consistency)\n* `shape()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `shelve` &mdash; [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions)\n* showast &mdash; [Mining Function Specifications (Excursion: Accessing Function Structure)](DynamicInvariants.ipynb#Excursion:-Accessing-Function-Structure)\n* `show_allocated()` &mdash; [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `show_classifier()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n* `show_contents()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `show_header()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: Dynamic Memory in C)](Assertions.ipynb#Excursion:-Dynamic-Memory-in-C)\n* `show_initialized()` &mdash; [Asserting 
Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `show_sep()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator)\n* sibling book on test generation &mdash; [Isolating Failure-Inducing Changes (Changes and Bugs)](ChangeDebugger.ipynb#Changes-and-Bugs)\n* `simple_call_string()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `SIMPLE_HTML_GRAMMAR` &mdash; [Generalizing Failure Circumstances (Parsing)](DDSetDebugger.ipynb#Parsing)\n* `SKIP_LIST` &mdash; [Repairing Code Automatically (Applying Crossover on Programs)](Repairer.ipynb#Applying-Crossover-on-Programs)\n* `Slicer` class &mdash; [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* slicing criteria &mdash; [Tracking Failure Origins (Slices)](Slicer.ipynb#Slices)\n* `slider()` &mdash; [How Debuggers Work (Part 3: Graphical User Interface)](Debugger.ipynb#Part-3:-Graphical-User-Interface)\n* `some_extreme_function()` &mdash; [Tracing Executions (Efficient Tracing)](Tracer.ipynb#Efficient-Tracing)\n* `some_long_running_function()` &mdash; [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* `some_obscure_function()` &mdash; [Asserting Expectations (Assertions and Documentation)](Assertions.ipynb#Assertions-and-Documentation)\n* `_source()` &mdash; [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies)\n* `source()` &mdash; [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies)\n* `SpectrumDebugger` class &mdash; [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum)\n* `split()` &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* `sq()` &mdash; [Tracking Failure Origins (Excursion: Function Arguments)](Slicer.ipynb#Excursion:-Function-Arguments)\n* `square_root()` &mdash; [Asserting Expectations (Checking Preconditions)](Assertions.ipynb#Checking-Preconditions), [Asserting Expectations (Checking Preconditions)](Assertions.ipynb#Checking-Preconditions), [Asserting Expectations (Checking 
Results)](Assertions.ipynb#Checking-Results), [Asserting Expectations (Checking Results)](Assertions.ipynb#Checking-Results), [Asserting Expectations (Checking Results)](Assertions.ipynb#Checking-Results), [Asserting Expectations (Checking Results)](Assertions.ipynb#Checking-Results), [Mining Function Specifications (Specifications and Assertions)](DynamicInvariants.ipynb#Specifications-and-Assertions), [Mining Function Specifications (Beyond Generic Failures)](DynamicInvariants.ipynb#Beyond-Generic-Failures)\n* `square_root_annotated()` &mdash; [Mining Function Specifications (Getting Types)](DynamicInvariants.ipynb#Getting-Types)\n* `square_root_fixed()` &mdash; [Repairing Code Automatically (Exercise 3: Evolving Values)](Repairer.ipynb#Exercise-3:-Evolving-Values)\n* `square_root_test()` &mdash; [Generalizing Failure Circumstances (Square Root)](DDSetDebugger.ipynb#Square-Root)\n* `square_root_unchecked()` &mdash; [Tracking Failure Origins (Assessing Test Quality)](Slicer.ipynb#Assessing-Test-Quality)\n* `square_root_with_checked_type_annotations()` &mdash; [Mining Function Specifications (Runtime Type Checking)](DynamicInvariants.ipynb#Runtime-Type-Checking)\n* `square_root_with_invariants()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `square_root_with_local_types()` &mdash; [Mining Function Specifications (Exercise 2: Types for Local Variables)](DynamicInvariants.ipynb#Exercise-2:-Types-for-Local-Variables)\n* `square_root_with_postcondition()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `square_root_with_precondition()` &mdash; [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions)\n* `square_root_with_type_annotations()` &mdash; [Mining Function Specifications (Mining Data Types)](DynamicInvariants.ipynb#Mining-Data-Types)\n* `square_root_with_union_type()` &mdash; [Mining Function Specifications (Runtime Type Checking)](DynamicInvariants.ipynb#Runtime-Type-Checking), [Mining Function Specifications (Exercise 1: Union Types)](DynamicInvariants.ipynb#Exercise-1:-Union-Types)\n* `StackInspectorDemo` class &mdash; [Inspecting Call Stacks (Synopsis)](StackInspector.ipynb#Synopsis)\n* `StackInspector` class &mdash; [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `start_redmine()` &mdash; [Tracking Bugs (Excursion: Starting Redmine)](Tracking.ipynb#Excursion:-Starting-Redmine)\n* `start_webdriver()` &mdash; [Tracking Bugs (Excursion: Remote Control with Selenium)](Tracking.ipynb#Excursion:-Remote-Control-with-Selenium)\n* `StatementMutator` class &mdash; [Repairing Code Automatically (Mutating 
Statements)](Repairer.ipynb#Mutating-Statements), [Repairing Code Automatically (Choosing Suspicious Statements to Mutate)](Repairer.ipynb#Choosing-Suspicious-Statements-to-Mutate), [Repairing Code Automatically (Choosing Suspicious Statements to Mutate)](Repairer.ipynb#Choosing-Suspicious-Statements-to-Mutate), [Repairing Code Automatically (Choosing a Mutation Method)](Repairer.ipynb#Choosing-a-Mutation-Method), [Repairing Code Automatically (Swapping Statements)](Repairer.ipynb#Swapping-Statements), [Repairing Code Automatically (Swapping Statements)](Repairer.ipynb#Swapping-Statements), [Repairing Code Automatically (Inserting Statements)](Repairer.ipynb#Inserting-Statements), [Repairing Code Automatically (Deleting Statements)](Repairer.ipynb#Deleting-Statements), [Repairing Code Automatically (Helpers)](Repairer.ipynb#Helpers), [Repairing Code Automatically (All Together)](Repairer.ipynb#All-Together)\n* `StatementVisitor` class &mdash; [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements)\n* `StatisticalDebugger` class &mdash; [Statistical Debugging (A Base Class for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging), [Statistical Debugging (A Base Class for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging), [Statistical Debugging (A Base Class for Statistical Debugging)](StatisticalDebugger.ipynb#A-Base-Class-for-Statistical-Debugging), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table)\n* `STEP_COLOR` &mdash; [Introduction to Debugging (Visualizing Code)](Intro_Debugging.ipynb#Visualizing-Code), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `step_command()` &mdash; [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction)\n* `stop_here()` &mdash; [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction)\n* `Storage` class &mdash; [Asserting Expectations (Exercise 1 – Storage Assertions)](Assertions.ipynb#Exercise-1-–-Storage-Assertions), [Asserting Expectations (Task 1 – Local Consistency)](Assertions.ipynb#Task-1-–-Local-Consistency)\n* `StoreVisitor` class &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `store_names()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `string_error()` &mdash; [Reducing Failure-Inducing Inputs (Reducing Multiple Arguments)](DeltaDebugger.ipynb#Reducing-Multiple-Arguments)\n* `__str__()` &mdash; [Tracking Failure Origins (Excursion: Listing Dependencies)](Slicer.ipynb#Excursion:-Listing-Dependencies), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum)\n* `sum2()` &mdash; [Mining Function Specifications (Avoiding Overspecialization)](DynamicInvariants.ipynb#Avoiding-Overspecialization), [Mining Function Specifications (Synopsis)](DynamicInvariants.ipynb#Synopsis)\n* `sum3()` &mdash; [Mining Function Specifications (Excursion: Handling Multiple Types)](DynamicInvariants.ipynb#Excursion:-Handling-Multiple-Types)\n* `susp()` &mdash; [Statistical Debugging (Ranking Lines by Suspiciousness)](StatisticalDebugger.ipynb#Ranking-Lines-by-Suspiciousness)\n* `suspiciousness()` &mdash; [Statistical Debugging (Discrete 
Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Statistical Debugging (The Ochiai Metric)](StatisticalDebugger.ipynb#The-Ochiai-Metric), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `suspiciousness_func()` &mdash; [Repairing Code Automatically (Mutating Statements)](Repairer.ipynb#Mutating-Statements)\n* `swap()` &mdash; [Repairing Code Automatically (Swapping Statements)](Repairer.ipynb#Swapping-Statements), [Repairing Code Automatically (Mutating Conditions)](Repairer.ipynb#Mutating-Conditions)\n* syntactical structure &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* syntax tree &mdash; [Generalizing Failure Circumstances (Derivation Trees)](DDSetDebugger.ipynb#Derivation-Trees)\n", "_____no_output_____" ], [ "### T\n\n* `T1` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `T2` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `T3` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `TarantulaDebugger` class &mdash; [Statistical Debugging (The Tarantula Metric)](StatisticalDebugger.ipynb#The-Tarantula-Metric)\n* test &mdash; [Introduction to Debugging (Running a Function)](Intro_Debugging.ipynb#Running-a-Function)\n* `test()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Control Dependencies)](Slicer.ipynb#Excursion:-Control-Dependencies), [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching), [Reducing Failure-Inducing Inputs (Testing, Logging, and Caching)](DeltaDebugger.ipynb#Testing,-Logging,-and-Caching), [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging), [Isolating Failure-Inducing Changes (High-Level Interface)](ChangeDebugger.ipynb#High-Level-Interface), [Generalizing Failure Circumstances (Generalizing Arguments)](DDSetDebugger.ipynb#Generalizing-Arguments), [Inspecting Call Stacks (Synopsis)](StackInspector.ipynb#Synopsis)\n* `test_call()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments)\n* `test_debugger_html()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `test_debugger_html_simple()` &mdash; [Statistical Debugging (Collecting Passing and Failing Runs)](StatisticalDebugger.ipynb#Collecting-Passing-and-Failing-Runs)\n* `test_debugger_middle()` &mdash; [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum)\n* `test_math()` &mdash; [Tracking Failure Origins (Calls and Augmented Assign)](Slicer.ipynb#Calls-and-Augmented-Assign)\n* `test_middle_lines()` &mdash; [Repairing Code Automatically (Simplifying)](Repairer.ipynb#Simplifying)\n* `test_patches()` &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `test_reduce()` &mdash; [Repairing Code Automatically 
(Simplifying)](Repairer.ipynb#Simplifying)\n* `test_remove_html_markup()` &mdash; [Isolating Failure-Inducing Changes (A ChangeDebugger class)](ChangeDebugger.ipynb#A-ChangeDebugger-class)\n* `test_remove_html_markup_patches()` &mdash; [Isolating Failure-Inducing Changes (Delta Debugging on Patches)](ChangeDebugger.ipynb#Delta-Debugging-on-Patches)\n* `test_square_root()` &mdash; [Asserting Expectations (Assertions)](Assertions.ipynb#Assertions)\n* `test_tree()` &mdash; [Generalizing Failure Circumstances (Generalizing Trees)](DDSetDebugger.ipynb#Generalizing-Trees)\n* `TEST` &mdash; [Tracking Failure Origins (Setting Variables)](Slicer.ipynb#Setting-Variables)\n* the etymology of the word \"bug\"](http://www.catb.org/~esr/jargon/html/B/bug.html) in [The Jargon File](http://www.catb.org/~esr/jargon/). Also check out the [Wikipedia entry on debugging &mdash; [Introduction to Debugging (Debugging Aftermath)](Intro_Debugging.ipynb#Debugging-Aftermath)\n* The Fuzzing Book &mdash; [Mining Function Specifications (Avoiding Overspecialization)](DynamicInvariants.ipynb#Avoiding-Overspecialization)\n* the fuzzing book &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars), [Generalizing Failure Circumstances (Parsing)](DDSetDebugger.ipynb#Parsing)\n* the GNU command-line debugger (GDB) &mdash; [How Debuggers Work (Exercise 2: More Commands)](Debugger.ipynb#Exercise-2:-More-Commands)\n* \"The state of type hints in Python\" &mdash; [Mining Function Specifications (Background)](DynamicInvariants.ipynb#Background)\n* the Whyline &mdash; [Tracking Failure Origins (Background)](Slicer.ipynb#Background)\n* theory &mdash; [Introduction to Debugging (The Scientific Method)](Intro_Debugging.ipynb#The-Scientific-Method), [Introduction to Debugging (Fixing the Bug)](Intro_Debugging.ipynb#Fixing-the-Bug)\n* this blog post &mdash; [How Debuggers Work (Exercise 1: Changing State)](Debugger.ipynb#Exercise-1:-Changing-State)\n* this discussion in StackOverflow &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* `TimeCollector` class &mdash; [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent)\n* `Timer` class &mdash; [Timer (Measuring Time)](Timer.ipynb#Measuring-Time)\n* `Time` class &mdash; [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Times and Time Bombs)](Assertions.ipynb#Times-and-Time-Bombs), [Asserting Expectations (Invariant Checkers)](Assertions.ipynb#Invariant-Checkers), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion), [Asserting Expectations (End of Excursion)](Assertions.ipynb#End-of-Excursion)\n* `tooltip()` &mdash; [Tracking Failure Origins (Drawing Dependencies)](Slicer.ipynb#Drawing-Dependencies), [Statistical Debugging (Excursion: Printing an Event Table)](StatisticalDebugger.ipynb#Excursion:-Printing-an-Event-Table), [Statistical Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical 
Debugging (Discrete Spectrum)](StatisticalDebugger.ipynb#Discrete-Spectrum), [Statistical Debugging (Continuous Spectrum)](StatisticalDebugger.ipynb#Continuous-Spectrum), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `toplevel_defs()` &mdash; [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `total()` &mdash; [Debugging Performance Issues (Collecting Time Spent)](PerformanceDebugger.ipynb#Collecting-Time-Spent), [Debugging Performance Issues (Visualizing Time Spent)](PerformanceDebugger.ipynb#Visualizing-Time-Spent)\n* `to_set()` &mdash; [Reducing Failure-Inducing Inputs (General Delta Debugging)](DeltaDebugger.ipynb#General-Delta-Debugging)\n* trace &mdash; [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs)\n* `traceit()` &mdash; [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs), [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs), [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs), [Tracing Executions (Tracing Python Programs)](Tracer.ipynb#Tracing-Python-Programs), [Tracing Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class), [Tracing Executions (Accessing Source Code)](Tracer.ipynb#Accessing-Source-Code), [Tracing Executions (Tracing Calls and Returns)](Tracer.ipynb#Tracing-Calls-and-Returns), [Tracing Executions (Tracing Variable Changes)](Tracer.ipynb#Tracing-Variable-Changes), [Tracing Executions (Conditional Tracing)](Tracer.ipynb#Conditional-Tracing), [How Debuggers Work (Debugger Interaction)](Debugger.ipynb#Debugger-Interaction), [Reducing Failure-Inducing Inputs (Collecting a Call)](DeltaDebugger.ipynb#Collecting-a-Call), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Statistical Debugging (Collecting Events)](StatisticalDebugger.ipynb#Collecting-Events), [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls), [Debugging Performance Issues (Building a Profiler)](PerformanceDebugger.ipynb#Building-a-Profiler)\n* `_traceit()` &mdash; [Tracing Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class)\n* `tracemalloc` module &mdash; [Debugging Performance Issues (Exercise 1: Profiling Memory Usage)](PerformanceDebugger.ipynb#Exercise-1:-Profiling-Memory-Usage)\n* `TRACER_CODE` &mdash; [Tracing Executions (Efficient Tracing)](Tracer.ipynb#Efficient-Tracing)\n* `TRACER` &mdash; [Tracing Executions (Efficient Tracing)](Tracer.ipynb#Efficient-Tracing), [Tracing Executions (Efficient Tracing)](Tracer.ipynb#Efficient-Tracing)\n* `Tracer` class &mdash; [Tracing Executions (A Tracer Class)](Tracer.ipynb#A-Tracer-Class), [Tracing Executions (Accessing Source Code)](Tracer.ipynb#Accessing-Source-Code), [Tracing Executions (Tracing Calls and Returns)](Tracer.ipynb#Tracing-Calls-and-Returns), [Tracing Executions (Tracing Variable Changes)](Tracer.ipynb#Tracing-Variable-Changes), [Tracing Executions (Tracing Variable Changes)](Tracer.ipynb#Tracing-Variable-Changes), [Tracing Executions (Exercise 1: Exception Handling)](Tracer.ipynb#Exercise-1:-Exception-Handling)\n* `trace_call()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* `trace_return()` &mdash; [Mining Function Specifications (Tracing Calls)](DynamicInvariants.ipynb#Tracing-Calls)\n* tracing function &mdash; [Tracing Executions (Tracing Python 
Programs)](Tracer.ipynb#Tracing-Python-Programs)\n* `TrackCallTransformer` class &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments)\n* `TrackControlTransformer` class &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* `TrackGetTransformer` class &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses)\n* `TrackParamsTransformer` class &mdash; [Tracking Failure Origins (Excursion: Tracking Parameters)](Slicer.ipynb#Excursion:-Tracking-Parameters)\n* `TrackReturnTransformer` class &mdash; [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values)\n* `TrackSetTransformer` class &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `transform()` &mdash; [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* `transformers()` &mdash; [Tracking Failure Origins (The Slicer Class)](Slicer.ipynb#The-Slicer-Class)\n* `traverse_tree()` &mdash; [Class Diagrams (Getting a Class Tree)](ClassDiagram.ipynb#Getting-a-Class-Tree)\n* `TreeGeneralizer` class &mdash; [Generalizing Failure Circumstances (Generalizing Trees)](DDSetDebugger.ipynb#Generalizing-Trees), [Generalizing Failure Circumstances (Generalizing Trees)](DDSetDebugger.ipynb#Generalizing-Trees), [Generalizing Failure Circumstances (Testing for Generalization)](DDSetDebugger.ipynb#Testing-for-Generalization), [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths), [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths), [Generalizing Failure Circumstances (Generalizable Paths)](DDSetDebugger.ipynb#Generalizable-Paths), [Generalizing Failure Circumstances (Fuzzing with Patterns)](DDSetDebugger.ipynb#Fuzzing-with-Patterns)\n* `treeIsAcyclic()` &mdash; [Asserting Expectations (Large Data Structures)](Assertions.ipynb#Large-Data-Structures)\n* `TreeMutator` class &mdash; [Generalizing Failure Circumstances (Mutating the Tree)](DDSetDebugger.ipynb#Mutating-the-Tree), [Generalizing Failure Circumstances (Referencing Subtrees)](DDSetDebugger.ipynb#Referencing-Subtrees), [Generalizing Failure Circumstances (Creating new Subtrees)](DDSetDebugger.ipynb#Creating-new-Subtrees), [Generalizing Failure Circumstances (Mutating the Tree)](DDSetDebugger.ipynb#Mutating-the-Tree)\n* `true_property_instantiations()` &mdash; [Mining Function Specifications (Checking Invariants)](DynamicInvariants.ipynb#Checking-Invariants)\n* 
Turing machines &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `TypeAnnotator` class &mdash; [Mining Function Specifications (Excursion: A Type Annotator Class)](DynamicInvariants.ipynb#Excursion:-A-Type-Annotator-Class)\n* `typed_function()` &mdash; [Mining Function Specifications (Excursion: A Type Annotator Class)](DynamicInvariants.ipynb#Excursion:-A-Type-Annotator-Class)\n* `typed_functions()` &mdash; [Mining Function Specifications (Excursion: A Type Annotator Class)](DynamicInvariants.ipynb#Excursion:-A-Type-Annotator-Class)\n* `typed_functions_ast()` &mdash; [Mining Function Specifications (Excursion: A Type Annotator Class)](DynamicInvariants.ipynb#Excursion:-A-Type-Annotator-Class)\n* `typed_function_ast()` &mdash; [Mining Function Specifications (Excursion: A Type Annotator Class)](DynamicInvariants.ipynb#Excursion:-A-Type-Annotator-Class)\n* types &mdash; [Mining Function Specifications (Mining Data Types)](DynamicInvariants.ipynb#Mining-Data-Types)\n* `TypeTracer` class &mdash; [Mining Function Specifications (Excursion: A Type Annotator Class)](DynamicInvariants.ipynb#Excursion:-A-Type-Annotator-Class)\n* `TypeTransformer` class &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types), [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types), [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types)\n* `type_string()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Mined Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Mined-Types)\n", "_____no_output_____" ], [ "### U\n\n* universal grammars &mdash; [Generalizing Failure Circumstances (Grammars)](DDSetDebugger.ipynb#Grammars)\n* `unknown()` &mdash; [Class Diagrams (Getting Docs)](ClassDiagram.ipynb#Getting-Docs), [Inspecting Call Stacks (Inspecting Call Stacks)](StackInspector.ipynb#Inspecting-Call-Stacks)\n* `UNRESOLVED` &mdash; [Reducing Failure-Inducing Inputs (Delta Debugging)](DeltaDebugger.ipynb#Delta-Debugging), [Reducing Failure-Inducing Inputs (Reducing Code Lines)](DeltaDebugger.ipynb#Reducing-Code-Lines), [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `update_changes()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* `update_elems()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes), [Where the Bugs are (Putting it all Together)](ChangeCounter.ipynb#Putting-it-all-Together)\n* `update_size()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n* `update_stats()` &mdash; [Where the Bugs are (Counting Changes)](ChangeCounter.ipynb#Counting-Changes)\n", "_____no_output_____" ], [ "## V - Y", "_____no_output_____" ], [ "### V\n\n* Valgrind &mdash; [Asserting Expectations (Checking Memory Usage with Valgrind)](Assertions.ipynb#Checking-Memory-Usage-with-Valgrind)\n* Valgrind](https://valgrind.org) originated as an academic tool which has seen lots of industrial usage. 
A [list of papers &mdash; [Asserting Expectations (Background)](Assertions.ipynb#Background)\n* `validate()` &mdash; [Tracking Failure Origins (A Class for Dependencies)](Slicer.ipynb#A-Class-for-Dependencies), [Tracking Failure Origins (Excursion: Diagnostics)](Slicer.ipynb#Excursion:-Diagnostics), [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests)\n* `ValueCollector` class &mdash; [Statistical Debugging (Other Events besides Coverage)](StatisticalDebugger.ipynb#Other-Events-besides-Coverage)\n* `VALUE` &mdash; [How Debuggers Work (Exercise 1: Changing State)](Debugger.ipynb#Exercise-1:-Changing-State)\n* `var_string()` &mdash; [Class Diagrams (Drawing Class Hierarchy with Method Names)](ClassDiagram.ipynb#Drawing-Class-Hierarchy-with-Method-Names)\n* `VAR` &mdash; [How Debuggers Work (Exercise 1: Changing State)](Debugger.ipynb#Exercise-1:-Changing-State), [Class Diagrams (Getting a Class Hierarchy)](ClassDiagram.ipynb#Getting-a-Class-Hierarchy)\n* `VERSIONS` &mdash; [Isolating Failure-Inducing Changes (Leveraging Version Histories)](ChangeDebugger.ipynb#Leveraging-Version-Histories)\n* `visit()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes), [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes), [Repairing Code Automatically (Choosing a Mutation Method)](Repairer.ipynb#Choosing-a-Mutation-Method)\n* `visit_AnnAssign()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `visit_Assert()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `visit_Assign()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes)\n* `visit_AsyncFor()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* `visit_AsyncFunctionDef()` &mdash; [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values), [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements), [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `visit_AugAssign()` &mdash; [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions)\n* `visit_BoolOp()` &mdash; [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes), [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions)\n* `visit_Call()` &mdash; [Tracking Failure Origins (Excursion: Tracking Calls and Arguments)](Slicer.ipynb#Excursion:-Tracking-Calls-and-Arguments), [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `visit_ClassDef()` &mdash; [Repairing Code Automatically (Picking 
Statements)](Repairer.ipynb#Picking-Statements), [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `visit_Compare()` &mdash; [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes)\n* `visit_comprehension()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* `visit_Expr()` &mdash; [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types)\n* `visit_For()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* `visit_FunctionDef()` &mdash; [Tracing Executions (Exercise 2: Syntax-Based Instrumentation)](Tracer.ipynb#Exercise-2:-Syntax-Based-Instrumentation), [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values), [Tracking Failure Origins (Excursion: Tracking Parameters)](Slicer.ipynb#Excursion:-Tracking-Parameters), [Mining Function Specifications (Excursion: Annotating Functions with Given Types)](DynamicInvariants.ipynb#Excursion:-Annotating-Functions-with-Given-Types), [Mining Function Specifications (Exercise 7: Embedding Invariants as Assertions)](DynamicInvariants.ipynb#Exercise-7:-Embedding-Invariants-as-Assertions), [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements), [Repairing Code Automatically ((Re)defining Functions)](Repairer.ipynb#(Re)defining-Functions)\n* `visit_If()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control), [Reducing Failure-Inducing Inputs (Transforming Nodes)](DeltaDebugger.ipynb#Transforming-Nodes)\n* `visit_Module()` &mdash; [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes), [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements)\n* `visit_Name()` &mdash; [Tracking Failure Origins (Tracking Variable Accesses)](Slicer.ipynb#Tracking-Variable-Accesses), [Tracking Failure Origins (Excursion: Tracking Assignments and Assertions)](Slicer.ipynb#Excursion:-Tracking-Assignments-and-Assertions), [Mining Function Specifications (Extracting Meta-Variables)](DynamicInvariants.ipynb#Extracting-Meta-Variables), [Mining Function Specifications (Instantiating Properties)](DynamicInvariants.ipynb#Instantiating-Properties)\n* `visit_Node()` &mdash; [Reducing Failure-Inducing Inputs (Deleting Nodes)](DeltaDebugger.ipynb#Deleting-Nodes)\n* `visit_node()` &mdash; [Repairing Code Automatically (Picking Statements)](Repairer.ipynb#Picking-Statements)\n* `visit_Return()` &mdash; [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values)\n* `visit_return_or_yield()` &mdash; [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values)\n* `visit_UnaryOp()` &mdash; [Repairing Code Automatically (Collecting Conditions)](Repairer.ipynb#Collecting-Conditions)\n* `visit_While()` &mdash; [Tracking Failure Origins (Excursion: Tracking Control)](Slicer.ipynb#Excursion:-Tracking-Control)\n* `visit_With()` &mdash; [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `visit_Yield()` &mdash; [Tracking Failure Origins (Excursion: Tracking Return 
Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values)\n* `visit_YieldFrom()` &mdash; [Tracking Failure Origins (Excursion: Tracking Return Values)](Slicer.ipynb#Excursion:-Tracking-Return-Values)\n", "_____no_output_____" ], [ "### W\n\n* watchpoints &mdash; [Tracing Executions (Watching Events)](Tracer.ipynb#Watching-Events)\n* web driver &mdash; [Tracking Bugs (Excursion: Remote Control with Selenium)](Tracking.ipynb#Excursion:-Remote-Control-with-Selenium)\n* `weight()` &mdash; [Repairing Code Automatically (Running Tests)](Repairer.ipynb#Running-Tests)\n* `WEIGHT_FAILING` &mdash; [Repairing Code Automatically (Fitness)](Repairer.ipynb#Fitness), [Repairing Code Automatically (Fitness)](Repairer.ipynb#Fitness), [Repairing Code Automatically (Exercise 1: Automated Repair Parameters)](Repairer.ipynb#Exercise-1:-Automated-Repair-Parameters)\n* `WEIGHT_PASSING` &mdash; [Repairing Code Automatically (Fitness)](Repairer.ipynb#Fitness), [Repairing Code Automatically (Fitness)](Repairer.ipynb#Fitness), [Repairing Code Automatically (Fitness)](Repairer.ipynb#Fitness), [Repairing Code Automatically (Exercise 1: Automated Repair Parameters)](Repairer.ipynb#Exercise-1:-Automated-Repair-Parameters)\n* `WithVisitor` class &mdash; [Tracking Failure Origins (Excursion: Implementing Dynamic Instrumentation)](Slicer.ipynb#Excursion:-Implementing-Dynamic-Instrumentation)\n* `with_mysql()` &mdash; [Tracking Bugs (Excursion: Setting up Redmine)](Tracking.ipynb#Excursion:-Setting-up-Redmine)\n* `with_ruby()` &mdash; [Tracking Bugs (Excursion: Setting up Redmine)](Tracking.ipynb#Excursion:-Setting-up-Redmine)\n* `wrapper()` &mdash; [Tracking Failure Origins (Excursion: Calls and Returns)](Slicer.ipynb#Excursion:-Calls-and-Returns), [Mining Function Specifications (Annotating Functions with Pre- and Postconditions)](DynamicInvariants.ipynb#Annotating-Functions-with-Pre--and-Postconditions), [Mining Function Specifications (Exercise 3: Verbose Invariant Checkers)](DynamicInvariants.ipynb#Exercise-3:-Verbose-Invariant-Checkers)\n* `write()` &mdash; [Asserting Expectations (Excursion: A C Memory Model Simulator)](Assertions.ipynb#Excursion:-A-C-Memory-Model-Simulator), [Asserting Expectations (Excursion: Managed Memory)](Assertions.ipynb#Excursion:-Managed-Memory)\n* `write_source()` &mdash; [Isolating Failure-Inducing Changes (Initialize Git)](ChangeDebugger.ipynb#Initialize-Git)\n", "_____no_output_____" ], [ "### X\n\n* `X()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n", "_____no_output_____" ], [ "### Y\n\n* `Y()` &mdash; [Statistical Debugging (Training Classifiers)](StatisticalDebugger.ipynb#Training-Classifiers)\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
c5214cdee552ff6eaf46494eacee4e2ef0398b13
54,581
ipynb
Jupyter Notebook
notebooks/nlp_intent/nlp_intent.ipynb
dexX7/codecentric.AI-bootcamp
b6edbf6b450718bb001aad941266daf3c17ed84e
[ "MIT" ]
22
2018-11-30T14:41:25.000Z
2022-03-09T13:02:43.000Z
notebooks/nlp_intent/nlp_intent.ipynb
dexX7/codecentric.AI-bootcamp
b6edbf6b450718bb001aad941266daf3c17ed84e
[ "MIT" ]
6
2020-03-24T16:59:31.000Z
2022-03-11T23:45:30.000Z
notebooks/nlp_intent/nlp_intent.ipynb
dexX7/codecentric.AI-bootcamp
b6edbf6b450718bb001aad941266daf3c17ed84e
[ "MIT" ]
10
2018-11-30T13:21:01.000Z
2022-03-09T13:02:44.000Z
32.979456
647
0.59779
[ [ [ "# NLP Intent Recognition", "_____no_output_____" ], [ "Hallo und herzlich willkommen zum codecentric.AI bootcamp!\n\nHeute wollen wir uns mit einem fortgeschrittenen Thema aus dem Bereich _natural language processing_, kurz _NLP_, genannt, beschäftigen:\n\n> Wie bringt man Sprachassistenten, Chatbots und ähnlichen Systemen bei, die Absicht eines Nutzers aus seinen Äußerungen zu erkennen?\n\nDieses Problem wird im Englischen allgemein als _intent recognition_ bezeichnet und gehört zu dem ambitionierten Gebiet des _natural language understanding_, kurz _NLU_ genannt. Einen Einstieg in dieses Thema bietet das folgende [Youtube-Video](https://www.youtube.com/watch?v=H_3R8inCOvM):", "_____no_output_____" ] ], [ [ "# lade Video\nfrom IPython.display import IFrame\nIFrame('https://www.youtube.com/embed/H_3R8inCOvM', width=850, height=650)", "_____no_output_____" ] ], [ [ "Zusammen werden wir in diesem Tutorial mit Hilfe der NLU-Bibliothek [Rasa-NLU](https://rasa.com/docs/nlu/) einem WetterBot beibringen, einfache Fragemuster zum Wetter zu verstehen und zu beantworten. Zum Beispiel wird er auf die Fragen\n\n> `\"Wie warm war es 1989?\"`\n\nmit\n\n<img src=\"img/answer-1.svg\" width=\"85%\" align=\"middle\">\n\nund auf\n\n> `\"Welche Temperatur hatten wir in Schleswig-Holstein und in Baden-Württemberg?\"`\n\nmit\n\n<img src=\"img/answer-2.svg\" width=\"85%\" align=\"middle\">\n\nantworten. Der folgende Screencast gibt einen Überblick über das Notebook:", "_____no_output_____" ] ], [ [ "# lade Video\nfrom IPython.display import IFrame\nIFrame('https://www.youtube.com/embed/pVwO4Brs4kY', width=850, height=650)", "_____no_output_____" ] ], [ [ "Damit es gleich richtig losgehen kann, importieren wir noch zwei Standardbibliotheken und vereinbaren das Datenverzeichnis:", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\n\n\nDATA_DIR = 'data'", "_____no_output_____" ] ], [ [ "## Unser Ausgangspunkt", "_____no_output_____" ], [ "Allgemein ist die Aufgabe, aus einer Sprachäußerung die zugrunde liegende Absicht zu erkennen, selbst für Menschen manchmal nicht einfach. 
If a computer is to solve this difficult task, one has to decide what output to expect for a given input, that is, for a given (unstructured) utterance, and hence how intentions are modeled and structured.\n\nA widely used approach to intent recognition is the following:\n\n- every utterance is assigned to a _domain_, i.e. a subject area,\n- for each _domain_ there is a fixed set of _intents_, i.e. a collection of intentions,\n- each intent can be made concrete through _parameters_ and has a number of _slots_ for this purpose, which can be filled with values much like the parameters of a function or the fields of a form.\n\nFor the utterances\n\n> - `\"Wie warm war es 1990 in Berlin?\"`\n> - `\"Welche Temperatur hatten wir in Hessen im Jahr 2018?\"`\n> - `\"Wie komme ich zum Hauptbahnhof?\"`\n\nintent recognition could thus, for example, return the following results:\n\n> - `{'intent': 'Frag_Temperatur', 'slots': {'Ort': 'Berlin', 'Jahr': '1990'}}`\n> - `{'intent': 'Frag_Temperatur', 'slots': {'Ort': 'Hessen', 'Jahr': '2018'}}`\n> - `{'intent': 'Frag_Weg', 'slots': {'Start': None, 'Ziel': 'Hauptbahnhof'}}`\n\nFor Python, a whole range of NLP libraries is available that support intent recognition in one form or another, for example\n\n- [Rasa NLU](https://rasa.com/docs/nlu/) (\"Language Understanding for chatbots and AI assistants\"),\n- [snips](https://snips-nlu.readthedocs.io/en/latest/) (\"Using Voice to Make Technology Disappear\"),\n- [DeepPavlov](http://deeppavlov.ai) (\"an open-source conversational AI library\"),\n- [NLP Architect](http://nlp_architect.nervanasys.com/index.html) by Intel (\"for exploring state-of-the-art deep learning topologies and techniques for natural language processing and natural language understanding\"),\n- [pytext](https://pytext-pytext.readthedocs-hosted.com/en/latest/index.html) by Facebook (\"a deep-learning based NLP modeling framework built on PyTorch\").\n\nIn the following we opt for the library Rasa NLU, because we can conveniently generate extensive training data for it with an open-source tool (chatette). Rasa NLU in turn uses the NLP library [spaCy](https://spacy.io), the machine-learning library [scikit-learn](https://scikit-learn.org/stable/) and the deep-learning library [TensorFlow](https://www.tensorflow.org/).\n", "_____no_output_____" ], [ "## Intent Recognition from Start to Finish with Rasa NLU", "_____no_output_____" ], [ "Let's look at how a language engine for intent recognition can be trained! To begin with, we restrict ourselves to a few intents and little training data and walk through the required steps from start to finish.", "_____no_output_____" ], [ "### Step 1: Describing Intents with Training Data", "_____no_output_____" ], [ "First we have to describe the intents by means of training data. _Rasa NLU_ expects both together in one file, either in the human-friendly [Markdown format](http://markdown.de/) or in the computer-friendly [JSON format](https://de.wikipedia.org/wiki/JavaScript_Object_Notation). 
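To give an impression of the JSON variant as well, here is a minimal sketch of a single training example, written as a Python dict so it can be dumped with `json.dump`. The field names (`rasa_nlu_data`, `common_examples`, `text`, `intent`, `entities`) follow the Rasa NLU training-data schema, but treat the exact layout as an approximation based on the documentation rather than a definitive reference; the character offsets simply locate the marked-up values in the text:

```python
import json

# Sketch of one training example in Rasa NLU's JSON format (schema assumed
# from the Rasa NLU docs); start/end are character offsets into "text".
json_example = {
    "rasa_nlu_data": {
        "common_examples": [
            {
                "text": "Wie warm war es 1900 in Brandenburg",
                "intent": "Frag_Temperatur",
                "entities": [
                    {"start": 4, "end": 8, "value": "warm", "entity": "Eigenschaft"},
                    {"start": 16, "end": 20, "value": "1900", "entity": "Zeit"},
                    {"start": 24, "end": 35, "value": "Brandenburg", "entity": "Ort"},
                ],
            }
        ]
    }
}

print(json.dumps(json_example, ensure_ascii=False, indent=2))
```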
An example of such training data in Markdown format is the following Python string, which we save to the file `intents.md`: ", "_____no_output_____" ] ], [ [ "TRAIN_INTENTS = \"\"\"\n## intent: Frag_Temperatur\n- Wie [warm](Eigenschaft) war es [1900](Zeit) in [Brandenburg](Ort)\n- Wie [kalt](Eigenschaft) war es in [Hessen](Ort) [1900](Zeit)\n- Was war die Temperatur [1977](Zeit) in [Sachsen](Ort)\n\n## intent: Frag_Ort\n- Wo war es [1998](Zeit) am [kältesten](Superlativ:kalt)\n- Finde das [kältesten](Superlativ:kalt) Bundesland im Jahr [2004](Zeit)\n- Wo war es [2010](Zeit) [kälter](Komparativ:kalt) als [1994](Zeit) in [Rheinland-Pfalz](Ort)\n\n## intent: Frag_Zeit\n- Wann war es in [Bayern](Ort) am [kühlsten](Superlativ:kalt)\n- Finde das [kälteste](Superlativ:kalt) Jahr im [Saarland](Ort)\n- Wann war es in [Schleswig-Holstein](Ort) [wärmer](Komparativ:warm) als in [Baden-Württemberg](Ort)\n\n## intent: Ende\n- Ende\n- Auf Wiedersehen\n- Tschuess\n\"\"\"\n\n\nINTENTS_PATH = os.path.join(DATA_DIR, 'intents.md')\n\n\ndef write_file(filename, text):\n    with open(filename, 'w', encoding='utf-8') as file:\n        file.write(text)\n\nwrite_file(INTENTS_PATH, TRAIN_INTENTS)", "_____no_output_____" ] ], [ [ "Here, each intent is first declared in the form\n\n> `## intent: NAME`\n\nwhere `NAME` is to be replaced by the name of the intent. The intent is then described by a list of\nexample utterances. The parameters or slots are marked in the example utterances in the form\n\n> `[WERT](SLOT)`\n\nwhere `SLOT` is the name of the slot and `WERT` is the corresponding part of the utterance.\n", "_____no_output_____" ], [ "### Step 2: Configuring the Language Engine...", "_____no_output_____" ], [ "The language engine of _Rasa NLU_ is designed as a pipeline and is [very flexible to configure](https://rasa.com/docs/nlu/components/#section-pipeline). Two [example configurations](https://rasa.com/docs/nlu/choosing_pipeline/) already ship with Rasa:\n\n- `spacy_sklearn` uses pre-trained word vectors and a [scikit-learn implementation](https://scikit-learn.org/stable/modules/svm.html) of a linear [support-vector machine](https://en.wikipedia.org/wiki/Support-vector_machine) for classification and is recommended for small training sets (<1000). Since this pipeline needs pre-trained word vectors and spaCy, it can only be used for [most Western European languages](https://rasa.com/docs/nlu/languages/#section-languages). Note, however, that version 0.20.1 of scikit-learn and version 0.13.8 of Rasa-NLU are not compatible.\n\n- `tensorflow_embedding` trains embeddings of utterances and of intents into the same vector space for classification and is recommended for larger training sets (>1000). The underlying idea comes from the paper [StarSpace: Embed All The Things!](https://arxiv.org/abs/1709.03856). It is very versatile and, for example, also suited for [question answering](https://en.wikipedia.org/wiki/Question_answering). This pipeline needs no prior knowledge about the language being used, so it can be applied universally, and it can also be trained to recognize several intents within a single utterance.\n\nTo fill the slots, both pipelines use a [Python implementation](http://www.chokkan.org/software/crfsuite/) of [conditional random fields](https://en.wikipedia.org/wiki/Conditional_random_field).\n\nThe pipeline configuration is described by a YAML file. 
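For comparison, the `spacy_sklearn` template, which is not used here because of the version clash mentioned above, could be selected with a configuration string along the following lines. The template name and the German language setting are assumptions based on the Rasa NLU documentation, and this pipeline additionally expects a pre-trained German spaCy model to be installed:

```python
# Hypothetical counterpart to CONFIG_TF below: pick the pre-defined
# spacy_sklearn template with German as the pipeline language.
CONFIG_SPACY = """
language: "de"
pipeline: "spacy_sklearn"
"""
```

Such a string could then be passed to the `train` function defined in step 3 via its `config` parameter.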
Der folgende Python-String entspricht der Konfiguration `tensorflow_embedding`:", "_____no_output_____" ] ], [ [ "CONFIG_TF = \"\"\"\npipeline:\n- name: \"tokenizer_whitespace\"\n- name: \"ner_crf\"\n- name: \"ner_synonyms\"\n- name: \"intent_featurizer_count_vectors\"\n- name: \"intent_classifier_tensorflow_embedding\"\n\"\"\"", "_____no_output_____" ] ], [ [ "### Schritt 3: ...trainieren...", "_____no_output_____" ], [ "Sind die Trainingsdaten und die Konfiguration der Pipeline beisammen, so kann die Sprach-Engine trainiert werden. In der Regel erfolgt dies bei Rasa mit Hilfe eines Kommandozeilen-Interface oder direkt [in Python](https://rasa.com/docs/nlu/python/). Die folgende Funktion `train` erwartet die Konfiguration als Python-String und den Namen der Datei mit den Trainingsdaten und gibt die trainierte Sprach-Engine als Instanz einer `Interpreter`-Klasse zurück:", "_____no_output_____" ] ], [ [ "import rasa_nlu.training_data\nimport rasa_nlu.config\nfrom rasa_nlu.model import Trainer, Interpreter\n\nMODEL_DIR = 'models'\n\ndef train(config=CONFIG_TF, intents_path=INTENTS_PATH):\n config_path = os.path.join(DATA_DIR, 'rasa_config.yml')\n write_file(config_path, config)\n trainer = Trainer(rasa_nlu.config.load(config_path))\n trainer.train(rasa_nlu.training_data.load_data(intents_path))\n return Interpreter.load(trainer.persist(MODEL_DIR))\n\ninterpreter = train()", "_____no_output_____" ] ], [ [ "### Schritt 4: ...und testen!", "_____no_output_____" ], [ "Wir testen nun, ob die Sprach-Engine `interpreter` folgende Test-Äußerungen richtig versteht:", "_____no_output_____" ] ], [ [ "TEST_UTTERANCES = [\n 'Was war die durchschnittliche Temperatur 2004 in Mecklenburg-Vorpommern',\n 'Nenn mir das wärmste Bundesland 2018',\n 'In welchem Jahr war es in Nordrhein-Westfalen heißer als 1990',\n 'Wo war es 2000 am kältesten',\n 'Bis bald',\n]", "_____no_output_____" ] ], [ [ "Die Methode `parse` von `interpreter` erwartet eine Äußerung als Python-String, wendet Intent Recognition an und liefert eine sehr detaillierte Rückgabe:", "_____no_output_____" ] ], [ [ "interpreter.parse(TEST_UTTERANCES[0])", "_____no_output_____" ] ], [ [ "Die Rückgabe umfasst im Wesentlichen\n\n- den Namen des ermittelten Intent sowie eine Sicherheit beziehungsweise Konfidenz zwischen 0 und 1,\n- für jeden ermittelten Parameter die Start- und Endposition in der Äußerung, den Wert und wieder eine Konfidenz,\n- ein Ranking der möglichen Intents nach der Sicherheit/Konfidenz, mit der sie in dieser Äußerung vermutet wurden.\n\nFür eine übersichtlichere Darstellung und leichte Weiterverarbeitung bereiten wir die Rückgabe mit Hilfe der Funktionen `extract_intent` und `extract_confidences` ein wenig auf. 
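Beyond pretty-printing, the confidence value is also useful for guarding a bot against acting on uncertain predictions. The helper below is our own illustrative addition (the function name and threshold are arbitrary) and relies only on the result structure shown above:

```python
# Illustrative fallback logic based on the confidence returned by interpreter.parse().
FALLBACK_THRESHOLD = 0.6  # arbitrary example value

def intent_or_fallback(parsed, threshold=FALLBACK_THRESHOLD):
    """Return the predicted intent name, or None if the classifier is too unsure."""
    intent = parsed.get('intent') or {}
    if intent.get('confidence', 0.0) < threshold:
        return None  # the caller could then ask the user to rephrase
    return intent.get('name')
```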
Anschließend gehen wir unsere Test-Äußerungen durch:", "_____no_output_____" ] ], [ [ "def extract_intent(intent):\n return (intent['intent']['name'] if intent['intent'] else None,\n [(ent['entity'], ent['value']) for ent in intent['entities']])\n\n\ndef extract_confidences(intent):\n return (intent['intent']['confidence'] if intent['intent'] else None,\n [ent['confidence'] for ent in intent['entities']])\n\n\ndef test(interpreter, utterances=TEST_UTTERANCES):\n for utterance in utterances:\n intent = interpreter.parse(utterance)\n print('<', utterance)\n print('>', extract_intent(intent))\n print(' ', extract_confidences(intent))\n print()\n\ntest(interpreter)", "_____no_output_____" ] ], [ [ "Das Ergebnis ist noch nicht ganz überzeugend &mdash; wir haben aber auch nur ganz wenig Trainingsdaten vorgegeben!", "_____no_output_____" ], [ "## Trainingsdaten generieren mit Chatette", "_____no_output_____" ], [ "Für ein erfolgreiches Training brauchen wir also viel mehr Trainingsdaten. Doch fängt man an, weitere Beispiele aufzuschreiben, so fallen einem schnell viele kleine Variationsmöglichkeiten ein, die sich recht frei kombinieren lassen. Zum Beispiel können wir für eine Frage nach der Temperatur in Berlin im Jahr 1990 mit jeder der Phrasen\n> - \"Wie warm war es...\"\n> - \"Wie kalt war es...\"\n> - \"Welche Temperatur hatten wir...\"\n\nbeginnen und dann mit\n\n> - \"...in Berlin 1990\"\n> - \"...1990 in Berlin\"\n\nabschließen, vor \"1990\" noch \"im Jahr\" einfügen und so weiter. Statt alle denkbaren Kombinationen aufzuschreiben, ist es sinnvoller, die Möglichkeiten mit Hilfe von Regeln zu beschreiben und daraus Trainingsdaten generieren zu lassen. Genau das ermöglicht das Python-Tool [chatette](https://github.com/SimGus/Chatette), das wir im Folgenden verwenden. 
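To make the combinatorial idea concrete before handing it over to a dedicated tool: a naive, hand-rolled generator for the example above could simply take the cross product of the building blocks. This is purely illustrative and is not how chatette works internally:

```python
# Naive illustration of combinatorial phrase generation (not chatette itself).
from itertools import product

starts = ["Wie warm war es", "Wie kalt war es", "Welche Temperatur hatten wir"]
ends = ["in Berlin 1990", "1990 in Berlin", "im Jahr 1990 in Berlin"]

phrases = [f"{start} {end}" for start, end in product(starts, ends)]
print(len(phrases))  # 9 combinations from 3 x 3 building blocks
print(phrases[0])    # 'Wie warm war es in Berlin 1990'
```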
Dieses Tool liest Regeln, die einer speziellen Syntax folgen müssen, aus einer Datei aus und erzeugt dann daraus Trainingsdaten für Rasa NLU im JSON-Format.\n", "_____no_output_____" ], [ "### Regeln zur Erzeugung von Trainingsdaten", "_____no_output_____" ], [ "Wir legen im Folgenden erst einen Grundvorrat an Regeln für die Intents `Frag_Temperatur`, `Frag_Ort`, `Frag_Zeit` und `Ende` in einem Python-Dictionary an und erläutern danach genauer, wie die Regeln aufgebaut sind:", "_____no_output_____" ] ], [ [ "RULES = {\n '@[Ort]': (\n 'Brandenburg', 'Baden-Wuerttemberg', 'Bayern', 'Hessen',\n 'Rheinland-Pfalz', 'Schleswig-Holstein', 'Saarland', 'Sachsen',\n ),\n '@[Zeit]': set(map(str, np.random.randint(1891, 2018, size=5))),\n '@[Komparativ]': ('wärmer', 'kälter',),\n '@[Superlativ]': ('wärmsten', 'kältesten',),\n '%[Frag_Temperatur]': ('Wie {warm/kalt} war es ~[zeit_ort]',\n 'Welche Temperatur hatten wir ~[zeit_ort]',\n 'Wie war die Temperatur ~[zeit_ort]',\n ),\n '%[Frag_Ort]': (\n '~[wo_war] es @[Zeit] @[Komparativ] als {@[Zeit]/in @[Ort]}',\n '~[wo_war] es @[Zeit] am @[Superlativ]',\n ),\n '%[Frag_Jahr]': (\n '~[wann_war] es in @[Ort] @[Komparativ] als {@[Zeit]/in @[Ort]}',\n '~[wann_war] es in @[Ort] am @[Superlativ]',\n ),\n '%[Ende]': ('Ende', 'Auf Wiedersehen', 'Tschuess',),\n '~[finde]': ('Sag mir', 'Finde'),\n '~[wie_war]': ('Wie war', '~[finde]',),\n '~[was_war]': ('Was war', '~[finde]',),\n '~[wo_war]': ('Wo war', 'In welchem {Bundesland|Land} war',),\n '~[wann_war]': ('Wann war', 'In welchem Jahr war',),\n '~[zeit_ort]': ('@[Zeit] in @[Ort]', '@[Ort] in @[Zeit]',),\n '~[Bundesland]': ('Land', 'Bundesland',),\n}", "_____no_output_____" ] ], [ [ "Jede Regel besteht aus einem Namen beziehungsweise Platzhalter und einer Menge von Phrasen. Je nachdem, ob der Name die Form\n> `%[NAME]`, `@[NAME]` oder `~[NAME]`\n\nhat, beschreibt die Regel einen\n\n> _Intent_, _Slot_ oder eine _Alternative_\n\nmit der Bezeichnung `NAME`. Jede Phrase kann ihrerseits Platzhalter für Slots und Alternativen erhalten. Diese Platzhalter werden bei der Erzeugung von Trainingsdaten von chatette jeweils durch eine der Phrasen ersetzt, die in der Regel für den jeweiligen Slot beziehungsweise die Alternativen aufgelistet sind. Außerdem können Phrasen\n\n- Alternativen der Form `{_|_|_}`,\n- optionale Teile in der Form `[_?]`\n\nund einige weitere spezielle Konstrukte enthalten. Mehr Details finden sich in der [Syntax-Beschreibung](https://github.com/SimGus/Chatette/wiki/Syntax-specifications) von chatette.\n\n", "_____no_output_____" ], [ "### Erzeugung der Trainingsdaten", "_____no_output_____" ], [ "Die in dem Python-Dictionary kompakt abgelegten Regeln müssen nun für chatette so formatiert werden, dass bei jeder Regel der Name einen neuen Absatz einleitet und anschließend die möglichen Phrasen schön eingerückt Zeile für Zeile aufgelistet werden. Dies leistet die folgende Funktion `format_rules`. Zusätzlich fügt sie eine Vorgabe ein, wieviel Trainingsbeispiele pro Intent erzeugt werden sollen:", "_____no_output_____" ] ], [ [ "def format_rules(rules, train_samples):\n train_str = \"('training':'{}')\".format(train_samples)\n llines = [[name if (name[0] != '%') else name + train_str]\n + [' ' + val for val in rules[name]] + [''] for name in rules]\n return '\\n'.join((l for lines in llines for l in lines))\n", "_____no_output_____" ] ], [ [ "Nun wenden wir chatette an, um die Trainingsdaten zu generieren. 
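Before running the generator, one small illustration of the optional-part syntax mentioned above: an alternative could make the words "im Jahr" optional in front of the year slot. The snippet only extends the `RULES` dictionary from above and has not been validated against chatette's parser, so treat it as a sketch:

```python
# Sketch: an alternative with an optional 'im Jahr' prefix before the year slot.
# The [ ... ?] optional syntax follows the chatette wiki linked above; not validated here.
RULES_EXTENDED = dict(RULES)
RULES_EXTENDED['~[jahr]'] = ('[im Jahr?] @[Zeit]',)
```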
Dafür bietet chatette ein bequemes [Kommandozeilen-Interface](https://github.com/SimGus/Chatette/wiki/Command-line-interface), aber wir verwenden direkt die zu Grunde liegenden Python-Module.\n\nDie folgende Funktion `chatette` erwartet wie `format_rules` ein Python-Dictionary mit Regeln, schreibt diese passend formatiert in eine Datei, löscht etwaige zuvor generierte Trainingsdateien und erzeugt dann den Regeln entsprechend neue Trainingsdaten.", "_____no_output_____" ] ], [ [ "from chatette.adapters import RasaAdapter\nfrom chatette.parsing import Parser\nfrom chatette.generator import Generator\nimport glob\n\nTRAIN_SAMPLES = 400\nCHATETTE_DIR = os.path.join(DATA_DIR, 'chatette')\n\n\ndef chatette(rules=RULES, train_samples=TRAIN_SAMPLES):\n rules_path = os.path.join(DATA_DIR, 'intents.chatette')\n write_file(rules_path, format_rules(rules, train_samples))\n with open(rules_path, 'r') as rule_file:\n parser = Parser(rule_file)\n parser.parse()\n generator = Generator(parser)\n for f in glob.glob(os.path.join(CHATETTE_DIR, '*')):\n os.remove(f)\n RasaAdapter().write(CHATETTE_DIR, list(generator.generate_train()),\n generator.get_entities_synonyms())\n \nchatette(train_samples=400)", "_____no_output_____" ] ], [ [ "### Und nun: neuer Test!", "_____no_output_____" ], [ "Bringen die umfangreicheren Trainingsdaten wirklich eine Verbesserung? Schauen wir's uns an! Um verschiedene Sprach-Engines zu vergleichen, nutzen wir die folgende Funktion:", "_____no_output_____" ] ], [ [ "def train_and_test(config=CONFIG_TF, utterances=TEST_UTTERANCES):\n interpreter = train(config, CHATETTE_DIR)\n test(interpreter, utterances)\n return interpreter\n\ninterpreter = train_and_test()", "_____no_output_____" ] ], [ [ "Hier wurde nur die letzte Äußerung nicht verstanden, aber das ist auch nicht weiter verwunderlich.", "_____no_output_____" ], [ "## Unser kleiner WetterBot", "_____no_output_____" ], [ "Experimentieren macht mehr Spaß, wenn es auch mal zischt und knallt. Oder zumindest irgendeine andere Reaktion erfolgt. Und deswegen bauen wir uns einen kleinen WetterBot, der auf die erkannten Intents reagieren kann. Zuerst schreiben wir dafür eine Eingabe-Verarbeitungs-Ausgabe-Schleife. Diese erwartet als Parameter erstens die Sprach-Engine `interpreter` und zweitens ein Python-Dictionary `handlers`, welches jeder Intent-Bezeichnung einen Handler zuordnet. Der Handler wird dann mit dem erkannten Intent aufgerufen und sollte zurückgeben, ob die Schleife fortgeführt werden soll oder nicht:", "_____no_output_____" ] ], [ [ "def dialog(interpreter, handlers):\n quit = False\n while not quit:\n intent = extract_intent(interpreter.parse(input('>')))\n print('<', intent)\n intent_name = intent[0]\n if intent_name in handlers:\n quit = handlers[intent_name](intent)\n", "_____no_output_____" ] ], [ [ "Wir implementieren gleich beispielhaft einen Handler für den Intent `Frag_Temperatur`und reagieren auf alle anderen Intents mit einer Standard-Antwort:", "_____no_output_____" ] ], [ [ "def message(msg, quit=False):\n print(msg)\n return quit\n\nHANDLERS = { \n 'Ende': lambda intent: message('=> Oh, wie schade. 
Bis bald!', True),\n 'Frag_Zeit': lambda intent: message('=> Das ist eine gute Frage.'),\n 'Frag_Ort': lambda intent: message('=> Dafür wurde ich nicht programmiert.'),\n 'Frag_Temperatur': lambda intent: message('=> Das weiss ich nicht.')\n}\n", "_____no_output_____" ] ], [ [ "Um die Fragen nach den Temperaturen zu beantworten, nutzen wir [Archiv-Daten](ftp://ftp-cdc.dwd.de/pub/CDC/regional_averages_DE/annual/air_temperature_mean/regional_averages_tm_year.txt) des [Deutschen Wetterdienstes](https://www.dwd.de), die wir schon etwas aufbereitet haben. Die Routine `show` gibt die nachgefragten Temperaturdaten je nach Anzahl der angegebenen Jahre und Bundesländer als Liniendiagramm, Balkendiagramm oder in Textform an. Der eigentliche Hander `frag_wert` prüft, ob die angegebenen Jahre und Orte auch zulässig sind und setzt, falls eine der beiden Angaben fehlt, einfach alle Jahre beziehungsweise Bundesländer ein:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom IPython.display import set_matplotlib_formats\n%matplotlib inline\nset_matplotlib_formats('svg')\n\nsns.set()\n\nDATA_PATH = os.path.join(DATA_DIR, 'temperaturen.txt')\ntemperature = pd.read_csv(DATA_PATH, index_col=0, sep=';')\n\ndef show(times, places):\n if (len(places) == 0) and (len(times) == 0):\n print('Keine zulässigen Orte oder Zeiten')\n elif (len(places) == 1) and (len(times) == 1):\n print(temperature.loc[times, places])\n else:\n if (len(places) > 1) and (len(times) == 1):\n temperature.loc[times[0], places].plot.barh()\n if (len(places) == 1) and (len(times) > 1):\n temperature.loc[times, places[0]].plot.line()\n if (len(places) > 1) and (len(times) > 1):\n temperature.loc[times, places].plot.line()\n plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)\n plt.show()\n\ndef frag_temperatur(intent):\n def validate(options, ent_name, fn):\n chosen = [fn(value) for (name, value) in intent[1] if name == ent_name]\n return list(set(options) & set(chosen)) if chosen else options\n places = validate(list(temperature.columns), 'Ort', lambda x:x)\n times = validate(list(temperature.index), 'Zeit', int)\n show(times, places)\n return False\n\nHANDLERS['Frag_Temperatur'] = frag_temperatur", "_____no_output_____" ] ], [ [ "Nun kann der WetterBot getestet werden! Zum Beispiel mit\n\n> \"Wie warm war es in Baden-Württemberg und Sachsen?\"", "_____no_output_____" ] ], [ [ "dialog(interpreter, HANDLERS)", "_____no_output_____" ] ], [ [ "## Intent Recognition selbst gemacht &mdash; ein Bi-LSTM-Netzwerk mit Keras", "_____no_output_____" ], [ "Im Prinzip haben wir nun gesehen, wie sich Intent Recognition mit Hilfe von Rasa NLU recht einfach anwenden lässt. Aber wie funktioniert das ganz genau? 
In diesem zweiten Teil des Notebooks werden wir\n\n - ein bidirektionales rekurrentes Netz, wie es im Video vorgestellt wurde, implementieren,\n - die mit chatette erstellten Trainingsdaten so aufbereiten, dass wir damit das Netz trainieren können,\n\nund sehen, dass das ganz gut klappt und gar nicht so schwer ist!", "_____no_output_____" ], [ "### Intents einlesen und aufbereiten", "_____no_output_____" ], [ "Zuerst lesen wir die Trainings-Daten, die von chatette im JSON-Format ausgegeben in die Date `RASA_INTENTS` geschrieben wurden, aus, und schauen uns das Format der Einträge an:\n", "_____no_output_____" ] ], [ [ "import json\n\nCHATETTE_DIR = os.path.join(DATA_DIR, 'chatette')\nRASA_INTENTS = os.path.join(CHATETTE_DIR, 'output.json')\n\n\ndef load_intents():\n with open(RASA_INTENTS) as intents_file:\n intents = json.load(intents_file)\n return intents['rasa_nlu_data']['common_examples']\n\n\nsample_intent = load_intents()[0]", "_____no_output_____" ] ], [ [ "Wie bereits im [Video](https://www.youtube.com/watch?v=H_3R8inCOvM) erklärt, sind für Intent Recognition zwei Aufgaben zu lösen:\n\n - die _Klassifikation_ des Intent anhand der gegebenen Äußerung und\n - das Füllen der Slots.\n\nDie zweite Aufgabe kann man als _Sequence Tagging_ auffassen &mdash; für jedes Token der Äußerung ist zu bestimmen, ob es den Parameter für einen Slot darstellt oder nicht. Für den Beispiel-Intent\n\n> `{'entities': [{'end': 20, 'entity': 'Zeit', 'start': 16, 'value': '1993'},\n> {'end': 35, 'entity': 'Ort', 'start': 24, 'value': 'Brandenburg'}],\n> 'intent': 'Frag_Temperatur',\n> 'text': 'Wie warm war es 1993 in Brandenburg'}`\n\nwäre die Eingabe für diese beiden Aufgaben also die Token-Folge\n\n> `['Wie', 'warm', 'war', 'es', '1993', 'in', 'Brandenburg']`\n\nund die gewünschte Ausgabe jeweils\n\n> `'Frag_Temperatur'`\n\nbeziehungsweise die Tag-Folge\n\n> `['-', '-', '-', '-', 'Zeit', '-', 'Ort']`\n\nDie folgende Funktion extrahiert aus den geladenen Beispiel-Intents die gewünschte Eingabe und die Ausgaben für diese beiden Aufgaben:", "_____no_output_____" ] ], [ [ "import spacy\nfrom itertools import accumulate\n\nnlp = spacy.load('de_core_news_sm')\n\ndef tokenize(text):\n return [word for word in nlp(text)]\n\nNO_ENTITY = '-'\n\ndef intent_and_sequences(intent):\n def get_tag(offset):\n \"\"\"Returns the tag (+slot name) for token starting at `offset`\"\"\"\n ents = [ent['entity'] for ent in intent['entities'] if ent['start'] == offset]\n return ents[0] if ents else NO_ENTITY\n token = tokenize(intent['text'])\n # `offsets` is the list of starting positions of the token\n offsets = list(accumulate([0,] + [len(t.text_with_ws) for t in token]))\n return (intent['intent'], token, list(map(get_tag, offsets[:-1])))\n\nintent_and_sequences(sample_intent)\n", "_____no_output_____" ] ], [ [ "### Symbolische Daten in numerische Daten umwandeln", "_____no_output_____" ], [ "Die aufbereiteten Intents enthalten nun jeweils\n\n 1. die Folge der Token als \"Eingabe\"\n 2. den Namen des Intent als Ergebnis der Klassifikation und\n 3. die Folge der Slot-Namen als Ergebnis des Sequence Tagging.\n\nDiese kategoriellen Daten müssen wir für die Weiterverarbeitung in numerische Daten umwandeln. Dafür bieten sich\n\n - für 1. Wortvektoren und\n - für 2. und 3. die One-hot-Kodierung an.\n\nAußerdem müssen wir die Eingabe-Folge und Tag-Folge auf eine feste Länge bringen.\n\nBeginnen wir mit der One-hot-Kodierung. 
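As a tiny worked example of what we are aiming for: with three intent names, the one-hot codes are simply the rows of a 3×3 identity matrix (which name gets which row is arbitrary):

```python
# Worked mini-example of one-hot codes for three intent names.
import numpy as np

names = ['Frag_Temperatur', 'Frag_Ort', 'Ende']
codes = np.eye(len(names))          # 3x3 identity matrix
one_hot = dict(zip(names, codes))   # e.g. 'Frag_Ort' -> array([0., 1., 0.])
print(one_hot['Frag_Ort'])
```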
Die folgende Funktion erzeug zu einer gegebenen Menge von Objekten ein Paar von Python-Dictionaries, welche jedem Objekt einen One-hot-Code und umgekehrt jedem Index das entsprechende Objekt zuordnet.\n", "_____no_output_____" ] ], [ [ "def ohe(s):\n codes = np.eye(len(s))\n numerated = list(enumerate(s))\n return ({value: codes[idx] for (idx, value) in numerated},\n {idx: value for (idx, value) in numerated})\n", "_____no_output_____" ] ], [ [ "Die nächste Hilfsfunktion erwartet eine Liste von Elementen und schneidet diese auf eine vorgegebene Länge beziehungsweise füllt sie mit einem vorgegebenen Element auf diese Länge auf.", "_____no_output_____" ] ], [ [ "def fill(items, max_len, filler):\n if len(items) < max_len:\n return items + [filler] * (max_len - len(items))\n else:\n return items[0:max_len]\n", "_____no_output_____" ] ], [ [ "Die Umwandlung der aufbereiteten Intent-Tripel in numerische Daten verpacken wir in einen [scikit-learn-Transformer](https://scikit-learn.org/stable/data_transforms.html), weil während der Umwandlung die One-Hot-Kodierung der Intent-Namen und Slot-Namen gelernt und eventuell später für neue Testdaten wieder gebraucht wird.", "_____no_output_____" ] ], [ [ "from sklearn.base import BaseEstimator, TransformerMixin\n\nMAX_LEN = 20\nVEC_DIM = len(list(nlp(' '))[0].vector)\n\nclass IntentNumerizer(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n intent_names = set((x[0] for x in X))\n self.intents_ohe, self.idx_intents = ohe(intent_names)\n self.nr_intents = len(intent_names)\n tag_lists = list(map(lambda x: set(x[2]), X)) + [[NO_ENTITY]]\n tag_names = frozenset().union(*tag_lists)\n # tag_names = set(())\n self.tags_ohe, self.idx_tags = ohe(tag_names)\n self.nr_tags = len(tag_names)\n return self\n\n def transform_utterance(self, token):\n return np.stack(fill([tok.vector for tok in token], MAX_LEN,\n np.zeros((VEC_DIM))))\n\n def transform_tags(self, tags):\n return np.stack([self.tags_ohe[t] for t in fill(tags, MAX_LEN, NO_ENTITY)]) \n\n def transform(self, X):\n return (np.stack([self.transform_utterance(x[1]) for x in X]),\n np.stack([self.intents_ohe[x[0]] for x in X]),\n np.stack([self.transform_tags(x[2]) for x in X]))\n\n def revert(self, intent_idx, tag_idxs):\n return (self.idx_intents[intent_idx],\n [self.idx_tags[t] for t in tag_idxs])\n", "_____no_output_____" ] ], [ [ "### Keras-Implementierung eines Bi-LSTM-Netzes für Intent Recognition", "_____no_output_____" ], [ "Wir implementieren nun mit Keras eine Netz-Architektur, die in [diesem Artikel]() vorgeschlagen wurde und schematisch in folgendem Diagramm dargestellt ist:", "_____no_output_____" ], [ "<img src=\"img/birnn.svg\" style=\"background:white\" width=\"80%\" align=\"middle\">", "_____no_output_____" ], [ "Hierbei wird\n\n1. die Eingabe, wie bereits erklärt, als Folge von Wortvektoren dargestellt,\n2. diese Eingabe erst durch eine rekurrente Schicht forwärts abgearbeitet,\n3. der Endzustand dieser Schicht als Initialisierung einer sich anschließenden rekurrenten Schicht verwendet, welche die Eingabefolge rückwärts abarbeitet,\n4. der Endzustand dieser Schicht an eine Schicht mit genau so vielen Neuronen, wie es Intent-Klassen gibt, zur Klassifikation des Intent weitergleitet,\n5. die Ausgabe der beiden rekurrenten Schichten für jeden Schritt zusammengefügt und\n6. 
die zusammengefügte Ausgabe jeweils an ein Bündel von so vielen Neuronen, wie es Slot-Arten gibt, zur Klassifikation des Tags des jeweiligen Wortes weitergeleitet.\n\nGenau diesen Aufbau bilden wir nun mit Keras ab, wobei wir die [funktionale API]() benutzen. Als Loss-Funktion verwenden wir jeweils [kategorielle Kreuzentropie](). Für die rekurrenten Schichten verwenden wir [LSTM-Zellen](), auf die wir gleich noch eingehen.", "_____no_output_____" ] ], [ [ "from keras.models import Model\nfrom keras.layers import Input, LSTM, Concatenate, TimeDistributed, Dense\n\nUNITS = 256\n\ndef build_bilstm(input_dim, nr_intents, nr_tags, units=UNITS):\n inputs = Input(shape=(MAX_LEN, input_dim))\n lstm_params = {'units': units, 'return_sequences': True, 'return_state': True}\n fwd = LSTM(**lstm_params)(inputs)\n bwd = LSTM(**lstm_params)(inputs, initial_state=fwd[1:])\n merged = Concatenate()([fwd[0], bwd[0]])\n tags = TimeDistributed(Dense(nr_tags, activation='softmax'))(merged)\n intent = Dense(nr_intents, activation='softmax')(bwd[2])\n model = Model(inputs=inputs, outputs=[intent, tags])\n model.compile(optimizer='Adam' ,loss='categorical_crossentropy')\n return model\n", "_____no_output_____" ] ], [ [ "Schauen wir uns einmal genauer an, wie so eine LSTM-Zelle aufgebaut ist:", "_____no_output_____" ], [ "<img src=\"img/lstm.svg\" style=\"background:white\" width=\"70%\" align=\"middle\">\n", "_____no_output_____" ], [ "Die Bezeichnung 'LSTM' steht für _long short-term memory_ und rührt daher, dass solch eine Zelle neben der Eingabe des aktuellen Schrittes nicht nur die Ausgabe des vorherigen Schrittes, sondern zusätzlich auch einen Speicherwert des vorherigen Schrittes erhält. Nacheinander wird in der LSTM-Zelle dann jeweils anhand der aktuellen Eingabe und der vorherigen Ausgabe\n\n1. in einem _forget gate_ entschieden, wieviel vom alten Speicherwert vergessen werden soll,\n2. in einem _input gate_ entschieden, wieviel von der neuen Eingabe in den neuen Speicherwert aufgenommen werden soll,\n3. in einem _output gate_ aus dem aktuellen Speicher die aktuelle Ausgabe gebildet.", "_____no_output_____" ], [ "### Training und Test des Bi-LSTM-Netzes", "_____no_output_____" ], [ "Schauen wir uns nun an, wie gut das funktioniert! 
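Before assembling everything, for reference: the gate mechanism described in the previous section is usually written as the following update equations (standard LSTM formulation; Keras' `LSTM` layer implements a variant of exactly these, with $\sigma$ the logistic function and $\odot$ element-wise multiplication):

$$
\begin{aligned}
f_t &= \sigma(W_f x_t + U_f h_{t-1} + b_f) \\
i_t &= \sigma(W_i x_t + U_i h_{t-1} + b_i) \\
o_t &= \sigma(W_o x_t + U_o h_{t-1} + b_o) \\
\tilde{c}_t &= \tanh(W_c x_t + U_c h_{t-1} + b_c) \\
c_t &= f_t \odot c_{t-1} + i_t \odot \tilde{c}_t \\
h_t &= o_t \odot \tanh(c_t)
\end{aligned}
$$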
Dazu müssen wir nun alles zusammenfügen und tun das in zwei Schritten.\n\nDie Funktion `train_test_data` erwartet als Eingabe Regeln, wie wir sie für chatette in einem Python-Dictionary gespeichert hatten, und liefert die entsprechend erzeugten Intents in numerisch aufbereiter Form, aufgeteilt in Trainings- und Validierungsdaten, einschließlich des angepassten `IntentNumerizer`zurück.", "_____no_output_____" ] ], [ [ "TRAIN_RATIO = 0.7\n\ndef train_test_data(rules=RULES, train_ratio=TRAIN_RATIO):\n structured_intents = list(map(intent_and_sequences, load_intents()))\n intent_numerizer = IntentNumerizer()\n X, y, Y = intent_numerizer.fit_transform(structured_intents)\n nr_samples = len(y)\n shuffled_indices = np.random.permutation(nr_samples)\n split = int(nr_samples * train_ratio)\n train_indices, test_indices = (shuffled_indices[0:split], shuffled_indices[split:])\n y_train, X_train, Y_train = y[train_indices], X[train_indices], Y[train_indices]\n y_test, X_test, Y_test = y[test_indices], X[test_indices], Y[test_indices]\n return intent_numerizer, X_train, y_train, Y_train, X_test, y_test, Y_test\n", "_____no_output_____" ] ], [ [ "Mit diesen Trainings- und Testdaten trainiert beziehungsweise validiert die folgende Funktion `build_interpreter` nun das von `build_lstm` gebaute neuronale Netz und liefert einen Interpreter-Funktion zurück. Diese erwartet als Eingabe eine Äußerung, transformiert diese anschließend mit dem angepassten `IntentNumerizer` und führt mit dem zuvor trainierten Netz die Intent Recognition durch.", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 128\nEPOCHS = 10\n\ndef build_interpreter(rules=RULES, units=UNITS, batch_size=128, epochs=EPOCHS):\n def interpreter(utterance):\n x = intent_numerizer.transform_utterance(tokenize(utterance))\n y, Y = model.predict(np.stack([x]))\n tag_idxs = np.argmax(Y[0], axis=1)\n intent_idx = np.argmax(y[0])\n return intent_numerizer.revert(intent_idx, tag_idxs)\n\n intent_numerizer, X_train, y_train, Y_train, X_test, y_test, Y_test = train_test_data(rules)\n model = build_bilstm(X_train.shape[2], y_train.shape[1], Y_train.shape[2], units)\n model.fit(x=X_train, y=[y_train, Y_train],\n validation_data=(X_test,[y_test, Y_test]),\n batch_size=batch_size, epochs=epochs)\n return interpreter\n", "_____no_output_____" ] ], [ [ "Und nun sind wir bereit zum Testen!", "_____no_output_____" ] ], [ [ "interpreter = build_interpreter()", "_____no_output_____" ], [ "interpreter('Welche ungefähre Temperatur war 1992 und 2018 in Sachsen')", "_____no_output_____" ] ], [ [ "Und jetzt kannt Du loslegen &mdash; der WetterBot kann noch nicht viel, ist aber nun recht einfach zu trainieren! Und mit der selbstgebauten Intent Recognition wird er bestimmt noch besser! Ein paar Ideen dazu gibt Dir das Notebook mit Aufgaben zu Intent Recognition.\n\n_Viel Spaß und bis bald zu einer neuen Lektion vom codecentric.AI bootcamp!_", "_____no_output_____" ] ] ]
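A natural follow-up experiment, sketched here with our own helper (the function name and the sample utterances are not part of the notebook above), is to measure how often the hand-built interpreter picks the correct intent on a few labelled utterances:

```python
# Illustrative accuracy check for the custom Bi-LSTM interpreter built above.
def intent_accuracy(interpreter, labelled_utterances):
    """labelled_utterances: list of (utterance, expected_intent_name) pairs."""
    hits = sum(1 for text, expected in labelled_utterances
               if interpreter(text)[0] == expected)
    return hits / len(labelled_utterances)

# Example usage with two hand-labelled utterances:
samples = [("Wie warm war es 1990 in Hessen", "Frag_Temperatur"),
           ("Wo war es 2000 am kältesten", "Frag_Ort")]
print(intent_accuracy(interpreter, samples))
```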
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
c52162af28be086c64b3b1ad10d5b2ef981e71b2
68,269
ipynb
Jupyter Notebook
_notebooks/2021-08-01-tutorial-rise-parte-1.ipynb
sebastiandres/blog
614472c7433b5a1ec4c82a8b76e946350a085a69
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-08-01-tutorial-rise-parte-1.ipynb
sebastiandres/blog
614472c7433b5a1ec4c82a8b76e946350a085a69
[ "Apache-2.0" ]
7
2020-03-11T00:50:35.000Z
2022-02-27T11:50:13.000Z
_notebooks/2021-08-01-tutorial-rise-parte-1.ipynb
sebastiandres/blog
614472c7433b5a1ec4c82a8b76e946350a085a69
[ "Apache-2.0" ]
null
null
null
164.503614
55,940
0.900555
[ [ [ "# Tutorial de RISE - parte 1\n> Aspectos básicos para hacer presentaciones interactivas con jupyter notebooks\n\n- featured: false\n- hide: false\n- toc: true\n- badges: true\n- comments: true\n- categories: [jupyter, rise]\n- image: images/preview/rise.gif\n- permalink: /tutorial-rise-1/", "_____no_output_____" ], [ "Esta es la parte 1 de 3 del [tutorial de presentaciones interactivas en jupyter notebook](https://sebastiandres.github.io/blog/tutorial-rise/).", "_____no_output_____" ], [ "## ¿Qué es RISE?\n\nRISE es una extensión a jupyter notebook que, en lugar de desplegar las celdas en una larga página web, las despliega en una presentación usando la librería de javascript [reveal.js](https://revealjs.com/). \n\nSigue siendo una página web (al igual que el notebook), pero las celdas se agrupan en diapositivas.", "_____no_output_____" ], [ "## ¿Qué contenido puedo poner?\n\nPuedes mezclar contenido usando las celdas de markdown y código, según necesites: \n* **Markdown**: texto, latex, imágenes, tablas, etc.\n* **Código**: código, gráficos simples o interactivos, videos, sonido, iframes, javascript, entre otros.\n\nComo regla general, si se muestra correctamente en el notebook, se verá bien en la diapositiva. ¡Sé creativo!", "_____no_output_____" ], [ "## ¿Cómo instalar?\n\nLa instalación de la extensión RISE es extremadamente fácil.\nBasta con usar pip o conda:\n\n```\npip install rise\n```\n\no\n\n```\nconda install -c conda-forge rise\n```\n", "_____no_output_____" ], [ "Eso hará que se agregue el botón de iniciar presentación (destacado en rojo).\n\n![](2021-08-01-tutorial-rise-parte-1/install2.png)", "_____no_output_____" ], [ "\nTambién puedes agregar `rise` a tu archivo `requirements.txt` para que se instale automáticamente al generar un ambiente.\n\nEn caso de tener problemas, revisa los [detalles adicionales de instalación](https://rise.readthedocs.io/en/stable/installation.html).", "_____no_output_____" ], [ "## ¿Cómo configurar lo que está en cada diapositivas? \nPaciencia, se requiere todavía un paso adicional. \n\nEn el menú de jupyter notebook, es necesario seleccionar `View/Cell Toolbar/Slideshow` para que permita configurar el tipo de celda para diapositiva. 
\n\nEsto se requiere porque a cada celda del notebook se le agregará metadata para saber en que diapositiva debe ir (o si se debe saltar).\n\n![](2021-08-01-tutorial-rise-parte-1/install3.png)", "_____no_output_____" ], [ "Eso dejará todo configurado para poder seleccionar el tipo de celda respecto a la diapositiva.\n\n![](2021-08-01-tutorial-rise-parte-1/install4.png)", "_____no_output_____" ], [ "## ¿Cómo moverse por las slides?\n\nAl hacer click en el botón \"Iniciar presentación\", la presentación se iniciará en la celda que esté activa.\n\n* Se accede a la próxima diapositiva o fragmento con `Espacio` (o la flecha derecha).\n* Se retrocede a la diapositiva o fragmento anterior con `Shift Espacio` (o la flecha izquierda).\n* Se avanza a la proxima sub-diapositiva con `Page Up`.\n* Se retrocede la sub-diapositiva anterior con `Page Down`.\n\n\nUna diferencia de una presentación típica de PowerPoint es que existen 2 dimensiones: las diapositivas (slides) que avanzan de izquierda a derecha como es tradicional, pero también sub-diapositivas (subslides) que son slides opcionales y que avanzan de arriba a abajo.\n\n![](2021-08-01-tutorial-rise-parte-1/revealjs-vertical-slides.gif \"Gif oficial de reveal.js https://revealjs.com/vertical-slides/\")\n\n\nObservación: En general, es dificil recordar el orden de las slides y sub-slides. Yo personalmente nunca uso sub-slides por esta razón y prefiero solo tener orden \"horizontal\".", "_____no_output_____" ], [ "## ¿Cómo se configuran las diapositivas?\n\nExisten varios tipos de celda con distintas funcionalidades:\n* `-`: valor por defecto. La celda se muestra con la slide anterior.\n* `Slide`: inicia una nueva diapositiva (dirección horizontal).\n* `Sub-slide`: iniciar una nueva sub-diapositiva (dirección vertical).\n* `Fragment`: se concatena a la celda anterior, pero no se muestra inmediatamente. \n* `Skip`: no se muestra la celda en las diapositivas.\n* `Notes`: No se muestra en las diapositivas, sólo se muestra en las notas para el presentador.", "_____no_output_____" ], [ "## ¿Qué opciones hay?\n\nExisten múltiples funcionalides accesibles con el teclado durante la presentación, pero las principales a recordar son:\n* `?`: ver todos los shortcuts.\n* `,`: ocultar los botones.\n* `\\`: poner la pantalla en negro. Útil para discutir algo sin distracciones visuales.\n* ``:\n\nLas funcionalidades se controlan con los siguientes botones:\n\n![](2021-08-01-tutorial-rise-parte-1/botones.png)", "_____no_output_____" ], [ "## ¿Se puede editar durante la presentación?\n\n¡Sí! \n\nEs posible editar y ejecutar las celdas de markdown y de código durante la presentación. \n\nSe usa el mismo sistema de doble click para acceder a modo edición, y `Alt Enter` para ejecutarla.\n\n![](2021-08-01-tutorial-rise-parte-1/editar.gif)", "_____no_output_____" ], [ "## ¿Cómo controlo el tamaño?\n\nUn problema común es que al conectar el computador a otra pantalla o datashow, no se alcanza a ver en la diapositiva todo el código, texto o imagen. 
\n\nLo único que debes hacer es usar usar `Ctrl +` y `Ctrl -` para regular el tamaño (`Command +`y `Command -` en Mac), de la misma manera que regulas el tamaño de una página web.\n\n![](2021-08-01-tutorial-rise-parte-1/tamano.gif)", "_____no_output_____" ], [ "## ¿Dónde están las notas del presentador?\n\nPuedes abrir las notas del presentador presionando `t`.\n\nPara poder usar las notas del presentador necesitas tener al menos 2 pantallas: una pública para compartir y otra para mantener privada.\n\n![](2021-08-01-tutorial-rise-parte-1/notas_presentador.gif)", "_____no_output_____" ], [ "## Comparte tu estructura\n\nCuando se ejecuta código en una presentación, es una buena práctica mostrar la estructura de carpetas y los archivos con los que se va a trabajar. Eso ayudará a despejar dudas de cómo está funcionando el código.\nEsto se puede lograr fácilmente con comando mágicos (como `%ls`) o ejecutando código en bash (como `!ls`). \nLa diferencia entre ambos radica en lo siguiente:\n* Los comandos mágicos son específicos y definidos por cada kernel. Pueden existir comandos que estén en python pero no en R. \n* Los `!` permiten ejecutar instrucciones en el terminal y es más flexible. Uno de los más comunes es ofrecer instalar las librerías, como:\n```bash\npip install rise matplotlib\n```", "_____no_output_____" ], [ "## Gráficos\n\nPara mostrar gráficos resulta práctico que no se genere una ventana adicional, sino que se agreguen a la celda de resultados. Esta es una práctica común en jupyter notebook/lab, pero es más importante aún al pensar en las diapositivas.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nwith plt.xkcd():\n fig = plt.figure(figsize=(14,6))\n x = np.linspace(-5,5,num=100)\n y = np.abs(np.abs(np.sin(2*x)/x))\n plt.plot(x,y)", "_____no_output_____" ] ], [ [ "## Apoyos gráficos\n\nMe ha servido mucho insertar gifs en las presentaciones. Un buen gif animado es un buen compromiso entre una imagen y una película, y sirve para tener una animación que ilustre algun proceso. Existen muchos programas para grabar gifs.\nUna solución que me ha funcionado bien para animar gifs simples es crear un diagrama mediante una animación en PowerPoint, y después grabar un gif considerando apropiadamente los tiempos.", "_____no_output_____" ], [ "Eso concluye la primera parte 1 del [tutorial de presentaciones interactivas en jupyter notebook](https://sebastiandres.github.io/blog/tutorial-rise/).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c521710aa62ee8d82d37bafdcc189ff410f51900
55,881
ipynb
Jupyter Notebook
Weather_Database.ipynb
ArnavAnjaria/World-Weather-Analysis
f9c6ce15c41b83c1c80730fe126e7e43d4ba32cf
[ "Apache-2.0" ]
null
null
null
Weather_Database.ipynb
ArnavAnjaria/World-Weather-Analysis
f9c6ce15c41b83c1c80730fe126e7e43d4ba32cf
[ "Apache-2.0" ]
null
null
null
Weather_Database.ipynb
ArnavAnjaria/World-Weather-Analysis
f9c6ce15c41b83c1c80730fe126e7e43d4ba32cf
[ "Apache-2.0" ]
null
null
null
42.952344
106
0.570892
[ [ [ "### Imports", "_____no_output_____" ] ], [ [ "# Import the dependencies.\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom citipy import citipy", "_____no_output_____" ] ], [ [ "### Get Random Coordinates", "_____no_output_____" ] ], [ [ "# Create a set of random latitude and longitude combinations.\nlats = np.random.uniform(low=-90.000, high=90.000, size=2000)\nlngs = np.random.uniform(low=-180.000, high=180.000, size=2000)\nlat_lngs = zip(lats, lngs)\nlat_lngs", "_____no_output_____" ], [ "# Add the latitudes and longitudes to a list.\ncoordinates = list(lat_lngs)", "_____no_output_____" ] ], [ [ "### Get the cities from the coordinates", "_____no_output_____" ] ], [ [ "# Create a list for holding the cities.\ncities = []\n# Identify the nearest city for each latitude and longitude combination.\nfor coordinate in coordinates:\n city = citipy.nearest_city(coordinate[0], coordinate[1]).city_name\n\n # If the city is unique, then we will add it to the cities list.\n if city not in cities:\n cities.append(city)\n# Print the city count to confirm sufficient count.\nlen(cities)", "_____no_output_____" ] ], [ [ "### Get the required weather data from OpenWatherMap using the API calls\n\nData Needed: \n- Latitude and longitude\n- Maximum temperature\n- Percent humidity\n- Percent cloudiness\n- Wind speed\n- Weather description (for example, clouds, fog, light rain, clear sky)", "_____no_output_____" ] ], [ [ "# Import the requests library.\nimport requests\n# Import the API key.\nfrom config import weather_api_key\n# Import the time library and the datetime module from the datetime library \nimport time\nfrom datetime import datetime", "_____no_output_____" ], [ "# Starting URL for Weather Map API Call.\nurl = \"http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=\" + weather_api_key", "_____no_output_____" ], [ "# Create an empty list to hold the weather data.\ncity_data = []\n# Print the beginning of the logging.\nprint(\"Beginning Data Retrieval \")\nprint(\"-----------------------------\")\n\n# Create counters.\nrecord_count = 1\nset_count = 1\n\n# Loop through all the cities in the list.\nfor i, city in enumerate(cities):\n\n # Group cities in sets of 50 for logging purposes.\n if (i % 50 == 0 and i >= 50):\n set_count += 1\n record_count = 1\n # time.sleep(60)\n\n # Create endpoint URL with each city.\n city_url = url + \"&q=\" + city.replace(\" \",\"+\")\n\n # Log the URL, record, and set numbers and the city.\n print(f\"Processing Record {record_count} of Set {set_count} | {city}\")\n # Add 1 to the record count.\n record_count += 1\n\n# Run an API request for each of the cities.\n try:\n # Parse the JSON and retrieve data.\n city_weather = requests.get(city_url).json()\n # Parse out the needed data.\n city_lat = city_weather[\"coord\"][\"lat\"]\n city_lng = city_weather[\"coord\"][\"lon\"]\n city_max_temp = city_weather[\"main\"][\"temp_max\"]\n city_humidity = city_weather[\"main\"][\"humidity\"]\n city_clouds = city_weather[\"clouds\"][\"all\"]\n city_wind = city_weather[\"wind\"][\"speed\"]\n city_country = city_weather[\"sys\"][\"country\"]\n # Convert the date to ISO standard.\n city_date = datetime.utcfromtimestamp(city_weather[\"dt\"]).strftime('%Y-%m-%d %H:%M:%S')\n # Append the city information into city_data list.\n city_data.append({\"City\": city.title(),\n \"Lat\": city_lat,\n \"Lng\": city_lng,\n \"Max Temp\": city_max_temp,\n \"Humidity\": city_humidity,\n \"Cloudiness\": city_clouds,\n \"Wind Speed\": city_wind,\n 
\"Country\": city_country,\n \"Date\": city_date})\n\n# If an error is experienced, skip the city.\n except:\n print(\"City not found. Skipping...\")\n pass\n\n# Indicate that Data Loading is complete.\nprint(\"-----------------------------\")\nprint(\"Data Retrieval Complete \")\nprint(\"-----------------------------\")", "Beginning Data Retrieval \n-----------------------------\nProcessing Record 1 of Set 1 | chernyshevskiy\nProcessing Record 2 of Set 1 | hobart\nProcessing Record 3 of Set 1 | beeville\nProcessing Record 4 of Set 1 | punta arenas\nProcessing Record 5 of Set 1 | atuona\nProcessing Record 6 of Set 1 | beloha\nProcessing Record 7 of Set 1 | nalut\nProcessing Record 8 of Set 1 | bethel\nProcessing Record 9 of Set 1 | ponta do sol\nProcessing Record 10 of Set 1 | casalmaggiore\nProcessing Record 11 of Set 1 | haines junction\nProcessing Record 12 of Set 1 | port hedland\nProcessing Record 13 of Set 1 | saint george\nProcessing Record 14 of Set 1 | tuktoyaktuk\nProcessing Record 15 of Set 1 | santander\nProcessing Record 16 of Set 1 | knokke-heist\nProcessing Record 17 of Set 1 | chaa-khol\nCity not found. Skipping...\nProcessing Record 18 of Set 1 | clyde river\nProcessing Record 19 of Set 1 | north bend\nProcessing Record 20 of Set 1 | cape town\nProcessing Record 21 of Set 1 | ushuaia\nProcessing Record 22 of Set 1 | rikitea\nProcessing Record 23 of Set 1 | upernavik\nProcessing Record 24 of Set 1 | bredasdorp\nProcessing Record 25 of Set 1 | namie\nProcessing Record 26 of Set 1 | macia\nProcessing Record 27 of Set 1 | puerto ayora\nProcessing Record 28 of Set 1 | hualmay\nProcessing Record 29 of Set 1 | chuy\nProcessing Record 30 of Set 1 | victoria\nProcessing Record 31 of Set 1 | touros\nProcessing Record 32 of Set 1 | costa rica\nProcessing Record 33 of Set 1 | vaini\nProcessing Record 34 of Set 1 | skagastrond\nCity not found. Skipping...\nProcessing Record 35 of Set 1 | voh\nProcessing Record 36 of Set 1 | kruisfontein\nProcessing Record 37 of Set 1 | rapid city\nProcessing Record 38 of Set 1 | faanui\nProcessing Record 39 of Set 1 | samusu\nCity not found. Skipping...\nProcessing Record 40 of Set 1 | illoqqortoormiut\nCity not found. Skipping...\nProcessing Record 41 of Set 1 | albany\nProcessing Record 42 of Set 1 | port elizabeth\nProcessing Record 43 of Set 1 | hermanus\nProcessing Record 44 of Set 1 | cherskiy\nProcessing Record 45 of Set 1 | hofn\nProcessing Record 46 of Set 1 | torbay\nProcessing Record 47 of Set 1 | bluff\nProcessing Record 48 of Set 1 | corrente\nProcessing Record 49 of Set 1 | natal\nProcessing Record 50 of Set 1 | saleaula\nCity not found. Skipping...\nProcessing Record 1 of Set 2 | hilo\nProcessing Record 2 of Set 2 | qaanaaq\nProcessing Record 3 of Set 2 | pilar\nProcessing Record 4 of Set 2 | la maddalena\nProcessing Record 5 of Set 2 | pacific grove\nProcessing Record 6 of Set 2 | barrow\nProcessing Record 7 of Set 2 | avarua\nProcessing Record 8 of Set 2 | kushima\nProcessing Record 9 of Set 2 | berlevag\nProcessing Record 10 of Set 2 | asau\nProcessing Record 11 of Set 2 | kodiak\nProcessing Record 12 of Set 2 | busselton\nProcessing Record 13 of Set 2 | butaritari\nProcessing Record 14 of Set 2 | dukat\nProcessing Record 15 of Set 2 | tashtyp\nProcessing Record 16 of Set 2 | mataura\nProcessing Record 17 of Set 2 | palabuhanratu\nCity not found. 
Skipping...\nProcessing Record 18 of Set 2 | eldorado\nProcessing Record 19 of Set 2 | garmsar\nProcessing Record 20 of Set 2 | eureka\nProcessing Record 21 of Set 2 | klaksvik\nProcessing Record 22 of Set 2 | dunedin\nProcessing Record 23 of Set 2 | tiksi\nProcessing Record 24 of Set 2 | taywarah\nProcessing Record 25 of Set 2 | puerto escondido\nProcessing Record 26 of Set 2 | narsaq\nProcessing Record 27 of Set 2 | quatre cocos\nProcessing Record 28 of Set 2 | arraial do cabo\nProcessing Record 29 of Set 2 | sao joao da barra\nProcessing Record 30 of Set 2 | vila franca do campo\nProcessing Record 31 of Set 2 | ribeira grande\nProcessing Record 32 of Set 2 | hithadhoo\nProcessing Record 33 of Set 2 | nikolskoye\nProcessing Record 34 of Set 2 | amderma\nCity not found. Skipping...\nProcessing Record 35 of Set 2 | aasmae\nCity not found. Skipping...\nProcessing Record 36 of Set 2 | bedesa\nProcessing Record 37 of Set 2 | mago\nProcessing Record 38 of Set 2 | sentyabrskiy\nCity not found. Skipping...\nProcessing Record 39 of Set 2 | barentsburg\nCity not found. Skipping...\nProcessing Record 40 of Set 2 | janauba\nProcessing Record 41 of Set 2 | khatanga\nProcessing Record 42 of Set 2 | saint-philippe\nProcessing Record 43 of Set 2 | balabac\nProcessing Record 44 of Set 2 | castro\nProcessing Record 45 of Set 2 | chokurdakh\nProcessing Record 46 of Set 2 | walvis bay\nProcessing Record 47 of Set 2 | norman wells\nProcessing Record 48 of Set 2 | zhanatas\nCity not found. Skipping...\nProcessing Record 49 of Set 2 | vestmannaeyjar\nProcessing Record 50 of Set 2 | new norfolk\nProcessing Record 1 of Set 3 | yaqui\nProcessing Record 2 of Set 3 | aklavik\nProcessing Record 3 of Set 3 | caravelas\nProcessing Record 4 of Set 3 | labuhan\nProcessing Record 5 of Set 3 | kapaa\nProcessing Record 6 of Set 3 | oranjemund\nProcessing Record 7 of Set 3 | muzquiz\nCity not found. Skipping...\nProcessing Record 8 of Set 3 | ossora\nProcessing Record 9 of Set 3 | provideniya\nProcessing Record 10 of Set 3 | ilulissat\nProcessing Record 11 of Set 3 | husavik\nProcessing Record 12 of Set 3 | yellowknife\nProcessing Record 13 of Set 3 | sola\nProcessing Record 14 of Set 3 | san patricio\nProcessing Record 15 of Set 3 | hanko\nProcessing Record 16 of Set 3 | taolanaro\nCity not found. Skipping...\nProcessing Record 17 of Set 3 | yar-sale\nProcessing Record 18 of Set 3 | ribeira brava\nProcessing Record 19 of Set 3 | hvide sande\nProcessing Record 20 of Set 3 | bara\nProcessing Record 21 of Set 3 | asyut\nProcessing Record 22 of Set 3 | port alfred\nProcessing Record 23 of Set 3 | nizhneyansk\nCity not found. Skipping...\nProcessing Record 24 of Set 3 | torres\nProcessing Record 25 of Set 3 | pisco\nProcessing Record 26 of Set 3 | thompson\nProcessing Record 27 of Set 3 | zonguldak\nProcessing Record 28 of Set 3 | grand-santi\nProcessing Record 29 of Set 3 | esperance\nProcessing Record 30 of Set 3 | sabha\nProcessing Record 31 of Set 3 | east london\nProcessing Record 32 of Set 3 | coihaique\nProcessing Record 33 of Set 3 | ostrovnoy\nProcessing Record 34 of Set 3 | tubruq\nCity not found. Skipping...\nProcessing Record 35 of Set 3 | opuwo\nProcessing Record 36 of Set 3 | kavieng\nProcessing Record 37 of Set 3 | harwich\nProcessing Record 38 of Set 3 | martinsburg\nProcessing Record 39 of Set 3 | mocajuba\nProcessing Record 40 of Set 3 | tasbuget\nCity not found. 
Skipping...\nProcessing Record 41 of Set 3 | jamestown\nProcessing Record 42 of Set 3 | pervomayskiy\nProcessing Record 43 of Set 3 | maniitsoq\nProcessing Record 44 of Set 3 | vicuna\nProcessing Record 45 of Set 3 | saldanha\nProcessing Record 46 of Set 3 | alihe\nProcessing Record 47 of Set 3 | grand river south east\nCity not found. Skipping...\nProcessing Record 48 of Set 3 | petropavlovsk-kamchatskiy\nProcessing Record 49 of Set 3 | naze\nProcessing Record 50 of Set 3 | sindor\nProcessing Record 1 of Set 4 | longyearbyen\nProcessing Record 2 of Set 4 | barcelos\nProcessing Record 3 of Set 4 | weligama\nProcessing Record 4 of Set 4 | raudeberg\nProcessing Record 5 of Set 4 | tsihombe\nCity not found. Skipping...\nProcessing Record 6 of Set 4 | tasiilaq\nProcessing Record 7 of Set 4 | leningradskiy\nProcessing Record 8 of Set 4 | porto velho\nProcessing Record 9 of Set 4 | katete\nProcessing Record 10 of Set 4 | laguna\nProcessing Record 11 of Set 4 | kirovskiy\nProcessing Record 12 of Set 4 | port-gentil\nProcessing Record 13 of Set 4 | hervey bay\nProcessing Record 14 of Set 4 | bengkulu\nProcessing Record 15 of Set 4 | tabou\nProcessing Record 16 of Set 4 | oulainen\nProcessing Record 17 of Set 4 | myitkyina\nProcessing Record 18 of Set 4 | saint anthony\nProcessing Record 19 of Set 4 | san jeronimo\nProcessing Record 20 of Set 4 | cervo\nProcessing Record 21 of Set 4 | buariki\nCity not found. Skipping...\nProcessing Record 22 of Set 4 | kalutara\nProcessing Record 23 of Set 4 | margate\nProcessing Record 24 of Set 4 | poum\nProcessing Record 25 of Set 4 | kavaratti\nProcessing Record 26 of Set 4 | verkhoyansk\nProcessing Record 27 of Set 4 | padang\nProcessing Record 28 of Set 4 | belushya guba\nCity not found. Skipping...\nProcessing Record 29 of Set 4 | san miguel de cauri\nProcessing Record 30 of Set 4 | adre\nProcessing Record 31 of Set 4 | mahebourg\nProcessing Record 32 of Set 4 | joshimath\nProcessing Record 33 of Set 4 | mar del plata\n" ], [ "# Convert the array of dictionaries to a Pandas DataFrame.\ncity_data_df = pd.DataFrame(city_data)\ncity_data_df.head(10)", "_____no_output_____" ] ], [ [ "### Output to CSV", "_____no_output_____" ] ], [ [ "# Create the output file (CSV).\noutput_data_file = \"Weather_Database/WeatherPy_Database.csv\"\n# Export the City_Data into a CSV.\ncity_data_df.to_csv(output_data_file, index_label=\"City_ID\")", "_____no_output_____" ] ] ]
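One gap worth noting: the requirements list above includes a weather description, but the retrieval loop only stores temperature, humidity, cloudiness and wind speed. If needed, the description could be taken from the same response that is already parsed inside the loop; in the current-weather JSON this field normally sits under `weather[0]['description']` (please verify against the OpenWeatherMap documentation):

```python
# Hedged sketch: extracting the textual weather description from the
# city_weather response already parsed inside the retrieval loop above.
city_description = city_weather["weather"][0]["description"]  # e.g. 'clear sky'
```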
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
c521720b1f874032976eb63a1b03d01c93ed1712
11,680
ipynb
Jupyter Notebook
15.2/WhirlwindTourOfPython-6f1daf714fe52a8dde6a288674ba46a7feed8816/00-Introduction.ipynb
jwhit204/WIN
6fefc643b23a1bc0fd183ab3659a5b428a28d3ed
[ "MIT" ]
null
null
null
15.2/WhirlwindTourOfPython-6f1daf714fe52a8dde6a288674ba46a7feed8816/00-Introduction.ipynb
jwhit204/WIN
6fefc643b23a1bc0fd183ab3659a5b428a28d3ed
[ "MIT" ]
null
null
null
15.2/WhirlwindTourOfPython-6f1daf714fe52a8dde6a288674ba46a7feed8816/00-Introduction.ipynb
jwhit204/WIN
6fefc643b23a1bc0fd183ab3659a5b428a28d3ed
[ "MIT" ]
null
null
null
55.619048
350
0.694178
[ [ [ "<!--BOOK_INFORMATION-->\n<img align=\"left\" style=\"padding-right:10px;\" src=\"fig/cover-small.jpg\">\n*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*\n\n*The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*\n", "_____no_output_____" ], [ "<!--NAVIGATION-->\n| [Contents](Index.ipynb) | [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) >", "_____no_output_____" ], [ "# 1. Introduction", "_____no_output_____" ], [ "Conceived in the late 1980s as a teaching and scripting language, Python has since become an essential tool for many programmers, engineers, researchers, and data scientists across academia and industry.\nAs an astronomer focused on building and promoting the free open tools for data-intensive science, I've found Python to be a near-perfect fit for the types of problems I face day to day, whether it's extracting meaning from large astronomical datasets, scraping and munging data sources from the Web, or automating day-to-day research tasks.\n\nThe appeal of Python is in its simplicity and beauty, as well as the convenience of the large ecosystem of domain-specific tools that have been built on top of it.\nFor example, most of the Python code in scientific computing and data science is built around a group of mature and useful packages:\n\n- [NumPy](http://numpy.org) provides efficient storage and computation for multi-dimensional data arrays.\n- [SciPy](http://scipy.org) contains a wide array of numerical tools such as numerical integration and interpolation.\n- [Pandas](http://pandas.pydata.org) provides a DataFrame object along with a powerful set of methods to manipulate, filter, group, and transform data.\n- [Matplotlib](http://matplotlib.org) provides a useful interface for creation of publication-quality plots and figures.\n- [Scikit-Learn](http://scikit-learn.org) provides a uniform toolkit for applying common machine learning algorithms to data.\n- [IPython/Jupyter](http://jupyter.org) provides an enhanced terminal and an interactive notebook environment that is useful for exploratory analysis, as well as creation of interactive, executable documents. For example, the manuscript for this report was composed entirely in Jupyter notebooks.\n\nNo less important are the numerous other tools and packages which accompany these: if there is a scientific or data analysis task you want to perform, chances are someone has written a package that will do it for you.\n\nTo tap into the power of this data science ecosystem, however, first requires familiarity with the Python language itself.\nI often encounter students and colleagues who have (sometimes extensive) backgrounds in computing in some language – MATLAB, IDL, R, Java, C++, etc. 
– and are looking for a brief but comprehensive tour of the Python language that respects their level of knowledge rather than starting from ground zero.\nThis report seeks to fill that niche.\n\nAs such, this report in no way aims to be a comprehensive introduction to programming, or a full introduction to the Python language itself; if that is what you are looking for, you might check out one of the recommended references listed in [Resources for Learning](16-Further-Resources.ipynb).\nInstead, this will provide a whirlwind tour of some of Python's essential syntax and semantics, built-in data types and structures, function definitions, control flow statements, and other aspects of the language.\nMy aim is that readers will walk away with a solid foundation from which to explore the data science stack just outlined.", "_____no_output_____" ], [ "## Using Code Examples\n\nSupplemental material (code examples, exercises, etc.) is available for download at https://github.com/jakevdp/WhirlwindTourOfPython/.\nThis book is here to help you get your job done.\nIn general, if example code is offered with this book, you may use it in your programs and documentation.\nYou do not need to contact us for permission unless you’re reproducing a significant portion of the code.\nFor example, writing a program that uses several chunks of code from this book does not require permission.\nSelling or distributing a CD-ROM of examples from O’Reilly books does require permission.\nAnswering a question by citing this book and quoting example code does not require permission.\nIncorporating a significant amount of example code from this book into your product’s documentation does require permission.\n\nWe appreciate, but do not require, attribution.\nAn attribution usually includes the title, author, publisher, and ISBN.\nFor example: \"A Whirlwind Tour of Python by Jake VanderPlas (O’Reilly). Copyright 2016 O’Reilly Media, Inc., 978-1-491-96465-1.\"\n\nIf you feel your use of code examples falls outside fair use or the per‐ mission given above, feel free to contact us at [email protected].", "_____no_output_____" ], [ "## Installation and Practical Considerations\n\nInstalling Python and the suite of libraries that enable scientific computing is straightforward whether you use Windows, Linux, or Mac OS X. 
This section will outline some of the considerations when setting up your computer.", "_____no_output_____" ], [ "### Python 2 vs Python 3\n\nThis report uses the syntax of Python 3, which contains language enhancements that are not compatible with the *2.x* series of Python.\nThough Python 3.0 was first released in 2008, adoption has been relatively slow, particularly in the scientific and web development communities.\nThis is primarily because it took some time for many of the essential packages and toolkits to be made compatible with the new language internals.\nSince early 2014, however, stable releases of the most important tools in the data science ecosystem have been fully-compatible with both Python 2 and 3, and so this book will use the newer Python 3 syntax.\nEven though that is the case, the vast majority of code snippets in this book will also work without modification in Python 2: in cases where a Py2-incompatible syntax is used, I will make every effort to note it explicitly.", "_____no_output_____" ], [ "### Installation with conda\n\nThough there are various ways to install Python, the one I would suggest – particularly if you wish to eventually use the data science tools mentioned above – is via the cross-platform Anaconda distribution.\nThere are two flavors of the Anaconda distribution:\n\n- [Miniconda](http://conda.pydata.org/miniconda.html) gives you Python interpreter itself, along with a command-line tool called ``conda`` which operates as a cross-platform package manager geared toward Python packages, similar in spirit to the ``apt`` or ``yum`` tools that Linux users might be familiar with.\n- [Anaconda](https://www.continuum.io/downloads) includes both Python and ``conda``, and additionally bundles a suite of other pre-installed packages geared toward scientific computing.\n\nAny of the packages included with Anaconda can also be installed manually on top of Miniconda; for this reason I suggest starting with Miniconda.\n\nTo get started, download and install the Miniconda package – make sure to choose a version with Python 3 – and then install the IPython notebook package:\n```\n[~]$ conda install ipython-notebook\n```\nFor more information on ``conda``, including information about creating and using conda environments, refer to the Miniconda package documentation linked at the above page.", "_____no_output_____" ], [ "## The Zen of Python\n\nPython aficionados are often quick to point out how \"intuitive\", \"beautiful\", or \"fun\" Python is.\nWhile I tend to agree, I also recognize that beauty, intuition, and fun often go hand in hand with familiarity, and so for those familiar with other languages such florid sentiments can come across as a bit smug.\nNevertheless, I hope that if you give Python a chance, you'll see where such impressions might come from.\nAnd if you *really* want to dig into the programming philosophy that drives much of the coding practice of Python power-users, a nice little Easter egg exists in the Python interpreter: simply close your eyes, meditate for a few minutes, and ``import this``:", "_____no_output_____" ] ], [ [ "import this", "The Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly 
silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n" ] ], [ [ "With that, let's start our tour of the Python language.", "_____no_output_____" ], [ "<!--NAVIGATION-->\n| [Contents](Index.ipynb) | [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) >", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c521832823ced6209466e32a7606d2e41cdd35d7
122,339
ipynb
Jupyter Notebook
colab/validation.ipynb
fossabot/denmune-clustering-algorithm
aa815150bd3dc0dfddca70d7f75c207f92b5d3b0
[ "BSD-3-Clause" ]
null
null
null
colab/validation.ipynb
fossabot/denmune-clustering-algorithm
aa815150bd3dc0dfddca70d7f75c207f92b5d3b0
[ "BSD-3-Clause" ]
null
null
null
colab/validation.ipynb
fossabot/denmune-clustering-algorithm
aa815150bd3dc0dfddca70d7f75c207f92b5d3b0
[ "BSD-3-Clause" ]
null
null
null
311.295165
55,074
0.915399
[ [ [ "<a href=\"https://colab.research.google.com/github/egy1st/denmune-clustering-algorithm/blob/main/colab/validation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport time\nimport os.path\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "# install DenMune clustering algorithm using pip command from the offecial Python repository, PyPi\n# from https://pypi.org/project/denmune/\n!pip install denmune\n\n# then import it\nfrom denmune import DenMune", "_____no_output_____" ], [ "# clone datasets from our repository datasets\nif not os.path.exists('datasets'):\n !git clone https://github.com/egy1st/datasets", "Cloning into 'datasets'...\nremote: Enumerating objects: 63, done.\u001b[K\nremote: Counting objects: 100% (63/63), done.\u001b[K\nremote: Compressing objects: 100% (52/52), done.\u001b[K\nremote: Total 63 (delta 10), reused 59 (delta 9), pack-reused 0\u001b[K\nUnpacking objects: 100% (63/63), done.\nChecking out files: 100% (23/23), done.\n" ] ], [ [ "You can get your validation results using 3 methods\n- by showing the Analyzer\n- extract values from the validity returned list from fit_predict function\n- extract values from the Analyzer dictionary\n\n\nThe algorithm is associated with five built-in validity measures, which are:\n- ACC, Accuracy\n- F1 score\n- NMI index (Normalized Mutual Information)\n- AMI index (Adjusted Mutual Information)\n- ARI index (Adjusted Rand Index)", "_____no_output_____" ] ], [ [ "# Let us show the analyzer by set show_analyzer to True, which is actually the default parameter's value\n\ndata_path = 'datasets/denmune/shapes/' \ndataset = \"aggregation\" \nknn = 6\n\ndata_file = data_path + dataset + '.csv'\nX_train = pd.read_csv(data_file, sep=',', header=None)\ny_train = X_train.iloc[:, -1]\nX_train = X_train.drop(X_train.columns[-1], axis=1) \n\nprint (\"Dataset:\", dataset)\ndm = DenMune(train_data=X_train,\n train_truth=y_train,\n k_nearest=knn,\n rgn_tsne=False)\n\nlabels, validity = dm.fit_predict(show_noise=True, show_analyzer=True)\n", "Dataset: aggregation\nPlotting dataset Groundtruth\n" ], [ "# secondly, we can extract validity returned list from fit_predict function\ndm = DenMune(train_data=X_train, train_truth=y_train, k_nearest=knn, rgn_tsne=False)\nlabels, validity = dm.fit_predict(show_plots=False, show_noise=True, show_analyzer=False)\nvalidity", "_____no_output_____" ], [ "Accuracy = validity['train']['ACC']\nprint ('Accuracy:',Accuracy, 'correctely identified points')\n\nF1_score = validity['train']['F1']\nprint ('F1 score:', round(F1_score*100,2), '%')\n\nNMI = validity['train']['NMI']\nprint ('NMI index:', round(NMI*100,2), '%')\n\nAMI = validity['train']['AMI']\nprint ('AMI index:', round(AMI*100,2), '%')\n\nARI = validity['train']['ARI']\nprint ('ARI index:', round(ARI*100,2), '%')\n", "Accuracy: 785 correctely identified points\nF1 score: 99.62 %\nNMI index: 98.83 %\nAMI index: 98.81 %\nARI index: 99.27 %\n" ], [ "# Third, we can extract extract values from the Analyzer dictionary\ndm = DenMune(train_data=X_train, train_truth=y_train, k_nearest=knn, rgn_tsne=False)\nlabels, validity = dm.fit_predict(show_plots=False, show_noise=True, show_analyzer=False)\ndm.analyzer", "_____no_output_____" ], [ "Accuracy = dm.analyzer['validity']['train']['ACC']\nprint ('Accuracy:',Accuracy, 'correctely identified points')\n\nF1_score = 
dm.analyzer['validity']['train']['F1']\nprint ('F1 score:', round(F1_score*100,2), '%')\n\nNMI = dm.analyzer['validity']['train']['NMI']\nprint ('NMI index:', round(NMI*100,2), '%')\n\nAMI = dm.analyzer['validity']['train']['AMI']\nprint ('AMI index:', round(AMI*100,2), '%')\n\nARI = dm.analyzer['validity']['train']['ARI']\nprint ('ARI index:', round(ARI*100,2), '%')", "Accuracy: 785 correctely identified points\nF1 score: 99.62 %\nNMI index: 98.83 %\nAMI index: 98.81 %\nARI index: 99.27 %\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
c52183de8e0df2ee259c1be79f13cca9ff5a44a8
2,185
ipynb
Jupyter Notebook
ch03/mnist_show.ipynb
doyu/dlfs1
13fc400a6e810ec147f2e5934992556edda51c63
[ "MIT" ]
null
null
null
ch03/mnist_show.ipynb
doyu/dlfs1
13fc400a6e810ec147f2e5934992556edda51c63
[ "MIT" ]
null
null
null
ch03/mnist_show.ipynb
doyu/dlfs1
13fc400a6e810ec147f2e5934992556edda51c63
[ "MIT" ]
null
null
null
21.421569
89
0.507094
[ [ [ "# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom PIL import Image\n\ndef img_show(img):\n pil_img = Image.fromarray(np.uint8(img))\n pil_img.show()", "_____no_output_____" ], [ "(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)\n\nimg = x_train[0]\nlabel = t_train[0]\nprint(label) # 5\n\nprint(img.shape) # (784,)\nimg = img.reshape(28, 28) # 形状を元の画像サイズに変形\nprint(img.shape) # (28, 28)\nprint(img.max())\n#img_show(img)", "5\n(784,)\n(28, 28)\n255\n" ], [ "(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False, normalize=True)\n\nimg = x_train[0]\nlabel = t_train[0]\nprint(label) # 5\nprint(img.shape) # (784,)\nprint(img.max())", "5\n(1, 28, 28)\n1.0\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
c521988a063b1b51b47692fecc637b3ce5e907c6
40,640
ipynb
Jupyter Notebook
imaging/ml/ml_codelab/breast_density_auto_ml.ipynb
umair-akb/healthcare
79b8026a82a8ad7a936cb7571fd5518ca4ead596
[ "Apache-2.0" ]
310
2018-02-23T01:40:01.000Z
2022-03-30T12:25:56.000Z
imaging/ml/ml_codelab/breast_density_auto_ml.ipynb
HabibMrad/healthcare
f15f4fd6218cf847e8bd81dd2932d3450b7d4804
[ "Apache-2.0" ]
189
2018-06-19T15:32:10.000Z
2022-03-11T23:48:14.000Z
imaging/ml/ml_codelab/breast_density_auto_ml.ipynb
HabibMrad/healthcare
f15f4fd6218cf847e8bd81dd2932d3450b7d4804
[ "Apache-2.0" ]
165
2018-03-06T19:29:18.000Z
2022-03-21T10:53:45.000Z
42.11399
712
0.587475
[ [ [ "Copyright 2018 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\nhttps://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n", "_____no_output_____" ], [ "**This tutorial is for educational purposes purposes only and is not intended for use in clinical diagnosis or clinical decision-making or for any other clinical use.**", "_____no_output_____" ], [ "# Training/Inference on Breast Density Classification Model on AutoML Vision\n", "_____no_output_____" ], [ "The goal of this tutorial is to train, deploy and run inference on a breast density classification model. Breast density is thought to be a factor for an increase in the risk for breast cancer. This will emphasize using the [Cloud Healthcare API](https://cloud.google.com/healthcare/) in order to store, retreive and transcode medical images (in DICOM format) in a managed and scalable way. This tutorial will focus on using [Cloud AutoML Vision](https://cloud.google.com/vision/automl/docs/beginners-guide) to scalably train and serve the model. \n\n**Note: This is the AutoML version of the Cloud ML Engine Codelab found [here](./breast_density_cloud_ml.ipynb).**", "_____no_output_____" ], [ "## Requirements\n- A Google Cloud project.\n- Project has [Cloud Healthcare API](https://cloud.google.com/healthcare/docs/quickstart) enabled.\n- Project has [Cloud AutoML API ](https://cloud.google.com/vision/automl/docs/quickstart) enabled.\n- Project has [Cloud Build API](https://cloud.google.com/cloud-build/docs/quickstart-docker) enabled.\n- Project has [Kubernetes engine API](https://console.developers.google.com/apis/api/container.googleapis.com/overview?project=) enabled.\n- Project has [Cloud Resource Manager API](https://console.cloud.google.com/cloud-resource-manager) enabled.", "_____no_output_____" ], [ "## Notebook dependencies\nWe will need to install the hcls_imaging_ml_toolkit package found [here](./toolkit). This toolkit helps make working with DICOM objects and the Cloud Healthcare API easier.\nIn addition, we will install [dicomweb-client](https://dicomweb-client.readthedocs.io/en/latest/) to help us interact with the DIOCOMWeb API and [pydicom](https://pydicom.github.io/pydicom/dev/index.html) which is used to help up construct DICOM objects.\n", "_____no_output_____" ] ], [ [ "%%bash\n\npip3 install git+https://github.com/GoogleCloudPlatform/healthcare.git#subdirectory=imaging/ml/toolkit\npip3 install dicomweb-client\npip3 install pydicom\n", "_____no_output_____" ] ], [ [ "## Input Dataset\n\nThe dataset that will be used for training is the [TCIA CBIS-DDSM](https://wiki.cancerimagingarchive.net/display/Public/CBIS-DDSM) dataset. This dataset contains ~2500 mammography images in DICOM format. Each image is given a [BI-RADS breast density ](https://breast-cancer.ca/densitbi-rads/) score from 1 to 4. In this tutorial, we will build a binary classifier that distinguishes between breast density \"2\" (*scattered density*) and \"3\" (*heterogeneously dense*). These are the two most common and variably assigned scores. 
In the literature, this is said to be [particularly difficult for radiologists to consistently distinguish](https://aapm.onlinelibrary.wiley.com/doi/pdf/10.1002/mp.12683).", "_____no_output_____" ] ], [ [ "project_id = \"MY_PROJECT\" # @param\nlocation = \"us-central1\"\ndataset_id = \"MY_DATASET\" # @param\ndicom_store_id = \"MY_DICOM_STORE\" # @param\n\n# Input data used by AutoML must be in a bucket with the following format.\nautoml_bucket_name = \"gs://\" + project_id + \"-vcm\"", "_____no_output_____" ], [ "%%bash -s {project_id} {location} {automl_bucket_name}\n# Create bucket.\ngsutil -q mb -c regional -l $2 $3\n\n# Allow Cloud Healthcare API to write to bucket.\nPROJECT_NUMBER=`gcloud projects describe $1 | grep projectNumber | sed 's/[^0-9]//g'`\nSERVICE_ACCOUNT=\"service-${PROJECT_NUMBER}@gcp-sa-healthcare.iam.gserviceaccount.com\"\nCOMPUTE_ENGINE_SERVICE_ACCOUNT=\"${PROJECT_NUMBER}[email protected]\"\n\ngsutil -q iam ch serviceAccount:${SERVICE_ACCOUNT}:objectAdmin $3\ngsutil -q iam ch serviceAccount:${COMPUTE_ENGINE_SERVICE_ACCOUNT}:objectAdmin $3\ngcloud projects add-iam-policy-binding $1 --member=serviceAccount:${SERVICE_ACCOUNT} --role=roles/pubsub.publisher\ngcloud projects add-iam-policy-binding $1 --member=serviceAccount:${COMPUTE_ENGINE_SERVICE_ACCOUNT} --role roles/pubsub.admin\n# Allow compute service account to create datasets and dicomStores.\ngcloud projects add-iam-policy-binding $1 --member=serviceAccount:${COMPUTE_ENGINE_SERVICE_ACCOUNT} --role roles/healthcare.dicomStoreAdmin\ngcloud projects add-iam-policy-binding $1 --member=serviceAccount:${COMPUTE_ENGINE_SERVICE_ACCOUNT} --role roles/healthcare.datasetAdmin", "_____no_output_____" ], [ "import json\nimport os\nimport google.auth\nfrom google.auth.transport.requests import AuthorizedSession\nfrom hcls_imaging_ml_toolkit import dicom_path\n\ncredentials, project = google.auth.default()\nauthed_session = AuthorizedSession(credentials)\n# Path to Cloud Healthcare API.\nHEALTHCARE_API_URL = 'https://healthcare.googleapis.com/v1'\n\n# Create Cloud Healthcare API dataset.\npath = os.path.join(HEALTHCARE_API_URL, 'projects', project_id, 'locations', location, 'datasets?dataset_id=' + dataset_id)\nheaders = {'Content-Type': 'application/json'}\nresp = authed_session.post(path, headers=headers)\n\nassert resp.status_code == 200, 'error creating Dataset, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nprint('Full response:\\n{0}'.format(resp.text))\n\n# Create Cloud Healthcare API DICOM store.\npath = os.path.join(HEALTHCARE_API_URL, 'projects', project_id, 'locations', location, 'datasets', dataset_id, 'dicomStores?dicom_store_id=' + dicom_store_id)\nresp = authed_session.post(path, headers=headers)\nassert resp.status_code == 200, 'error creating DICOM store, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nprint('Full response:\\n{0}'.format(resp.text))\ndicom_store_path = dicom_path.Path(project_id, location, dataset_id, dicom_store_id)", "_____no_output_____" ] ], [ [ "Next, we are going to transfer the DICOM instances to the Cloud Healthcare API.\n\nNote: We are transfering >100GB of data so this will take some time to complete", "_____no_output_____" ] ], [ [ "# Store DICOM instances in Cloud Healthcare API.\npath = 'https://healthcare.googleapis.com/v1/{}:import'.format(dicom_store_path)\nheaders = {'Content-Type': 'application/json'}\nbody = { \n 'gcsSource': {\n 'uri': 'gs://gcs-public-data--healthcare-tcia-cbis-ddsm/dicom/**'\n }\n}\nresp = authed_session.post(path, 
headers=headers, json=body)\nassert resp.status_code == 200, 'error creating Dataset, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nprint('Full response:\\n{0}'.format(resp.text))\nresponse = json.loads(resp.text)\noperation_name = response['name']", "_____no_output_____" ], [ "import time\n\ndef wait_for_operation_completion(path, timeout, sleep_time=30): \n success = False\n while time.time() < timeout:\n print('Waiting for operation completion...')\n resp = authed_session.get(path)\n assert resp.status_code == 200, 'error polling for Operation results, code: {0}, response: {1}'.format(resp.status_code, resp.text)\n response = json.loads(resp.text)\n if 'done' in response:\n if response['done'] == True and 'error' not in response:\n success = True;\n break\n time.sleep(sleep_time)\n\n print('Full response:\\n{0}'.format(resp.text)) \n assert success, \"operation did not complete successfully in time limit\"\n print('Success!')\n return response", "_____no_output_____" ], [ "path = os.path.join(HEALTHCARE_API_URL, operation_name)\ntimeout = time.time() + 40*60 # Wait up to 40 minutes.\n_ = wait_for_operation_completion(path, timeout)", "_____no_output_____" ] ], [ [ "### Explore the Cloud Healthcare DICOM dataset (optional)\n\nThis is an optional section to explore the Cloud Healthcare DICOM dataset. In the following code, we simply just list the studies that we have loaded into the Cloud Healthcare API. You can modify the *num_of_studies_to_print* parameter to print as many studies as desired.", "_____no_output_____" ] ], [ [ "num_of_studies_to_print = 2 # @param\n\n\npath = os.path.join(HEALTHCARE_API_URL, dicom_store_path.dicomweb_path_str, 'studies')\nresp = authed_session.get(path)\nassert resp.status_code == 200, 'error querying Dataset, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nresponse = json.loads(resp.text)\n\nprint(json.dumps(response[:num_of_studies_to_print], indent=2))", "_____no_output_____" ] ], [ [ "## Convert DICOM to JPEG\n\nThe ML model that we will build requires that the dataset be in JPEG. We will leverage the Cloud Healthcare API to transcode DICOM to JPEG.\n\nFirst we will create a [Google Cloud Storage](https://cloud.google.com/storage/) bucket to hold the output JPEG files. Next, we will use the ExportDicomData API to transform the DICOMs to JPEGs.", "_____no_output_____" ] ], [ [ "# Folder to store input images for AutoML Vision.\njpeg_folder = automl_bucket_name + \"/images/\"", "_____no_output_____" ] ], [ [ "Next we will convert the DICOMs to JPEGs using the [ExportDicomData](https://cloud.google.com/sdk/gcloud/reference/beta/healthcare/dicom-stores/export/gcs). ", "_____no_output_____" ] ], [ [ "%%bash -s {jpeg_folder} {project_id} {location} {dataset_id} {dicom_store_id}\ngcloud beta healthcare --project $2 dicom-stores export gcs $5 --location=$3 --dataset=$4 --mime-type=\"image/jpeg; transfer-syntax=1.2.840.10008.1.2.4.50\" --gcs-uri-prefix=$1", "_____no_output_____" ] ], [ [ "Meanwhile, you should be able to observe the JPEG images being added to your Google Cloud Storage bucket.", "_____no_output_____" ], [ "Next, we will join the training data stored in Google Cloud Storage with the labels in the TCIA website. The output of this step is a [CSV file that is input to AutoML](https://cloud.google.com/vision/automl/docs/prepare). 
This CSV contains a list of pairs of (IMAGE_PATH, LABEL).", "_____no_output_____" ] ], [ [ "# tensorflow==1.15.0 to have same versions in all environments - dataflow, automl, ai-platform\n!pip install tensorflow==1.15.0 --ignore-installed\n# CSV to hold (IMAGE_PATH, LABEL) list.\ninput_data_csv = automl_bucket_name + \"/input.csv\"\n\nimport csv\nimport os\nimport re\nfrom tensorflow.python.lib.io import file_io\nimport scripts.tcia_utils as tcia_utils\n\n# Get map of study_uid -> file paths.\npath_list = file_io.get_matching_files(os.path.join(jpeg_folder, '*/*/*'))\nstudy_uid_to_file_paths = {}\npattern = r'^{0}(?P<study_uid>[^/]+)/(?P<series_uid>[^/]+)/(?P<instance_uid>.*)'.format(jpeg_folder)\nfor path in path_list:\n match = re.search(pattern, path)\n study_uid_to_file_paths[match.group('study_uid')] = path\n\n# Get map of study_uid -> labels.\nstudy_uid_to_labels = tcia_utils.GetStudyUIDToLabelMap()\n\n# Join the two maps, output results to CSV in Google Cloud Storage.\nwith file_io.FileIO(input_data_csv, 'w') as f:\n writer = csv.writer(f, delimiter=',')\n for study_uid, label in study_uid_to_labels.items():\n if study_uid in study_uid_to_file_paths:\n writer.writerow([study_uid_to_file_paths[study_uid], label])", "_____no_output_____" ] ], [ [ "## Training\n\n***This section will focus on using AutoML through its API. AutoML can also be used through the user interface found [here](https://console.cloud.google.com/vision/). The below steps in this section can all be done through the web UI .***\n\nWe will use [AutoML Vision ](https://cloud.google.com/automl/) to train the classification model. AutoML provides a fully managed solution for training the model. All we will do is input the list of input images and labels. The trained model in AutoML will be able to classify the mammography images as either \"2\" (scattered density) or \"3\" (heterogeneously dense).\n\nAs a first step, we will create a AutoML dataset.\n", "_____no_output_____" ] ], [ [ "automl_dataset_display_name = \"MY_AUTOML_DATASET\" # @param", "_____no_output_____" ], [ "import json\nimport os\n\n# Path to AutoML API.\nAUTOML_API_URL = 'https://automl.googleapis.com/v1beta1'\n\n# Path to request creation of AutoML dataset.\npath = os.path.join(AUTOML_API_URL, 'projects', project_id, 'locations', location, 'datasets')\n\n# Headers (request in JSON format).\nheaders = {'Content-Type': 'application/json'}\n\n# Body (encoded in JSON format).\nconfig = {'display_name': automl_dataset_display_name, 'image_classification_dataset_metadata': {'classification_type': 'MULTICLASS'}}\n\nresp = authed_session.post(path, headers=headers, json=config)\nassert resp.status_code == 200, 'creating AutoML dataset, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nprint('Full response:\\n{0}'.format(resp.text))\n\n# Record the AutoML dataset name.\nresponse = json.loads(resp.text)\nautoml_dataset_name = response['name']", "_____no_output_____" ] ], [ [ "Next, we will import the CSV that contains the list of (IMAGE_PATH, LABEL) list into AutoML. 
**Please ignore errors regarding an existing ground truth.**", "_____no_output_____" ] ], [ [ "# Path to request import into AutoML dataset.\npath = os.path.join(AUTOML_API_URL, automl_dataset_name + ':importData')\n\n# Body (encoded in JSON format).\nconfig = {'input_config': {'gcs_source': {'input_uris': [input_data_csv]}}} \n\nresp = authed_session.post(path, headers=headers, json=config)\nassert resp.status_code == 200, 'error importing AutoML dataset, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nprint('Full response:\\n{0}'.format(resp.text))\n\n# Record operation_name so we can poll for it later.\nresponse = json.loads(resp.text)\noperation_name = response['name']", "_____no_output_____" ] ], [ [ "The output of the previous step is an [operation](https://cloud.google.com/vision/automl/docs/models#get-operation) that will need to poll the status for. We will poll until the operation's \"done\" field is set to true. This will take a few minutes to complete so we will wait until completion.", "_____no_output_____" ] ], [ [ "path = os.path.join(AUTOML_API_URL, operation_name)\ntimeout = time.time() + 40*60 # Wait up to 40 minutes.\n_ = wait_for_operation_completion(path, timeout)", "_____no_output_____" ] ], [ [ "Next, we will train the model to perform classification. We will set the training budget to be a maximum of 1hr (but this can be modified below). The cost of using AutoML can be found [here](https://cloud.google.com/vision/automl/pricing). Typically, the longer the model is trained for, the more accurate it will be.", "_____no_output_____" ] ], [ [ "# Name of the model.\nmodel_display_name = \"MY_MODEL_NAME\" # @param\n\n# Training budget (1 hr).\ntraining_budget = 1 # @param", "_____no_output_____" ], [ "# Path to request import into AutoML dataset.\npath = os.path.join(AUTOML_API_URL, 'projects', project_id, 'locations', location, 'models')\n\n# Headers (request in JSON format).\nheaders = {'Content-Type': 'application/json'}\n\n# Body (encoded in JSON format).\nautoml_dataset_id = automl_dataset_name.split('/')[-1]\nconfig = {'display_name': model_display_name, 'dataset_id': automl_dataset_id, 'image_classification_model_metadata': {'train_budget': training_budget}}\n\nresp = authed_session.post(path, headers=headers, json=config)\nassert resp.status_code == 200, 'error creating AutoML model, code: {0}, response: {1}'.format(resp.status_code, contenresp.text)\nprint('Full response:\\n{0}'.format(resp.text))\n\n# Record operation_name so we can poll for it later.\nresponse = json.loads(resp.text)\noperation_name = response['name']", "_____no_output_____" ] ], [ [ "The output of the previous step is also an [operation](https://cloud.google.com/vision/automl/docs/models#get-operation) that will need to poll the status of. We will poll until the operation's \"done\" field is set to true. This will take a few minutes to complete.", "_____no_output_____" ] ], [ [ "path = os.path.join(AUTOML_API_URL, operation_name)\ntimeout = time.time() + 40*60 # Wait up to 40 minutes.\nsleep_time = 5*60 # Update each 5 minutes.\nresponse = wait_for_operation_completion(path, timeout, sleep_time)\nfull_model_name = response['response']['name']", "_____no_output_____" ], [ "# google.cloud.automl to make api calls to Cloud AutoML\n!pip install google-cloud-automl\nfrom google.cloud import automl_v1\nclient = automl_v1.AutoMlClient()\nresponse = client.deploy_model(full_model_name)\nprint(u'Model deployment finished. 
{}'.format(response.result()))", "_____no_output_____" ] ], [ [ "Next, we will check out the accuracy metrics for the trained model. The following command will return the [AUC (ROC)](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc), [precision](https://developers.google.com/machine-learning/crash-course/classification/precision-and-recall) and [recall](https://developers.google.com/machine-learning/crash-course/classification/precision-and-recall) for the model, for various ML classification thresholds.", "_____no_output_____" ] ], [ [ "# Path to request to get model accuracy metrics.\npath = os.path.join(AUTOML_API_URL, full_model_name, 'modelEvaluations')\n\nresp = authed_session.get(path)\nassert resp.status_code == 200, 'error getting AutoML model evaluations, code: {0}, response: {1}'.format(resp.status_code, resp.text)\nprint('Full response:\\n{0}'.format(resp.text))", "_____no_output_____" ] ], [ [ "## Inference\n\nTo allow medical imaging ML models to be easily integrated into clinical workflows, an *inference module* can be used. A standalone modality, a PACS system or a DICOM router can push DICOM instances into Cloud Healthcare [DICOM stores](https://cloud.google.com/healthcare/docs/introduction), allowing ML models to be triggered for inference. This inference results can then be structured into various DICOM formats (e.g. DICOM [structured reports](http://dicom.nema.org/MEDICAL/Dicom/2014b/output/chtml/part20/sect_A.3.html)) and stored in the Cloud Healthcare API, which can then be retrieved by the customer.\n\nThe inference module is built as a [Docker](https://www.docker.com/) container and deployed using [Kubernetes](https://kubernetes.io/), allowing you to easily scale your deployment. The dataflow for inference can look as follows (see corresponding diagram below):\n\n1. Client application uses [STOW-RS](ftp://dicom.nema.org/medical/Dicom/2013/output/chtml/part18/sect_6.6.html) to push a new DICOM instance to the Cloud Healthcare DICOMWeb API.\n\n2. The insertion of the DICOM instance triggers a [Cloud Pubsub](https://cloud.google.com/pubsub/) message to be published. The *inference module* will pull incoming Pubsub messages and will recieve a message for the previously inserted DICOM instance. \n\n3. The *inference module* will retrieve the instance in JPEG format from the Cloud Healthcare API using [WADO-RS](ftp://dicom.nema.org/medical/Dicom/2013/output/chtml/part18/sect_6.5.html).\n\n4. The *inference module* will send the JPEG bytes to the model hosted on AutoML.\n\n5. AutoML will return the prediction back to the *inference module*.\n\n6. The *inference module* will package the prediction into a DICOM instance. This can potentially be a DICOM structured report, [presentation state](ftp://dicom.nema.org/MEDICAL/dicom/2014b/output/chtml/part03/sect_A.33.html), or even burnt text on the image. In this codelab, we will focus on just DICOM structured reports, specifically [Comprehensive Structured Reports](http://dicom.nema.org/dicom/2013/output/chtml/part20/sect_A.3.html). The structured report is then stored back in the Cloud Healthcare API using STOW-RS.\n\n7. The client application can query for (or retrieve) the structured report by using [QIDO-RS](http://dicom.nema.org/dicom/2013/output/chtml/part18/sect_6.7.html) or WADO-RS. 
Pubsub can also be used by the client application to poll for the newly created DICOM structured report instance.\n\n![Inference data flow](images/automl_inference_pipeline.png)\n\n\nTo begin, we will create a new DICOM store that will store our inference source (DICOM mammography instance) and results (DICOM structured report). In order to enable Pubsub notifications to be triggered on inserted instances, we will give the DICOM store a Pubsub channel to publish on.", "_____no_output_____" ] ], [ [ "# Pubsub config.\npubsub_topic_id = \"MY_PUBSUB_TOPIC_ID\" # @param\npubsub_subscription_id = \"MY_PUBSUB_SUBSRIPTION_ID\" # @param\n\n# DICOM Store for store DICOM used for inference.\ninference_dicom_store_id = \"MY_INFERENCE_DICOM_STORE\" # @param\n\npubsub_subscription_name = \"projects/\" + project_id + \"/subscriptions/\" + pubsub_subscription_id\ninference_dicom_store_path = dicom_path.FromPath(dicom_store_path, store_id=inference_dicom_store_id)", "_____no_output_____" ], [ "%%bash -s {pubsub_topic_id} {pubsub_subscription_id} {project_id} {location} {dataset_id} {inference_dicom_store_id}\n\n# Create Pubsub channel.\ngcloud beta pubsub topics create $1\ngcloud beta pubsub subscriptions create $2 --topic $1\n\n# Create a Cloud Healthcare DICOM store that published on given Pubsub topic.\nTOKEN=`gcloud beta auth application-default print-access-token`\nNOTIFICATION_CONFIG=\"{notification_config: {pubsub_topic: \\\"projects/$3/topics/$1\\\"}}\"\ncurl -s -X POST -H \"Content-Type: application/json\" -H \"Authorization: Bearer ${TOKEN}\" -d \"${NOTIFICATION_CONFIG}\" https://healthcare.googleapis.com/v1/projects/$3/locations/$4/datasets/$5/dicomStores?dicom_store_id=$6\n\n# Enable Cloud Healthcare API to publish on given Pubsub topic.\nPROJECT_NUMBER=`gcloud projects describe $3 | grep projectNumber | sed 's/[^0-9]//g'`\nSERVICE_ACCOUNT=\"service-${PROJECT_NUMBER}@gcp-sa-healthcare.iam.gserviceaccount.com\"\ngcloud beta pubsub topics add-iam-policy-binding $1 --member=\"serviceAccount:${SERVICE_ACCOUNT}\" --role=\"roles/pubsub.publisher\"", "_____no_output_____" ] ], [ [ "Next, we will building the *inference module* using [Cloud Build API](https://cloud.google.com/cloud-build/docs/api/reference/rest/). This will create a Docker container that will be stored in [Google Container Registry](https://cloud.google.com/container-registry/). The inference module code is found in *[inference.py](./scripts/inference/inference.py)*. The build script used to build the Docker container for this module is *[cloudbuild.yaml](./scripts/inference/cloudbuild.yaml)*. 
Progress of build may be found on [cloud build dashboard](https://console.cloud.google.com/cloud-build/builds?project=).", "_____no_output_____" ] ], [ [ "%%bash -s {project_id}\nPROJECT_ID=$1\n\ngcloud builds submit --config scripts/inference/cloudbuild.yaml --timeout 1h scripts/inference", "_____no_output_____" ] ], [ [ "Next, we will deploy the *inference module* to Kubernetes.", "_____no_output_____" ], [ "Then we create a Kubernetes Cluster and a Deployment for the *inference module*.", "_____no_output_____" ] ], [ [ "%%bash -s {project_id} {location} {pubsub_subscription_name} {full_model_name} {inference_dicom_store_path}\ngcloud container clusters create inference-module --region=$2 --scopes https://www.googleapis.com/auth/cloud-platform --num-nodes=1\n\nPROJECT_ID=$1\nSUBSCRIPTION_PATH=$3\nMODEL_PATH=$4\nINFERENCE_DICOM_STORE_PATH=$5\n\ncat <<EOF | kubectl create -f -\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: inference-module\n namespace: default\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n app: inference-module\n spec:\n containers:\n - name: inference-module\n image: gcr.io/${PROJECT_ID}/inference-module:latest\n command:\n - \"/opt/inference_module/bin/inference_module\"\n - \"--subscription_path=${SUBSCRIPTION_PATH}\"\n - \"--model_path=${MODEL_PATH}\"\n - \"--dicom_store_path=${INFERENCE_DICOM_STORE_PATH}\"\n - \"--prediction_service=AutoML\"\nEOF", "_____no_output_____" ] ], [ [ "Next, we will store a mammography DICOM instance from the TCIA dataset to the DICOM store. This is the image that we will request inference for. Pushing this instance to the DICOM store will result in a Pubsub message, which will trigger the *inference module*.", "_____no_output_____" ] ], [ [ "# DICOM Study/Series UID of input mammography image that we'll push for inference.\ninput_mammo_study_uid = \"1.3.6.1.4.1.9590.100.1.2.85935434310203356712688695661986996009\"\ninput_mammo_series_uid = \"1.3.6.1.4.1.9590.100.1.2.374115997511889073021386151921807063992\"\ninput_mammo_instance_uid = \"1.3.6.1.4.1.9590.100.1.2.289923739312470966435676008311959891294\"", "_____no_output_____" ], [ "from google.cloud import storage\nfrom dicomweb_client.api import DICOMwebClient\nfrom dicomweb_client import session_utils\nfrom pydicom\n\n\nstorage_client = storage.Client()\nbucket = storage_client.bucket('gcs-public-data--healthcare-tcia-cbis-ddsm', user_project=project_id)\nblob = bucket.blob(\"dicom/{}/{}/{}.dcm\".format(input_mammo_study_uid,input_mammo_series_uid,input_mammo_instance_uid))\nblob.download_to_filename('example.dcm')\ndataset = pydicom.dcmread('example.dcm')\nsession = session_utils.create_session_from_gcp_credentials()\nstudy_path = dicom_path.FromPath(inference_dicom_store_path, study_uid=input_mammo_study_uid)\ndicomweb_url = os.path.join(HEALTHCARE_API_URL, study_path.dicomweb_path_str)\ndcm_client = DICOMwebClient(dicomweb_url, session)\ndcm_client.store_instances(datasets=[dataset])", "_____no_output_____" ] ], [ [ "You should be able to observe the *inference module*'s logs by running the following command. In the logs, you should observe that the inference module successfully recieved the the Pubsub message and ran inference on the DICOM instance. The logs should also include the inference results. It can take a few minutes for the Kubernetes deployment to start up, so you many need to run this a few times. The logs should also include the inference results. 
It can take a few minutes for the Kubernetes deployment to start up, so you may need to run this a few times.", "_____no_output_____" ] ], [ [ "!kubectl logs -l app=inference-module", "_____no_output_____" ] ], [ [ "You can also query the Cloud Healthcare DICOMWeb API (using QIDO-RS) to see that the DICOM structured report has been inserted for the study. The structured report contents can be found under tag **\"0040A730\"**. \n\nYou can optionally also use WADO-RS to receive the instance (e.g. for viewing).", "_____no_output_____" ] ], [ [ "dcm_client.search_for_instances(study_path.study_uid, fields=['all'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c521a8d2690dfe2f18e25c0eced099ba91e3ec44
12,861
ipynb
Jupyter Notebook
site/ko/r1/tutorials/eager/custom_training.ipynb
justaverygoodboy/docs-l10n
8d4857750f2b5e8e6889acbb4b1e2f98ad7ce34e
[ "Apache-2.0" ]
null
null
null
site/ko/r1/tutorials/eager/custom_training.ipynb
justaverygoodboy/docs-l10n
8d4857750f2b5e8e6889acbb4b1e2f98ad7ce34e
[ "Apache-2.0" ]
null
null
null
site/ko/r1/tutorials/eager/custom_training.ipynb
justaverygoodboy/docs-l10n
8d4857750f2b5e8e6889acbb4b1e2f98ad7ce34e
[ "Apache-2.0" ]
null
null
null
29.497706
299
0.479512
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# 사용자 정의 학습: 기초", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/r1/tutorials/eager/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />구글 코랩(Colab)에서 실행하기</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/ko/r1/tutorials/eager/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />깃허브(GitHub) 소스 보기</a>\n </td>\n</table>", "_____no_output_____" ], [ "Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도\n불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.\n이 번역에 개선할 부분이 있다면\n[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.\n문서 번역이나 리뷰에 참여하려면\n[[email protected]](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로\n메일을 보내주시기 바랍니다.", "_____no_output_____" ], [ "이전 튜토리얼에서는 머신러닝을 위한 기본 구성 요소인 자동 미분(automatic differentiation)을 위한 텐서플로 API를 알아보았습니다. 이번 튜토리얼에서는 이전 튜토리얼에서 소개되었던 텐서플로의 기본 요소를 사용하여 간단한 머신러닝을 수행해보겠습니다. \n\n텐서플로는 반복되는 코드를 줄이기 위해 유용한 추상화를 제공하는 고수준 신경망(neural network) API인 `tf.keras`를 포함하고 있습니다. 신경망을 다룰 때 이러한 고수준의 API을 강하게 추천합니다. 이번 짧은 튜토리얼에서는 탄탄한 기초를 기르기 위해 기본적인 요소만으로 신경망 훈련시켜 보겠습니다.", "_____no_output_____" ], [ "## 설정", "_____no_output_____" ] ], [ [ "import tensorflow.compat.v1 as tf\n", "_____no_output_____" ] ], [ [ "## 변수\n\n텐서플로의 텐서(Tensor)는 상태가 없고, 변경이 불가능한(immutable stateless) 객체입니다. 그러나 머신러닝 모델은 상태가 변경될(stateful) 필요가 있습니다. 예를 들어, 모델 학습에서 예측을 계산하기 위한 동일한 코드는 시간이 지남에 따라 다르게(희망하건대 더 낮은 손실로 가는 방향으로)동작해야 합니다. 이 연산 과정을 통해 변화되어야 하는 상태를 표현하기 위해 명령형 프로그래밍 언어인 파이썬을 사용 할 수 있습니다. ", "_____no_output_____" ] ], [ [ "# 파이썬 구문 사용\nx = tf.zeros([10, 10])\nx += 2 # 이것은 x = x + 2와 같으며, x의 초기값을 변경하지 않습니다.\nprint(x)", "_____no_output_____" ] ], [ [ "텐서플로는 상태를 변경할 수 있는 연산자가 내장되어 있으며, 이러한 연산자는 상태를 표현하기 위한 저수준 파이썬 표현보다 사용하기가 더 좋습니다. 예를 들어, 모델에서 가중치를 나타내기 위해서 텐서플로 변수를 사용하는 것이 편하고 효율적입니다. \n\n텐서플로 변수는 값을 저장하는 객체로 텐서플로 연산에 사용될 때 저장된 이 값을 읽어올 것입니다. `tf.assign_sub`, `tf.scatter_update` 등은 텐서플로 변수에 저장되있는 값을 조작하는 연산자입니다.", "_____no_output_____" ] ], [ [ "v = tf.Variable(1.0)\nassert v.numpy() == 1.0\n\n# 값을 재배열합니다.\nv.assign(3.0)\nassert v.numpy() == 3.0\n\n# tf.square()와 같은 텐서플로 연산에 `v`를 사용하고 재할당합니다. \nv.assign(tf.square(v))\nassert v.numpy() == 9.0", "_____no_output_____" ] ], [ [ "변수를 사용한 연산은 그래디언트가 계산될 때 자동적으로 추적됩니다. 임베딩(embedding)을 나타내는 변수의 경우 기본적으로 희소 텐서(sparse tensor)를 사용하여 업데이트됩니다. 이는 연산과 메모리에 더욱 효율적입니다. \n\n또한 변수를 사용하는 것은 코드를 읽는 독자에게 상태가 변경될 수 있다는 것을 알려주는 손쉬운 방법입니다.", "_____no_output_____" ], [ "## 예: 선형 모델 훈련\n\n지금까지 몇 가지 개념을 설명했습니다. 간단한 모델을 구축하고 학습시키기 위해 ---`Tensor`, `GradientTape`, `Variable` --- 등을 사용하였고, 이는 일반적으로 다음의 과정을 포함합니다.\n\n1. 모델 정의\n2. 
손실 함수 정의\n3. 훈련 데이터 가져오기\n4. 훈련 데이터에서 실행, 데이터에 최적화하기 위해 \"옵티마이저(optimizer)\"를 사용한 변수 조정\n\n이번 튜토리얼에서는 선형 모델의 간단한 예제를 살펴보겠습니다. `f(x) = x * W + b`, 모델은 `W` 와 `b` 두 변수를 가지고 있는 선형모델이며, 잘 학습된 모델이 `W = 3.0` and `b = 2.0`의 값을 갖도록 합성 데이터를 만들겠습니다.", "_____no_output_____" ], [ "### 모델 정의\n\n변수와 연산을 캡슐화하기 위한 간단한 클래스를 정의해봅시다.", "_____no_output_____" ] ], [ [ "class Model(object):\n def __init__(self):\n # 변수를 (5.0, 0.0)으로 초기화 합니다.\n # 실제로는 임의의 값으로 초기화 되어야합니다.\n self.W = tf.Variable(5.0)\n self.b = tf.Variable(0.0)\n \n def __call__(self, x):\n return self.W * x + self.b\n \nmodel = Model()\n\nassert model(3.0).numpy() == 15.0", "_____no_output_____" ] ], [ [ "### 손실 함수 정의\n\n손실 함수는 주어진 입력에 대한 모델의 출력이 원하는 출력과 얼마나 잘 일치하는지를 측정합니다. 평균 제곱 오차(mean square error)를 적용한 손실 함수를 사용하겠습니다.", "_____no_output_____" ] ], [ [ "def loss(predicted_y, desired_y):\n return tf.reduce_mean(tf.square(predicted_y - desired_y))", "_____no_output_____" ] ], [ [ "### 훈련 데이터 가져오기\n\n약간의 잡음과 훈련 데이터를 합칩니다.", "_____no_output_____" ] ], [ [ "TRUE_W = 3.0\nTRUE_b = 2.0\nNUM_EXAMPLES = 1000\n\ninputs = tf.random_normal(shape=[NUM_EXAMPLES])\nnoise = tf.random_normal(shape=[NUM_EXAMPLES])\noutputs = inputs * TRUE_W + TRUE_b + noise", "_____no_output_____" ] ], [ [ "모델을 훈련시키기 전에, 모델의 현재 상태를 시각화합시다. 모델의 예측을 빨간색으로, 훈련 데이터를 파란색으로 구성합니다.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nplt.scatter(inputs, outputs, c='b')\nplt.scatter(inputs, model(inputs), c='r')\nplt.show()\n\nprint('현재 손실: '),\nprint(loss(model(inputs), outputs).numpy())", "_____no_output_____" ] ], [ [ "### 훈련 루프 정의\n\n이제 네트워크와 훈련 데이터가 준비되었습니다. 모델의 변수(`W` 와 `b`)를 업데이트하기 위해 훈련 데이터를 사용하여 훈련시켜 보죠. 그리고 [경사 하강법(gradient descent)](https://en.wikipedia.org/wiki/Gradient_descent)을 사용하여 손실을 감소시킵니다. 경사 하강법에는 여러가지 방법이 있으며, `tf.train.Optimizer` 에 구현되어있습니다. 이러한 구현을 사용하는것을 강력히 추천드립니다. 그러나 이번 튜토리얼에서는 기본적인 방법을 사용하겠습니다.", "_____no_output_____" ] ], [ [ "def train(model, inputs, outputs, learning_rate):\n with tf.GradientTape() as t:\n current_loss = loss(model(inputs), outputs)\n dW, db = t.gradient(current_loss, [model.W, model.b])\n model.W.assign_sub(learning_rate * dW)\n model.b.assign_sub(learning_rate * db)", "_____no_output_____" ] ], [ [ "마지막으로, 훈련 데이터를 반복적으로 실행하고, `W` 와 `b`의 변화 과정을 확인합니다.", "_____no_output_____" ] ], [ [ "model = Model()\n\n# 도식화를 위해 W값과 b값의 변화를 저장합니다.\nWs, bs = [], []\nepochs = range(10)\nfor epoch in epochs:\n Ws.append(model.W.numpy())\n bs.append(model.b.numpy())\n current_loss = loss(model(inputs), outputs)\n\n train(model, inputs, outputs, learning_rate=0.1)\n print('에포크 %2d: W=%1.2f b=%1.2f, 손실=%2.5f' %\n (epoch, Ws[-1], bs[-1], current_loss))\n\n# 저장된 값들을 도식화합니다.\nplt.plot(epochs, Ws, 'r',\n epochs, bs, 'b')\nplt.plot([TRUE_W] * len(epochs), 'r--',\n [TRUE_b] * len(epochs), 'b--')\nplt.legend(['W', 'b', 'true W', 'true_b'])\nplt.show()", "_____no_output_____" ] ], [ [ "## 다음 단계\n\n이번 튜토리얼에서는 변수를 다루었으며, 지금까지 논의된 텐서플로의 기본 요소를 사용하여 간단한 선형 모델을 구축하고 훈련시켰습니다.\n\n이론적으로, 텐서플로를 머신러닝 연구에 사용하기 위해 알아야 할 것이 매우 많습니다. 실제로 신경망에 있어 `tf.keras`와 같은 고수준 API는 고수준 구성 요소(\"층\"으로 불리는)를 제공하고, 저장 및 복원을 위한 유틸리티, 손실 함수 모음, 최적화 전략 모음 등을 제공하기 때문에 더욱 편리합니다. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c521ad85cf4aa2d6b5760f9199ca38e4e0e7f56b
8,953
ipynb
Jupyter Notebook
CS246_Colab_5.ipynb
masvgp/math_3280
a47aff822e12d7eb097097273eac59169a0b82b4
[ "MIT" ]
null
null
null
CS246_Colab_5.ipynb
masvgp/math_3280
a47aff822e12d7eb097097273eac59169a0b82b4
[ "MIT" ]
null
null
null
CS246_Colab_5.ipynb
masvgp/math_3280
a47aff822e12d7eb097097273eac59169a0b82b4
[ "MIT" ]
null
null
null
31.524648
476
0.556573
[ [ [ "<a href=\"https://colab.research.google.com/github/masvgp/math_3280/blob/main/CS246_Colab_5.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# CS246 - Colab 5\n## PageRank", "_____no_output_____" ], [ "### Setup", "_____no_output_____" ], [ "First of all, we authenticate a Google Drive client to download the dataset we will be processing in this Colab.\n\n**Make sure to follow the interactive instructions.**", "_____no_output_____" ] ], [ [ "from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n\n# Authenticate and create the PyDrive client\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)", "_____no_output_____" ], [ "id='1EoolSK32_U74I4FeLox88iuUB_SUUYsI'\ndownloaded = drive.CreateFile({'id': id})\ndownloaded.GetContentFile('web-Stanford.txt')", "_____no_output_____" ] ], [ [ "If you executed the cells above, you should be able to see the dataset we will use for this Colab under the \"Files\" tab on the left panel.\n\nNext, we import some of the common libraries needed for our task.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Data Loading", "_____no_output_____" ], [ "For this Colab we will be using [NetworkX](https://networkx.github.io), a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks.\n\nThe dataset we will analyze is a snapshot of the Web Graph centered around [stanford.edu](https://stanford.edu), collected in 2002. Nodes represent pages from Stanford University (stanford.edu) and directed edges represent hyperlinks between them. [[More Info]](http://snap.stanford.edu/data/web-Stanford.html)", "_____no_output_____" ] ], [ [ "import networkx as nx\n\nG = nx.read_edgelist('web-Stanford.txt', create_using=nx.DiGraph)", "_____no_output_____" ], [ "print(nx.info(G))", "_____no_output_____" ] ], [ [ "### Your Task", "_____no_output_____" ], [ "To begin with, let's simplify our analysis by ignoring the dangling nodes and the disconnected components in the original graph.\n\nUse NetworkX to identify the **largest** weakly connected component in the ```G``` graph. 
From now on, use this connected component for all the following tasks.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "Compute the PageRank vector, using the default parameters in NetworkX: [https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.link_analysis.pagerank_alg.pagerank.html#networkx.algorithms.link_analysis.pagerank_alg.pageranky](https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.link_analysis.pagerank_alg.pagerank.html#networkx.algorithms.link_analysis.pagerank_alg.pagerank)", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "In 1999, Barabási and Albert proposed an elegant mathematical model which can generate graphs with topological properties similar to the Web Graph (also called Scale-free Networks).\n\nIf you complete the steps below, you should obtain some empirical evidence that the Random Graph model is inferior compared to the Barabási–Albert model when it comes to generating a graph resembling the World Wide Web!", "_____no_output_____" ], [ "As such, we will use two different graph generator methods, and then we will test how well they approximate the Web Graph structure by means of comparing the respective PageRank vectors. [[NetworkX Graph generators]](https://networkx.github.io/documentation/stable/reference/generators.html#)\n\nUsing for both methods ```seed = 1```, generate:\n\n\n1. a random graph (with the fast method), setting ```n``` equal to the number of nodes in the original connected component, and ```p = 0.00008```\n2. a Barabasi-Albert graph (with the standard method), setting ```n``` equal to the number of nodes in the original connected component, and finding the right ***integer*** value for ```m``` such as the resulting number of edges **approximates by excess** the number of edges in the original connected component\n\nand compute the PageRank vectors for both graphs.\n", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "Compare the PageRank vectors obtained on the generated graphs with the PageRank vector you computed on the original connected component.\n**Sort** the components of each vector by value, and use cosine similarity as similarity measure. \n\nFeel free to use any implementation of the cosine similarity available in third-party libraries, or implement your own with ```numpy```.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "Once you have working code for each cell above, **head over to Gradescope, read carefully the questions, and submit your solution for this Colab**!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c521b8323fc031a889bfd5a34495d74821668095
530,060
ipynb
Jupyter Notebook
Chapter04/Activity4.06/Activity4.06.ipynb
jackleg/The-Data-Visualization-Workshop
247077964e0c931db129ee8267de45e378909c4d
[ "MIT" ]
null
null
null
Chapter04/Activity4.06/Activity4.06.ipynb
jackleg/The-Data-Visualization-Workshop
247077964e0c931db129ee8267de45e378909c4d
[ "MIT" ]
null
null
null
Chapter04/Activity4.06/Activity4.06.ipynb
jackleg/The-Data-Visualization-Workshop
247077964e0c931db129ee8267de45e378909c4d
[ "MIT" ]
null
null
null
970.805861
312,248
0.949662
[ [ [ "## Activity 4.06: Visualizing the Impact of Education on Annual Salary and Weekly Working Hours\nYou're asked to get insights whether the education of people has an influence on the annual salary and weekly working hours. You ask 500 people in the state of New York about their age, annual salary, weekly working hours, and their education. You first want to know the percentage for each education type, therefore, use a tree map. Two violin plots shall be used to visualize the annual salary and the weekly working hours. Compare in each case to what extent the education has an impact.\nIt should also be taken into account that all visualizations in this activity are designed to be suitable for color blind people. In principle, this is always a good idea to bear in mind.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport squarify\nsns.set()", "_____no_output_____" ] ], [ [ "Use pandas to read the dataset age_salary_hours.csv located in the Dataset folder. Use a colormap that is suitable for colorblind people.", "_____no_output_____" ] ], [ [ "data = pd.read_csv(\"../../Datasets/age_salary_hours.csv\")", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ] ], [ [ "Use a tree map to visualize the percentages for each education type.", "_____no_output_____" ] ], [ [ "# Compute percentages from dataset\ndegrees = set(data['Education'])\npercentages = []\nfor degree in degrees:\n percentages.append(data[data['Education'] == degree].shape[0])\npercentages = np.array(percentages)\npercentages = ((percentages / percentages.sum()) * 100)", "_____no_output_____" ], [ "percentages", "_____no_output_____" ], [ "# Create labels for tree map\nlabels = [degree + '\\n({0:.1f}%)'.format(percentage) for degree, percentage in zip(degrees, percentages)]", "_____no_output_____" ], [ "labels", "_____no_output_____" ], [ "# Create figure\nplt.figure(figsize=(9, 6), dpi=200)\nsquarify.plot(percentages, label=labels, color=sns.color_palette('colorblind', len(degrees)))\nplt.axis('off')\n# Add title\nplt.title('Degrees')\n# Show plot\nplt.show()", "_____no_output_____" ] ], [ [ "Create a subplot with two rows to visualize two violin plots for the annual salary and weekly working hours, respectively. Compare in each case to what extent the education has an impact. To exclude pensioners, only consider people younger than 65. Use a colormap that is suitable for colorblind people. 
subplots() can be used in combination with Seaborn's plot, by simply passing the ax argument with the respective Axes.", "_____no_output_____" ] ], [ [ "ordered_degrees = sorted(list(degrees))\nordered_degrees = [ordered_degrees[4], ordered_degrees[3], ordered_degrees[1], ordered_degrees[0], ordered_degrees[2]]", "_____no_output_____" ], [ "ordered_degrees", "_____no_output_____" ], [ "data = data.loc[data['Age'] < 65]", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "sns.set_palette('colorblind')\nfig, ax = plt.subplots(2, 1, dpi=200, figsize=(8, 8))\nsns.violinplot('Education', 'Annual Salary', data=data, cut=0, order=ordered_degrees, ax=ax[0])\nax[0]\n#ax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=10)\n#sns.violinplot('Education', 'Weekly hours', data=data, cut=0, order=ordered_degrees, ax=ax[1])\n#ax[1].set_xticklabels(ax[1].get_xticklabels(), rotation=10)\n#plt.tight_layout()\n# Add title\n#fig.suptitle('Impact of Education on Annual Salary and Weekly Working Hours')\n# Show figure", "_____no_output_____" ], [ "# Set color palette to colorblind\nsns.set_palette('colorblind')\n# Create subplot with two rows\nfig, ax = plt.subplots(2, 1, dpi=200, figsize=(8, 8))\nsns.violinplot('Education', 'Annual Salary', data=data, cut=0, order=ordered_degrees, ax=ax[0])\nax[0].set_xticklabels(ax[0].get_xticklabels(), rotation=10)\nsns.violinplot('Education', 'Weekly hours', data=data, cut=0, order=ordered_degrees, ax=ax[1])\nax[1].set_xticklabels(ax[1].get_xticklabels(), rotation=10)\nplt.tight_layout()\n# Add title\nfig.suptitle('Impact of Education on Annual Salary and Weekly Working Hours')\n# Show figure\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
c521be0cdc142f8295e76a0e99b19154c73a2392
5,010
ipynb
Jupyter Notebook
Chapter_7/Section_7.2.1.ipynb
godfanmiao/ML-Kaggle-Github-2022
19c9fd0fe5db432f43f5844e170f952eaaaeaefd
[ "BSD-3-Clause" ]
8
2021-10-15T12:27:01.000Z
2022-02-21T13:50:04.000Z
Chapter_7/Section_7.2.1.ipynb
godfanmiao/ML-Kaggle-Github-2022
19c9fd0fe5db432f43f5844e170f952eaaaeaefd
[ "BSD-3-Clause" ]
null
null
null
Chapter_7/Section_7.2.1.ipynb
godfanmiao/ML-Kaggle-Github-2022
19c9fd0fe5db432f43f5844e170f952eaaaeaefd
[ "BSD-3-Clause" ]
1
2022-02-04T07:25:34.000Z
2022-02-04T07:25:34.000Z
24.558824
215
0.504591
[ [ [ "from pyspark import SparkContext\n\n\n#创建SparkContext。\nsc = SparkContext()\n", "21/10/11 17:08:40 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\nUsing Spark's default log4j profile: org/apache/spark/log4j-defaults.properties\nSetting default log level to \"WARN\".\nTo adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n" ], [ "#读取文件并存储到RDD中。\nrdd = sc.textFile('../Datasets/news/news_sentiment.csv')", "_____no_output_____" ], [ "#分布式统计文件的行数。\nrdd.count()", " \r" ], [ "#查看前2行原始文件内容。\nrdd.take(2)", "_____no_output_____" ], [ "#用map构建标签的键-值对。\nlabels = rdd.map(lambda line: (line.split(',')[0], 1))\n\n#用reduce对相同键的值进行求和。\nlabel_counts = labels.reduceByKey(lambda a, b: a + b)\n\n#查看标签的分布。\nlabel_counts.take(5)", "/opt/anaconda3/envs/python_dml/lib/python3.8/site-packages/pyspark/python/lib/pyspark.zip/pyspark/shuffle.py:60: UserWarning: Please install psutil to have better support with spilling\n/opt/anaconda3/envs/python_dml/lib/python3.8/site-packages/pyspark/python/lib/pyspark.zip/pyspark/shuffle.py:60: UserWarning: Please install psutil to have better support with spilling\n \r" ], [ "def extract_words(line):\n items = list()\n line = line.lower()\n words = line.split(',')[1]\n for word in words.split(' '):\n items.append((word, 1))\n return items\n \nwords = rdd.flatMap(extract_words)\n\nword_counts = words.reduceByKey(lambda a, b: a + b)\n\n#查看词频最高的10个单词。\nword_counts.sortBy(lambda pair: pair[1], False).collect()[:10]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
c521c7aa40c6967f755c31f6092ae1e0cceb48cf
6,104
ipynb
Jupyter Notebook
Data Cleaning/DataCleaning_Portland_families.ipynb
shimonyagrawal/Exploratory-Data-Analysis-and-Customer-Segmentation
5c66314c7e163ef2c154e414687b5ea4bf564262
[ "MIT" ]
null
null
null
Data Cleaning/DataCleaning_Portland_families.ipynb
shimonyagrawal/Exploratory-Data-Analysis-and-Customer-Segmentation
5c66314c7e163ef2c154e414687b5ea4bf564262
[ "MIT" ]
null
null
null
Data Cleaning/DataCleaning_Portland_families.ipynb
shimonyagrawal/Exploratory-Data-Analysis-and-Customer-Segmentation
5c66314c7e163ef2c154e414687b5ea4bf564262
[ "MIT" ]
1
2021-09-09T02:42:11.000Z
2021-09-09T02:42:11.000Z
28.259259
89
0.344364
[ [ [ "import pandas as pd\nimport numpy as np\nPortland_families = pd.read_csv(\"portland_families.csv\")\nPortland_families.head()", "_____no_output_____" ] ], [ [ "We will remove the household ID column as this may not be required in the analysis.", "_____no_output_____" ], [ "Let's ensure that the dataset doesn't contain any NaN value. ", "_____no_output_____" ] ], [ [ "Portland_families.isnull().values.any()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
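The first markdown cell in the Portland record above says the household ID column will be removed, but no cell in the record actually drops it. A minimal sketch of that step, assuming the column is literally named "household ID" (the real column name is not visible in this record), might look like:

```python
import pandas as pd

Portland_families = pd.read_csv("portland_families.csv")

# Drop the household identifier; errors='ignore' covers the case where the
# assumed name 'household ID' does not match the actual column name.
Portland_families = Portland_families.drop(columns=["household ID"], errors="ignore")

# Re-check that no missing values remain after the drop.
print(Portland_families.isnull().values.any())
```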
c521d929763ee1fca91e0ce4bbc9dc419175e26d
4,807
ipynb
Jupyter Notebook
looking_apartments.ipynb
oramirezperera/Vectors_linear_algebra
358eacf00e6cbe58d84b49fb0a014795bb0b9347
[ "MIT" ]
null
null
null
looking_apartments.ipynb
oramirezperera/Vectors_linear_algebra
358eacf00e6cbe58d84b49fb0a014795bb0b9347
[ "MIT" ]
null
null
null
looking_apartments.ipynb
oramirezperera/Vectors_linear_algebra
358eacf00e6cbe58d84b49fb0a014795bb0b9347
[ "MIT" ]
null
null
null
30.424051
293
0.503016
[ [ [ "<a href=\"https://colab.research.google.com/github/oramirezperera/Vectors_linear_algebra/blob/main/looking_apartments.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Imagine we are looking for a new apartment, but we want to know which of the apartments we have selected is the best for us. For this we are going to create a vector with our ideal apartment.\n\nWe want our apartment to be greater than 80 mts but less than 110, we want two rooms, two bathrooms and one parking spot. \n\nIf we create our Ideal vector with the meters we will add a lot of noise, so we create categories for each apartment size.\n\n1. If the area of the apartment is less than 60 mts.\n2. If the area is greater or equal to 60 but less than 80 mts.\n3. If the area is greater or equal to 80 but less than 110 mts.\n4. If the area is greater or equal to 110 but less than 130 mts.\n5. If the area is greater or equal to 130 mts.\n\nOur ideal vector will be an apartment with an area between 80 and 110, 2 rooms, 2 bathrooms and 1 parking spot.\n\n$$ \\mathbf{I} = \\begin{bmatrix} 3\\\\ 2\\\\ 2\\\\1 \\end{bmatrix}$$\n\nAnd we have these three apartments:\n\n$$ \\mathbf{A_{1}} = \\begin{bmatrix}4 \\\\3 \\\\ 3\\\\3 \\end{bmatrix}, \\qquad \\mathbf{A_{2}} = \\begin{bmatrix}3 \\\\3 \\\\ 1\\\\0 \\end{bmatrix}, \\qquad \\mathbf{A_{3}} = \\begin{bmatrix}5 \\\\4 \\\\ 3\\\\0 \\end{bmatrix} $$\n", "_____no_output_____", "_____no_output_____", "_____no_output_____" ], [ "## Dependencies", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "We create our vectors", "_____no_output_____" ] ], [ [ "I = np.array([3, 2, 2, 1])\nA_1 = np.array([4, 3, 3, 3])\nA_2 = np.array([3, 3, 1, 0])\nA_3 = np.array([5, 4, 3, 0])", "_____no_output_____" ] ], [ [ "Now we calculate the distance between each vector and our Ideal vector", "_____no_output_____" ] ], [ [ "print(f'||I-A_1|| =',np.linalg.norm(I-A_1))\nprint(f'||I-A_2|| =',np.linalg.norm(I-A_2))\nprint(f'||I-A_3|| =',np.linalg.norm(I-A_3))", "||I-A_1|| = 2.6457513110645907\n||I-A_2|| = 1.7320508075688772\n||I-A_3|| = 3.1622776601683795\n" ] ], [ [ "As we can see, the best apartment for us will be A2 because it's the nearest to our ideal apartment. We can change this exercise by giving weights to each of the parameters we are considering, because, for example, you may think it is more important to have a parking spot than another bathroom, etc.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
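The closing remark in the apartment notebook above suggests weighting each parameter (for instance valuing a parking spot over an extra bathroom) but gives no code for it. A minimal sketch of that idea, reusing the notebook's vectors and using made-up weights, could be:

```python
import numpy as np

# Vectors from the notebook: [size category, rooms, bathrooms, parking spots]
I = np.array([3, 2, 2, 1])
A_1 = np.array([4, 3, 3, 3])
A_2 = np.array([3, 3, 1, 0])
A_3 = np.array([5, 4, 3, 0])

# Hypothetical weights: parking matters most, an extra bathroom matters least.
w = np.array([1.0, 1.0, 0.5, 2.0])

def weighted_distance(a, b, weights):
    """Euclidean distance after scaling each feature difference by its weight."""
    return np.linalg.norm(weights * (a - b))

for name, apt in [("A_1", A_1), ("A_2", A_2), ("A_3", A_3)]:
    print(f"||I - {name}||_w = {weighted_distance(I, apt, w):.4f}")
```

The weights themselves are a modelling choice; with a heavy weight on parking, candidates that lack a spot are penalised more and the ranking can change.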
c521da3878976c9a4ea194f7c8de222cec612d3a
139,328
ipynb
Jupyter Notebook
Labs/Lab_2/Lab2.ipynb
DMinghao/Deep_Learning_with_Unstructured_Data
2fec6960e7fc7b8ad36dc0a357d082bc34e099fa
[ "MIT" ]
4
2021-01-25T13:46:35.000Z
2021-01-30T04:21:15.000Z
Labs/Lab_2/Lab2.ipynb
DMinghao/Deep_Learning_with_Unstructured_Data
2fec6960e7fc7b8ad36dc0a357d082bc34e099fa
[ "MIT" ]
null
null
null
Labs/Lab_2/Lab2.ipynb
DMinghao/Deep_Learning_with_Unstructured_Data
2fec6960e7fc7b8ad36dc0a357d082bc34e099fa
[ "MIT" ]
null
null
null
311.695749
46,215
0.796724
[ [ [ "# Lab 2: networkX Drawing and Network Properties", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport pandas as pd \nfrom networkx import nx", "_____no_output_____" ] ], [ [ "## TOC\n1. [Q1](#Q1)\n2. [Q2](#Q2)\n3. [Q3](#Q3)\n4. [Q4](#Q4)\n", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(11, 8))\nax = axes.flatten()\npath = nx.path_graph(5)\nnx.draw_networkx(path, with_labels=True, ax=ax[0])\nax[0].set_title('Path')\ncycle = nx.cycle_graph(5)\nnx.draw_networkx(cycle, node_color='green', with_labels=True, ax=ax[1])\nax[1].set_title('Cycle')\ncomplete = nx.complete_graph(5)\nnx.draw_networkx(complete, node_color='#A0CBE2', edge_color='red', width=2, with_labels=False, ax=ax[2])\nax[2].set_title('Complete')\nstar = nx.star_graph(5)\npos=nx.spring_layout(star)\nnx.draw_networkx(star, pos, with_labels=True, ax=ax[3])\nax[3].set_title('Star')\nfor i in range(4): ax[i].set_axis_off()\nplt.show()", "_____no_output_____" ] ], [ [ "### Q1:\n*Use one sentence each to briefly describe the characteristics of each graph\ntype (its shape, edges, etc..)*", "_____no_output_____" ], [ "$V$ = a set of vertices, where $V \\ni \\{v_1, v_2, ... , v_n\\}$\n\n$E$ = a set of edges, where $E \\subseteq \\{\\{v_x,v_y\\}\\mid v_x,v_y\\in V\\}$\n\nLet $G$ = ($V$, $E$) be an undirected graph\n\n- **Path Graph** := Suppose there are n vertices ($v_0, v_1, ... , v_n$) in $G$, such that $\\forall e_{(v_x,v_y)} \\in E $ | $0 \\leq x \\leq n-1$; $y = x + 1$ \n- **Cycle Graph** := Suppose there are n vertices ($v_0, v_1, ... , v_n$) in $G$, such that $\\forall e_{(v_x,v_y)} \\in E $ | $0 \\leq x \\leq n; \\{(0 \\leq x \\leq n-1) \\Rightarrow (y = x + 1)\\} \\land \\{(x = n) \\Rightarrow (y = 0)\\}$ \n- **Complete Graph**:= Suppose there are n vertices ($v_0, v_1, ... , v_n$) in $G$, such that $\\forall e_{(v_x,v_y)} \\in E $ | $x \\neq y; 0 \\leq x,y \\leq n$\n- **Star Graph** := Suppose there are n vertices ($v_0, v_1, ... , v_n$) in $G$, such that $\\forall e_{(v_x,v_y)} \\in E $ | $x = 0; 1 \\leq y \\leq n$", "_____no_output_____" ] ], [ [ "G = nx.lollipop_graph(3,2)\nnx.draw(G, with_labels=True)\nplt.show()", "_____no_output_____" ], [ "list(nx.connected_components(G))\n", "_____no_output_____" ], [ "nx.clustering(G)", "_____no_output_____" ] ], [ [ "### Q2: \n*How many connected components are there in the graph? What are they?*", "_____no_output_____" ], [ "There is only one connected component in the graph, it's all 5 vertices of the graph ", "_____no_output_____" ], [ "### Q3:\n*Which nodes have the highest local clustering coefficient? 
Explain (from the\ndefinition) why they have high clustering coefficient.*\n", "_____no_output_____" ], [ "Node 0 and 1 have the highest local clustering coefficient of 1, because the neighbor of these two nodes are each other and node 2, $(2*1\\text{ between neighbor link})\\div(2\\text{ degrees}*(2-1)) = 1$", "_____no_output_____" ] ], [ [ "def netMeta(net):\n meta = {}\n meta[\"radius\"]= nx.radius(net)\n meta[\"diameter\"]= nx.diameter(net)\n meta[\"eccentricity\"]= nx.eccentricity(net)\n meta[\"center\"]= nx.center(net)\n meta[\"periphery\"]= nx.periphery(net)\n meta[\"density\"]= nx.density(net)\n return meta", "_____no_output_____" ], [ "netMeta(G)", "_____no_output_____" ], [ "def netAna(net): \n cols = ['Node name', \"Betweenness centrality\", \"Degree centrality\", \"Closeness centrality\", \"Eigenvector centrality\"]\n rows =[]\n print()\n a = nx.betweenness_centrality(net)\n b = nx.degree_centrality(net)\n c = nx.closeness_centrality(net)\n d = nx.eigenvector_centrality(net)\n for v in net.nodes(): \n temp = []\n temp.append(v)\n temp.append(a[v])\n temp.append(b[v])\n temp.append(c[v])\n temp.append(d[v])\n rows.append(temp)\n df = pd.DataFrame(rows,columns=cols)\n df.set_index('Node name', inplace = True)\n return df", "_____no_output_____" ], [ "G_stat = netAna(G)\nG_stat", "\n" ], [ "G_stat.sort_values(by=['Eigenvector centrality'])", "_____no_output_____" ] ], [ [ "### Q4: \n*Which node(s) has the highest betweenness, degree, closeness, eigenvector\ncentrality? Explain using the definitions and graph structures.*", "_____no_output_____" ], [ "Node 2 has the highest betweenness, degree, closeness, and eigenvector centrality\n\nBecause node 2 has the most geodesics passing through, it has the highest degree of 3, it has the shortest average path length, and it has the most refferences by its neighbors ", "_____no_output_____" ] ], [ [ "pathlengths = []\nprint(\"source vertex {target:length, }\")\nfor v in G.nodes():\n spl = dict(nx.single_source_shortest_path_length(G, v))\n print('{} {} '.format(v, spl))\n for p in spl:\n pathlengths.append(spl[p])\nprint('')\nprint(\"average shortest path length %s\" % (sum(pathlengths) / len(pathlengths)))\ndist = {}\nfor p in pathlengths:\n if p in dist: dist[p] += 1\n else: dist[p] = 1\nprint('')\nprint(\"length #paths\")\nverts = dist.keys()\nfor d in sorted(verts):\n print('%s %d' % (d, dist[d]))", "source vertex {target:length, }\n0 {0: 0, 1: 1, 2: 1, 3: 2, 4: 3} \n1 {1: 0, 0: 1, 2: 1, 3: 2, 4: 3} \n2 {2: 0, 0: 1, 1: 1, 3: 1, 4: 2} \n3 {3: 0, 2: 1, 4: 1, 0: 2, 1: 2} \n4 {4: 0, 3: 1, 2: 2, 0: 3, 1: 3} \n\naverage shortest path length 1.36\n\nlength #paths\n0 5\n1 10\n2 6\n3 4\n" ], [ "mapping = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'}\nH = nx.relabel_nodes(G, mapping)\nnx.draw(H, with_labels=True)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
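The Q3 answer in the Lab 2 record above works out node 0's local clustering coefficient by hand as (2*1)/(2*(2-1)) = 1. A short check of that arithmetic against networkx, using the same lollipop_graph(3, 2), could be:

```python
import networkx as nx

G = nx.lollipop_graph(3, 2)

node = 0
neighbors = list(G.neighbors(node))        # [1, 2]
k = len(neighbors)                         # degree of node 0
# Count the edges that exist between the neighbors themselves (here only 1-2).
links = sum(1 for u in neighbors for v in neighbors if u < v and G.has_edge(u, v))

manual = 2 * links / (k * (k - 1))         # 2*1 / (2*1) = 1.0
print(manual, nx.clustering(G, node))      # both should be 1.0
```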
c521e4c7d4aac90c6781d58f3a9d692f02a18aaa
12,176
ipynb
Jupyter Notebook
2 Refine Data.ipynb
veicco/bcg-gamma
598106d3f734f9f88640019c3c99e5fe6f20ad4b
[ "MIT" ]
3
2020-08-19T20:58:53.000Z
2020-11-19T00:12:33.000Z
2 Refine Data.ipynb
KovaVeikko/bcg-gamma
598106d3f734f9f88640019c3c99e5fe6f20ad4b
[ "MIT" ]
null
null
null
2 Refine Data.ipynb
KovaVeikko/bcg-gamma
598106d3f734f9f88640019c3c99e5fe6f20ad4b
[ "MIT" ]
3
2019-04-20T08:31:10.000Z
2019-09-18T20:49:18.000Z
32.21164
170
0.394136
[ [ [ "# 2 Refine Data\n\nThis module adds calculated features to the original data.\n\nAfter downloading the raw data (with \"1 Download Raw Data\" notebook), run all cells of this notebook to get \"refined_dataset.csv\".", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom dateutil import parser", "_____no_output_____" ], [ "def add_daily_sum(data):\n data[\"sum\"] = data.apply(lambda row: sum([row[\"hour_{}\".format(x)] for x in range(1,25)]), axis=1)\n\ndef add_date_features(data):\n data[\"year\"] = data.date.apply(lambda date: date.year)\n data[\"weekday\"] = data.date.apply(lambda date: date.weekday())\n data[\"week\"] = data.date.apply(lambda date: date.isocalendar()[1])\n \ndef add_weather_data(data, weather_data):\n weather_data[\"date\"] = weather_data.apply(lambda row: \"{:04}/{:02}/{:02}\".format(row[\"Vuosi\"], row[\"Kk\"], row[\"Pv\"]), axis=1).apply(pd.to_datetime)\n weather_data = weather_data[[\"date\", \"Sademäärä (mm)\", \"Ilman lämpötila (degC)\"]]\n weather_data.columns = [\"date\", \"rain\", \"temperature\"]\n merged_data = raw_data.merge(weather_data, on=\"date\", how=\"left\")\n data = merged_data", "_____no_output_____" ], [ "# import datasets\ndata = pd.read_csv(\"raw_dataset.csv\", parse_dates=[\"date\"])\n#weather_data = pd.read_csv(\"weather_1995_2018.csv\")", "_____no_output_____" ], [ "# add calculated features\nadd_date_features(data)\nadd_daily_sum(data)", "_____no_output_____" ], [ "# ensure the DataFrame looks good\nprint(data.info())\n\ndata.head()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 111151 entries, 0 to 111150\nData columns (total 33 columns):\nlocation_id 111151 non-null int64\nlocation_name 111151 non-null object\ndate 111151 non-null datetime64[ns]\ndirection 111151 non-null int64\nvehicle_type 111151 non-null object\nhour_1 111151 non-null float64\nhour_2 111151 non-null float64\nhour_3 111151 non-null float64\nhour_4 111151 non-null float64\nhour_5 111151 non-null float64\nhour_6 111151 non-null float64\nhour_7 111151 non-null float64\nhour_8 111151 non-null float64\nhour_9 111151 non-null float64\nhour_10 111151 non-null float64\nhour_11 111151 non-null float64\nhour_12 111151 non-null float64\nhour_13 111151 non-null float64\nhour_14 111151 non-null float64\nhour_15 111151 non-null float64\nhour_16 111151 non-null float64\nhour_17 111151 non-null float64\nhour_18 111151 non-null float64\nhour_19 111151 non-null float64\nhour_20 111151 non-null float64\nhour_21 111151 non-null float64\nhour_22 111151 non-null float64\nhour_23 111151 non-null float64\nhour_24 111151 non-null float64\nyear 111151 non-null int64\nweekday 111151 non-null int64\nweek 111151 non-null int64\nsum 111151 non-null float64\ndtypes: datetime64[ns](1), float64(25), int64(5), object(2)\nmemory usage: 28.0+ MB\nNone\n" ], [ "# save the refined table as csv\ndata.to_csv(\"refined_dataset.csv\", index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
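In the notebook above, add_weather_data builds merged_data but only rebinds its local data name (and reads a raw_data global), so the merged frame never reaches the caller. A sketch of a version that returns the merge instead, assuming the same Finnish weather column names, could be:

```python
import pandas as pd

def add_weather_data(data: pd.DataFrame, weather_data: pd.DataFrame) -> pd.DataFrame:
    """Return `data` left-joined with daily rain/temperature readings on `date`."""
    weather_data = weather_data.copy()
    weather_data["date"] = pd.to_datetime(
        weather_data.apply(
            lambda row: "{:04}/{:02}/{:02}".format(row["Vuosi"], row["Kk"], row["Pv"]),
            axis=1,
        )
    )
    weather_data = weather_data[["date", "Sademäärä (mm)", "Ilman lämpötila (degC)"]]
    weather_data.columns = ["date", "rain", "temperature"]
    return data.merge(weather_data, on="date", how="left")

# data = add_weather_data(data, weather_data)
```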
c521f86df6eafc0ece33c3ccdc75414aa394a18b
19,314
ipynb
Jupyter Notebook
source/convnet-dataviz.ipynb
harddownloader/ml-demos
1abfb2a0b4545146c8bc8b6bf6b39baeb776e7e2
[ "MIT" ]
110
2018-07-06T03:00:36.000Z
2021-06-15T21:49:59.000Z
source/convnet-dataviz.ipynb
bzqweiyi/ml-demos
5843fed683ff606f60b2554b43b98da8f0184485
[ "MIT" ]
null
null
null
source/convnet-dataviz.ipynb
bzqweiyi/ml-demos
5843fed683ff606f60b2554b43b98da8f0184485
[ "MIT" ]
54
2018-05-17T15:50:09.000Z
2022-01-11T04:06:13.000Z
29.352584
161
0.542301
[ [ [ "# Lesson 1 Experiments\nThis section just reproduces lesson 1 logic using my own code and with 30 tennis and 30 basketball player images. I chose all male players for simplicity. ", "_____no_output_____" ] ], [ [ "# Put these at the top of every notebook, to get automatic reloading and inline plotting\n%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "# This file contains all the main external libs we'll use\nfrom fastai.imports import *\nfrom fastai.transforms import *\nfrom fastai.conv_learner import *\nfrom fastai.model import *\nfrom fastai.dataset import *\nfrom fastai.sgdr import *\nfrom fastai.plots import *\nfrom typing import List, Union\nfrom pathlib import Path\n", "_____no_output_____" ] ], [ [ "## Download the Sample Data\nOnly execute the cell below once! If the commands below don't work, try the direct link [here](https://1drv.ms/u/s!AkhwiUY5vHPCs03Q26908HIwKFkG).", "_____no_output_____" ] ], [ [ "!wget 'https://onedrive.live.com/download?cid=C273BC3946897048&resid=C273BC3946897048%216605&authkey=AIVFQLj7IoJYiz4' -O foo.zip\n!unzip -d data foo.zip \n!rm foo.zip", "_____no_output_____" ] ], [ [ "## Load the Sample Data", "_____no_output_____" ] ], [ [ "sz=224\npath = Path('data/tennisbball')\npath.absolute(), list(path.glob('*'))", "_____no_output_____" ], [ "sample = plt.imread(next(iter((path / 'valid' / 'tennis').iterdir())))\nplt.imshow(sample)\nplt.figure()\nsample = plt.imread(next(iter((path / 'valid' / 'bball').iterdir())))\nplt.imshow(sample)", "_____no_output_____" ], [ "sample.shape, sample[:4,:4]", "_____no_output_____" ], [ "torch.cuda.is_available(),torch.backends.cudnn.enabled", "_____no_output_____" ] ], [ [ "## Construct the Model\nDefine the model architecture", "_____no_output_____" ] ], [ [ "#tfms_from_model -- model based image transforms (preprocessing stats)\narch=resnet50\ndata = ImageClassifierData.from_paths(path, test_name='test', test_with_labels=True, tfms=tfms_from_model(arch, sz))\n\n#precompute=True to save conv layer activations! 
pass False if you want to run the data viz below\nlearner = ConvLearner.pretrained(f=arch, data=data, precompute=False)", "_____no_output_____" ] ], [ [ "## Train a Model\nThis section trains a model using transfer learning.", "_____no_output_____" ] ], [ [ "\nlearner.fit(0.01, 15)", "_____no_output_____" ], [ "#uncomment line below to save the model\n\n#learner.save('tennis_v_bball.lrnr')", "_____no_output_____" ] ], [ [ "## Load/Visualize an Existing Model\nOr if you've already trained a model, skip the above section and start from here.", "_____no_output_____" ] ], [ [ "learner.load('tennis_v_bball.lrnr')", "_____no_output_____" ], [ "probs = np.exp(learner.predict())\nprobs", "_____no_output_____" ], [ "#TODO: improve\ndef display_images(images:List[Union[Path, np.ndarray]], columns:int, titles:List[str]=None, figsize=None) -> None:\n if not titles:\n titles = [f'Image {i+1}' for i in range(len(images))]\n rows = len(images) // columns + int(len(images) % columns > 0)\n if figsize is None:\n figsize = (60,60)\n plt.figure(figsize=figsize)\n for i, (image, title) in enumerate(zip(images, titles)):\n if isinstance(image, Path):\n image = np.array(PIL.Image.open(image))\n plt.subplot(rows, columns, i+1)\n plt.imshow(image)\n plt.title(title, fontsize=10*columns)\n plt.axis('off')", "_____no_output_____" ], [ "#val images\npredictions = probs.argmax(axis=1)\nimages, titles = [], []\nfor prob, pclass, fname in zip(probs, predictions, data.val_ds.fnames):\n images.append(path / fname)\n titles.append(f'{fname} -- {prob[pclass]:.{3}f} ({data.classes[pclass]})')\n \ndisplay_images(images, 4, titles)", "_____no_output_____" ], [ "test_probs = np.exp(learner.predict(is_test=True))\ntest_predictions = test_probs.argmax(axis=1)\n\n#test images\nimages, titles = [],[]\nfor prob, pclass, fname in zip(test_probs, test_predictions, data.test_ds.fnames):\n images.append(path / fname)\n titles.append(f'{fname} -- {prob[pclass]:.{3}f} ({data.classes[pclass]})')\n \ndisplay_images(images, 4, titles)", "_____no_output_____" ] ], [ [ "## Dataviz -- Activations", "_____no_output_____" ] ], [ [ "#check out the model structure\nmodel = learner.model\nmodel", "_____no_output_____" ], [ "#\n# utilize torch hooks to capture the activations for any conv layer. 
for simplicity we use a \n# batch size of 1.\n#\nclass ActivationHook:\n def __init__(self):\n self.output = []\n \n def __call__(self, module, input, output):\n self.output = output.data\n \ndef find_layers(module, ltype): \n rv = []\n if isinstance(module, ltype):\n rv.append(module)\n else:\n for c in module.children():\n rv.extend(find_layers(c, ltype))\n \n return rv\n\ndef capture_activations(model, x):\n layers = find_layers(model, nn.Conv2d)\n hooks = [ActivationHook() for _ in layers]\n handles = [conv.register_forward_hook(hook) for conv, hook in zip(layers, hooks)]\n model(x)\n for h in handles:\n h.remove()\n \n return [h.output for h in hooks]\n\nbs = data.bs\ndata.bs = 1\ndl = data.get_dl(data.test_ds, False) \ni = iter(dl)\nball_x = next(i)[0]\nnoball_x = next(i)[0]\ndata.bs = bs", "_____no_output_____" ], [ "ball_activations = capture_activations(model, Variable(ball_x))\nnoball_activations = capture_activations(model, Variable(noball_x))\nfor i, layer_output in enumerate(ball_activations):\n print(f'Layer {i}: {layer_output.squeeze().shape}')", "_____no_output_____" ], [ "#layer 5, filter 18, 36 seems to like circular type things\nlayer_idx = 0\nimages = []\ntitles = []\nnum_filters = ball_activations[layer_idx].shape[1]\nasize = ball_activations[layer_idx].shape[2]\n\ndef filter_activations_to_image(activations, lidx, fidx):\n a = activations[lidx].squeeze() #choose conv layer & discard batch dimension\n a = a[fidx] #choose conv filter\n a = (a - a.mean())/(3*a.std()) + 0.5 #center and scale down\n a = a.clamp(0, 1).numpy() # and finally clamp \n return a\n\nbuff_size = 10\nfor filter_idx in range(num_filters):\n a0 = filter_activations_to_image(ball_activations, layer_idx, filter_idx)\n a1 = filter_activations_to_image(noball_activations, layer_idx, filter_idx)\n z = np.hstack([a0, np.ones((asize, 10)), a1])\n plt.imshow(z, cmap='gray')\n plt.axis('off')\n plt.title(f'Filter {filter_idx}')\n plt.show()\n", "_____no_output_____" ] ], [ [ "## DataViz -- Filters\n\nWe can also look at filters. This is easiest at the first layer where each filter is 3 dimensional.", "_____no_output_____" ] ], [ [ "import matplotlib.colors as mc\nimport math\nconv = find_layers(learner.model, nn.Conv2d)[0]\nweight = conv.weight.data.numpy()\n\nnum_filters, depth, w, h = weight.shape\n\nrows = int(num_filters**0.5)\ncols = int(math.ceil(num_filters/rows))\nborder = 1\nimg = np.zeros((depth, rows*h + (1+rows)*border, cols*w + (1+cols)*border))\nfor f in range(num_filters):\n r = f // rows\n c = f % cols\n x = border + r * (w+border)\n y = border + c * (w+border)\n norm = mc.Normalize()\n img[:, x:x+w, y:y+h] = norm(weight[f, :, :, :])\n\nplt.figure(figsize=(12,12))\nplt.imshow(img.transpose(1,2,0))\n_ = plt.axis('off')", "_____no_output_____" ] ], [ [ "We can also visualize subsequent layers, though it's not so pretty. 
We can map each dimension of each filter back into grayscale.", "_____no_output_____" ] ], [ [ "# for i, conv in enumerate(find_layers(learner.model, nn.Conv2d)):\n# print(conv, conv.weight.shape)\nweight = find_layers(learner.model, nn.Conv2d)[2].weight.data.numpy()\nnum_filters, depth, w, h = weight.shape\nrows = num_filters\ncols = depth\nborder = 1\nimg = np.zeros((rows*h + (1+rows)*border, cols*w + (1+cols)*border))\nfor f in range(num_filters):\n norm = mc.Normalize()\n normed = norm(weight[f, :, :, :]) #normalize over all the weights in a filter\n for d in range(depth):\n r = f\n c = d\n x = border + r * (w+border)\n y = border + c * (w+border)\n img[x:x+w, y:y+h] = normed[d]\n\nplt.figure(figsize=(18,18))\nplt.imshow(img, cmap='gray')\n_ = plt.axis('off')\n", "_____no_output_____" ] ], [ [ "## Occlusion\nWe can also mask out portions of the image by sliding a gray block over the image repeatedly and record how the predictions change.", "_____no_output_____" ] ], [ [ "block_size = 50\nimage_path = path / data.test_ds.fnames[0]\nimage = open_image(image_path)\nimage[50:250, 50:250] = np.full((200,200,3), 0.75)\nscaled_image = Scale(sz=224).do_transform(orig_image, False)\n# image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\nplt.imshow(image)\n_ = plt.axis('off')", "_____no_output_____" ], [ "block_size = 50\nimage_path = path / data.test_ds.fnames[0]\norig_image = open_image(image_path)\n# image[0:200, 0:200] = np.full((200,200,3), 0.75)\nscaled_image = Scale(sz=224).do_transform(orig_image, False)\n# image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\n# plt.imshow(image)\nplt.axis('off')\n\n#the prediction for the smaller image should be essentially unchanged\nprint(learner.model(VV(tfms_from_model(arch, sz)[1](scaled_image)).unsqueeze(0)).exp())\nw,h,_ = scaled_image.shape\nlearner.model.eval()\nt0 = time.time()\nprob_map = np.zeros((2, w, h))\n\nz = 0\n\n#TODO: add stride for efficiency.\nfor x in tqdm(range(1 - block_size, w)):\n for y in range(1 - block_size, h):\n image = np.array(scaled_image)\n x0, x1 = max(0, x), min(w, x + block_size)\n y0, y1 = max(0, y), min(h, y + block_size)\n image[x0:x1,y0:y1] = np.full((x1-x0, y1-y0, 3), 0.75)\n image = tfms_from_model(arch, sz)[1](image)\n predictions = learner.model(VV(image).unsqueeze(0)) \n prob_map[0,x0:x1,y0:y1] += predictions.exp().data[0][0]\n prob_map[1,x0:x1,y0:y1] += 1\n", "_____no_output_____" ], [ "np.save('probs-heatmap.npy', prob_map)", "_____no_output_____" ], [ "heatmap = prob_map[0]/prob_map[1]\nplt.subplot(1,2,1)\nplt.imshow(1 - heatmap, cmap='jet')\nplt.axis('off')\nplt.subplot(1,2,2)\nplt.imshow(orig_image)\n_ = plt.axis('off')", "_____no_output_____" ], [ "block_size = 50\nimage_path = path / 'valid/bball/29.jpg'\norig_image = open_image(image_path)\n# image[0:200, 0:200] = np.full((200,200,3), 0.75)\nscaled_image = Scale(sz=224).do_transform(orig_image, False)\n# orig_image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\n# plt.imshow(orig_image)\n# plt.axis('off')\n\n#the prediction for the smaller image should be essentially unchanged\nprint(learner.model(VV(tfms_from_model(arch, sz)[1](scaled_image)).unsqueeze(0)).exp())\nw,h,_ = scaled_image.shape\nlearner.model.eval()\nt0 = time.time()\nprob_map = np.zeros((2, w, h))\n\nz = 0\n\n#TODO: add stride for efficiency.\nfor x in tqdm(range(1 - block_size, w)):\n for y in range(1 - block_size, h):b\n image = np.array(scaled_image)\n x0, x1 = max(0, x), min(w, x + block_size)\n y0, y1 = 
max(0, y), min(h, y + block_size)\n image[x0:x1,y0:y1] = np.full((x1-x0, y1-y0, 3), 0.75)\n image = tfms_from_model(arch, sz)[1](image)\n predictions = learner.model(VV(image).unsqueeze(0)) \n prob_map[0,x0:x1,y0:y1] += predictions.exp().data[0][0]\n prob_map[1,x0:x1,y0:y1] += 1", "_____no_output_____" ], [ "np.save('probs-giannis-heatmap.npy', prob_map)", "_____no_output_____" ], [ "heatmap = prob_map[0]/prob_map[1]\nplt.subplot(1,2,1)\nplt.imshow(1 - heatmap, cmap='jet')\nplt.axis('off')\nplt.subplot(1,2,2)\nplt.imshow(orig_image)\n_ = plt.axis('off')", "_____no_output_____" ], [ "block_size = 50\nimage_path = path / 'valid/tennis/23.jpg'\norig_image = open_image(image_path)\n# image[0:200, 0:200] = np.full((200,200,3), 0.75)\nscaled_image = Scale(sz=224).do_transform(orig_image, False)\n# orig_image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\nplt.imshow(scaled_image)\n# plt.axis('off')\n\n#the prediction for the smaller image should be essentially unchanged\nprint(learner.model(VV(tfms_from_model(arch, sz)[1](scaled_image)).unsqueeze(0)).exp())\nw,h,_ = scaled_image.shape\nlearner.model.eval()\nt0 = time.time()\nprob_map = np.zeros((2, w, h))\n\nz = 0\n\n#TODO: add stride for efficiency.\nfor x in tqdm(range(1 - block_size, w)):\n for y in range(1 - block_size, h):\n image = np.array(scaled_image)\n x0, x1 = max(0, x), min(w, x + block_size)\n y0, y1 = max(0, y), min(h, y + block_size)\n image[x0:x1,y0:y1] = np.full((x1-x0, y1-y0, 3), 0.75)\n image = tfms_from_model(arch, sz)[1](image)\n predictions = learner.model(VV(image).unsqueeze(0)) \n prob_map[0,x0:x1,y0:y1] += predictions.exp().data[0][0]\n prob_map[1,x0:x1,y0:y1] += 1", "_____no_output_____" ], [ "np.save('probs-tennis-heatmap.npy', prob_map)", "_____no_output_____" ], [ "heatmap = prob_map[0]/prob_map[1]\nplt.subplot(1,2,1)\nplt.imshow(heatmap, cmap='jet')\nplt.axis('off')\nplt.subplot(1,2,2)\nplt.imshow(orig_image)\n_ = plt.axis('off')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
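The occlusion cells in the record above repeat the same sliding grey-block loop for three images, each carrying a "#TODO: add stride for efficiency". A hedged sketch of a single helper with a stride parameter, reusing the notebook's arch, sz, tfms_from_model and VV and assuming its earlier cells have been run, might be:

```python
import numpy as np
from tqdm import tqdm

def occlusion_heatmap(model, scaled_image, block_size=50, stride=4, fill=0.75):
    """Average the class-0 probability over every occluder position covering each pixel."""
    w, h, _ = scaled_image.shape
    prob_map = np.zeros((2, w, h))
    model.eval()
    for x in tqdm(range(1 - block_size, w, stride)):
        for y in range(1 - block_size, h, stride):
            image = np.array(scaled_image)
            x0, x1 = max(0, x), min(w, x + block_size)
            y0, y1 = max(0, y), min(h, y + block_size)
            image[x0:x1, y0:y1] = fill                    # grey out the occluded block
            batch = tfms_from_model(arch, sz)[1](image)   # notebook's fastai transforms
            preds = model(VV(batch).unsqueeze(0))
            prob_map[0, x0:x1, y0:y1] += preds.exp().data[0][0]
            prob_map[1, x0:x1, y0:y1] += 1
    return prob_map[0] / prob_map[1]

# heatmap = occlusion_heatmap(learner.model, scaled_image, stride=4)
```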
c52206a3667d06b0a4cd9aca0f24039b4dd1ff3a
34,685
ipynb
Jupyter Notebook
site/en/guide/keras/rnn.ipynb
jonathanking/docs-1
3082041fb5ef2b29217584659bc43d89602d57cf
[ "Apache-2.0" ]
1
2020-06-02T13:44:36.000Z
2020-06-02T13:44:36.000Z
site/en/guide/keras/rnn.ipynb
jonathanking/docs-1
3082041fb5ef2b29217584659bc43d89602d57cf
[ "Apache-2.0" ]
null
null
null
site/en/guide/keras/rnn.ipynb
jonathanking/docs-1
3082041fb5ef2b29217584659bc43d89602d57cf
[ "Apache-2.0" ]
1
2020-06-12T11:26:06.000Z
2020-06-12T11:26:06.000Z
38.538889
432
0.552083
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Recurrent Neural Networks (RNN) with Keras\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/keras/rnn\">\n <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />\n View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/keras/rnn.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/keras/rnn.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/rnn.ipynb\">\n <img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />\n Download notebook</a>\n </td>\n</table>\n", "_____no_output_____" ], [ "Recurrent neural networks (RNN) are a class of neural networks that is powerful for modeling sequence data such as time series or natural language.\n\nSchematically, a RNN layer uses a `for` loop to iterate over the timesteps of a sequence, while maintaining an internal state that encodes information about the timesteps it has seen so far.\n\nThe Keras RNN API is designed with a focus on:\n\n- **Ease of use**: the built-in `tf.keras.layers.RNN`, `tf.keras.layers.LSTM`, `tf.keras.layers.GRU` layers enable you to quickly build recurrent models without having to make difficult configuration choices.\n \n- **Ease of customization**: You can also define your own RNN cell layer (the inner part of the `for` loop) with custom behavior, and use it with the generic `tf.keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly prototype different research ideas in a flexible way with minimal code.\n ", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import collections\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow.keras import layers", "_____no_output_____" ] ], [ [ "## Build a simple model\n", "_____no_output_____" ], [ "There are three built-in RNN layers in Keras:\n\n1. `tf.keras.layers.SimpleRNN`, a fully-connected RNN where the output from previous timestep is to be fed to next timestep.\n\n2. `tf.keras.layers.GRU`, first proposed in [Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](https://arxiv.org/abs/1406.1078).\n\n3. 
`tf.keras.layers.LSTM`, first proposed in [Long Short-Term Memory](https://www.bioinf.jku.at/publications/older/2604.pdf).\n\nIn early 2015, Keras had the first reusable open-source Python implementations of LSTM and GRU.\n\nHere is a simple example of a `Sequential` model that processes sequences of integers, embeds each integer into a 64-dimensional vector, then processes the sequence of vectors using a `LSTM` layer.", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential()\n# Add an Embedding layer expecting input vocab of size 1000, and\n# output embedding dimension of size 64.\nmodel.add(layers.Embedding(input_dim=1000, output_dim=64))\n\n# Add a LSTM layer with 128 internal units.\nmodel.add(layers.LSTM(128))\n\n# Add a Dense layer with 10 units.\nmodel.add(layers.Dense(10))\n\nmodel.summary()", "_____no_output_____" ] ], [ [ "## Outputs and states", "_____no_output_____" ], [ "By default, the output of a RNN layer contain a single vector per sample. This vector is the RNN cell output corresponding to the last timestep, containing information about the entire input sequence. The shape of this output is `(batch_size, units)` where `units` corresponds to the `units` argument passed to the layer's constructor. \n\nA RNN layer can also return the entire sequence of outputs for each sample (one vector per timestep per sample), if you set `return_sequences=True`. The shape of this output is `(batch_size, timesteps, units)`.", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential()\nmodel.add(layers.Embedding(input_dim=1000, output_dim=64))\n\n# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)\nmodel.add(layers.GRU(256, return_sequences=True))\n\n# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)\nmodel.add(layers.SimpleRNN(128))\n\nmodel.add(layers.Dense(10))\n\nmodel.summary() ", "_____no_output_____" ] ], [ [ "In addition, a RNN layer can return its final internal state(s). The returned states can be used to resume the RNN execution later, or [to initialize another RNN](https://arxiv.org/abs/1409.3215). This setting is commonly used in the encoder-decoder sequence-to-sequence model, where the encoder final state is used as the initial state of the decoder.\n\nTo configure a RNN layer to return its internal state, set the `return_state` parameter to `True` when creating the layer. 
Note that `LSTM` has 2 state tensors, but `GRU` only has one.\n\nTo configure the initial state of the layer, just call the layer with additional keyword argument `initial_state`.\nNote that the shape of the state needs to match the unit size of the layer, like in the example below.", "_____no_output_____" ] ], [ [ "encoder_vocab = 1000\ndecoder_vocab = 2000\n\nencoder_input = layers.Input(shape=(None, ))\nencoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(encoder_input)\n\n# Return states in addition to output\noutput, state_h, state_c = layers.LSTM(\n 64, return_state=True, name='encoder')(encoder_embedded)\nencoder_state = [state_h, state_c]\n\ndecoder_input = layers.Input(shape=(None, ))\ndecoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(decoder_input)\n\n# Pass the 2 states to a new LSTM layer, as initial state\ndecoder_output = layers.LSTM(\n 64, name='decoder')(decoder_embedded, initial_state=encoder_state)\noutput = layers.Dense(10)(decoder_output)\n\nmodel = tf.keras.Model([encoder_input, decoder_input], output)\nmodel.summary()", "_____no_output_____" ] ], [ [ "## RNN layers and RNN cells", "_____no_output_____" ], [ "In addition to the built-in RNN layers, the RNN API also provides cell-level APIs. Unlike RNN layers, which processes whole batches of input sequences, the RNN cell only processes a single timestep.\n\nThe cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a `tf.keras.layers.RNN` layer gives you a layer capable of processing batches of sequences, e.g. `RNN(LSTMCell(10))`.\n\nMathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact, the implementation of this layer in TF v1.x was just creating the corresponding RNN cell and wrapping it in a RNN layer. However using the built-in `GRU` and `LSTM` layers enables the use of CuDNN and you may see better performance.\n\nThere are three built-in RNN cells, each of them corresponding to the matching RNN layer.\n\n- `tf.keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.\n\n- `tf.keras.layers.GRUCell` corresponds to the `GRU` layer.\n\n- `tf.keras.layers.LSTMCell` corresponds to the `LSTM` layer.\n\nThe cell abstraction, together with the generic `tf.keras.layers.RNN` class, make it very easy to implement custom RNN architectures for your research.\n", "_____no_output_____" ], [ "## Cross-batch statefulness", "_____no_output_____" ], [ "When processing very long sequences (possibly infinite), you may want to use the pattern of **cross-batch statefulness**.\n\nNormally, the internal state of a RNN layer is reset every time it sees a new batch (i.e. every sample seen by the layer is assume to be independent from the past). The layer will only maintain a state while processing a given sample.\n\nIf you have very long sequences though, it is useful to break them into shorter sequences, and to feed these shorter sequences sequentially into a RNN layer without resetting the layer's state. That way, the layer can retain information about the entirety of the sequence, even though it's only seeing one sub-sequence at a time.\n\nYou can do this by setting `stateful=True` in the constructor.\n\nIf you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.\n\n```\ns1 = [t0, t1, ... t100]\ns2 = [t101, ... t201]\n...\ns16 = [t1501, ... 
t1547]\n```\n\nThen you would process it via:\n\n```python\nlstm_layer = layers.LSTM(64, stateful=True)\nfor s in sub_sequences:\n output = lstm_layer(s)\n```\n\nWhen you want to clear the state, you can use `layer.reset_states()`.\n\n\n> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of sample `i` in the previous batch. This means that all batches should contain the same number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100, sequence_B_from_t0_to_t100]`, the next batch should contain `[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.\n\n\n\n\nHere is a complete example:\n", "_____no_output_____" ] ], [ [ "paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph2 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph3 = np.random.random((20, 10, 50)).astype(np.float32)\n\nlstm_layer = layers.LSTM(64, stateful=True)\noutput = lstm_layer(paragraph1)\noutput = lstm_layer(paragraph2)\noutput = lstm_layer(paragraph3)\n\n# reset_states() will reset the cached state to the original initial_state.\n# If no initial_state was provided, zero-states will be used by default.\nlstm_layer.reset_states()\n", "_____no_output_____" ] ], [ [ "### RNN State Reuse\n<a id=\"rnn_state_reuse\"></a>", "_____no_output_____" ], [ "The recorded states of the RNN layer are not included in the `layer.weights()`. If you would like to reuse the state from a RNN layer, you can retrieve the states value by `layer.states` and use it as the\ninitial state for a new layer via the Keras functional API like `new_layer(inputs, initial_state=layer.states)`, or model subclassing.\n\nPlease also note that sequential model might not be used in this case since it only supports layers with single input and output, the extra input of initial state makes it impossible to use here.\n", "_____no_output_____" ] ], [ [ "paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph2 = np.random.random((20, 10, 50)).astype(np.float32)\nparagraph3 = np.random.random((20, 10, 50)).astype(np.float32)\n\nlstm_layer = layers.LSTM(64, stateful=True)\noutput = lstm_layer(paragraph1)\noutput = lstm_layer(paragraph2)\n\nexisting_state = lstm_layer.states\n\nnew_lstm_layer = layers.LSTM(64)\nnew_output = new_lstm_layer(paragraph3, initial_state=existing_state)\n", "_____no_output_____" ] ], [ [ "##Bidirectional RNNs", "_____no_output_____" ], [ "For sequences other than time series (e.g. text), it is often the case that a RNN model can perform better if it not only processes sequence from start to end, but also backwards. For example, to predict the next word in a sentence, it is often useful to have the context around the word, not only just the words that come before it.\n\nKeras provides an easy API for you to build such bidirectional RNNs: the `tf.keras.layers.Bidirectional` wrapper.", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential()\n\nmodel.add(layers.Bidirectional(layers.LSTM(64, return_sequences=True), \n input_shape=(5, 10)))\nmodel.add(layers.Bidirectional(layers.LSTM(32)))\nmodel.add(layers.Dense(10))\n\nmodel.summary()", "_____no_output_____" ] ], [ [ "Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the `go_backwards` field of the newly copied layer, so that it will process the inputs in reverse order.\n\nThe output of the `Bidirectional` RNN will be, by default, the concatenation of the forward layer output and the backward layer output. If you need a different merging behavior, e.g. 
sum, change the `merge_mode` parameter in the `Bidirectional` wrapper constructor. For more details about `Bidirectional`, please check [the API docs](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Bidirectional).", "_____no_output_____" ], [ "## Performance optimization and CuDNN kernels in TensorFlow 2.0", "_____no_output_____" ], [ "In TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN kernels by default when a GPU is available. With this change, the prior `keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your model without worrying about the hardware it will run on.\n\nSince the CuDNN kernel is built with certain assumptions, this means the layer **will not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or GRU layers**. E.g.:\n\n- Changing the `activation` function from `tanh` to something else.\n- Changing the `recurrent_activation` function from `sigmoid` to something else.\n- Using `recurrent_dropout` > 0.\n- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner `tf.while_loop` into an unrolled `for` loop.\n- Setting `use_bias` to False.\n- Using masking when the input data is not strictly right padded (if the mask corresponds to strictly right padded data, CuDNN can still be used. This is the most common case).\n\nFor the detailed list of constraints, please see the documentation for the [LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM) and [GRU](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/GRU) layers.", "_____no_output_____" ], [ "### Using CuDNN kernels when available\n\nLet's build a simple LSTM model to demonstrate the performance difference.\n\nWe'll use as input sequences the sequence of rows of MNIST digits (treating each row of pixels as a timestep), and we'll predict the digit's label.\n", "_____no_output_____" ] ], [ [ "batch_size = 64\n# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).\n# Each input sequence will be of size (28, 28) (height is treated like time).\ninput_dim = 28\n\nunits = 64\noutput_size = 10 # labels are from 0 to 9\n\n# Build the RNN model\ndef build_model(allow_cudnn_kernel=True):\n # CuDNN is only available at the layer level, and not at the cell level.\n # This means `LSTM(units)` will use the CuDNN kernel,\n # while RNN(LSTMCell(units)) will run on non-CuDNN kernel.\n if allow_cudnn_kernel:\n # The LSTM layer with default options uses CuDNN.\n lstm_layer = tf.keras.layers.LSTM(units, input_shape=(None, input_dim))\n else:\n # Wrapping a LSTMCell in a RNN layer will not use CuDNN.\n lstm_layer = tf.keras.layers.RNN(\n tf.keras.layers.LSTMCell(units),\n input_shape=(None, input_dim))\n model = tf.keras.models.Sequential([\n lstm_layer,\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(output_size)]\n )\n return model\n", "_____no_output_____" ] ], [ [ "### Load MNIST dataset", "_____no_output_____" ] ], [ [ "mnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\nsample, sample_label = x_train[0], y_train[0]", "_____no_output_____" ] ], [ [ "### Create a model instance and compile it\nWe choose `sparse_categorical_crossentropy` as the loss function for the model. The output of the model has shape of `[batch_size, 10]`. 
The target for the model is a integer vector, each of the integer is in the range of 0 to 9.", "_____no_output_____" ] ], [ [ "model = build_model(allow_cudnn_kernel=True)\n\nmodel.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), \n optimizer='sgd',\n metrics=['accuracy'])\n", "_____no_output_____" ], [ "model.fit(x_train, y_train,\n validation_data=(x_test, y_test),\n batch_size=batch_size,\n epochs=5)", "_____no_output_____" ] ], [ [ "### Build a new model without CuDNN kernel", "_____no_output_____" ] ], [ [ "slow_model = build_model(allow_cudnn_kernel=False)\nslow_model.set_weights(model.get_weights())\nslow_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), \n optimizer='sgd', \n metrics=['accuracy'])\nslow_model.fit(x_train, y_train, \n validation_data=(x_test, y_test), \n batch_size=batch_size,\n epochs=1) # We only train for one epoch because it's slower.", "_____no_output_____" ] ], [ [ "As you can see, the model built with CuDNN is much faster to train compared to the model that use the regular TensorFlow kernel.\n\nThe same CuDNN-enabled model can also be use to run inference in a CPU-only environment. The `tf.device` annotation below is just forcing the device placement. The model will run on CPU by default if no GPU is available.\n\nYou simply don't have to worry about the hardware you're running on anymore. Isn't that pretty cool?", "_____no_output_____" ] ], [ [ "with tf.device('CPU:0'):\n cpu_model = build_model(allow_cudnn_kernel=True)\n cpu_model.set_weights(model.get_weights())\n result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)\n print('Predicted result is: %s, target result is: %s' % (result.numpy(), sample_label))\n plt.imshow(sample, cmap=plt.get_cmap('gray'))", "_____no_output_____" ] ], [ [ "## RNNs with list/dict inputs, or nested inputs\n\nNested structures allow implementers to include more information within a single timestep. For example, a video frame could have audio and video input at the same time. The data shape in this case could be:\n\n`[batch, timestep, {\"video\": [height, width, channel], \"audio\": [frequency]}]`\n\nIn another example, handwriting data could have both coordinates x and y for the current position of the pen, as well as pressure information. 
So the data representation could be:\n\n`[batch, timestep, {\"location\": [x, y], \"pressure\": [force]}]`\n\nThe following code provides an example of how to build a custom RNN cell that accepts such structured inputs.\n", "_____no_output_____" ], [ "### Define a custom cell that support nested input/output", "_____no_output_____" ], [ "See [Custom Layers and Models](custom_layers_and_models.ipynb) for details on \nwriting your own layers.", "_____no_output_____" ] ], [ [ "class NestedCell(tf.keras.layers.Layer):\n\n def __init__(self, unit_1, unit_2, unit_3, **kwargs):\n self.unit_1 = unit_1\n self.unit_2 = unit_2\n self.unit_3 = unit_3\n self.state_size = [tf.TensorShape([unit_1]),\n tf.TensorShape([unit_2, unit_3])]\n self.output_size = [tf.TensorShape([unit_1]),\n tf.TensorShape([unit_2, unit_3])]\n super(NestedCell, self).__init__(**kwargs)\n\n def build(self, input_shapes):\n # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]\n i1 = input_shapes[0][1]\n i2 = input_shapes[1][1]\n i3 = input_shapes[1][2]\n\n self.kernel_1 = self.add_weight(\n shape=(i1, self.unit_1), initializer='uniform', name='kernel_1')\n self.kernel_2_3 = self.add_weight(\n shape=(i2, i3, self.unit_2, self.unit_3),\n initializer='uniform',\n name='kernel_2_3')\n\n def call(self, inputs, states):\n # inputs should be in [(batch, input_1), (batch, input_2, input_3)]\n # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]\n input_1, input_2 = tf.nest.flatten(inputs)\n s1, s2 = states\n\n output_1 = tf.matmul(input_1, self.kernel_1)\n output_2_3 = tf.einsum('bij,ijkl->bkl', input_2, self.kernel_2_3)\n state_1 = s1 + output_1\n state_2_3 = s2 + output_2_3\n\n output = (output_1, output_2_3)\n new_states = (state_1, state_2_3)\n\n return output, new_states\n\n def get_config(self):\n return {'unit_1':self.unit_1, 'unit_2':unit_2, 'unit_3':self.unit_3}", "_____no_output_____" ] ], [ [ "### Build a RNN model with nested input/output\n\nLet's build a Keras model that uses a `tf.keras.layers.RNN` layer and the custom cell we just defined.", "_____no_output_____" ] ], [ [ "unit_1 = 10\nunit_2 = 20\nunit_3 = 30\n\ni1 = 32\ni2 = 64\ni3 = 32\nbatch_size = 64\nnum_batches = 100\ntimestep = 50\n\ncell = NestedCell(unit_1, unit_2, unit_3)\nrnn = tf.keras.layers.RNN(cell)\n\ninput_1 = tf.keras.Input((None, i1))\ninput_2 = tf.keras.Input((None, i2, i3))\n\noutputs = rnn((input_1, input_2))\n\nmodel = tf.keras.models.Model([input_1, input_2], outputs)\n\nmodel.compile(optimizer='adam', loss='mse', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### Train the model with randomly generated data\n\nSince there isn't a good candidate dataset for this model, we use random Numpy data for demonstration.", "_____no_output_____" ] ], [ [ "input_1_data = np.random.random((batch_size * num_batches, timestep, i1))\ninput_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))\ntarget_1_data = np.random.random((batch_size * num_batches, unit_1))\ntarget_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))\ninput_data = [input_1_data, input_2_data]\ntarget_data = [target_1_data, target_2_data]\n\nmodel.fit(input_data, target_data, batch_size=batch_size)", "_____no_output_____" ] ], [ [ "With the Keras `tf.keras.layers.RNN` layer, You are only expected to define the math logic for individual step within the sequence, and the `tf.keras.layers.RNN` layer will handle the sequence iteration for you. It's an incredibly powerful way to quickly prototype new kinds of RNNs (e.g. 
a LSTM variant).\n\nFor more details, please visit the [API docs](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/RNN).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
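The "RNN layers and RNN cells" section of the guide above states that RNN(LSTMCell(10)) produces the same result as LSTM(10), but no cell accompanies that claim. A minimal sketch of the equivalence (output shapes only, since the two layers start from different random weights) could be:

```python
import numpy as np
import tensorflow as tf

x = np.random.random((4, 7, 16)).astype(np.float32)   # (batch, timesteps, features)

built_in = tf.keras.layers.LSTM(10)
cell_based = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(10))

print(built_in(x).shape)     # (4, 10)
print(cell_based(x).shape)   # (4, 10) -- same contract, cell wrapped in the generic RNN loop
```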
c52209cdf41308f389d4bd475068e697a529469c
11,309
ipynb
Jupyter Notebook
examples/notebooks/making_your_own_autoencoder.ipynb
clementchadebec/benchmark_VAE
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
[ "Apache-2.0" ]
143
2021-10-17T08:43:33.000Z
2022-03-31T11:10:53.000Z
examples/notebooks/making_your_own_autoencoder.ipynb
louis-j-vincent/benchmark_VAE
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
[ "Apache-2.0" ]
6
2022-01-21T17:40:09.000Z
2022-03-16T13:09:22.000Z
examples/notebooks/making_your_own_autoencoder.ipynb
louis-j-vincent/benchmark_VAE
943e231f9e5dfa40b4eec14d4536f1c229ad9be1
[ "Apache-2.0" ]
18
2021-12-16T15:17:08.000Z
2022-03-15T01:30:13.000Z
26.057604
293
0.535503
[ [ [ "# Tutorial\n\nIn this notebook, we will see how to pass your own encoder and decoder's architectures to your VAE model using pythae!", "_____no_output_____" ] ], [ [ "# If you run on colab uncomment the following line\n#!pip install git+https://github.com/clementchadebec/benchmark_VAE.git", "_____no_output_____" ], [ "import torch\nimport torchvision.datasets as datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "### Get the data", "_____no_output_____" ] ], [ [ "mnist_trainset = datasets.MNIST(root='../data', train=True, download=True, transform=None)\nn_samples = 200\ndataset = mnist_trainset.data[np.array(mnist_trainset.targets)==2][:n_samples].reshape(-1, 1, 28, 28) / 255.", "_____no_output_____" ], [ "fig, axes = plt.subplots(2, 10, figsize=(10, 2))\nfor i in range(2):\n for j in range(10):\n axes[i][j].matshow(dataset[i*10 +j].reshape(28, 28), cmap='gray')\n axes[i][j].axis('off')\n\nplt.tight_layout(pad=0.8)", "_____no_output_____" ] ], [ [ "## Let's build a custom auto-encoding architecture!", "_____no_output_____" ], [ "### First thing, you need to import the ``BaseEncoder`` and ``BaseDecoder`` as well as ``ModelOutput`` classes from pythae by running", "_____no_output_____" ] ], [ [ "from pythae.models.nn import BaseEncoder, BaseDecoder\nfrom pythae.models.base.base_utils import ModelOutput", "_____no_output_____" ] ], [ [ "### Then build your own architectures", "_____no_output_____" ] ], [ [ "import torch.nn as nn\n\n\nclass Encoder_VAE_MNIST(BaseEncoder):\n def __init__(self, args):\n BaseEncoder.__init__(self)\n\n self.input_dim = (1, 28, 28)\n self.latent_dim = args.latent_dim\n self.n_channels = 1\n\n self.conv_layers = nn.Sequential(\n nn.Conv2d(self.n_channels, 128, 4, 2, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(128, 256, 4, 2, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, 512, 4, 2, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(512, 1024, 4, 2, padding=1),\n nn.BatchNorm2d(1024),\n nn.ReLU(),\n )\n\n self.embedding = nn.Linear(1024, args.latent_dim)\n self.log_var = nn.Linear(1024, args.latent_dim)\n\n def forward(self, x: torch.Tensor):\n h1 = self.conv_layers(x).reshape(x.shape[0], -1)\n output = ModelOutput(\n embedding=self.embedding(h1),\n log_covariance=self.log_var(h1)\n )\n return output\n\n\nclass Decoder_AE_MNIST(BaseDecoder):\n def __init__(self, args):\n BaseDecoder.__init__(self)\n self.input_dim = (1, 28, 28)\n self.latent_dim = args.latent_dim\n self.n_channels = 1\n\n self.fc = nn.Linear(args.latent_dim, 1024 * 4 * 4)\n self.deconv_layers = nn.Sequential(\n nn.ConvTranspose2d(1024, 512, 3, 2, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(),\n nn.ConvTranspose2d(512, 256, 3, 2, padding=1, output_padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.ConvTranspose2d(256, self.n_channels, 3, 2, padding=1, output_padding=1),\n nn.Sigmoid(),\n )\n\n def forward(self, z: torch.Tensor):\n h1 = self.fc(z).reshape(z.shape[0], 1024, 4, 4)\n output = ModelOutput(reconstruction=self.deconv_layers(h1))\n\n return output", "_____no_output_____" ] ], [ [ "### Define a model configuration (in which the latent will be stated). 
Here, we use the RHVAE model.", "_____no_output_____" ] ], [ [ "from pythae.models import VAEConfig\n\nmodel_config = VAEConfig(\n input_dim=(1, 28, 28),\n latent_dim=10\n )", "_____no_output_____" ] ], [ [ "### Build your encoder and decoder", "_____no_output_____" ] ], [ [ "encoder = Encoder_VAE_MNIST(model_config)\ndecoder= Decoder_AE_MNIST(model_config)", "_____no_output_____" ] ], [ [ "### Last but not least. Build you RHVAE model by passing the ``encoder`` and ``decoder`` arguments", "_____no_output_____" ] ], [ [ "from pythae.models import VAE\n\nmodel = VAE(\n model_config=model_config,\n encoder=encoder,\n decoder=decoder\n)", "_____no_output_____" ] ], [ [ "### Now you can see the model that you've just built contains the custom autoencoder and decoder", "_____no_output_____" ] ], [ [ "model", "_____no_output_____" ] ], [ [ "### *note*: If you want to launch a training of such a model, try to ensure that the provided architectures are suited for the data. pythae performs a model sanity check before launching training and raises an error if the model cannot encode and decode an input data point", "_____no_output_____" ], [ "## Train the model !", "_____no_output_____" ] ], [ [ "from pythae.trainers import BaseTrainerConfig\nfrom pythae.pipelines import TrainingPipeline", "_____no_output_____" ] ], [ [ "### Build the training pipeline with your ``TrainingConfig`` instance", "_____no_output_____" ] ], [ [ "training_config = BaseTrainerConfig(\n output_dir='my_model_with_custom_archi',\n learning_rate=1e-3,\n batch_size=200,\n steps_saving=None,\n num_epochs=200)", "_____no_output_____" ], [ "pipeline = TrainingPipeline(\n model=model,\n training_config=training_config)", "_____no_output_____" ] ], [ [ "### Launch the ``Pipeline``", "_____no_output_____" ] ], [ [ "torch.manual_seed(8)\ntorch.cuda.manual_seed(8)\n\npipeline(\n train_data=dataset\n)", "_____no_output_____" ] ], [ [ "### *note 1*: You will see now that a ``encoder.pkl`` and ``decoder.pkl`` appear in the folder ``my_model_with_custom_archi/training_YYYY_MM_DD_hh_mm_ss/final_model`` to allow model rebuilding with your own architecture ``Encoder_VAE_MNIST`` and ``Decoder_AE_MNIST``.\n\n### *note 2*: Model rebuilding is based on the [dill](https://pypi.org/project/dill/) librairy allowing to reload the class whithout importing them. Hence, you should still be able to reload the model even if the classes ``Encoder_VAE_MNIST`` or ``Decoder_AE_MNIST`` were not imported. ", "_____no_output_____" ] ], [ [ "last_training = sorted(os.listdir('my_model_with_custom_archi'))[-1]\nprint(last_training)", "_____no_output_____" ] ], [ [ "### You can now reload the model easily using the classmethod ``VAE.load_from_folder``", "_____no_output_____" ] ], [ [ "model_rec = VAE.load_from_folder(os.path.join('my_model_with_custom_archi', last_training, 'final_model'))\nmodel_rec", "_____no_output_____" ] ], [ [ "## The model can now be used to generate new samples !", "_____no_output_____" ] ], [ [ "from pythae.samplers import NormalSampler\n\n\nsampler = NormalSampler(\n model=model_rec\n)\ngen_data = sampler.sample(\n num_samples=25\n)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(nrows=5, ncols=5, figsize=(10, 10))\n\nfor i in range(5):\n for j in range(5):\n axes[i][j].imshow(gen_data[i*5 +j].cpu().reshape(28, 28), cmap='gray')\n axes[i][j].axis('off')\nplt.tight_layout(pad=0.)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
c5220ef874b9a4553e65e9f6bd92e7306a0bac80
67,706
ipynb
Jupyter Notebook
tests/practice/pda_ch-03_pandas.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:16:23.000Z
2019-05-10T09:16:23.000Z
tests/practice/pda_ch-03_pandas.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
null
null
null
tests/practice/pda_ch-03_pandas.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:17:28.000Z
2019-05-10T09:17:28.000Z
40.229352
286
0.373246
[ [ [ "# Chapter 3 : pandas", "_____no_output_____" ] ], [ [ "#load watermark\n%load_ext watermark\n%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,matplotlib,nltk,sklearn,tensorflow,theano,mxnet,chainer,seaborn,keras,tflearn,bokeh,gensim", "/srv/venv/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nWARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.\nUsing TensorFlow backend.\n/srv/venv/lib/python3.6/site-packages/tensorflow/python/util/tf_inspect.py:45: DeprecationWarning: inspect.getargspec() is deprecated, use inspect.signature() or inspect.getfullargspec()\n if d.decorator_argspec is not None), _inspect.getargspec(target))\n" ] ], [ [ "# pandas DataFrames", "_____no_output_____" ] ], [ [ "import numpy as np\nimport scipy as sp\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Load the data file into data frame", "_____no_output_____" ] ], [ [ "from pandas.io.parsers import read_csv\n\ndf = read_csv(\"WHO_first9cols.csv\")\nprint(\"Dataframe Top 5 rows:\\n\", df.head())", "Dataframe Top 5 rows:\n Country CountryID Continent Adolescent fertility rate (%) \\\n0 Afghanistan 1 1 151.0 \n1 Albania 2 2 27.0 \n2 Algeria 3 3 6.0 \n3 Andorra 4 2 NaN \n4 Angola 5 3 146.0 \n\n Adult literacy rate (%) \\\n0 28.0 \n1 98.7 \n2 69.9 \n3 NaN \n4 67.4 \n\n Gross national income per capita (PPP international $) \\\n0 NaN \n1 6000.0 \n2 5940.0 \n3 NaN \n4 3890.0 \n\n Net primary school enrolment ratio female (%) \\\n0 NaN \n1 93.0 \n2 94.0 \n3 83.0 \n4 49.0 \n\n Net primary school enrolment ratio male (%) \\\n0 NaN \n1 94.0 \n2 96.0 \n3 83.0 \n4 51.0 \n\n Population (in thousands) total \n0 26088.0 \n1 3172.0 \n2 33351.0 \n3 74.0 \n4 16557.0 \n" ], [ "print(\"Shape:\\n\", df.shape)\nprint(\"\\n\")\nprint(\"Length:\\n\", len(df))\nprint(\"\\n\")\nprint(\"Column Headers:\\n\", df.columns)\nprint(\"\\n\")\nprint(\"Data types:\\n\", df.dtypes)\nprint(\"\\n\")\nprint(\"Index:\\n\", df.index)\nprint(\"\\n\")\nprint(\"Values:\\n\", df.values)", "Shape:\n (202, 9)\n\n\nLength:\n 202\n\n\nColumn Headers:\n Index(['Country', 'CountryID', 'Continent', 'Adolescent fertility rate (%)',\n 'Adult literacy rate (%)',\n 'Gross national income per capita (PPP international $)',\n 'Net primary school enrolment ratio female (%)',\n 'Net primary school enrolment ratio male (%)',\n 'Population (in thousands) total'],\n dtype='object')\n\n\nData types:\n Country object\nCountryID int64\nContinent int64\nAdolescent fertility rate (%) float64\nAdult literacy rate (%) float64\nGross national income per capita (PPP international $) float64\nNet primary school enrolment ratio female (%) float64\nNet primary school enrolment ratio male (%) float64\nPopulation (in thousands) total float64\ndtype: object\n\n\nIndex:\n RangeIndex(start=0, stop=202, step=1)\n\n\nValues:\n [['Afghanistan' 1 1 ... nan nan 26088.0]\n ['Albania' 2 2 ... 93.0 94.0 3172.0]\n ['Algeria' 3 3 ... 94.0 96.0 33351.0]\n ...\n ['Yemen' 200 1 ... 65.0 85.0 21732.0]\n ['Zambia' 201 3 ... 94.0 90.0 11696.0]\n ['Zimbabwe' 202 3 ... 
88.0 87.0 13228.0]]\n" ] ], [ [ "# pandas Series", "_____no_output_____" ] ], [ [ "country_col = df[\"Country\"]\nprint(\"Type df:\\n\", type(df), \"\\n\")\nprint(\"Type country col:\\n\", type(country_col), \"\\n\")", "Type df:\n <class 'pandas.core.frame.DataFrame'> \n\nType country col:\n <class 'pandas.core.series.Series'> \n\n" ], [ "print(\"Series shape:\\n\", country_col.shape, \"\\n\")\nprint(\"Series index:\\n\", country_col.index, \"\\n\")\nprint(\"Series values:\\n\", country_col.values, \"\\n\")\nprint(\"Series name:\\n\", country_col.name, \"\\n\")", "Series shape:\n (202,) \n\nSeries index:\n RangeIndex(start=0, stop=202, step=1) \n\nSeries values:\n ['Afghanistan' 'Albania' 'Algeria' 'Andorra' 'Angola'\n 'Antigua and Barbuda' 'Argentina' 'Armenia' 'Australia' 'Austria'\n 'Azerbaijan' 'Bahamas' 'Bahrain' 'Bangladesh' 'Barbados' 'Belarus'\n 'Belgium' 'Belize' 'Benin' 'Bermuda' 'Bhutan' 'Bolivia'\n 'Bosnia and Herzegovina' 'Botswana' 'Brazil' 'Brunei Darussalam'\n 'Bulgaria' 'Burkina Faso' 'Burundi' 'Cambodia' 'Cameroon' 'Canada'\n 'Cape Verde' 'Central African Republic' 'Chad' 'Chile' 'China' 'Colombia'\n 'Comoros' 'Congo, Dem. Rep.' 'Congo, Rep.' 'Cook Islands' 'Costa Rica'\n \"Cote d'Ivoire\" 'Croatia' 'Cuba' 'Cyprus' 'Czech Republic' 'Denmark'\n 'Djibouti' 'Dominica' 'Dominican Republic' 'Ecuador' 'Egypt'\n 'El Salvador' 'Equatorial Guinea' 'Eritrea' 'Estonia' 'Ethiopia' 'Fiji'\n 'Finland' 'France' 'French Polynesia' 'Gabon' 'Gambia' 'Georgia'\n 'Germany' 'Ghana' 'Greece' 'Grenada' 'Guatemala' 'Guinea' 'Guinea-Bissau'\n 'Guyana' 'Haiti' 'Honduras' 'Hong Kong, China' 'Hungary' 'Iceland'\n 'India' 'Indonesia' 'Iran (Islamic Republic of)' 'Iraq' 'Ireland'\n 'Israel' 'Italy' 'Jamaica' 'Japan' 'Jordan' 'Kazakhstan' 'Kenya'\n 'Kiribati' 'Korea, Dem. Rep.' 'Korea, Rep.' 
'Kuwait' 'Kyrgyzstan'\n \"Lao People's Democratic Republic\" 'Latvia' 'Lebanon' 'Lesotho' 'Liberia'\n 'Libyan Arab Jamahiriya' 'Lithuania' 'Luxembourg' 'Macao, China'\n 'Macedonia' 'Madagascar' 'Malawi' 'Malaysia' 'Maldives' 'Mali' 'Malta'\n 'Marshall Islands' 'Mauritania' 'Mauritius' 'Mexico'\n 'Micronesia (Federated States of)' 'Moldova' 'Monaco' 'Mongolia'\n 'Montenegro' 'Morocco' 'Mozambique' 'Myanmar' 'Namibia' 'Nauru' 'Nepal'\n 'Netherlands' 'Netherlands Antilles' 'New Caledonia' 'New Zealand'\n 'Nicaragua' 'Niger' 'Nigeria' 'Niue' 'Norway' 'Oman' 'Pakistan' 'Palau'\n 'Panama' 'Papua New Guinea' 'Paraguay' 'Peru' 'Philippines' 'Poland'\n 'Portugal' 'Puerto Rico' 'Qatar' 'Romania' 'Russia' 'Rwanda'\n 'Saint Kitts and Nevis' 'Saint Lucia' 'Saint Vincent and the Grenadines'\n 'Samoa' 'San Marino' 'Sao Tome and Principe' 'Saudi Arabia' 'Senegal'\n 'Serbia' 'Seychelles' 'Sierra Leone' 'Singapore' 'Slovakia' 'Slovenia'\n 'Solomon Islands' 'Somalia' 'South Africa' 'Spain' 'Sri Lanka' 'Sudan'\n 'Suriname' 'Swaziland' 'Sweden' 'Switzerland' 'Syria' 'Taiwan'\n 'Tajikistan' 'Tanzania' 'Thailand' 'Timor-Leste' 'Togo' 'Tonga'\n 'Trinidad and Tobago' 'Tunisia' 'Turkey' 'Turkmenistan' 'Tuvalu' 'Uganda'\n 'Ukraine' 'United Arab Emirates' 'United Kingdom'\n 'United States of America' 'Uruguay' 'Uzbekistan' 'Vanuatu' 'Venezuela'\n 'Vietnam' 'West Bank and Gaza' 'Yemen' 'Zambia' 'Zimbabwe'] \n\nSeries name:\n Country \n\n" ], [ "print(\"Last 2 countries:\\n\", country_col[-2:], \"\\n\")\nprint(\"Last 2 countries type:\\n\", type(country_col[-2:]), \"\\n\")", "Last 2 countries:\n 200 Zambia\n201 Zimbabwe\nName: Country, dtype: object \n\nLast 2 countries type:\n <class 'pandas.core.series.Series'> \n\n" ], [ "last_col = df.columns[-1]\nprint(\"Last df column signs:\\n\", last_col, np.sign(df[last_col]), \"\\n\")", "Last df column signs:\n Population (in thousands) total 0 1.0\n1 1.0\n2 1.0\n3 1.0\n4 1.0\n5 1.0\n6 1.0\n7 1.0\n8 1.0\n9 1.0\n10 1.0\n11 1.0\n12 1.0\n13 1.0\n14 1.0\n15 1.0\n16 1.0\n17 1.0\n18 1.0\n19 NaN\n20 1.0\n21 1.0\n22 1.0\n23 1.0\n24 1.0\n25 1.0\n26 1.0\n27 1.0\n28 1.0\n29 1.0\n ... 
\n172 1.0\n173 1.0\n174 1.0\n175 1.0\n176 NaN\n177 1.0\n178 1.0\n179 1.0\n180 1.0\n181 1.0\n182 1.0\n183 1.0\n184 1.0\n185 1.0\n186 1.0\n187 1.0\n188 1.0\n189 1.0\n190 1.0\n191 1.0\n192 1.0\n193 1.0\n194 1.0\n195 1.0\n196 1.0\n197 1.0\n198 NaN\n199 1.0\n200 1.0\n201 1.0\nName: Population (in thousands) total, Length: 202, dtype: float64 \n\n" ], [ "np.sum([0, np.nan])", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "print(np.sum(df[last_col] - df[last_col].values))", "0.0\n" ] ], [ [ "# Querying Data in pandas", "_____no_output_____" ] ], [ [ "!pip install quandl", "Collecting quandl\n Downloading Quandl-3.3.0-py2.py3-none-any.whl\nRequirement already satisfied: numpy>=1.8 in /srv/venv/lib/python3.6/site-packages (from quandl)\nCollecting inflection>=0.3.1 (from quandl)\n Downloading inflection-0.3.1.tar.gz\nRequirement already satisfied: requests>=2.7.0 in /srv/venv/lib/python3.6/site-packages (from quandl)\nCollecting more-itertools (from quandl)\n Downloading more_itertools-4.1.0-py3-none-any.whl (47kB)\n\u001b[K 100% |████████████████████████████████| 51kB 3.6MB/s ta 0:00:011\n\u001b[?25hCollecting pyOpenSSL (from quandl)\n Downloading pyOpenSSL-17.5.0-py2.py3-none-any.whl (53kB)\n\u001b[K 100% |████████████████████████████████| 61kB 5.7MB/s ta 0:00:011\n\u001b[?25hRequirement already satisfied: pandas>=0.14 in /srv/venv/lib/python3.6/site-packages (from quandl)\nCollecting ndg-httpsclient (from quandl)\n Downloading ndg_httpsclient-0.4.4-py3-none-any.whl\nRequirement already satisfied: python-dateutil in /srv/venv/lib/python3.6/site-packages (from quandl)\nCollecting pyasn1 (from quandl)\n Downloading pyasn1-0.4.2-py2.py3-none-any.whl (71kB)\n\u001b[K 100% |████████████████████████████████| 71kB 4.2MB/s ta 0:00:011\n\u001b[?25hRequirement already satisfied: six in /srv/venv/lib/python3.6/site-packages (from quandl)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /srv/venv/lib/python3.6/site-packages (from requests>=2.7.0->quandl)\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /srv/venv/lib/python3.6/site-packages (from requests>=2.7.0->quandl)\nRequirement already satisfied: idna<2.7,>=2.5 in /srv/venv/lib/python3.6/site-packages (from requests>=2.7.0->quandl)\nRequirement already satisfied: certifi>=2017.4.17 in /srv/venv/lib/python3.6/site-packages (from requests>=2.7.0->quandl)\nCollecting cryptography>=2.1.4 (from pyOpenSSL->quandl)\n Downloading cryptography-2.1.4-cp36-cp36m-manylinux1_x86_64.whl (2.2MB)\n\u001b[K 100% |████████████████████████████████| 2.2MB 409kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: pytz>=2011k in /srv/venv/lib/python3.6/site-packages (from pandas>=0.14->quandl)\nCollecting cffi>=1.7; platform_python_implementation != \"PyPy\" (from cryptography>=2.1.4->pyOpenSSL->quandl)\n Downloading cffi-1.11.4-cp36-cp36m-manylinux1_x86_64.whl (420kB)\n\u001b[K 100% |████████████████████████████████| 430kB 1.7MB/s eta 0:00:01\n\u001b[?25hCollecting asn1crypto>=0.21.0 (from cryptography>=2.1.4->pyOpenSSL->quandl)\n Downloading asn1crypto-0.24.0-py2.py3-none-any.whl (101kB)\n\u001b[K 100% |████████████████████████████████| 102kB 9.5MB/s ta 0:00:01\n\u001b[?25hCollecting pycparser (from cffi>=1.7; platform_python_implementation != \"PyPy\"->cryptography>=2.1.4->pyOpenSSL->quandl)\n Downloading pycparser-2.18.tar.gz (245kB)\n\u001b[K 100% |████████████████████████████████| 256kB 4.2MB/s eta 0:00:01\n\u001b[?25hBuilding wheels for collected packages: inflection, pycparser\n Running setup.py bdist_wheel for inflection ... 
\u001b[?25ldone\n\u001b[?25h Stored in directory: /home/jovyan/.cache/pip/wheels/41/fa/e9/2995f4ab121e9f30f342fa2d43f0b27f851a0cb9f0d98d3b45\n Running setup.py bdist_wheel for pycparser ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /home/jovyan/.cache/pip/wheels/95/14/9a/5e7b9024459d2a6600aaa64e0ba485325aff7a9ac7489db1b6\nSuccessfully built inflection pycparser\nInstalling collected packages: inflection, more-itertools, pycparser, cffi, asn1crypto, cryptography, pyOpenSSL, pyasn1, ndg-httpsclient, quandl\nSuccessfully installed asn1crypto-0.24.0 cffi-1.11.4 cryptography-2.1.4 inflection-0.3.1 more-itertools-4.1.0 ndg-httpsclient-0.4.4 pyOpenSSL-17.5.0 pyasn1-0.4.2 pycparser-2.18 quandl-3.3.0\n" ], [ "import quandl", "_____no_output_____" ], [ "sunspots = quandl.get(\"SIDC/SUNSPOTS_A\")", "_____no_output_____" ], [ "print(\"Head 2:\\n\", sunspots.head(2) )", "Head 2:\n Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\nDate \n1700-12-31 8.3 NaN \n1701-12-31 18.3 NaN \n\n Number of Observations Definitive/Provisional Indicator \nDate \n1700-12-31 NaN 1.0 \n1701-12-31 NaN 1.0 \n" ], [ "print(\"Tail 2:\\n\", sunspots.tail(2))", "Tail 2:\n Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\nDate \n2016-12-31 39.8 3.9 \n2017-12-31 21.7 2.6 \n\n Number of Observations Definitive/Provisional Indicator \nDate \n2016-12-31 9940.0 1.0 \n2017-12-31 11020.0 0.0 \n" ], [ "last_date = sunspots.index[-1]\nprint(\"Last value:\\n\",sunspots.loc[last_date])", "Last value:\n Yearly Mean Total Sunspot Number 21.7\nYearly Mean Standard Deviation 2.6\nNumber of Observations 11020.0\nDefinitive/Provisional Indicator 0.0\nName: 2017-12-31 00:00:00, dtype: float64\n" ], [ "print(\"Values slice by date:\\n\", sunspots[\"20020101\": \"20131231\"])", "Values slice by date:\n Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\nDate \n2002-12-31 163.6 9.8 \n2003-12-31 99.3 7.1 \n2004-12-31 65.3 5.9 \n2005-12-31 45.8 4.7 \n2006-12-31 24.7 3.5 \n2007-12-31 12.6 2.7 \n2008-12-31 4.2 2.5 \n2009-12-31 4.8 2.5 \n2010-12-31 24.9 3.4 \n2011-12-31 80.8 6.7 \n2012-12-31 84.5 6.7 \n2013-12-31 94.0 6.9 \n\n Number of Observations Definitive/Provisional Indicator \nDate \n2002-12-31 6588.0 1.0 \n2003-12-31 7087.0 1.0 \n2004-12-31 6882.0 1.0 \n2005-12-31 7084.0 1.0 \n2006-12-31 6370.0 1.0 \n2007-12-31 6841.0 1.0 \n2008-12-31 6644.0 1.0 \n2009-12-31 6465.0 1.0 \n2010-12-31 6328.0 1.0 \n2011-12-31 6077.0 1.0 \n2012-12-31 5753.0 1.0 \n2013-12-31 5347.0 1.0 \n" ], [ "print(\"Slice from a list of indices:\\n\", sunspots.iloc[[2, 4, -4, -2]])", "Slice from a list of indices:\n Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\nDate \n1702-12-31 26.7 NaN \n1704-12-31 60.0 NaN \n2014-12-31 113.3 8.0 \n2016-12-31 39.8 3.9 \n\n Number of Observations Definitive/Provisional Indicator \nDate \n1702-12-31 NaN 1.0 \n1704-12-31 NaN 1.0 \n2014-12-31 5273.0 1.0 \n2016-12-31 9940.0 1.0 \n" ], [ "print(\"Scalar with Iloc:\", sunspots.iloc[0, 0])\nprint(\"Scalar with iat\", sunspots.iat[1, 0])", "Scalar with Iloc: 8.3\nScalar with iat 18.3\n" ], [ "print(\"Boolean selection:\\n\", sunspots[sunspots > sunspots.mean()])", "Boolean selection:\n Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\nDate \n1700-12-31 NaN NaN \n1701-12-31 NaN NaN \n1702-12-31 NaN NaN \n1703-12-31 NaN NaN \n1704-12-31 NaN NaN \n1705-12-31 96.7 NaN \n1706-12-31 NaN NaN \n1707-12-31 NaN NaN \n1708-12-31 NaN NaN \n1709-12-31 NaN NaN \n1710-12-31 NaN NaN \n1711-12-31 NaN NaN \n1712-12-31 NaN 
NaN \n1713-12-31 NaN NaN \n1714-12-31 NaN NaN \n1715-12-31 NaN NaN \n1716-12-31 NaN NaN \n1717-12-31 105.0 NaN \n1718-12-31 100.0 NaN \n1719-12-31 NaN NaN \n1720-12-31 NaN NaN \n1721-12-31 NaN NaN \n1722-12-31 NaN NaN \n1723-12-31 NaN NaN \n1724-12-31 NaN NaN \n1725-12-31 NaN NaN \n1726-12-31 130.0 NaN \n1727-12-31 203.3 NaN \n1728-12-31 171.7 NaN \n1729-12-31 121.7 NaN \n... ... ... \n1988-12-31 123.0 8.4 \n1989-12-31 211.1 12.8 \n1990-12-31 191.8 11.2 \n1991-12-31 203.3 12.7 \n1992-12-31 133.0 8.9 \n1993-12-31 NaN NaN \n1994-12-31 NaN NaN \n1995-12-31 NaN NaN \n1996-12-31 NaN NaN \n1997-12-31 NaN NaN \n1998-12-31 88.3 NaN \n1999-12-31 136.3 9.3 \n2000-12-31 173.9 10.1 \n2001-12-31 170.4 10.5 \n2002-12-31 163.6 9.8 \n2003-12-31 99.3 NaN \n2004-12-31 NaN NaN \n2005-12-31 NaN NaN \n2006-12-31 NaN NaN \n2007-12-31 NaN NaN \n2008-12-31 NaN NaN \n2009-12-31 NaN NaN \n2010-12-31 NaN NaN \n2011-12-31 80.8 NaN \n2012-12-31 84.5 NaN \n2013-12-31 94.0 NaN \n2014-12-31 113.3 8.0 \n2015-12-31 NaN NaN \n2016-12-31 NaN NaN \n2017-12-31 NaN NaN \n\n Number of Observations Definitive/Provisional Indicator \nDate \n1700-12-31 NaN 1.0 \n1701-12-31 NaN 1.0 \n1702-12-31 NaN 1.0 \n1703-12-31 NaN 1.0 \n1704-12-31 NaN 1.0 \n1705-12-31 NaN 1.0 \n1706-12-31 NaN 1.0 \n1707-12-31 NaN 1.0 \n1708-12-31 NaN 1.0 \n1709-12-31 NaN 1.0 \n1710-12-31 NaN 1.0 \n1711-12-31 NaN 1.0 \n1712-12-31 NaN 1.0 \n1713-12-31 NaN 1.0 \n1714-12-31 NaN 1.0 \n1715-12-31 NaN 1.0 \n1716-12-31 NaN 1.0 \n1717-12-31 NaN 1.0 \n1718-12-31 NaN 1.0 \n1719-12-31 NaN 1.0 \n1720-12-31 NaN 1.0 \n1721-12-31 NaN 1.0 \n1722-12-31 NaN 1.0 \n1723-12-31 NaN 1.0 \n1724-12-31 NaN 1.0 \n1725-12-31 NaN 1.0 \n1726-12-31 NaN 1.0 \n1727-12-31 NaN 1.0 \n1728-12-31 NaN 1.0 \n1729-12-31 NaN 1.0 \n... ... ... \n1988-12-31 6556.0 1.0 \n1989-12-31 6932.0 1.0 \n1990-12-31 7108.0 1.0 \n1991-12-31 6932.0 1.0 \n1992-12-31 7845.0 1.0 \n1993-12-31 8010.0 1.0 \n1994-12-31 8524.0 1.0 \n1995-12-31 8429.0 1.0 \n1996-12-31 7614.0 1.0 \n1997-12-31 7294.0 1.0 \n1998-12-31 6353.0 1.0 \n1999-12-31 6413.0 1.0 \n2000-12-31 5953.0 1.0 \n2001-12-31 6558.0 1.0 \n2002-12-31 6588.0 1.0 \n2003-12-31 7087.0 1.0 \n2004-12-31 6882.0 1.0 \n2005-12-31 7084.0 1.0 \n2006-12-31 6370.0 1.0 \n2007-12-31 6841.0 1.0 \n2008-12-31 6644.0 1.0 \n2009-12-31 6465.0 1.0 \n2010-12-31 6328.0 1.0 \n2011-12-31 6077.0 1.0 \n2012-12-31 5753.0 1.0 \n2013-12-31 5347.0 1.0 \n2014-12-31 5273.0 1.0 \n2015-12-31 8903.0 1.0 \n2016-12-31 9940.0 1.0 \n2017-12-31 11020.0 NaN \n\n[318 rows x 4 columns]\n" ], [ "print(\"Boolean selection with column label:\\n\", sunspots[sunspots['Number of Observations'] > sunspots['Number of Observations'].mean()])", "Boolean selection with column label:\n Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\nDate \n1981-12-31 198.9 13.1 \n1982-12-31 162.4 12.1 \n1983-12-31 91.0 7.6 \n1984-12-31 60.5 5.9 \n1985-12-31 20.6 3.7 \n1986-12-31 14.8 3.5 \n1987-12-31 33.9 3.7 \n1988-12-31 123.0 8.4 \n1989-12-31 211.1 12.8 \n1990-12-31 191.8 11.2 \n1991-12-31 203.3 12.7 \n1992-12-31 133.0 8.9 \n1993-12-31 76.1 5.8 \n1994-12-31 44.9 4.4 \n1995-12-31 25.1 3.7 \n1996-12-31 11.6 3.1 \n1997-12-31 28.9 3.6 \n1998-12-31 88.3 6.6 \n1999-12-31 136.3 9.3 \n2000-12-31 173.9 10.1 \n2001-12-31 170.4 10.5 \n2002-12-31 163.6 9.8 \n2003-12-31 99.3 7.1 \n2004-12-31 65.3 5.9 \n2005-12-31 45.8 4.7 \n2006-12-31 24.7 3.5 \n2007-12-31 12.6 2.7 \n2008-12-31 4.2 2.5 \n2009-12-31 4.8 2.5 \n2010-12-31 24.9 3.4 \n2011-12-31 80.8 6.7 \n2012-12-31 84.5 6.7 \n2013-12-31 94.0 6.9 \n2014-12-31 113.3 8.0 \n2015-12-31 69.8 
6.4 \n2016-12-31 39.8 3.9 \n2017-12-31 21.7 2.6 \n\n Number of Observations Definitive/Provisional Indicator \nDate \n1981-12-31 3049.0 1.0 \n1982-12-31 3436.0 1.0 \n1983-12-31 4216.0 1.0 \n1984-12-31 5103.0 1.0 \n1985-12-31 5543.0 1.0 \n1986-12-31 5934.0 1.0 \n1987-12-31 6396.0 1.0 \n1988-12-31 6556.0 1.0 \n1989-12-31 6932.0 1.0 \n1990-12-31 7108.0 1.0 \n1991-12-31 6932.0 1.0 \n1992-12-31 7845.0 1.0 \n1993-12-31 8010.0 1.0 \n1994-12-31 8524.0 1.0 \n1995-12-31 8429.0 1.0 \n1996-12-31 7614.0 1.0 \n1997-12-31 7294.0 1.0 \n1998-12-31 6353.0 1.0 \n1999-12-31 6413.0 1.0 \n2000-12-31 5953.0 1.0 \n2001-12-31 6558.0 1.0 \n2002-12-31 6588.0 1.0 \n2003-12-31 7087.0 1.0 \n2004-12-31 6882.0 1.0 \n2005-12-31 7084.0 1.0 \n2006-12-31 6370.0 1.0 \n2007-12-31 6841.0 1.0 \n2008-12-31 6644.0 1.0 \n2009-12-31 6465.0 1.0 \n2010-12-31 6328.0 1.0 \n2011-12-31 6077.0 1.0 \n2012-12-31 5753.0 1.0 \n2013-12-31 5347.0 1.0 \n2014-12-31 5273.0 1.0 \n2015-12-31 8903.0 1.0 \n2016-12-31 9940.0 1.0 \n2017-12-31 11020.0 0.0 \n" ] ], [ [ "# Statistics with pandas DataFrame", "_____no_output_____" ] ], [ [ "import quandl\n\n# Data from http://www.quandl.com/SIDC/SUNSPOTS_A-Sunspot-Numbers-Annual\n# PyPi url https://pypi.python.org/pypi/Quandl\nsunspots = quandl.get(\"SIDC/SUNSPOTS_A\")\nprint(\"Describe\", sunspots.describe(),\"\\n\")\nprint(\"Non NaN observations\", sunspots.count(),\"\\n\")\nprint(\"MAD\", sunspots.mad(),\"\\n\")\nprint(\"Median\", sunspots.median(),\"\\n\")\nprint(\"Min\", sunspots.min(),\"\\n\")\nprint(\"Max\", sunspots.max(),\"\\n\")\nprint(\"Mode\", sunspots.mode(),\"\\n\")\nprint(\"Standard Deviation\", sunspots.std(),\"\\n\")\nprint(\"Variance\", sunspots.var(),\"\\n\")\nprint(\"Skewness\", sunspots.skew(),\"\\n\")\nprint(\"Kurtosis\", sunspots.kurt(),\"\\n\")", "Describe Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\ncount 318.000000 200.000000 \nmean 79.196855 7.982500 \nstd 61.985539 3.818567 \nmin 0.000000 1.700000 \n25% 24.950000 4.700000 \n50% 66.250000 7.650000 \n75% 116.025000 10.425000 \nmax 269.300000 19.100000 \n\n Number of Observations Definitive/Provisional Indicator \ncount 200.000000 318.000000 \nmean 1515.440000 0.996855 \nstd 2548.854285 0.056077 \nmin 150.000000 0.000000 \n25% 365.000000 1.000000 \n50% 365.000000 1.000000 \n75% 366.000000 1.000000 \nmax 11020.000000 1.000000 \n\nNon NaN observations Yearly Mean Total Sunspot Number 318\nYearly Mean Standard Deviation 200\nNumber of Observations 200\nDefinitive/Provisional Indicator 318\ndtype: int64 \n\nMAD Yearly Mean Total Sunspot Number 50.922167\nYearly Mean Standard Deviation 3.138625\nNumber of Observations 1907.287200\nDefinitive/Provisional Indicator 0.006270\ndtype: float64 \n\nMedian Yearly Mean Total Sunspot Number 66.25\nYearly Mean Standard Deviation 7.65\nNumber of Observations 365.00\nDefinitive/Provisional Indicator 1.00\ndtype: float64 \n\nMin Yearly Mean Total Sunspot Number 0.0\nYearly Mean Standard Deviation 1.7\nNumber of Observations 150.0\nDefinitive/Provisional Indicator 0.0\ndtype: float64 \n\nMax Yearly Mean Total Sunspot Number 269.3\nYearly Mean Standard Deviation 19.1\nNumber of Observations 11020.0\nDefinitive/Provisional Indicator 1.0\ndtype: float64 \n\nMode Yearly Mean Total Sunspot Number Yearly Mean Standard Deviation \\\n0 18.3 9.2 \n\n Number of Observations Definitive/Provisional Indicator \n0 365.0 1.0 \n\nStandard Deviation Yearly Mean Total Sunspot Number 61.985539\nYearly Mean Standard Deviation 3.818567\nNumber of Observations 2548.854285\nDefinitive/Provisional 
Indicator 0.056077\ndtype: float64 \n\nVariance Yearly Mean Total Sunspot Number 3.842207e+03\nYearly Mean Standard Deviation 1.458145e+01\nNumber of Observations 6.496658e+06\nDefinitive/Provisional Indicator 3.144654e-03\ndtype: float64 \n\nSkewness Yearly Mean Total Sunspot Number 0.808657\nYearly Mean Standard Deviation 0.561499\nNumber of Observations 1.892803\nDefinitive/Provisional Indicator -17.832555\ndtype: float64 \n\nKurtosis Yearly Mean Total Sunspot Number -0.129808\nYearly Mean Standard Deviation -0.249446\nNumber of Observations 2.082363\nDefinitive/Provisional Indicator 318.000000\ndtype: float64 \n\n" ] ], [ [ "# Data Aggregation", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom numpy.random import seed\nfrom numpy.random import rand\nfrom numpy.random import randint\nimport numpy as np\n\nseed(42)\n\ndf = pd.DataFrame({'Weather' : ['cold', 'hot', 'cold', 'hot',\n 'cold', 'hot', 'cold'],\n 'Food' : ['soup', 'soup', 'icecream', 'chocolate',\n 'icecream', 'icecream', 'soup'],\n 'Price' : 10 * rand(7), 'Number' : randint(1, 9)})\n\nprint(df)", " Food Number Price Weather\n0 soup 8 3.745401 cold\n1 soup 8 9.507143 hot\n2 icecream 8 7.319939 cold\n3 chocolate 8 5.986585 hot\n4 icecream 8 1.560186 cold\n5 icecream 8 1.559945 hot\n6 soup 8 0.580836 cold\n" ], [ "weather_group = df.groupby('Weather')\n\ni = 0\n\nfor name, group in weather_group:\n i = i + 1\n print(\"Group\", i, name)\n print(group)", "Group 1 cold\n Food Number Price Weather\n0 soup 8 3.745401 cold\n2 icecream 8 7.319939 cold\n4 icecream 8 1.560186 cold\n6 soup 8 0.580836 cold\nGroup 2 hot\n Food Number Price Weather\n1 soup 8 9.507143 hot\n3 chocolate 8 5.986585 hot\n5 icecream 8 1.559945 hot\n" ], [ "print(\"Weather group first\\n\", weather_group.first())\nprint(\"Weather group last\\n\", weather_group.last())\nprint(\"Weather group mean\\n\", weather_group.mean())", "Weather group first\n Food Number Price\nWeather \ncold soup 8 3.745401\nhot soup 8 9.507143\nWeather group last\n Food Number Price\nWeather \ncold soup 8 0.580836\nhot icecream 8 1.559945\nWeather group mean\n Number Price\nWeather \ncold 8 3.301591\nhot 8 5.684558\n" ], [ "wf_group = df.groupby(['Weather', 'Food'])\nprint(\"WF Groups\", wf_group.groups)", "WF Groups {('cold', 'icecream'): Int64Index([2, 4], dtype='int64'), ('cold', 'soup'): Int64Index([0, 6], dtype='int64'), ('hot', 'chocolate'): Int64Index([3], dtype='int64'), ('hot', 'icecream'): Int64Index([5], dtype='int64'), ('hot', 'soup'): Int64Index([1], dtype='int64')}\n" ], [ "print(\"WF Aggregated\\n\", wf_group.agg([np.mean, np.median]))", "WF Aggregated\n Number Price \n mean median mean median\nWeather Food \ncold icecream 8 8 4.440063 4.440063\n soup 8 8 2.163119 2.163119\nhot chocolate 8 8 5.986585 5.986585\n icecream 8 8 1.559945 1.559945\n soup 8 8 9.507143 9.507143\n" ] ], [ [ "# Concatenating and appending DataFrames", "_____no_output_____" ] ], [ [ "print(\"df :3\\n\", df[:3])", "df :3\n Food Number Price Weather\n0 soup 8 3.745401 cold\n1 soup 8 9.507143 hot\n2 icecream 8 7.319939 cold\n" ], [ "print(\"Concat Back together\\n\", pd.concat([df[:3], df[3:]]))", "Concat Back together\n Food Number Price Weather\n0 soup 8 3.745401 cold\n1 soup 8 9.507143 hot\n2 icecream 8 7.319939 cold\n3 chocolate 8 5.986585 hot\n4 icecream 8 1.560186 cold\n5 icecream 8 1.559945 hot\n6 soup 8 0.580836 cold\n" ], [ "print(\"Appending rows\\n\", df[:3].append(df[5:]))", "Appending rows\n Food Number Price Weather\n0 soup 8 3.745401 cold\n1 soup 8 9.507143 hot\n2 icecream 8 7.319939 
cold\n5 icecream 8 1.559945 hot\n6 soup 8 0.580836 cold\n" ] ], [ [ "# joining DataFrames", "_____no_output_____" ] ], [ [ "dests = pd.read_csv('dest.csv')\nprint(\"Dests\\n\", dests)\n\ntips = pd.read_csv('tips.csv')\nprint(\"Tips\\n\", tips)\n\nprint(\"Merge() on key\\n\", pd.merge(dests, tips, on='EmpNr'))\nprint(\"Dests join() tips\\n\", dests.join(tips, lsuffix='Dest', rsuffix='Tips'))\n\nprint(\"Inner join with merge()\\n\", pd.merge(dests, tips, how='inner'))\nprint(\"Outer join\\n\", pd.merge(dests, tips, how='outer'))", "Dests\n EmpNr Dest\n0 5 The Hague\n1 3 Amsterdam\n2 9 Rotterdam\nTips\n EmpNr Amount\n0 5 10.0\n1 9 5.0\n2 7 2.5\nMerge() on key\n EmpNr Dest Amount\n0 5 The Hague 10.0\n1 9 Rotterdam 5.0\nDests join() tips\n EmpNrDest Dest EmpNrTips Amount\n0 5 The Hague 5 10.0\n1 3 Amsterdam 9 5.0\n2 9 Rotterdam 7 2.5\nInner join with merge()\n EmpNr Dest Amount\n0 5 The Hague 10.0\n1 9 Rotterdam 5.0\nOuter join\n EmpNr Dest Amount\n0 5 The Hague 10.0\n1 3 Amsterdam NaN\n2 9 Rotterdam 5.0\n3 7 NaN 2.5\n" ] ], [ [ "# Handlng missing Values", "_____no_output_____" ] ], [ [ "df = pd.read_csv('WHO_first9cols.csv')\n# Select first 3 rows of country and Net primary school enrolment ratio male (%)\ndf = df[['Country', df.columns[-2]]][:2]\nprint(\"New df\\n\", df)\nprint(\"Null Values\\n\", pd.isnull(df))\nprint(\"Total Null Values\\n\", pd.isnull(df).sum())\nprint(\"Not Null Values\\n\", df.notnull())\nprint(\"Last Column Doubled\\n\", 2 * df[df.columns[-1]])\nprint(\"Last Column plus NaN\\n\", df[df.columns[-1]] + np.nan)\nprint(\"Zero filled\\n\", df.fillna(0))", "New df\n Country Net primary school enrolment ratio male (%)\n0 Afghanistan NaN\n1 Albania 94.0\nNull Values\n Country Net primary school enrolment ratio male (%)\n0 False True\n1 False False\nTotal Null Values\n Country 0\nNet primary school enrolment ratio male (%) 1\ndtype: int64\nNot Null Values\n Country Net primary school enrolment ratio male (%)\n0 True False\n1 True True\nLast Column Doubled\n 0 NaN\n1 188.0\nName: Net primary school enrolment ratio male (%), dtype: float64\nLast Column plus NaN\n 0 NaN\n1 NaN\nName: Net primary school enrolment ratio male (%), dtype: float64\nZero filled\n Country Net primary school enrolment ratio male (%)\n0 Afghanistan 0.0\n1 Albania 94.0\n" ] ], [ [ "# dealing with dates", "_____no_output_____" ] ], [ [ "print(\"Date range\", pd.date_range('1/1/1900', periods=42, freq='D'))", "Date range DatetimeIndex(['1900-01-01', '1900-01-02', '1900-01-03', '1900-01-04',\n '1900-01-05', '1900-01-06', '1900-01-07', '1900-01-08',\n '1900-01-09', '1900-01-10', '1900-01-11', '1900-01-12',\n '1900-01-13', '1900-01-14', '1900-01-15', '1900-01-16',\n '1900-01-17', '1900-01-18', '1900-01-19', '1900-01-20',\n '1900-01-21', '1900-01-22', '1900-01-23', '1900-01-24',\n '1900-01-25', '1900-01-26', '1900-01-27', '1900-01-28',\n '1900-01-29', '1900-01-30', '1900-01-31', '1900-02-01',\n '1900-02-02', '1900-02-03', '1900-02-04', '1900-02-05',\n '1900-02-06', '1900-02-07', '1900-02-08', '1900-02-09',\n '1900-02-10', '1900-02-11'],\n dtype='datetime64[ns]', freq='D')\n" ], [ "import sys\ntry:\n print(\"Date range\", pd.date_range('1/1/1677', periods=4, freq='D'))\nexcept:\n etype, value, _ = sys.exc_info()\n print(\"Error encountered\", etype, value)", "Error encountered <class 'pandas._libs.tslib.OutOfBoundsDatetime'> Out of bounds nanosecond timestamp: 1677-01-01 00:00:00\n" ], [ "offset = pd.DateOffset(seconds=2 ** 33/10 ** 9)\nmid = pd.to_datetime('1/1/1970')\nprint(\"Start valid range\", mid - 
offset)\nprint(\"End valid range\", mid + offset)", "Start valid range 1969-12-31 23:59:51.410065\nEnd valid range 1970-01-01 00:00:08.589935\n" ], [ "print(\"With format\", pd.to_datetime(['19021112', '19031230'], format='%Y%m%d'))", "With format DatetimeIndex(['1902-11-12', '1903-12-30'], dtype='datetime64[ns]', freq=None)\n" ], [ "print(\"Illegal date coerced\", pd.to_datetime(['1902-11-12', 'not a date'], errors='coerce'))", "Illegal date coerced DatetimeIndex(['1902-11-12', 'NaT'], dtype='datetime64[ns]', freq=None)\n" ] ], [ [ "# Pivot Tables", "_____no_output_____" ] ], [ [ "seed(42)\nN = 7\ndf = pd.DataFrame({\n 'Weather' : ['cold', 'hot', 'cold', 'hot',\n 'cold', 'hot', 'cold'],\n 'Food' : ['soup', 'soup', 'icecream', 'chocolate',\n 'icecream', 'icecream', 'soup'],\n 'Price' : 10 * rand(N), 'Number' : randint(1, 9)})", "_____no_output_____" ], [ "print(\"DataFrame\\n\", df)", "DataFrame\n Food Number Price Weather\n0 soup 8 3.745401 cold\n1 soup 8 9.507143 hot\n2 icecream 8 7.319939 cold\n3 chocolate 8 5.986585 hot\n4 icecream 8 1.560186 cold\n5 icecream 8 1.559945 hot\n6 soup 8 0.580836 cold\n" ], [ "print(pd.pivot_table(df, columns=['Food'], aggfunc=np.sum))", "Food chocolate icecream soup\nNumber 8.000000 24.000000 24.00000\nPrice 5.986585 10.440071 13.83338\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
c5221f00b0c7a0c8169a43a3685805ff4b488799
235,545
ipynb
Jupyter Notebook
notebooks/well_example.ipynb
vcantarella/ttim
bdcc9f9446c0bda3f437073a29d9a263c59810b2
[ "MIT" ]
15
2015-08-24T17:28:37.000Z
2021-12-21T19:52:56.000Z
notebooks/well_example.ipynb
vcantarella/ttim
bdcc9f9446c0bda3f437073a29d9a263c59810b2
[ "MIT" ]
26
2016-02-09T13:23:56.000Z
2022-02-24T08:06:54.000Z
notebooks/well_example.ipynb
vcantarella/ttim
bdcc9f9446c0bda3f437073a29d9a263c59810b2
[ "MIT" ]
23
2015-09-26T09:18:08.000Z
2021-11-15T19:51:50.000Z
311.980132
43,040
0.930145
[ [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom ttim import *", "_____no_output_____" ] ], [ [ "### Theis", "_____no_output_____" ] ], [ [ "from scipy.special import exp1\ndef theis(r, t, T, S, Q):\n u = r ** 2 * S / (4 * T * t)\n h = -Q / (4 * np.pi * T) * exp1(u)\n return h\n\ndef theisQr(r, t, T, S, Q):\n u = r ** 2 * S / (4 * T * t)\n return -Q / (2 * np.pi) * np.exp(-u) / r", "_____no_output_____" ], [ "T = 500\nS = 1e-4\nt = np.logspace(-5, 0, 100)\nr = 30\nQ = 788", "_____no_output_____" ], [ "htheis = theis(r, t, T, S, Q)\nQrtheis = theisQr(r, t, T, S, Q)", "_____no_output_____" ], [ "ml = ModelMaq(kaq=25, z=[20, 0], Saq=S/20, tmin=1e-5, tmax=1)\nw = Well(ml, tsandQ=[(0, Q)], rw=1e-5)\nml.solve()\nh = ml.head(r, 0, t)\nQx, Qy = ml.disvec(r, 0, t)", "self.neq 1\nsolution complete\n" ], [ "plt.figure(figsize=(12, 4))\nplt.subplot(121)\nplt.semilogx(t, htheis, 'b', label='theis')\nplt.semilogx(t, h[0], 'r--', label='ttim')\nplt.xlabel('time (day)')\nplt.ylabel('head (m)')\nplt.legend();\nplt.subplot(122)\nplt.semilogx(t, Qrtheis, 'b', label='theis')\nplt.semilogx(t, Qx[0], 'r--', label='ttim')\nplt.xlabel('time (day)')\nplt.ylabel('head (m)')\nplt.legend(loc='best');", "_____no_output_____" ], [ "def test(M=10):\n ml = ModelMaq(kaq=25, z=[20, 0], Saq=S/20, tmin=1e-5, tmax=1, M=M)\n w = Well(ml, tsandQ=[(0, Q)], rw=1e-5)\n ml.solve(silent=True)\n h = ml.head(r, 0, t)\n return htheis - h[0]", "_____no_output_____" ], [ "enumba = test(M=10)\nplt.plot(t, enumba, 'C1')\nplt.xlabel('time (d)')\nplt.ylabel('head difference Thies - Ttim');", "_____no_output_____" ], [ "plt.plot(t, Qrtheis - Qx[0])\nplt.xlabel('time (d)')\nplt.ylabel('Qx difference Thies - Ttim');", "_____no_output_____" ], [ "def compare(M=10):\n ml = ModelMaq(kaq=25, z=[20, 0], Saq=S/20, tmin=1e-5, tmax=1, M=M)\n w = Well(ml, tsandQ=[(0, Q)], rw=1e-5)\n ml.solve(silent=True)\n h = ml.head(r, 0, t)\n rmse = np.sqrt(np.mean((h[0] - htheis)**2))\n return rmse\n\nMlist = np.arange(1, 21)\nrmse = np.zeros(len(Mlist))\nfor i, M in enumerate(Mlist):\n rmse[i] = compare(M)\nplt.semilogy(Mlist, rmse)\nplt.xlabel('Number of terms M')\nplt.xticks(np.arange(1, 21))\nplt.ylabel('relative error')\nplt.title('comparison between TTim solution and Theis \\n solution using numba and M terms')\nplt.grid()", "_____no_output_____" ], [ "def volume(r, t=1):\n return -2 * np.pi * r * ml.head(r, 0, t) * ml.aq.Scoefaq[0]\n\nfrom scipy.integrate import quad\nquad(volume, 1e-5, np.inf)", "_____no_output_____" ], [ "from scipy.special import exp1\ndef theis2(r, t, T, S, Q, tend):\n u1 = r ** 2 * S / (4 * T * t)\n u2 = r ** 2 * S / (4 * T * (t[t > tend] - tend))\n h = -Q / (4 * np.pi * T) * exp1(u1)\n h[t > tend] -= -Q / (4 * np.pi * T) * exp1(u2)\n return h", "_____no_output_____" ], [ "ml2 = ModelMaq(kaq=25, z=[20, 0], Saq=S/20, tmin=1e-5, tmax=10)\nw2 = Well(ml2, tsandQ=[(0, Q), (1, 0)])\nml2.solve()", "self.neq 1\nsolution complete\n" ], [ "t2 = np.linspace(0.01, 2, 100)\nhtheis2 = theis2(r, t2, T, S, Q, tend=1)\nh2 = ml2.head(r, 0, t2)", "_____no_output_____" ], [ "plt.plot(t2, htheis2, 'b', label='theis')\nplt.plot(t2, h2[0], 'r--', label='ttim')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "### Hantush", "_____no_output_____" ] ], [ [ "T = 500\nS = 1e-4\nc = 1000\nt = np.logspace(-5, 0, 100)\nr = 30\nQ = 788", "_____no_output_____" ], [ "from scipy.integrate import quad\ndef integrand_hantush(y, r, lab):\n return np.exp(-y - r ** 2 / (4 * lab ** 2 * y)) / y\n\ndef hantush(r, t, T, S, c, Q, 
tstart=0):\n lab = np.sqrt(T * c)\n u = r ** 2 * S / (4 * T * (t - tstart))\n F = quad(integrand_hantush, u, np.inf, args=(r, lab))[0]\n return -Q / (4 * np.pi * T) * F\n\nhantushvec = np.vectorize(hantush)", "_____no_output_____" ], [ "ml = ModelMaq(kaq=25, z=[21, 20, 0], c=[1000], Saq=S/20, topboundary='semi', tmin=1e-5, tmax=1)\nw = Well(ml, tsandQ=[(0, Q)])\nml.solve()", "self.neq 1\nsolution complete\n" ], [ "hhantush = hantushvec(30, t, T, S, c, Q)\nh = ml.head(r, 0, t)\nplt.semilogx(t, hhantush, 'b', label='hantush')\nplt.semilogx(t, h[0], 'r--', label='ttim')\nplt.legend(loc='best');", "_____no_output_____" ] ], [ [ "### Well with welbore storage", "_____no_output_____" ] ], [ [ "T = 500\nS = 1e-4\nt = np.logspace(-5, 0, 100)\nrw = 0.3\nQ = 788\n\nml = ModelMaq(kaq=25, z=[20, 0], Saq=S/20, tmin=1e-5, tmax=1)\nw = Well(ml, rw=rw, tsandQ=[(0, Q)])\nml.solve()\nhnostorage = ml.head(rw, 0, t)\n\nml = ModelMaq(kaq=25, z=[20, 0], Saq=S/20, tmin=1e-5, tmax=1)\nw = Well(ml, rw=rw, tsandQ=[(0, Q)], rc=rw)\nml.solve()\nhstorage = ml.head(rw, 0, t)\n\nplt.semilogx(t, hnostorage[0], label='no storage')\nplt.semilogx(t, hstorage[0], label='with storage')\nplt.legend(loc='best')\nplt.xticks([1/(24*60*60), 1/(24 * 60), 1/24, 1], ['1 sec', '1 min', '1 hr', '1 d']);", "self.neq 1\nsolution complete\nself.neq 1\nsolution complete\n" ] ], [ [ "### Slug test", "_____no_output_____" ] ], [ [ "k = 25\nH = 20\nS = 1e-4 / H\nt = np.logspace(-7, -1, 100)\nrw = 0.2\nrc = 0.2\ndelh = 1\nml = ModelMaq(kaq=k, z=[H, 0], Saq=S, tmin=1e-7, tmax=1)\nQslug = np.pi * rc ** 2 * delh\nw = Well(ml, tsandQ=[(0, -Qslug)], rw=rw, rc=rc, wbstype='slug')\nml.solve()\nh = w.headinside(t)\nplt.semilogx(t, h[0])\nplt.xticks([1 / (24 * 60 * 60) / 10, 1 / (24 * 60 * 60), 1 / (24 * 60), 1 / 24], \n ['0.1 sec', '1 sec', '1 min', '1 hr']);", "self.neq 1\nsolution complete\n" ] ], [ [ "### Slug test in 5-layer aquifer\nWell in top 2 layers", "_____no_output_____" ] ], [ [ "k = 25\nH = 20\nSs = 1e-4 / H\nt = np.logspace(-7, -1, 100)\nrw = 0.2\nrc = 0.2\ndelh = 1\nml = Model3D(kaq=k, z=np.linspace(H, 0, 6), Saq=Ss, tmin=1e-7, tmax=1)\nQslug = np.pi * rc**2 * delh\nw = Well(ml, tsandQ=[(0, -Qslug)], rw=rw, rc=rc, layers=[0, 1], wbstype='slug')\nml.solve()\nhw = w.headinside(t)\nplt.semilogx(t, hw[0], label='inside well')\nh = ml.head(0.2 + 1e-8, 0, t)\nfor i in range(2, 5):\n plt.semilogx(t, h[i], label='layer' + str(i))\nplt.legend()\nplt.xticks([1/(24*60*60)/10, 1/(24*60*60), 1/(24 * 60), 1/24], ['0.1 sec', '1 sec', '1 min', '1 hr']);", "self.neq 2\nsolution complete\n" ] ], [ [ "20 layers", "_____no_output_____" ] ], [ [ "k = 25\nH = 20\nS = 1e-4 / H\nt = np.logspace(-7, -1, 100)\nrw = 0.2\nrc = 0.2\ndelh = 1\nml = Model3D(kaq=k, z=np.linspace(H, 0, 21), Saq=S, tmin=1e-7, tmax=1)\nQslug = np.pi * rc**2 * delh\nw = Well(ml, tsandQ=[(0, -Qslug)], rw=rw, rc=rc, layers=np.arange(8), wbstype='slug')\nml.solve()\nhw = w.headinside(t)\nplt.semilogx(t, hw[0], label='inside well')\nh = ml.head(0.2 + 1e-8, 0, t)\nfor i in range(8, 20):\n plt.semilogx(t, h[i], label='layer' + str(i))\nplt.legend()\nplt.xticks([1/(24*60*60)/10, 1/(24*60*60), 1/(24 * 60), 1/24], ['0.1 sec', '1 sec', '1 min', '1 hr']);", "self.neq 8\nsolution complete\n" ] ], [ [ "### Head Well", "_____no_output_____" ] ], [ [ "ml = ModelMaq(kaq=25, z=[20, 0], Saq=1e-5, tmin=1e-3, tmax=1000)\nw = HeadWell(ml, tsandh=[(0, -1)], rw=0.2)\nml.solve()\nplt.figure(figsize=(12,5))\nplt.subplot(1,2,1)\nml.xsection(0.2, 100, 0, 0, 100, t=[0.1, 1, 10], sstart=0.2, newfig=False)\nt = 
np.logspace(-3, 3, 100)\ndis = w.discharge(t)\nplt.subplot(1,2,2)\nplt.semilogx(t, dis[0], label='rw=0.2')\nml = ModelMaq(kaq=25, z=[20, 0], Saq=1e-5, tmin=1e-3, tmax=1000)\nw = HeadWell(ml, tsandh=[(0, -1)], rw=0.3)\nml.solve()\ndis = w.discharge(t)\nplt.semilogx(t, dis[0], label='rw=0.3')\nplt.xlabel('time (d)')\nplt.ylabel('discharge (m3/d)')\nplt.legend();", "self.neq 1\nsolution complete\nself.neq 1\nsolution complete\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c522301a71ac1c1a322adf12b7dea23e2228b998
1,289
ipynb
Jupyter Notebook
search_data.ipynb
cris18274/Analisis_datos_python
7c2d03b77f3555330be2dd1375ee9b03a52a7b26
[ "MIT" ]
1
2020-09-25T22:00:52.000Z
2020-09-25T22:00:52.000Z
search_data.ipynb
cris18274/Analisis_datos_python
7c2d03b77f3555330be2dd1375ee9b03a52a7b26
[ "MIT" ]
null
null
null
search_data.ipynb
cris18274/Analisis_datos_python
7c2d03b77f3555330be2dd1375ee9b03a52a7b26
[ "MIT" ]
null
null
null
29.295455
69
0.707525
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c5223370e350fc9921dc6893b5b1530822ebf979
17,137
ipynb
Jupyter Notebook
revision.ipynb
Atimangojackline/Business-analytics-with-python
168025e2b7d10ff460f42bf358baf5e5bcfb6c0e
[ "Apache-2.0" ]
null
null
null
revision.ipynb
Atimangojackline/Business-analytics-with-python
168025e2b7d10ff460f42bf358baf5e5bcfb6c0e
[ "Apache-2.0" ]
null
null
null
revision.ipynb
Atimangojackline/Business-analytics-with-python
168025e2b7d10ff460f42bf358baf5e5bcfb6c0e
[ "Apache-2.0" ]
null
null
null
20.8226
67
0.375912
[ [ [ "print(\"Hello python\")\n#print python", "Hello python\n" ], [ "x=10\nprint(x)", "10\n" ], [ "a=10", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "b=20", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "a+b", "_____no_output_____" ], [ "c=a+b", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "k=\"python\"", "_____no_output_____" ], [ "print(k)", "python\n" ], [ "k+\" \" + \"program\"", "_____no_output_____" ], [ "a, b=1, 2", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "x=4\ny=x+1\nx=2\nprint(x, y)", "2 5\n" ], [ "x, y=2, 6\nx, y= y, x+2\nprint(x, y)", "6 4\n" ], [ "4.6+5.2", "_____no_output_____" ], [ "2+1", "_____no_output_____" ], [ "2/1", "_____no_output_____" ], [ "2*3", "_____no_output_____" ], [ "10%3", "_____no_output_____" ], [ "10-25", "_____no_output_____" ], [ "7+2+5-3", "_____no_output_____" ], [ "(2+3)*4", "_____no_output_____" ], [ "x=\"hello\"\ny=\"world\"", "_____no_output_____" ], [ "x+y", "_____no_output_____" ], [ "def square(x):\n y=x+x\n return(y)\n", "_____no_output_____" ], [ "square(10)", "_____no_output_____" ], [ "square(2)+square(3)", "_____no_output_____" ], [ "def sum_of_square(x, y):\n return square(x) + square(y)", "_____no_output_____" ], [ "sum_of_square(2, 3)", "_____no_output_____" ], [ "f=square", "_____no_output_____" ], [ "f(4)", "_____no_output_____" ], [ "def fxy(f, x, y):\n return f(x) + f(y)", "_____no_output_____" ], [ "fxy(square, 2, 3)", "_____no_output_____" ], [ "x=0\ny=0\ndef incr(x):", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c522364da9d2c20eb294fca2e7fd174ca1b66287
382,670
ipynb
Jupyter Notebook
Predicting House Prices In Bengaluru(regression Problem).ipynb
ImranRiazChohan/Regression
f3b340d9be684aca96179b55a882d2f40f2b2c49
[ "MIT" ]
null
null
null
Predicting House Prices In Bengaluru(regression Problem).ipynb
ImranRiazChohan/Regression
f3b340d9be684aca96179b55a882d2f40f2b2c49
[ "MIT" ]
null
null
null
Predicting House Prices In Bengaluru(regression Problem).ipynb
ImranRiazChohan/Regression
f3b340d9be684aca96179b55a882d2f40f2b2c49
[ "MIT" ]
null
null
null
22.597732
30,892
0.486607
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler,LabelEncoder\nfrom sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error", "_____no_output_____" ], [ "data=pd.read_csv(\"/Users/chohan/Desktop/ML_DL_ Hackathon/MachineHack Machine Learning Challenge Predicting House Prices In Bengaluru/Data/train.csv\")", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "data.tail()", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 13320 entries, 0 to 13319\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 area_type 13320 non-null object \n 1 availability 13320 non-null object \n 2 location 13319 non-null object \n 3 size 13304 non-null object \n 4 society 7818 non-null object \n 5 total_sqft 13320 non-null object \n 6 bath 13247 non-null float64\n 7 balcony 12711 non-null float64\n 8 price 13320 non-null float64\ndtypes: float64(3), object(6)\nmemory usage: 936.7+ KB\n" ], [ "data.describe()", "_____no_output_____" ], [ "data.columns", "_____no_output_____" ], [ "data.isna().sum()", "_____no_output_____" ], [ "data.isnull().sum()", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ], [ "data[\"area_type\"].unique()", "_____no_output_____" ], [ "data[\"size\"].unique()", "_____no_output_____" ], [ "data[\"society\"].unique()", "_____no_output_____" ], [ "data[\"total_sqft\"].unique()", "_____no_output_____" ], [ "data[\"bath\"].unique()", "_____no_output_____" ], [ "data[\"availability\"].unique()", "_____no_output_____" ], [ "data[\"balcony\"].unique()", "_____no_output_____" ], [ "#Check Null Values of Bath\ndata[\"bath\"].isna().sum()", "_____no_output_____" ], [ "#Mean of a Bath value\nmean_bath=data[\"bath\"].mean().round()", "_____no_output_____" ], [ "#Replace all Null value to mean_bath\ndata[\"bath\"]=data[\"bath\"].replace(np.nan,mean_bath).astype(float)", "_____no_output_____" ], [ "#After changing check the nan value\ndata[\"bath\"].isna().sum()", "_____no_output_____" ], [ "#Check Null Values of Balcony\ndata[\"balcony\"].isna().sum()", "_____no_output_____" ], [ "#Replace and Mean of Balcony\nmean_balcony=data[\"balcony\"].mean().round()\ndata[\"balcony\"]=data[\"balcony\"].replace(np.nan,mean_balcony).astype(float)", "_____no_output_____" ], [ "#After changing check the nan value\ndata[\"balcony\"].isna().sum()", "_____no_output_____" ], [ "import re\ntotal_sqrft=[]\nfor i in range(len(data)):\n a=data[\"total_sqft\"][i]\n total_sqrft.append(a)", "_____no_output_____" ], [ "total_sqft1=[]\nfor i in range(len(total_sqrft)):\n result=re.sub(\"\\d - \\d\",\"\",total_sqrft[i])\n total_sqft1.append(result)", "_____no_output_____" ], [ "total_sqft2=[]\nfor i in range(0,len(total_sqft1)):\n result=re.sub(\"[Sq. 
Meter,Perch, Yards,A,G,C,n,u,o]\",\"\",total_sqft1[i])\n total_sqft2.append(result)", "_____no_output_____" ], [ "data[\"total_sqft\"]=total_sqft2", "_____no_output_____" ], [ "def total_sqftreplace():\n total_sqrft=[]\n for i in range(len(data)):\n a=data[\"total_sqft\"][i]\n total_sqrft.append(a)\n total_sqft1=[]\n for i in range(len(total_sqrft)):\n result=re.sub(\"\\d - \\d\",\"\",total_sqrft[i])\n total_sqft1.append(result)\n total_sqft2=[]\n for i in range(0,len(total_sqft1)):\n result=re.sub(\"[Sq. Meter,Perch, Yards,A,G,C,n,u,o]\",\"\",total_sqft1[i])\n total_sqft2.append(result)\n data[\"total_sqft\"]=total_sqft2\n data[\"total_sqft\"]=data[\"total_sqft\"].replace(\"\",data[\"total_sqft\"][0])\n new=[]\n for i in range(0,len(data[\"total_sqft\"])):\n a=eval(data[\"total_sqft\"][i])\n new.append(a)\n print(a)\n data[\"total_sqft\"]=new\n data[\"total_sqft\"]=data[\"total_sqft\"].astype(float)\n return data[\"total_sqft\"]", "_____no_output_____" ], [ "data[\"total_sqft\"].head()", "_____no_output_____" ], [ "data.drop(columns=['availability', 'location', 'size', 'society'],inplace=True)", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ], [ "data[\"total_sqft\"].replace(\"\",data[\"total_sqft\"][0])", "_____no_output_____" ], [ "len(total_sqft2)", "_____no_output_____" ], [ "new=[]\nfor i in range(0,len(data[\"total_sqft\"])):\n a=eval(data[\"total_sqft\"][i])\n new.append(a)\n print(a)", "1056\n2600\n1440\n1521\n1200\n1170\n2732\n3300\n1310\n1020\n1800\n2785\n1000\n1100\n2250\n1175\n1180\n1540\n2770\n1100\n600\n1755\n2800\n1767\n510\n1250\n660\n1610\n1151\n1025\n210850\n1075\n1760\n1693\n1925\n700\n1070\n1724\n1290\n1143\n1296\n1254\n600\n660\n133074\n600\n970\n1459\n800\n869\n1270\n1670\n2010\n1185\n1600\n1200\n301410\n1500\n1407\n840\n4395\n845\n5700\n1160\n3000\n1100\n1140\n1220\n1350\n1005\n500\n1358\n1569\n1240\n2089\n1206\n1150\n2511\n460\n4400\n1660\n295450\n1326\n1325\n1499\n3000\n1665\n708\n1060\n710\n1000\n1000\n1450\n1200\n1296\n1540\n2894\n1330\n1200\n1200\n2502\n650\n2400\n1007\n1200\n966\n1630\n1640\n782\n1260\n1800\n1413\n1116\n1530\n3700\n2497\n1540\n1436\n1100\n276\n1427\n2061\n306156\n2650\n1282\n1050\n1600\n945\n1500\n950\n1870\n1600\n880\n1200\n1535\n950\n1360\n104105\n1280\n1260\n5000\n3050\n156305\n1000\n1167\n4000\n1828\n890\n1612\n1034\n1710\n957\n1250\n2795\n1125\n1020\n1200\n1735\n2050\n3750\n1350\n1063\n1904\n4200\n2000\n114340\n1425\n1500\n1060\n1470\n1300\n450\n1152\n1350\n1550\n1500\n600\n400\n705\n770\n1242\n1700\n2144\n1704\n1070\n1846\n1340\n1025\n101540\n1200\n2250\n1550\n1200\n1800\n1200\n1327\n1186\n1783\n1400\n980\n1285\n912\n1225\n1075\n1260\n1282\n1909\n1359\n1207\n1736\n2850\n1595\n1798\n1475\n1580\n1295\n3600\n589\n1415\n1787\n1787\n1475\n2000\n984\n152740\n2405\n1080\n1500\n1900\n805\n1153\n1148\n1110\n1100\n1290\n1500\n1080\n1933\n3500\n1060\n645\n2600\n645\n1644\n1285\n1200\n910\n1577\n4050\n2420\n800\n1060\n1270\n900\n1280\n1200\n1025\n1108\n1200\n3045\n2900\n1500\n1295\n1162\n1035\n1600\n1464\n1866\n700\n1804\n913\n1868\n883\n900\n1664\n2026\n1210\n4111\n1762\n1252\n861\n1420\n1450\n1490\n1075\n1425\n1200\n1280\n1084\n1015\n1017\n1027\n1069\n1349\n1005\n1417\n1475\n950\n2000\n880\n1863\n1010\n1425\n1450\n1847\n1100\n525\n1665\n1664\n1850\n1438\n1560\n1350\n1550\n1140\n1200\n850\n1280\n1170\n1113\n1385\n1128\n1200\n1200\n2390\n2400\n1464\n1200\n1200\n1150\n1645\n1000\n2650\n1192\n2135\n1173\n1020\n3122\n1600\n1200\n1230\n1250\n1325\n1850\n525\n1200\n1350\n1260\n1800\n11\n1100\n150
8\n1592\n1388\n630\n2000\n1762\n950\n3252\n1116\n1308\n1200\n1500\n500\n1075\n530\n1205\n1075\n930\n1380\n2483\n1166\n1050\n202371\n1200\n1600\n1935\n451\n1800\n1400\n1801\n1451\n1200\n1160\n1000\n950\n1629\n1580\n1826\n1245\n1145\n825\n1693\n111327\n1460\n1050\n1260\n700\n1656\n1208\n1910\n3252\n1200\n1200\n1175\n1000\n1200\n2390\n12000\n550\n3446\n1185\n750\n1200\n1550\n1760\n1125\n1350\n1000\n1090\n1200\n1991\n1060\n1105\n985\n1533\n1590\n1120\n1069\n1933\n1194\n1150\n1240\n1419\n1200\n2150\n1450\n1630\n708\n1000\n11890\n1250\n1400\n1670\n1750\n1404\n1715\n630\n175212\n1650\n1295\n1800\n1346\n1200\n1150\n3309\n1190\n1620\n950\n5000\n2450\n1220\n850\n1020\n1100\n1150\n1760\n1800\n1070\n1500\n1440\n900\n1200\n1130\n1320\n1270\n1200\n4800\n1200\n929\n1150\n1125\n500\n1200\n1464\n3600\n2000\n1208\n1130\n1753\n4500\n600\n1196\n1150\n1128\n1000\n1035\n950\n1075\n1040\n720\n1511\n1300\n1545\n375\n1062\n1115\n1000\n883\n1195\n1200\n700\n1246\n660\n8500\n1600\n2805\n1584\n3000\n1175\n1595\n1353\n1599\n1150\n5230\n1155\n1200\n1000\n3000\n1867\n1251\n1028\n1222\n1170\n2400\n1385\n1372\n1282\n5000\n3000\n1135\n1768\n1325\n1599\n1500\n2610\n1286\n2845\n1600\n119440\n3450\n1102\n1350\n1200\n1300\n1800\n950\n525\n656\n1780\n1056\n595\n1080\n2225\n1126\n1490\n2000\n1550\n1600\n1160\n4144\n2100\n1404\n2230\n1544\n1305\n1230\n1200\n1460\n120400\n967\n540\n715\n2500\n1578\n1020\n1020\n1253\n1180\n961\n1419\n1709\n1600\n416\n1100\n1430\n1260\n1630\n1249\n1450\n2791\n600\n834\n1125\n2060\n12000\n891\n1133\n1075\n3000\n2440\n1200\n1075\n1140\n940\n2160\n1090\n1500\n4104\n1790\n1920\n1374\n1445\n711\n1500\n1720\n1400\n1030\n1200\n1375\n1050\n2250\n1000\n469\n3800\n883\n1820\n1440\n3600\n2500\n1225\n4000\n875\n750\n1180\n1194\n1500\n1160\n4125\n2378\n1128\n1500\n1220\n1210\n1100\n1200\n3385\n1641\n1200\n1260\n1720\n112145\n1210\n2200\n1075\n1702\n1630\n1141\n2072\n440640\n1200\n1350\n309002\n1180\n35000\n1355\n600\n1200\n1640\n1200\n1190\n2400\n1019\n1875\n1250\n1200\n1140\n1683\n1515\n2118\n1380\n1083\n600\n2300\n1240\n1750\n1230\n1500\n985\n1500\n1125\n950\n440800\n1580\n1092\n1090\n2264\n1033\n810\n1045\n1337\n1200\n1580\n1500\n1640\n1570\n1470\n1160\n1050\n1855\n1460\n1823\n1094\n1153\n1325\n1200\n1590\n1210\n1200\n1202\n1202\n1200\n1688\n1020\n1185\n1235\n3205\n1077\n1415\n2330\n805\n425\n1155\n5270\n656\n1100\n1150\n600\n1468\n4300\n2280\n1341\n1279\n2225\n1185\n1750\n1152\n1000\n1300\n2760\n1070\n1101\n775\n667\n1070\n1875\n735\n4360\n1750\n1215\n600\n820\n116195\n1779\n1105\n1000\n1650\n1694\n2376\n1975\n674\n1185\n445\n900\n1475\n1618\n1120\n2181\n1200\n600\n1556\n1179\n1296\n1275\n1875\n1400\n1222\n940\n1185\n1779\n1615\n400249\n1056\n1100\n920\n1602\n1176\n675\n1352\n1717\n10961\n2119\n1246\n1141\n1157\n1025\n650\n1349\n1566\n2830\n1320\n1780\n1091\n3670\n918\n1950\n1695\n1705\n1375\n525\n1447\n1114\n1450\n460\n1022\n1000\n1128\n1180\n1000\n1200\n3761\n1339\n1400\n1040\n1198\n1200\n1691\n1075\n1630\n1240\n111130\n1665\n2489\n1142\n1976\n5500\n1853\n600\n1567\n1090\n1200\n2400\n1175\n1200\n995\n884\n900\n1170\n1225\n1342\n1345\n1320\n1100\n1140\n1300\n1652\n2072\n1740\n1360\n1145\n1540\n1278\n630\n1100\n1200\n2500\n5245\n1015\n3300\n4500\n1356\n823\n1200\n1050\n1180\n1116\n1070\n1385\n1000\n1897\n1005\n1575\n975\n1904\n930\n525\n686\n2400\n1410\n2238\n1174\n3800\n2225\n1250\n918\n1250\n1800\n1170\n1300\n793\n1710\n1082\n1001\n1500\n1590\n2400\n1554\n100285\n4239\n884\n1045\n1019\n1935\n1135\n1680\n2470\n2825\n2480\n1260\n1800\n1799\n360091\n400\n1047\n1282\n1020\n1080\n1495\n3260\n1611\n3500\n1500\n15
00\n...[remaining raw total_sqft values omitted from this printed listing; several thousand square-footage figures]...\n" ], [ "data[\"total_sqft\"]=new\ndata[\"total_sqft\"]=data[\"total_sqft\"].astype(float)", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ], [ "data[\"area_type\"].unique()", "_____no_output_____" ], [ "data[\"area_type\"]=data[\"area_type\"].replace(['Super built-up Area', 'Plot Area', 'Built-up Area',\n       'Carpet Area'],[0,1,2,3])\ndata[\"area_type\"]=data[\"area_type\"].astype(float)", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ], [ "sns.scatterplot(data[\"bath\"],data[\"price\"],hue=data[\"bath\"])", "C:\\Users\\chohan\\anaconda3\\envs\\usama_env\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n  FutureWarning\n" ], [ "sns.scatterplot(data[\"balcony\"],data[\"price\"],hue=data[\"balcony\"],palette=\"deep\")", "C:\\Users\\chohan\\anaconda3\\envs\\usama_env\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. 
From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.scatterplot(data[\"area_type\"],data[\"price\"])", "C:\\Users\\chohan\\anaconda3\\envs\\usama_env\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.countplot(data[\"balcony\"])", "C:\\Users\\chohan\\anaconda3\\envs\\usama_env\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.countplot(data[\"bath\"])", "C:\\Users\\chohan\\anaconda3\\envs\\usama_env\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "sns.countplot(data[\"area_type\"])", "C:\\Users\\chohan\\anaconda3\\envs\\usama_env\\lib\\site-packages\\seaborn\\_decorators.py:43: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n FutureWarning\n" ], [ "X=data.iloc[:,0:4].values\nY=data.iloc[:,-1].values", "_____no_output_____" ], [ "X", "_____no_output_____" ], [ "sc=StandardScaler()\nx_labeled=sc.fit(X).transform(X)", "_____no_output_____" ], [ "x_labeled[0]", "_____no_output_____" ], [ "x_train,x_test,y_train,y_test=train_test_split(x_labeled,Y,test_size=0.2,random_state=True)\nprint(len(x_train),len(y_train))\nprint(len(x_test),len(y_test))\nprint(x_train.shape,y_train.shape)\nprint(x_test.shape,y_test.shape)", "10656 10656\n2664 2664\n(10656, 4) (10656,)\n(2664, 4) (2664,)\n" ], [ "linear_model=LinearRegression()\nlinear_model.fit(x_train,y_train)\nprint(linear_model.intercept_)\nprint(linear_model.coef_)\npredict=linear_model.predict(x_test[1:7])\nprint(\"Training_Accuracy:\",linear_model.score(x_train,y_train)*100)\nprint(\"Testing_Accuracy:\",linear_model.score(x_test,y_test)*100)\nprint(\"Model_Accuracy:\",r2_score(Y,linear_model.predict(x_labeled))*100)", "111.92698278770119\n[ 5.60787589 1.47079063 62.41201869 5.62357673]\nTraining_Accuracy: 20.49303870847814\nTesting_Accuracy: 22.052829643156624\nModel_Accuracy: 20.878972391121952\n" ], [ "random_forest_model=RandomForestRegressor(n_estimators=50)\nrandom_forest_model.fit(x_train,y_train)\npredict1=random_forest_model.predict(x_test[0:7])\nprint(\"Training_Accuracy:\",random_forest_model.score(x_train,y_train)*100)\nprint(\"Testing_Accuracy:\",random_forest_model.score(x_test,y_test)*100)\nprint(\"Model_Accuracy:\",r2_score(Y,random_forest_model.predict(x_labeled))*100)", "Training_Accuracy: 88.3950978172475\nTesting_Accuracy: 53.40150058562821\nModel_Accuracy: 79.7800664667986\n" ], [ 
"decision_tree_model=DecisionTreeRegressor(criterion=\"mse\")\ndecision_tree_model.fit(x_train,y_train)\npredict2=decision_tree_model.predict(x_test[0:7])\nprint(\"Training_Accuracy:\",decision_tree_model.score(x_train,y_train)*100)\nprint(\"Testing_Accuracy:\",decision_tree_model.score(x_test,y_test)*100)\nprint(\"Model_Accuracy:\",r2_score(Y,decision_tree_model.predict(x_labeled))*100)", "Training_Accuracy: 93.33960092529438\nTesting_Accuracy: 30.20152651454272\nModel_Accuracy: 77.79537033322993\n" ], [ "data=pd.read_csv(\"/Users/chohan/Desktop/ML_DL_ Hackathon/MachineHack Machine Learning Challenge Predicting House Prices In Bengaluru/Data/test.csv\")", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "data.isnull().sum()", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ], [ "data.columns", "_____no_output_____" ], [ "data.drop(columns=['availability', 'location', 'size', 'society',\"price\"],inplace=True)", "_____no_output_____" ], [ "bath_mean=data[\"bath\"].mean().round()\nbalcony_mean=data[\"balcony\"].mean().round()\n# data[\"bath\"]=data[\"bath\"].replace(np.nan,mean_bath).astype(float)\ndata[\"bath\"]=data[\"bath\"].replace(np.nan,bath_mean)\ndata[\"balcony\"]=data[\"balcony\"].replace(np.nan,balcony_mean)\ndata[\"area_type\"]=data[\"area_type\"].replace(['Super built-up Area', 'Plot Area', 'Built-up Area','Carpet Area'],[0,1,2,3])\ndata[\"area_type\"]=data[\"area_type\"].astype(float)\ndata[\"bath\"]=data[\"bath\"].astype(float)\ndata[\"balcony\"]=data[\"balcony\"].astype(float)", "_____no_output_____" ], [ "data.isnull().sum()", "_____no_output_____" ], [ "total_sqftreplace()", "1225\n2400\n1650\n1322\n1161\n760\n4500\n960\n1569\n2400\n1445\n1505\n3122\n1096\n1450\n2884\n604\n1170\n957\n2900\n1160\n1725\n450\n1500\n1824\n2250\n1698\n1400\n1200\n190255\n1150\n1430\n1215\n3083\n1025\n1353\n6369\n1085\n1200\n1289\n1200\n2300\n169170\n1021\n1750\n1404\n1060\n1029\n850\n3565\n2134\n1703\n1190\n700\n929\n955\n2400\n1200\n1843\n1250\n1893\n1240\n1151\n1560\n600\n1050\n1063\n2465\n900\n1255\n2091\n1477\n2145\n1314\n1860\n1095\n1225\n1400\n2400\n1200\n1050\n1200\n1050\n1295\n2000\n1386\n950\n843\n2439\n1280\n1010\n793\n1100\n1050\n40\n1220\n630\n1444\n1710\n800\n1745\n870\n1250\n1393\n1000\n1371\n2140\n1450\n1580\n1368\n1128\n1171\n1200\n1500\n1564\n1345\n1512\n775\n1200\n1090\n800\n969\n1095\n1483\n1252\n3375\n2400\n3650\n900\n2440\n1010\n1680\n2400\n1990\n1523\n2439\n4930\n1200\n1195\n1050\n550\n540\n1200\n1294\n1345\n1100\n1500\n1560\n780\n1565\n1790\n120000\n159498\n1055\n1060\n845\n1640\n1350\n2280\n1250\n1016\n1230\n1480\n736\n1200\n1400\n1500\n1000\n1717\n1056\n1200\n1200\n1130\n1575\n189798\n1685\n1000\n1100\n2400\n1346\n5294\n2938\n1908\n1832\n3150\n1846\n2000\n920\n700\n1610\n1200\n3927\n1222\n900\n181918\n1200\n1080\n1170\n1530\n2025\n1700\n900\n650\n1207\n1305\n2425\n1724\n1200\n648\n1115\n3750\n2600\n1561\n1050\n2400\n5363\n1150\n1550\n937\n2340\n2100\n600\n3600\n1935\n3210\n1108\n985\n1500\n1185\n1545\n1490\n1268\n2400\n1410\n1080\n1350\n1300\n1800\n1068\n1170\n1200\n800\n1809\n1615\n1662\n2215\n1999\n1150\n4395\n1611\n1160\n1150\n1260\n2750\n1196\n181918\n2555\n1535\n1299\n1257\n149523\n1750\n1183\n1400\n1950\n1680\n1395\n1425\n1540\n2671\n6700\n2388\n1000\n1235\n1015\n970\n1225\n1200\n1156\n1735\n750\n750\n1775\n1350\n965\n1505\n1342\n374\n2178\n1452\n1580\n3367\n1305\n1210\n1090\n1800\n1175\n1103\n2700\n336\n2767\n600\n3630\n2556\n2400\n1303\n1000\n1050\n2504\n1035\n1076\n1200\n1090\n1180\n1349\n2000\n4000\n3450\n1300\n2424\n1230\n211
9\n...[remaining raw total_sqft values omitted from this printed listing]...\n" ], [ "data.dtypes", "_____no_output_____" ], [ "x=data.iloc[:,0:4].values\nx[0]", "_____no_output_____" ], [ "y_hat=random_forest_model.predict(x)", "_____no_output_____" ], [ "y_hat", "_____no_output_____" ], [ "y_hat1=decision_tree_model.predict(x)",
"_____no_output_____" ], [ "y_hat1", "_____no_output_____" ], [ "y_hat2=linear_model.predict(x)", "_____no_output_____" ], [ "y_hat2", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52265094b8fbb6dea4f9b031311de70e7a634e6
197,934
ipynb
Jupyter Notebook
notebooks/covid-19-inference.ipynb
aehsani/ergo
1c03494fcbc89192212b9595bb00acc794bd621c
[ "MIT" ]
null
null
null
notebooks/covid-19-inference.ipynb
aehsani/ergo
1c03494fcbc89192212b9595bb00acc794bd621c
[ "MIT" ]
5
2020-04-28T18:02:49.000Z
2020-04-30T23:15:47.000Z
notebooks/covid-19-inference.ipynb
wbaizer/ought-copy
09b265320f7512402471ca87aadac69fe43e8caf
[ "MIT" ]
null
null
null
210.344315
119,501
0.482312
[ [ [ "<a href=\"https://colab.research.google.com/github/oughtinc/ergo/blob/notebooks-readme/notebooks/covid-19-inference.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Notes\n\n* Switch to Italy\n* Graph data and results\n* Add variable for true initial infections, lockdown start date (11 March)\n* Add lag time (see Jacob/NYT models)\n* Add patient recovery", "_____no_output_____" ], [ "# Setup", "_____no_output_____" ], [ "Install [Ergo](https://github.com/oughtinc/ergo) (our forecasting library) and a few tools we'll use in this colab:", "_____no_output_____" ] ], [ [ "!pip install --quiet poetry # Fixes https://github.com/python-poetry/poetry/issues/532\n!pip install --quiet pendulum seaborn\n!pip install --quiet torchviz", "_____no_output_____" ], [ "# !pip uninstall --yes --quiet ergo\n!pip install --quiet git+https://github.com/oughtinc/ergo.git@william\n# !pip install --upgrade --no-cache-dir --quiet git+https://github.com/oughtinc/ergo.git\n", " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing wheel metadata ... \u001b[?25l\u001b[?25hdone\n Building wheel for ergo (PEP 517) ... \u001b[?25l\u001b[?25hdone\n" ], [ "import ergo\n\nconfirmed_infections = ergo.data.covid19.ConfirmedInfections()", "WARNING:root:Diamond Princess not found in regex\n" ], [ "%load_ext google.colab.data_table", "The google.colab.data_table extension is already loaded. To reload it, use:\n %reload_ext google.colab.data_table\n" ], [ "import re\nimport ergo\nimport pendulum\nimport pandas\nimport seaborn\n\nfrom types import SimpleNamespace\nfrom typing import List\nfrom pendulum import DateTime\nfrom matplotlib import pyplot", "_____no_output_____" ] ], [ [ "# Questions", "_____no_output_____" ], [ "Here are Metaculus ids for the questions we'll load, and some short names that will allow us to associate questions with variables in our model:", "_____no_output_____" ] ], [ [ "question_ids = [3704, 3712, 3713, 3711, 3722, 3761, 3705, 3706] # 3740, 3736, \nquestion_names = [\n # \"WHO Eastern Mediterranean Region on 2020/03/27\",\n # \"WHO Region of the Americas on 2020/03/27\",\n # \"WHO Western Pacific Region on 2020/03/27\",\n # \"WHO South-East Asia Region on 2020/03/27\",\n \"South Korea on 2020/03/27\",\n # \"United Kingdom on 2020/03/27\",\n # \"WHO African Region on 2020/03/27\",\n # \"WHO European Region on 2020/03/27\",\n # \"Bay Area on 2020/04/01\",\n # \"San Francisco on 2020/04/01\"\n]", "_____no_output_____" ] ], [ [ "We load the question data from Metaculus:", "_____no_output_____" ] ], [ [ "metaculus = ergo.Metaculus(username=\"ought\", password=\"R9gHrPtoRQNG29\")\nquestions = [metaculus.get_question(id, name=name) for id, name in zip(question_ids, question_names)]\nergo.MetaculusQuestion.to_dataframe(questions)", "_____no_output_____" ] ], [ [ "# Data", "_____no_output_____" ], [ "Our most important data is the data about confirmed cases (from Hopkins):", "_____no_output_____" ] ], [ [ "confirmed_infections = ergo.data.covid19.ConfirmedInfections()", "WARNING:root:Diamond Princess not found in regex\n" ] ], [ [ "# Assumptions", "_____no_output_____" ], [ "Assumptions are things that should be inferred from data but currently aren't:", "_____no_output_____" ] ], [ [ "assumptions = SimpleNamespace()", "_____no_output_____" ], [ "assumptions.lockdown_start = {\n \"Italy\": 
pendulum.datetime(2020,3,11),\n \"Spain\": pendulum.datetime(2020,3,15),\n}", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ], [ "Main model:", "_____no_output_____" ] ], [ [ "import torch\nimport pyro\n\nArea = str\n\[email protected]\ndef model(start: DateTime, end: DateTime, areas: List[Area], training=True):\n for area in areas:\n doubling_time = ergo.lognormal_from_interval(1., 14., name=f\"doubling_time {area}\")\n doubling_time_lockdown = ergo.lognormal_from_interval(1., torch.max(doubling_time, ergo.to_float(1.1)), name=f\"doubling_time_lockdown {area}\")\n observation_noise = ergo.halfnormal_from_interval(0.1, name=f\"observation_noise {area}\")\n predicted = ergo.to_float(confirmed_infections(area, start))\n\n for i in range(1,(end - start).days):\n date = start.add(days=i)\n datestr = date.format('YYYY/MM/DD')\n confirmed = None\n try:\n confirmed = ergo.to_float(confirmed_infections(area, date))\n ergo.tag(confirmed, f\"actual {area} on {datestr}\")\n except KeyError:\n pass\n doubling_time_today = doubling_time\n if area in assumptions.lockdown_start.keys() and date >= assumptions.lockdown_start[area]:\n doubling_time_today = doubling_time_lockdown\n predicted = predicted * 2**(1. / doubling_time_today)\n ergo.tag(predicted, f\"predicted {area} on {datestr}\")\n if (not training) or (confirmed is not None):\n predict_observed = ergo.normal(predicted, \n predicted*observation_noise, \n name=f\"predict_observed {area} on {datestr}\",\n obs=confirmed)", "_____no_output_____" ] ], [ [ "Run the model:", "_____no_output_____" ] ], [ [ "start_date = pendulum.datetime(2020,3,1)\nend_date = pendulum.datetime(2020,4,1)\nareas = [\"Italy\", \"Spain\"]\nmodel_args = (start_date, end_date, areas)\n", "_____no_output_____" ], [ "import pandas as pd\nfrom pyro.infer import SVI, Trace_ELBO\nfrom pyro.infer import Predictive\nimport functools\n\ndef infer_and_run(model, num_samples=5000, num_iterations=2000, \n debug=False, learning_rate=0.01, \n early_stopping_patience=200) -> pd.DataFrame:\n \"\"\"\n debug - whether to output debug information\n num_iterations - Number of optimizer iterations\n learning_rate - Optimizer learning rate\n early_stopping_patience - Stop training if loss hasn't improved for this many iterations\n \"\"\"\n def to_numpy(d):\n return {k:v.detach().numpy() for k, v in d.items()}\n\n def debug_output(guide):\n quantiles = to_numpy(guide.quantiles([0.05, 0.5, 0.95]))\n for k, v in quantiles.items():\n print(f\"{k}: {v[1]:.4f} [{v[0]:.4f}, {v[2]:.4f}]\")\n\n guide = pyro.infer.autoguide.AutoNormal(model, \n init_loc_fn=pyro.infer.autoguide.init_to_median)\n pyro.clear_param_store()\n guide(training=True)\n adam = pyro.optim.Adam({\"lr\": 0.01})\n svi = SVI(model, guide, adam, loss=Trace_ELBO())\n\n if debug:\n debug_output(guide)\n print()\n\n best_loss = None\n last_improvement = None\n\n for j in range(num_iterations):\n # calculate the loss and take a gradient step\n loss = svi.step(training=True)\n if best_loss is None or best_loss > loss:\n best_loss = loss\n last_improvement = j\n if j % 100 == 0:\n if debug:\n print(\"[iteration %04d]\" % (j + 1 ))\n print(f\"loss: {loss:.4f}\")\n debug_output(guide)\n print()\n if j > (last_improvement + early_stopping_patience):\n print(\"Stopping Early\")\n break\n\n print(f\"Final loss: {loss:.4f}\")\n predictive = Predictive(model, guide=guide, num_samples=num_samples)\n raw_samples = predictive(training=False)\n return pandas.DataFrame(to_numpy(raw_samples))\n\n\n\nsamples = 
infer_and_run(functools.partial(model, *model_args),\n num_iterations=1000, \n num_samples=1000,\n debug=True)\nsamples.describe().transpose()", "ERROR! Session/line number was not unique in database. History logging moved to new session 63\ndoubling_time Italy: 5.3190 [4.5122, 6.2699]\ndoubling_time_lockdown Italy: 2.2932 [1.9454, 2.7031]\nobservation_noise Italy: 0.0391 [0.0332, 0.0461]\ndoubling_time Spain: 4.0816 [3.4625, 4.8113]\ndoubling_time_lockdown Spain: 1.8753 [1.5908, 2.2105]\nobservation_noise Spain: 0.0400 [0.0339, 0.0471]\n\n[iteration 0001]\nloss: 84950.2814\ndoubling_time Italy: 5.2660 [4.4747, 6.1974]\ndoubling_time_lockdown Italy: 2.3162 [1.9681, 2.7258]\nobservation_noise Italy: 0.0395 [0.0335, 0.0466]\ndoubling_time Spain: 4.0410 [3.4224, 4.7713]\ndoubling_time_lockdown Spain: 1.8566 [1.5776, 2.1850]\nobservation_noise Spain: 0.0404 [0.0342, 0.0477]\n\n[iteration 0101]\nloss: 1774.4991\ndoubling_time Italy: 3.7038 [3.3161, 4.1367]\ndoubling_time_lockdown Italy: 4.2917 [3.7622, 4.8958]\nobservation_noise Italy: 0.0646 [0.0551, 0.0756]\ndoubling_time Spain: 2.6136 [2.2543, 3.0303]\ndoubling_time_lockdown Spain: 1.4730 [1.2735, 1.7038]\nobservation_noise Spain: 0.0598 [0.0505, 0.0708]\n\n[iteration 0201]\nloss: 2362.3005\ndoubling_time Italy: 3.3938 [3.1188, 3.6930]\ndoubling_time_lockdown Italy: 4.9256 [4.4309, 5.4755]\nobservation_noise Italy: 0.0716 [0.0613, 0.0836]\ndoubling_time Spain: 2.3670 [2.0685, 2.7087]\ndoubling_time_lockdown Spain: 1.5484 [1.3397, 1.7896]\nobservation_noise Spain: 0.0659 [0.0560, 0.0776]\n\n[iteration 0301]\nloss: 1174.0760\ndoubling_time Italy: 3.2893 [3.0668, 3.5279]\ndoubling_time_lockdown Italy: 5.1564 [4.7155, 5.6384]\nobservation_noise Italy: 0.0760 [0.0652, 0.0886]\ndoubling_time Spain: 2.2899 [2.0138, 2.6038]\ndoubling_time_lockdown Spain: 1.6640 [1.4388, 1.9245]\nobservation_noise Spain: 0.0705 [0.0600, 0.0828]\n\n[iteration 0401]\nloss: 1344.2424\ndoubling_time Italy: 3.2749 [3.0812, 3.4808]\ndoubling_time_lockdown Italy: 5.2573 [4.8504, 5.6983]\nobservation_noise Italy: 0.0791 [0.0678, 0.0923]\ndoubling_time Spain: 2.2407 [1.9855, 2.5287]\ndoubling_time_lockdown Spain: 1.7985 [1.5550, 2.0803]\nobservation_noise Spain: 0.0752 [0.0641, 0.0881]\n\n[iteration 0501]\nloss: 936.6817\ndoubling_time Italy: 3.2610 [3.0861, 3.4457]\ndoubling_time_lockdown Italy: 5.2968 [4.9250, 5.6968]\nobservation_noise Italy: 0.0819 [0.0701, 0.0955]\ndoubling_time Spain: 2.2375 [1.9916, 2.5138]\ndoubling_time_lockdown Spain: 1.9671 [1.6983, 2.2786]\nobservation_noise Spain: 0.0793 [0.0679, 0.0927]\n\n[iteration 0601]\nloss: 632.5699\ndoubling_time Italy: 3.2488 [3.0935, 3.4118]\ndoubling_time_lockdown Italy: 5.2637 [4.9303, 5.6197]\nobservation_noise Italy: 0.0847 [0.0725, 0.0988]\ndoubling_time Spain: 2.2280 [1.9925, 2.4914]\ndoubling_time_lockdown Spain: 2.1501 [1.8571, 2.4893]\nobservation_noise Spain: 0.0831 [0.0713, 0.0969]\n\n[iteration 0701]\nloss: 613.1308\ndoubling_time Italy: 3.2909 [3.1445, 3.4442]\ndoubling_time_lockdown Italy: 5.2994 [4.9850, 5.6335]\nobservation_noise Italy: 0.0865 [0.0740, 0.1011]\ndoubling_time Spain: 2.2213 [1.9948, 2.4735]\ndoubling_time_lockdown Spain: 2.3438 [2.0259, 2.7116]\nobservation_noise Spain: 0.0864 [0.0741, 0.1006]\n\n[iteration 0801]\nloss: 612.4920\ndoubling_time Italy: 3.2495 [3.1133, 3.3916]\ndoubling_time_lockdown Italy: 5.2707 [4.9780, 5.5807]\nobservation_noise Italy: 0.0883 [0.0756, 0.1033]\ndoubling_time Spain: 2.2073 [1.9905, 2.4477]\ndoubling_time_lockdown Spain: 2.5279 [2.1848, 
2.9250]\nobservation_noise Spain: 0.0889 [0.0764, 0.1036]\n\n[iteration 0901]\nloss: 827.8549\ndoubling_time Italy: 3.2352 [3.1057, 3.3701]\ndoubling_time_lockdown Italy: 5.2435 [4.9676, 5.5346]\nobservation_noise Italy: 0.0896 [0.0765, 0.1048]\ndoubling_time Spain: 2.1739 [1.9706, 2.3982]\ndoubling_time_lockdown Spain: 2.6844 [2.3255, 3.0987]\nobservation_noise Spain: 0.0915 [0.0786, 0.1064]\n\nFinal loss: 548.1464\n" ], [ "from datetime import datetime\n\nto_plot = [\n # (\"predict_observed\", \"predict_observed {area} on {date}\"),\n (\"predicted\", \"predicted {area} on {date}\"),\n (\"actual\", \"actual {area} on {date}\"), \n]\n\nhigh_quantile = 0.95\nlow_quantile = 0.05\n\nfor area in areas:\n for name, template in to_plot: \n indices = [x for x in range((end_date - start_date).days)]\n highs = []\n lows = []\n means = []\n for i in indices:\n date = start_date.add(days=i)\n datestr = date.format('YYYY/MM/DD')\n key = template.format(area = area, date=datestr)\n try:\n means.append(samples[key].mean())\n highs.append(samples[key].quantile(high_quantile))\n lows.append(samples[key].quantile(low_quantile))\n except KeyError:\n means.append(float(\"NaN\"))\n highs.append(float(\"NaN\"))\n lows.append(float(\"NaN\"))\n pyplot.fill_between(indices, lows, highs, label=name, alpha=0.5)\n pyplot.plot(indices, means, label=name)\n pyplot.title(area)\n pyplot.legend()\n pyplot.yscale(\"log\")\n pyplot.show()", "_____no_output_____" ] ] ]
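The per-day update in the model above multiplies the running prediction by 2^(1/T) for doubling time T, so after n days the count grows by a factor of 2^(n/T). A minimal standalone sketch of that growth rule (the starting count and doubling time below are illustrative numbers, not values taken from the inference output):

```python
def project_cases(initial_cases: float, doubling_time_days: float, days: int) -> float:
    """Cumulative count after `days` days of steady exponential growth."""
    return initial_cases * 2 ** (days / doubling_time_days)

# Example: 1,000 confirmed cases growing with a 3-day doubling time for two weeks.
print(project_cases(1000, 3.0, 14))  # roughly 25,400
```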
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
c5226eb2d9ac5de38a9e858f7fa26349eccbe5a0
3,515
ipynb
Jupyter Notebook
notebooks/.ipynb_checkpoints/ModelPersistence-checkpoint.ipynb
knathanieltucker/bit-of-data-science-and-scikit-learn
66219307cddfda9f9e6243557e4b8ee05c0590e8
[ "MIT" ]
120
2017-06-22T05:19:52.000Z
2022-03-11T17:22:15.000Z
notebooks/.ipynb_checkpoints/ModelPersistence-checkpoint.ipynb
CharlieBlogg/bit-of-data-science-and-scikit-learn
66219307cddfda9f9e6243557e4b8ee05c0590e8
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/ModelPersistence-checkpoint.ipynb
CharlieBlogg/bit-of-data-science-and-scikit-learn
66219307cddfda9f9e6243557e4b8ee05c0590e8
[ "MIT" ]
153
2017-07-17T13:17:32.000Z
2022-03-07T14:35:16.000Z
21.564417
321
0.53798
[ [ [ "## Model Persistence\n\nAfter training a scikit-learn model, it is desirable to have a way to persist the model for future use without having to retrain. The following section gives you an example of how to persist a model with pickle. We’ll also review a few security and maintainability issues when working with pickle serialization.\n", "_____no_output_____" ] ], [ [ "from sklearn import svm\nfrom sklearn import datasets\nclf = svm.SVC()\niris = datasets.load_iris()\nX, y = iris.data, iris.target\nclf.fit(X, y) ", "_____no_output_____" ], [ "import pickle\ns = pickle.dumps(clf)\nclf2 = pickle.loads(s)\nclf2.predict(X[0:1])\n", "_____no_output_____" ], [ "y[0]", "_____no_output_____" ] ], [ [ "In the specific case of the scikit, it may be more interesting to use joblib’s replacement of pickle (joblib.dump & joblib.load), which is more efficient on objects that carry large numpy arrays internally as is often the case for fitted scikit-learn estimators, but can only pickle to the disk and not to a string:", "_____no_output_____" ] ], [ [ "from sklearn.externals import joblib\n\njoblib.dump(clf, 'model.pkl') ", "_____no_output_____" ], [ "clf = joblib.load('model.pkl') ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
c5226fcd18c78098a57282b059d2e77bc0bcf7f0
61,827
ipynb
Jupyter Notebook
Assignments/5. GCN/.ipynb_checkpoints/Untitled-checkpoint.ipynb
SeungsuKim/CH485--AI-and-Chemistry
c85ce8716ac2e351d730543a2d45fd7054014d4f
[ "MIT" ]
2
2020-05-28T20:45:28.000Z
2021-04-07T04:24:38.000Z
Assignments/5. GCN/.ipynb_checkpoints/Untitled-checkpoint.ipynb
SeungsuKim/CH485--AI-and-Chemistry
c85ce8716ac2e351d730543a2d45fd7054014d4f
[ "MIT" ]
null
null
null
Assignments/5. GCN/.ipynb_checkpoints/Untitled-checkpoint.ipynb
SeungsuKim/CH485--AI-and-Chemistry
c85ce8716ac2e351d730543a2d45fd7054014d4f
[ "MIT" ]
4
2020-01-23T10:39:06.000Z
2021-12-13T05:08:16.000Z
74.941818
23,040
0.716645
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import argparse\nimport sys\nfrom time import sleep\n\nimport numpy as np\n\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.Crippen import MolLogP\n\nfrom sklearn.metrics import accuracy_score, mean_squared_error\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\n\n#from utils import read_ZINC_smiles, smiles_to_onehot, partition, OneHotLogPDataSet\nfrom tqdm import tnrange, tqdm_notebook\nimport pandas as pd\nimport seaborn as sns", "_____no_output_____" ], [ "paser = argparse.ArgumentParser()\nargs = paser.parse_args(\"\")\nargs.seed = 123\nargs.val_size = 0.15\nargs.test_size = 0.15\nargs.shuffle = True", "_____no_output_____" ], [ "np.random.seed(args.seed)\ntorch.manual_seed(args.seed)", "_____no_output_____" ] ], [ [ "## 1. Pre-Processing", "_____no_output_____" ] ], [ [ "def read_ZINC_smiles(file_name, num_mol):\n f = open(file_name, 'r')\n contents = f.readlines()\n\n smi_list = []\n logP_list = []\n\n for i in tqdm_notebook(range(num_mol), desc='Reading Data'):\n smi = contents[i].strip()\n m = Chem.MolFromSmiles(smi)\n smi_list.append(smi)\n logP_list.append(MolLogP(m))\n\n logP_list = np.asarray(logP_list).astype(float)\n\n return smi_list, logP_list\n\n\ndef smiles_to_onehot(smi_list):\n def smiles_to_vector(smiles, vocab, max_length):\n while len(smiles) < max_length:\n smiles += \" \"\n vector = [vocab.index(str(x)) for x in smiles]\n one_hot = np.zeros((len(vocab), max_length), dtype=int)\n for i, elm in enumerate(vector):\n one_hot[elm][i] = 1\n return one_hot\n\n vocab = np.load('./vocab.npy')\n smi_total = []\n\n for i, smi in tqdm_notebook(enumerate(smi_list), desc='Converting to One Hot'):\n smi_onehot = smiles_to_vector(smi, list(vocab), 120)\n smi_total.append(smi_onehot)\n\n return np.asarray(smi_total)\n\ndef convert_to_graph(smiles_list):\n adj = []\n adj_norm = []\n features = []\n maxNumAtoms = 50\n for i in tqdm_notebook(smiles_list, desc='Converting to Graph'):\n # Mol\n iMol = Chem.MolFromSmiles(i.strip())\n #Adj\n iAdjTmp = Chem.rdmolops.GetAdjacencyMatrix(iMol)\n # Feature\n if( iAdjTmp.shape[0] <= maxNumAtoms):\n # Feature-preprocessing\n iFeature = np.zeros((maxNumAtoms, 58))\n iFeatureTmp = []\n for atom in iMol.GetAtoms():\n iFeatureTmp.append( atom_feature(atom) ) ### atom features only\n iFeature[0:len(iFeatureTmp), 0:58] = iFeatureTmp ### 0 padding for feature-set\n features.append(iFeature)\n\n # Adj-preprocessing\n iAdj = np.zeros((maxNumAtoms, maxNumAtoms))\n iAdj[0:len(iFeatureTmp), 0:len(iFeatureTmp)] = iAdjTmp + np.eye(len(iFeatureTmp))\n adj.append(np.asarray(iAdj))\n features = np.asarray(features)\n\n return features, adj\n \ndef atom_feature(atom):\n return np.array(one_of_k_encoding_unk(atom.GetSymbol(),\n ['C', 'N', 'O', 'S', 'F', 'H', 'Si', 'P', 'Cl', 'Br',\n 'Li', 'Na', 'K', 'Mg', 'Ca', 'Fe', 'As', 'Al', 'I', 'B',\n 'V', 'Tl', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn',\n 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'Mn', 'Cr', 'Pt', 'Hg', 'Pb']) +\n one_of_k_encoding(atom.GetDegree(), [0, 1, 2, 3, 4, 5]) +\n one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4]) +\n one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5]) +\n [atom.GetIsAromatic()]) # (40, 6, 5, 6, 1)\n\ndef one_of_k_encoding(x, allowable_set):\n if x not in allowable_set:\n raise Exception(\"input {0} not in allowable set{1}:\".format(x, allowable_set))\n #print list((map(lambda s: x == 
s, allowable_set)))\n return list(map(lambda s: x == s, allowable_set))\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))\n\n\nclass GCNDataset(Dataset):\n def __init__(self, list_feature, list_adj, list_logP):\n self.list_feature = list_feature\n self.list_adj = list_adj\n self.list_logP = list_logP\n\n def __len__(self):\n return len(self.list_feature)\n\n def __getitem__(self, index):\n return self.list_feature[index], self.list_adj[index], self.list_logP[index]\n\n\ndef partition(list_feature, list_adj, list_logP, args):\n num_total = list_feature.shape[0]\n num_train = int(num_total * (1 - args.test_size - args.val_size))\n num_val = int(num_total * args.val_size)\n num_test = int(num_total * args.test_size)\n\n feature_train = list_feature[:num_train]\n adj_train = list_adj[:num_train]\n logP_train = list_logP[:num_train]\n feature_val = list_feature[num_train:num_train + num_val]\n adj_val = list_adj[num_train:num_train + num_val]\n logP_val = list_logP[num_train:num_train + num_val]\n feature_test = list_feature[num_total - num_test:]\n adj_test = list_adj[num_train:num_train + num_val]\n logP_test = list_logP[num_total - num_test:]\n\n train_set = GCNDataset(feature_train, adj_train, logP_train)\n val_set = GCNDataset(feature_val, adj_val, logP_val)\n test_set = GCNDataset(feature_test, adj_test, logP_test)\n\n partition = {\n 'train': train_set,\n 'val': val_set,\n 'test': test_set\n }\n\n return partition", "_____no_output_____" ], [ "list_smi, list_logP = read_ZINC_smiles('ZINC.smiles', 2000)\nlist_feature, list_adj = convert_to_graph(list_smi)\nargs.dict_partition = partition(list_feature, list_adj, list_logP, args)", "_____no_output_____" ] ], [ [ "## 2. 
Model Construction", "_____no_output_____" ] ], [ [ "class GatedSkipConnection(nn.Module):\n \n def __init__(self, in_dim, new_dim, out_dim, activation):\n super(GatedSkipConnection, self).__init__()\n \n self.in_dim = in_dim\n self.new_dim = new_dim\n self.out_dim = out_dim\n self.activation = activation\n \n self.linear_in = nn.Linear(in_dim, out_dim)\n self.linear_new = nn.Linear(new_dim, out_dim)\n self.sigmoid = nn.Sigmoid()\n \n def forward(self, input_x, new_x): \n z = self.gate_coefficient(input_x, new_x)\n \n if (self.in_dim != self.out_dim):\n input_x = self.linear_in(input_x)\n if (self.new_dim != self.out_dim):\n new_x = self.linear_new(new_x)\n \n out = torch.mul(new_x, z) + torch.mul(input_x, 1.0-z)\n \n \n \n return out\n \n def gate_coefficient(self, input_x, new_x):\n X1 = self.linear_in(input_x)\n X2 = self.linear_new(new_x)\n gate_coefficient = self.sigmoid(X1 + X2)\n \n return gate_coefficient", "_____no_output_____" ], [ "class GraphConvolution(nn.Module):\n \n def __init__(self, in_dim, hidden_dim, activation, sc='no'):\n super(GraphConvolution, self).__init__()\n \n self.in_dim = in_dim\n self.hidden_dim = hidden_dim\n self.activation = activation\n self.sc = sc\n\n self.linear = nn.Linear(self.in_dim, \n self.hidden_dim)\n nn.init.xavier_uniform_(self.linear.weight)\n self.gated_skip_connection = GatedSkipConnection(self.in_dim,\n self.hidden_dim,\n self.hidden_dim, \n self.activation)\n \n def forward(self, x, adj):\n out = self.linear(x)\n out = torch.matmul(adj, out)\n \n if (self.sc == 'gsc'):\n out = self.gated_skip_connection(x, out)\n elif (self.sc == 'no'):\n out = self.activation(out)\n else:\n out = self.activation(out)\n \n return out\n ", "_____no_output_____" ], [ "class ReadOut(nn.Module):\n \n def __init__(self, in_dim, out_dim, activation):\n super(ReadOut, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim= out_dim\n \n self.linear = nn.Linear(self.in_dim, \n self.out_dim)\n nn.init.xavier_uniform_(self.linear.weight)\n self.activation = activation\n\n def forward(self, x):\n out = self.linear(x)\n out = torch.sum(out, dim=1)\n out = self.activation(out)\n \n return out", "_____no_output_____" ], [ "class Predictor(nn.Module):\n \n def __init__(self, in_dim, out_dim, activation=None):\n super(Predictor, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n \n self.linear = nn.Linear(self.in_dim,\n self.out_dim)\n nn.init.xavier_uniform_(self.linear.weight)\n self.activation = activation\n \n def forward(self, x):\n out = self.linear(x)\n if self.activation != None:\n out = self.activation(out)\n \n return out", "_____no_output_____" ], [ "class LogPPredictor(nn.Module):\n \n def __init__(self, \n n_layer, \n in_dim, \n hidden_dim_1, \n hidden_dim_2,\n out_dim,\n sc='no'):\n super(LogPPredictor, self).__init__()\n \n self.n_layer = n_layer\n self.graph_convolution_1 = GraphConvolution(in_dim, hidden_dim_1, nn.ReLU(), sc)\n self.graph_convolution_2 = GraphConvolution(hidden_dim_1, hidden_dim_1, nn.ReLU(), sc)\n self.readout = ReadOut(hidden_dim_1, hidden_dim_2, nn.Sigmoid())\n self.predictor_1 = Predictor(hidden_dim_2, hidden_dim_2, nn.ReLU())\n self.predictor_2 = Predictor(hidden_dim_2, hidden_dim_2, nn.Tanh())\n self.predictor_3 = Predictor(hidden_dim_2, out_dim)\n \n def forward(self, x, adj):\n out = self.graph_convolution_1(x, adj)\n for i in range(self.n_layer-1):\n out = self.graph_convolution_2(out, adj)\n out = self.readout(out)\n out = self.predictor_1(out)\n out = self.predictor_2(out)\n out = self.predictor_3(out)\n 
\n return out ", "_____no_output_____" ], [ "args.batch_size = 10\nargs.lr = 0.001\nargs.l2_coef = 0.001\nargs.optim = optim.Adam\nargs.criterion = nn.MSELoss()\nargs.epoch = 10\nargs.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "_____no_output_____" ], [ "use_gpu = lambda x=True: torch.set_default_tensor_type(torch.cuda.DoubleTensor if torch.cuda.is_available() and x else torch.FloatTensor)\nuse_gpu()\n\nprint(args.device)\n\nmodel = LogPPredictor(1, 58, 64, 128, 1, 'gsc')\nmodel.to(args.device)\nmodel.cuda()\n\nlist_train_loss = list()\nlist_val_loss = list()\nacc = 0\nmse = 0\n\noptimizer = args.optim(model.parameters(),\n lr=args.lr,\n weight_decay=args.l2_coef)\n\ndata_train = DataLoader(args.dict_partition['train'], \n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\ndata_val = DataLoader(args.dict_partition['val'],\n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\nfor epoch in tqdm_notebook(range(args.epoch), desc='Epoch'):\n model.train()\n epoch_train_loss = 0\n for i, batch in enumerate(data_train):\n list_feature = torch.tensor(batch[0])\n list_adj = torch.tensor(batch[1])\n list_logP = torch.tensor(batch[2])\n list_logP = list_logP.view(-1,1)\n list_feature, list_adj, list_logP = list_feature.to(args.device), list_adj.to(args.device), list_logP.to(args.device)\n \n optimizer.zero_grad()\n list_pred_logP = model(list_feature, list_adj)\n list_pred_logP.require_grad = False\n train_loss = args.criterion(list_pred_logP, list_logP)\n epoch_train_loss += train_loss.item()\n train_loss.backward()\n optimizer.step()\n \n list_train_loss.append(epoch_train_loss/len(data_train))\n \n model.eval()\n epoch_val_loss = 0\n with torch.no_grad():\n for i, batch in enumerate(data_val):\n list_feature = torch.tensor(batch[0])\n list_adj = torch.tensor(batch[1])\n list_logP = torch.tensor(batch[2])\n list_logP = list_logP.view(-1,1)\n list_feature, list_adj, list_logP = list_feature.to(args.device), list_adj.to(args.device), list_logP.to(args.device)\n\n\n list_pred_logP = model(list_feature, list_adj)\n val_loss = args.criterion(list_pred_logP, list_logP)\n epoch_val_loss += val_loss.item()\n \n list_val_loss.append(epoch_val_loss/len(data_val))\n \ndata_test = DataLoader(args.dict_partition['test'],\n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\nmodel.eval()\nwith torch.no_grad():\n logP_total = list()\n pred_logP_total = list()\n for i, batch in enumerate(data_val):\n list_feature = torch.tensor(batch[0])\n list_adj = torch.tensor(batch[1])\n list_logP = torch.tensor(batch[2])\n logP_total += list_logP.tolist()\n list_logP = list_logP.view(-1,1)\n list_feature, list_adj, list_logP = list_feature.to(args.device), list_adj.to(args.device), list_logP.to(args.device)\n\n \n list_pred_logP = model(list_feature, list_adj)\n \n pred_logP_total += list_pred_logP.tolist()\n \n mse = mean_squared_error(logP_total, pred_logP_total)", "cuda\n" ], [ "data = np.vstack((list_train_loss, list_val_loss))\ndata = np.transpose(data)\nepochs = np.arange(args.epoch)\ndf_loss = pd.DataFrame(data, epochs, [\"Train Loss\", \"Validation Loss\"])\n\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\ngrid = sns.lineplot(data=df_loss)\ngrid.set_title(\"Loss vs Epoch (tox=nr-ahr)\")\ngrid.set_ylabel(\"Loss\")\ngrid.set_xlabel(\"Epoch\")", "_____no_output_____" ], [ "model = LogPPredictor(1, 58, 64, 128, 1, 'gsc')\nmodel.to(args.device)\n\nlist_train_loss = list()\nlist_val_loss = list()\nacc = 0\nmse = 0\n\noptimizer = args.optim(model.parameters(),\n 
lr=args.lr,\n weight_decay=args.l2_coef)\n\ndata_train = DataLoader(args.dict_partition['train'], \n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\ndata_val = DataLoader(args.dict_partition['val'],\n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\nfor epoch in tqdm_notebook(range(args.epoch), desc='Epoch'):\n model.train()\n epoch_train_loss = 0\n for i, batch in enumerate(data_train):\n list_feature = torch.tensor(batch[0])\n list_adj = torch.tensor(batch[1])\n list_logP = torch.tensor(batch[2])\n list_logP = list_logP.view(-1,1)\n list_feature, list_adj, list_logP = list_feature.to(args.device), list_adj.to(args.device), list_logP.to(args.device)\n \n optimizer.zero_grad()\n list_pred_logP = model(list_feature, list_adj)\n list_pred_logP.require_grad = False\n train_loss = args.criterion(list_pred_logP, list_logP)\n epoch_train_loss += train_loss.item()\n train_loss.backward()\n optimizer.step()\n \n list_train_loss.append(epoch_train_loss/len(data_train))\n \n model.eval()\n epoch_val_loss = 0\n with torch.no_grad():\n for i, batch in enumerate(data_val):\n list_feature = torch.tensor(batch[0])\n list_adj = torch.tensor(batch[1])\n list_logP = torch.tensor(batch[2])\n list_logP = list_logP.view(-1,1)\n list_feature, list_adj, list_logP = list_feature.to(args.device), list_adj.to(args.device), list_logP.to(args.device)\n\n\n list_pred_logP = model(list_feature, list_adj)\n val_loss = args.criterion(list_pred_logP, list_logP)\n epoch_val_loss += val_loss.item()\n \n list_val_loss.append(epoch_val_loss/len(data_val))\n \ndata_test = DataLoader(args.dict_partition['test'],\n batch_size=args.batch_size,\n shuffle=args.shuffle)\n\nmodel.eval()\nwith torch.no_grad():\n logP_total = list()\n pred_logP_total = list()\n for i, batch in enumerate(data_val):\n list_feature = torch.tensor(batch[0])\n list_adj = torch.tensor(batch[1])\n list_logP = torch.tensor(batch[2])\n logP_total += list_logP.tolist()\n list_logP = list_logP.view(-1,1)\n list_feature, list_adj, list_logP = list_feature.to(args.device), list_adj.to(args.device), list_logP.to(args.device)\n\n \n list_pred_logP = model(list_feature, list_adj)\n \n pred_logP_total += list_pred_logP.tolist()\n \n mse = mean_squared_error(logP_total, pred_logP_total)", "_____no_output_____" ], [ "data = np.vstack((list_train_loss, list_val_loss))\ndata = np.transpose(data)\nepochs = np.arange(args.epoch)\ndf_loss = pd.DataFrame(data, epochs, [\"Train Loss\", \"Validation Loss\"])\n\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\ngrid = sns.lineplot(data=df_loss)\ngrid.set_title(\"Loss vs Epoch (tox=nr-ahr)\")\ngrid.set_ylabel(\"Loss\")\ngrid.set_xlabel(\"Epoch\")", "_____no_output_____" ], [ "for i in tqdm_notebook(range(10), desc='1', leave=True, position=1):\n for j in tqdm_notebook(range(100), desc='2', leave=False, position=2):\n sleep(0.01)", "_____no_output_____" ] ] ]
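One detail worth flagging in the `partition` helper above: `adj_test` is sliced from the validation index range (`list_adj[num_train:num_train + num_val]`) while `feature_test` and `logP_test` are taken from the tail of the arrays, so the test set pairs features and labels with the wrong adjacency matrices; the final evaluation loops also iterate over `data_val` even though `data_test` has just been built. A minimal sketch of consistent test-split indexing, using the same variable names as the helper:

```python
# Slice all three arrays over the same index range for the test split.
feature_test = list_feature[num_total - num_test:]
adj_test = list_adj[num_total - num_test:]
logP_test = list_logP[num_total - num_test:]
test_set = GCNDataset(feature_test, adj_test, logP_test)

# ...and evaluate on the test loader rather than the validation loader:
# for i, batch in enumerate(data_test): ...
```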
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c52276d77c16640d54fff7ec05427f2c3a3dedf4
68,263
ipynb
Jupyter Notebook
4_3_3_Variance_Covariance_Beta/variance.ipynb
jpweldon/Module_4_Practice
f1a4b56fad360eb264d9ba711469b6df99696789
[ "MIT" ]
1
2021-07-18T07:53:42.000Z
2021-07-18T07:53:42.000Z
4_3_3_Variance_Covariance_Beta/variance.ipynb
jpweldon/Module_4_Practice
f1a4b56fad360eb264d9ba711469b6df99696789
[ "MIT" ]
null
null
null
4_3_3_Variance_Covariance_Beta/variance.ipynb
jpweldon/Module_4_Practice
f1a4b56fad360eb264d9ba711469b6df99696789
[ "MIT" ]
null
null
null
521.091603
65,228
0.946106
[ [ [ "# Variance Exercise", "_____no_output_____" ] ], [ [ "# Import the pandas library\nimport pandas as pd\n\n# Create the stock DataFrame\nstocks = pd.DataFrame({\n'stock_hij' : [142.13, 143.54, 142.52, 143.43, 141.73, 137.68, 139.71, 139.60, 139.61, 141.57],\n'stock_klm' : [53.88, 52.83, 52.55, 53.84, 54.16, 55.58, 53.43, 52.50, 53.90, 47.86],\n'market' : [3534.22, 3511.93, 3488.67, 3483.34, 3483.81, 3426.92, 3443.12, 3435.56, 3453.49, 3449.52]\n})\n", "_____no_output_____" ], [ "# Create a daily_returns DataFrame by using the pct_change and dropna functions.\ndaily_returns = stocks.pct_change().dropna()\n", "_____no_output_____" ], [ "# Create a default line plot to visualize the volatility of each of the three stocks.\ndaily_returns.plot(figsize=(15, 10), title='Daily Returns of Stock HIJ, Stock KLM, and the Market', ylabel='Daily Returns')\n", "_____no_output_____" ], [ "# Use the var function to calculate the variance for each of the three stocks.\n# Sorting values with the sort_values function.\ndaily_returns.var().sort_values()\n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
c52278a8ca35634dfeba716c717c1287bb82e9bd
611,122
ipynb
Jupyter Notebook
notebooks/henon/Henon.ipynb
bodokaiser/complex-systems
5e348d9cc382059b316ccc0a391183ba65aa8f3f
[ "Apache-2.0" ]
2
2019-04-22T17:32:33.000Z
2019-10-03T18:09:01.000Z
notebooks/henon/Henon.ipynb
bodokaiser/complex-systems
5e348d9cc382059b316ccc0a391183ba65aa8f3f
[ "Apache-2.0" ]
null
null
null
notebooks/henon/Henon.ipynb
bodokaiser/complex-systems
5e348d9cc382059b316ccc0a391183ba65aa8f3f
[ "Apache-2.0" ]
null
null
null
70.486967
173,089
0.718074
[ [ [ "# Control\n\nIn this notebook we want to control the chaos in the Henon map. The Henon map is defined by\n\n$$\n\\begin{align}\nx_{n+1}&=1-ax_n^2+y_n\\\\\ny_{n+1}&=bx_n\n\\end{align}.\n$$", "_____no_output_____" ] ], [ [ "from plotly import offline as py\nfrom plotly import graph_objs as go\n\npy.init_notebook_mode(connected=True)", "_____no_output_____" ] ], [ [ "### Fixed points\n\nFirst we need to find the fixed points of the Henon map. From $y_n=y_{n+1}=bx_n$ we can elliminate $y_n$ in the first equation. The quadratic equation obtained after ellimination with $x_n=x_{n+1}$ yields,\n$$\n\\begin{align}\nx^*=\\frac{b-1\\pm\\sqrt{4a+(b-1)^2}}{2a},\n&&\ny^*=bx^*,\n\\end{align}\n$$\nas the fixed points of the Henon map.", "_____no_output_____" ] ], [ [ "def henon_map(x0, y0, a, b, N):\n x = [x0]\n y = [y0]\n \n for i in range(N):\n xn = x[-1]\n yn = y[-1]\n \n x.append(1 - a * xn**2 + yn)\n y.append(b * xn)\n \n return x, y\n\ndef fixed_points(a, b):\n u = (b - 1) / (2 * a)\n v = np.sqrt(4 * a + (b - 1)**2) / (2 * a)\n \n x1 = u - v\n x2 = u + v\n \n y1 = b * x1\n y2 = b * x2\n \n return [(x1, y1), (x2, y2)]", "_____no_output_____" ], [ "((xf1, yf1), (xf2, yf2)) = fixed_points(a=1.4, b=0.3)\n\nradius = 0.1\n\nlayout = go.Layout(\n title='Henon Attractor',\n xaxis=dict(title='x'),\n yaxis=dict(title='y', scaleanchor='x'),\n showlegend=False,\n shapes=[\n {\n 'type': 'circle',\n 'xref': 'x',\n 'yref': 'y',\n 'x0': xf1 + radius,\n 'y0': yf1 + radius,\n 'x1': xf1 - radius,\n 'y1': yf1 - radius,\n 'line': { 'color': 'gray' },\n },\n {\n 'type': 'circle',\n 'xref': 'x',\n 'yref': 'y',\n 'x0': xf2 + radius,\n 'y0': yf2 + radius,\n 'x1': xf2 - radius,\n 'y1': yf2 - radius,\n },\n ]\n)\n\nx = []\ny = []\n\nfor i in range(50):\n x0, y0 = np.random.uniform(0.2, 0.8, 2)\n \n xx, yy = henon_map(x0, y0, a=1.4, b=0.3, N=100)\n \n if np.abs(xx[-1]) < 10 and np.abs(yy[-1]) < 10:\n x += xx\n y += yy\n\nfigure = go.Figure([\n go.Scatter(x=x, y=y, mode='markers', marker=dict(size=3))\n], layout)\n\npy.iplot(figure)", "_____no_output_____" ] ], [ [ "So the second fixed point (positive sign) sits on the attractor.", "_____no_output_____" ] ], [ [ "def fixed_point(a, b):\n return fixed_points(a, b)[1]\n\nfixed_point(a=1.4, b=0.3)", "_____no_output_____" ] ], [ [ "We assume that coordinates and parameters are sufficiently close such that the following Taylor expansion is valid,$$\n\\boldsymbol{x}_{n+1}\n=\n\\boldsymbol{F}\\left(\\boldsymbol{x}^*,\\boldsymbol{r}_0\\right)\n+\n\\frac{d\\boldsymbol{F}}{d\\boldsymbol{x}_n}\\Bigr|_{\\boldsymbol{x}^*,\\boldsymbol{r}_0}\\left(\\boldsymbol{x}_n-\\boldsymbol{x}^*\\right)\n+\n\\frac{d\\boldsymbol{F}}{d\\boldsymbol{r}_n}\\Bigr|_{\\boldsymbol{x}^*,\\boldsymbol{r}_0}\\left(\\boldsymbol{r}_n-\\boldsymbol{r}_0\\right).$$\n\nIn the regime where these linear approximations are valid we can use, $$\n\\Delta\\boldsymbol{r}_n\n=\n\\gamma\\left(\\boldsymbol{x}_n-\\boldsymbol{x}^*\\right). 
$$\nFurther introducing $\\Delta\\boldsymbol{x}_n=\\boldsymbol{x}_n-\\boldsymbol{x}^*$ we can rewrite the map as, $$\n\\Delta\\boldsymbol{x}_{n+1}\n=\n\\underbrace{\\left(\n\\frac{d\\boldsymbol{F}}{d\\boldsymbol{x}_n}\\Bigr|_{\\boldsymbol{x}^*,\\boldsymbol{r}_0}\n+\n\\frac{d\\boldsymbol{F}}{d\\boldsymbol{r}_n}\\Bigr|_{\\boldsymbol{x}^*,\\boldsymbol{r}_0}\n\\right)}_{A}\n\\Delta\\boldsymbol{x}_n.$$\n\nThe Jacobians are $$\n\\begin{align}\n\\frac{d\\boldsymbol{F}}{d\\boldsymbol{x}_n}\\Bigr|_{\\boldsymbol{x}^*,\\boldsymbol{r}_0}\n=\n\\begin{pmatrix}\n-2 a_0 x^* & 1 \\\\\nb_0 & 0\n\\end{pmatrix},\n&&\n\\frac{d\\boldsymbol{F}}{d\\boldsymbol{r}_n}\\Bigr|_{\\boldsymbol{x}^*,\\boldsymbol{r}_0}\n=\n\\begin{pmatrix}\n-{x^*}^2 & 0 \\\\\n0 & x^*\n\\end{pmatrix}\n\\end{align}. $$\n\nThus the matrix $A$ reads, $$\nA\n=\n\\begin{pmatrix}\n-2a_0x^*-\\gamma{x^*}^2 & 1 \\\\\nb_0 & \\gamma x^*\n\\end{pmatrix}.\n$$ The optimal value for $\\gamma$ can be found for $0=A\\Delta\\boldsymbol{x}_n$.", "_____no_output_____" ] ], [ [ "def eigenvector(a, b):\n xf, yf = fixed_point(a, b)\n \n A = np.array([\n [-2 * a * xf - xf**2, 1],\n [b, xf]\n ])\n \n return u-v, u+v\n\neigenvalues(a=1.4, b=0.3)", "_____no_output_____" ] ], [ [ "\nThe Jacobian of the Henon map close to $(x^*,a_0,b_0)$ is given through, $$\n\\begin{pmatrix}\n-2 a_0 x^* & 1 \\\\\nb_0 & 0\n\\end{pmatrix},$$\nand has eigenvalues $$\\lambda=-a_0\\left[x^*\\pm\\sqrt{{x^*}^2+b_0/a_0^2}\\right]$$", "_____no_output_____" ] ], [ [ "fixed_point(a=1.4, b=0.3)", "_____no_output_____" ] ] ]
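For reference, the eigenvalues of the uncontrolled Jacobian at the fixed point can be checked numerically against the closed-form expression quoted above. A minimal sketch, assuming `numpy` is imported as `np` and the `fixed_point` helper defined earlier is in scope:

```python
def jacobian_eigenvalues(a, b):
    # Jacobian of the Henon map at the fixed point (no control term, gamma = 0).
    xf, _ = fixed_point(a, b)
    J = np.array([[-2 * a * xf, 1.0],
                  [b, 0.0]])
    return np.linalg.eigvals(J)

a, b = 1.4, 0.3
xf, _ = fixed_point(a, b)
print(jacobian_eigenvalues(a, b))
# Closed form: lambda = -a * (x* -/+ sqrt(x*^2 + b/a^2))
print(-a * (xf - np.sqrt(xf**2 + b / a**2)), -a * (xf + np.sqrt(xf**2 + b / a**2)))
```

For a = 1.4, b = 0.3 one eigenvalue has magnitude greater than one and the other less than one, so the fixed point on the attractor is a saddle, which is what makes the control problem non-trivial.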
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c5227dae7405e102aebda12b7841d07847f5b69d
82,882
ipynb
Jupyter Notebook
notebooks/1.0-ak-extract-titanic-data.ipynb
suren2k17/titanic
312b188510b1ca5916b8e5a6bb9b9ae476565fa9
[ "MIT" ]
null
null
null
notebooks/1.0-ak-extract-titanic-data.ipynb
suren2k17/titanic
312b188510b1ca5916b8e5a6bb9b9ae476565fa9
[ "MIT" ]
null
null
null
notebooks/1.0-ak-extract-titanic-data.ipynb
suren2k17/titanic
312b188510b1ca5916b8e5a6bb9b9ae476565fa9
[ "MIT" ]
null
null
null
66.146848
142
0.578244
[ [ [ "## Extracting Titanic Disaster Data From Kaggle", "_____no_output_____" ] ], [ [ "!pip install python-dotenv", "Requirement already satisfied: python-dotenv in c:\\development_avecto\\anaconda2\\lib\\site-packages\n" ], [ "from dotenv import load_dotenv, find_dotenv", "_____no_output_____" ], [ "# find .env automatically by walking up directories until it's found\ndotenv_path = find_dotenv()\n# load up the entries as environment variables\nload_dotenv(dotenv_path)", "_____no_output_____" ], [ "# extracting environment variable using os.environ.get\nimport os\nKAGGLE_USERNAME = os.environ.get(\"KAGGLE_USERNAME\")\nprint(KAGGLE_USERNAME)", "suren2k18\n" ], [ "# imports\nimport requests\nfrom requests import session\nimport os\nfrom dotenv import load_dotenv, find_dotenv", "_____no_output_____" ], [ "# payload for post \npayload = {\n 'action': 'login',\n 'username': os.environ.get(\"KAGGLE_USERNAME\"),\n 'password': os.environ.get(\"KAGGLE_PASSWORD\")\n}\n\n# url for train file (get the link from Kaggle website)\nurl = 'https://www.kaggle.com/c/titanic/download/train.csv'\n\n\n# setup session\nwith session() as c:\n # post request\n c.post('https://www.kaggle.com/account/login', data=payload)\n # get request\n response = c.get(url)\n # print response text\n print(response.text)", "PassengerId,Survived,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked\r\n1,0,3,\"Braund, Mr. Owen Harris\",male,22,1,0,A/5 21171,7.25,,S\r\n2,1,1,\"Cumings, Mrs. John Bradley (Florence Briggs Thayer)\",female,38,1,0,PC 17599,71.2833,C85,C\r\n3,1,3,\"Heikkinen, Miss. Laina\",female,26,0,0,STON/O2. 3101282,7.925,,S\r\n4,1,1,\"Futrelle, Mrs. Jacques Heath (Lily May Peel)\",female,35,1,0,113803,53.1,C123,S\r\n5,0,3,\"Allen, Mr. William Henry\",male,35,0,0,373450,8.05,,S\r\n6,0,3,\"Moran, Mr. James\",male,,0,0,330877,8.4583,,Q\r\n7,0,1,\"McCarthy, Mr. Timothy J\",male,54,0,0,17463,51.8625,E46,S\r\n8,0,3,\"Palsson, Master. Gosta Leonard\",male,2,3,1,349909,21.075,,S\r\n9,1,3,\"Johnson, Mrs. Oscar W (Elisabeth Vilhelmina Berg)\",female,27,0,2,347742,11.1333,,S\r\n10,1,2,\"Nasser, Mrs. Nicholas (Adele Achem)\",female,14,1,0,237736,30.0708,,C\r\n11,1,3,\"Sandstrom, Miss. Marguerite Rut\",female,4,1,1,PP 9549,16.7,G6,S\r\n12,1,1,\"Bonnell, Miss. Elizabeth\",female,58,0,0,113783,26.55,C103,S\r\n13,0,3,\"Saundercock, Mr. William Henry\",male,20,0,0,A/5. 2151,8.05,,S\r\n14,0,3,\"Andersson, Mr. Anders Johan\",male,39,1,5,347082,31.275,,S\r\n15,0,3,\"Vestrom, Miss. Hulda Amanda Adolfina\",female,14,0,0,350406,7.8542,,S\r\n16,1,2,\"Hewlett, Mrs. (Mary D Kingcome) \",female,55,0,0,248706,16,,S\r\n17,0,3,\"Rice, Master. Eugene\",male,2,4,1,382652,29.125,,Q\r\n18,1,2,\"Williams, Mr. Charles Eugene\",male,,0,0,244373,13,,S\r\n19,0,3,\"Vander Planke, Mrs. Julius (Emelia Maria Vandemoortele)\",female,31,1,0,345763,18,,S\r\n20,1,3,\"Masselmani, Mrs. Fatima\",female,,0,0,2649,7.225,,C\r\n21,0,2,\"Fynney, Mr. Joseph J\",male,35,0,0,239865,26,,S\r\n22,1,2,\"Beesley, Mr. Lawrence\",male,34,0,0,248698,13,D56,S\r\n23,1,3,\"McGowan, Miss. Anna \"\"Annie\"\"\",female,15,0,0,330923,8.0292,,Q\r\n24,1,1,\"Sloper, Mr. William Thompson\",male,28,0,0,113788,35.5,A6,S\r\n25,0,3,\"Palsson, Miss. Torborg Danira\",female,8,3,1,349909,21.075,,S\r\n26,1,3,\"Asplund, Mrs. Carl Oscar (Selma Augusta Emilia Johansson)\",female,38,1,5,347077,31.3875,,S\r\n27,0,3,\"Emir, Mr. Farred Chehab\",male,,0,0,2631,7.225,,C\r\n28,0,1,\"Fortune, Mr. Charles Alexander\",male,19,3,2,19950,263,C23 C25 C27,S\r\n29,1,3,\"O'Dwyer, Miss. 
Ellen \"\"Nellie\"\"\",female,,0,0,330959,7.8792,,Q\r\n30,0,3,\"Todoroff, Mr. Lalio\",male,,0,0,349216,7.8958,,S\r\n31,0,1,\"Uruchurtu, Don. Manuel E\",male,40,0,0,PC 17601,27.7208,,C\r\n32,1,1,\"Spencer, Mrs. William Augustus (Marie Eugenie)\",female,,1,0,PC 17569,146.5208,B78,C\r\n33,1,3,\"Glynn, Miss. Mary Agatha\",female,,0,0,335677,7.75,,Q\r\n34,0,2,\"Wheadon, Mr. Edward H\",male,66,0,0,C.A. 24579,10.5,,S\r\n35,0,1,\"Meyer, Mr. Edgar Joseph\",male,28,1,0,PC 17604,82.1708,,C\r\n36,0,1,\"Holverson, Mr. Alexander Oskar\",male,42,1,0,113789,52,,S\r\n37,1,3,\"Mamee, Mr. Hanna\",male,,0,0,2677,7.2292,,C\r\n38,0,3,\"Cann, Mr. Ernest Charles\",male,21,0,0,A./5. 2152,8.05,,S\r\n39,0,3,\"Vander Planke, Miss. Augusta Maria\",female,18,2,0,345764,18,,S\r\n40,1,3,\"Nicola-Yarred, Miss. Jamila\",female,14,1,0,2651,11.2417,,C\r\n41,0,3,\"Ahlin, Mrs. Johan (Johanna Persdotter Larsson)\",female,40,1,0,7546,9.475,,S\r\n42,0,2,\"Turpin, Mrs. William John Robert (Dorothy Ann Wonnacott)\",female,27,1,0,11668,21,,S\r\n43,0,3,\"Kraeff, Mr. Theodor\",male,,0,0,349253,7.8958,,C\r\n44,1,2,\"Laroche, Miss. Simonne Marie Anne Andree\",female,3,1,2,SC/Paris 2123,41.5792,,C\r\n45,1,3,\"Devaney, Miss. Margaret Delia\",female,19,0,0,330958,7.8792,,Q\r\n46,0,3,\"Rogers, Mr. William John\",male,,0,0,S.C./A.4. 23567,8.05,,S\r\n47,0,3,\"Lennon, Mr. Denis\",male,,1,0,370371,15.5,,Q\r\n48,1,3,\"O'Driscoll, Miss. Bridget\",female,,0,0,14311,7.75,,Q\r\n49,0,3,\"Samaan, Mr. Youssef\",male,,2,0,2662,21.6792,,C\r\n50,0,3,\"Arnold-Franchi, Mrs. Josef (Josefine Franchi)\",female,18,1,0,349237,17.8,,S\r\n51,0,3,\"Panula, Master. Juha Niilo\",male,7,4,1,3101295,39.6875,,S\r\n52,0,3,\"Nosworthy, Mr. Richard Cater\",male,21,0,0,A/4. 39886,7.8,,S\r\n53,1,1,\"Harper, Mrs. Henry Sleeper (Myna Haxtun)\",female,49,1,0,PC 17572,76.7292,D33,C\r\n54,1,2,\"Faunthorpe, Mrs. Lizzie (Elizabeth Anne Wilkinson)\",female,29,1,0,2926,26,,S\r\n55,0,1,\"Ostby, Mr. Engelhart Cornelius\",male,65,0,1,113509,61.9792,B30,C\r\n56,1,1,\"Woolner, Mr. Hugh\",male,,0,0,19947,35.5,C52,S\r\n57,1,2,\"Rugg, Miss. Emily\",female,21,0,0,C.A. 31026,10.5,,S\r\n58,0,3,\"Novel, Mr. Mansouer\",male,28.5,0,0,2697,7.2292,,C\r\n59,1,2,\"West, Miss. Constance Mirium\",female,5,1,2,C.A. 34651,27.75,,S\r\n60,0,3,\"Goodwin, Master. William Frederick\",male,11,5,2,CA 2144,46.9,,S\r\n61,0,3,\"Sirayanian, Mr. Orsen\",male,22,0,0,2669,7.2292,,C\r\n62,1,1,\"Icard, Miss. Amelie\",female,38,0,0,113572,80,B28,\r\n63,0,1,\"Harris, Mr. Henry Birkhardt\",male,45,1,0,36973,83.475,C83,S\r\n64,0,3,\"Skoog, Master. Harald\",male,4,3,2,347088,27.9,,S\r\n65,0,1,\"Stewart, Mr. Albert A\",male,,0,0,PC 17605,27.7208,,C\r\n66,1,3,\"Moubarek, Master. Gerios\",male,,1,1,2661,15.2458,,C\r\n67,1,2,\"Nye, Mrs. (Elizabeth Ramell)\",female,29,0,0,C.A. 29395,10.5,F33,S\r\n68,0,3,\"Crease, Mr. Ernest James\",male,19,0,0,S.P. 3464,8.1583,,S\r\n69,1,3,\"Andersson, Miss. Erna Alexandra\",female,17,4,2,3101281,7.925,,S\r\n70,0,3,\"Kink, Mr. Vincenz\",male,26,2,0,315151,8.6625,,S\r\n71,0,2,\"Jenkin, Mr. Stephen Curnow\",male,32,0,0,C.A. 33111,10.5,,S\r\n72,0,3,\"Goodwin, Miss. Lillian Amy\",female,16,5,2,CA 2144,46.9,,S\r\n73,0,2,\"Hood, Mr. Ambrose Jr\",male,21,0,0,S.O.C. 14879,73.5,,S\r\n74,0,3,\"Chronopoulos, Mr. Apostolos\",male,26,1,0,2680,14.4542,,C\r\n75,1,3,\"Bing, Mr. Lee\",male,32,0,0,1601,56.4958,,S\r\n76,0,3,\"Moen, Mr. Sigurd Hansen\",male,25,0,0,348123,7.65,F G73,S\r\n77,0,3,\"Staneff, Mr. Ivan\",male,,0,0,349208,7.8958,,S\r\n78,0,3,\"Moutal, Mr. 
Rahamin Haim\",male,,0,0,374746,8.05,,S\r\n79,1,2,\"Caldwell, Master. Alden Gates\",male,0.83,0,2,248738,29,,S\r\n80,1,3,\"Dowdell, Miss. Elizabeth\",female,30,0,0,364516,12.475,,S\r\n81,0,3,\"Waelens, Mr. Achille\",male,22,0,0,345767,9,,S\r\n82,1,3,\"Sheerlinck, Mr. Jan Baptist\",male,29,0,0,345779,9.5,,S\r\n83,1,3,\"McDermott, Miss. Brigdet Delia\",female,,0,0,330932,7.7875,,Q\r\n84,0,1,\"Carrau, Mr. Francisco M\",male,28,0,0,113059,47.1,,S\r\n85,1,2,\"Ilett, Miss. Bertha\",female,17,0,0,SO/C 14885,10.5,,S\r\n86,1,3,\"Backstrom, Mrs. Karl Alfred (Maria Mathilda Gustafsson)\",female,33,3,0,3101278,15.85,,S\r\n87,0,3,\"Ford, Mr. William Neal\",male,16,1,3,W./C. 6608,34.375,,S\r\n88,0,3,\"Slocovski, Mr. Selman Francis\",male,,0,0,SOTON/OQ 392086,8.05,,S\r\n89,1,1,\"Fortune, Miss. Mabel Helen\",female,23,3,2,19950,263,C23 C25 C27,S\r\n90,0,3,\"Celotti, Mr. Francesco\",male,24,0,0,343275,8.05,,S\r\n91,0,3,\"Christmann, Mr. Emil\",male,29,0,0,343276,8.05,,S\r\n92,0,3,\"Andreasson, Mr. Paul Edvin\",male,20,0,0,347466,7.8542,,S\r\n93,0,1,\"Chaffee, Mr. Herbert Fuller\",male,46,1,0,W.E.P. 5734,61.175,E31,S\r\n94,0,3,\"Dean, Mr. Bertram Frank\",male,26,1,2,C.A. 2315,20.575,,S\r\n95,0,3,\"Coxon, Mr. Daniel\",male,59,0,0,364500,7.25,,S\r\n96,0,3,\"Shorney, Mr. Charles Joseph\",male,,0,0,374910,8.05,,S\r\n97,0,1,\"Goldschmidt, Mr. George B\",male,71,0,0,PC 17754,34.6542,A5,C\r\n98,1,1,\"Greenfield, Mr. William Bertram\",male,23,0,1,PC 17759,63.3583,D10 D12,C\r\n99,1,2,\"Doling, Mrs. John T (Ada Julia Bone)\",female,34,0,1,231919,23,,S\r\n100,0,2,\"Kantor, Mr. Sinai\",male,34,1,0,244367,26,,S\r\n101,0,3,\"Petranec, Miss. Matilda\",female,28,0,0,349245,7.8958,,S\r\n102,0,3,\"Petroff, Mr. Pastcho (\"\"Pentcho\"\")\",male,,0,0,349215,7.8958,,S\r\n103,0,1,\"White, Mr. Richard Frasar\",male,21,0,1,35281,77.2875,D26,S\r\n104,0,3,\"Johansson, Mr. Gustaf Joel\",male,33,0,0,7540,8.6542,,S\r\n105,0,3,\"Gustafsson, Mr. Anders Vilhelm\",male,37,2,0,3101276,7.925,,S\r\n106,0,3,\"Mionoff, Mr. Stoytcho\",male,28,0,0,349207,7.8958,,S\r\n107,1,3,\"Salkjelsvik, Miss. Anna Kristine\",female,21,0,0,343120,7.65,,S\r\n108,1,3,\"Moss, Mr. Albert Johan\",male,,0,0,312991,7.775,,S\r\n109,0,3,\"Rekic, Mr. Tido\",male,38,0,0,349249,7.8958,,S\r\n110,1,3,\"Moran, Miss. Bertha\",female,,1,0,371110,24.15,,Q\r\n111,0,1,\"Porter, Mr. Walter Chamberlain\",male,47,0,0,110465,52,C110,S\r\n112,0,3,\"Zabour, Miss. Hileni\",female,14.5,1,0,2665,14.4542,,C\r\n113,0,3,\"Barton, Mr. David John\",male,22,0,0,324669,8.05,,S\r\n114,0,3,\"Jussila, Miss. Katriina\",female,20,1,0,4136,9.825,,S\r\n115,0,3,\"Attalah, Miss. Malake\",female,17,0,0,2627,14.4583,,C\r\n116,0,3,\"Pekoniemi, Mr. Edvard\",male,21,0,0,STON/O 2. 3101294,7.925,,S\r\n117,0,3,\"Connors, Mr. Patrick\",male,70.5,0,0,370369,7.75,,Q\r\n118,0,2,\"Turpin, Mr. William John Robert\",male,29,1,0,11668,21,,S\r\n119,0,1,\"Baxter, Mr. Quigg Edmond\",male,24,0,1,PC 17558,247.5208,B58 B60,C\r\n120,0,3,\"Andersson, Miss. Ellis Anna Maria\",female,2,4,2,347082,31.275,,S\r\n121,0,2,\"Hickman, Mr. Stanley George\",male,21,2,0,S.O.C. 14879,73.5,,S\r\n122,0,3,\"Moore, Mr. Leonard Charles\",male,,0,0,A4. 54510,8.05,,S\r\n123,0,2,\"Nasser, Mr. Nicholas\",male,32.5,1,0,237736,30.0708,,C\r\n124,1,2,\"Webber, Miss. Susan\",female,32.5,0,0,27267,13,E101,S\r\n125,0,1,\"White, Mr. Percival Wayland\",male,54,0,1,35281,77.2875,D26,S\r\n126,1,3,\"Nicola-Yarred, Master. Elias\",male,12,1,0,2651,11.2417,,C\r\n127,0,3,\"McMahon, Mr. Martin\",male,,0,0,370372,7.75,,Q\r\n128,1,3,\"Madsen, Mr. 
Fridtjof Arne\",male,24,0,0,C 17369,7.1417,,S\r\n129,1,3,\"Peter, Miss. Anna\",female,,1,1,2668,22.3583,F E69,C\r\n130,0,3,\"Ekstrom, Mr. Johan\",male,45,0,0,347061,6.975,,S\r\n131,0,3,\"Drazenoic, Mr. Jozef\",male,33,0,0,349241,7.8958,,C\r\n132,0,3,\"Coelho, Mr. Domingos Fernandeo\",male,20,0,0,SOTON/O.Q. 3101307,7.05,,S\r\n133,0,3,\"Robins, Mrs. Alexander A (Grace Charity Laury)\",female,47,1,0,A/5. 3337,14.5,,S\r\n134,1,2,\"Weisz, Mrs. Leopold (Mathilde Francoise Pede)\",female,29,1,0,228414,26,,S\r\n135,0,2,\"Sobey, Mr. Samuel James Hayden\",male,25,0,0,C.A. 29178,13,,S\r\n136,0,2,\"Richard, Mr. Emile\",male,23,0,0,SC/PARIS 2133,15.0458,,C\r\n137,1,1,\"Newsom, Miss. Helen Monypeny\",female,19,0,2,11752,26.2833,D47,S\r\n138,0,1,\"Futrelle, Mr. Jacques Heath\",male,37,1,0,113803,53.1,C123,S\r\n139,0,3,\"Osen, Mr. Olaf Elon\",male,16,0,0,7534,9.2167,,S\r\n140,0,1,\"Giglio, Mr. Victor\",male,24,0,0,PC 17593,79.2,B86,C\r\n141,0,3,\"Boulos, Mrs. Joseph (Sultana)\",female,,0,2,2678,15.2458,,C\r\n142,1,3,\"Nysten, Miss. Anna Sofia\",female,22,0,0,347081,7.75,,S\r\n143,1,3,\"Hakkarainen, Mrs. Pekka Pietari (Elin Matilda Dolck)\",female,24,1,0,STON/O2. 3101279,15.85,,S\r\n144,0,3,\"Burke, Mr. Jeremiah\",male,19,0,0,365222,6.75,,Q\r\n145,0,2,\"Andrew, Mr. Edgardo Samuel\",male,18,0,0,231945,11.5,,S\r\n146,0,2,\"Nicholls, Mr. Joseph Charles\",male,19,1,1,C.A. 33112,36.75,,S\r\n147,1,3,\"Andersson, Mr. August Edvard (\"\"Wennerstrom\"\")\",male,27,0,0,350043,7.7958,,S\r\n148,0,3,\"Ford, Miss. Robina Maggie \"\"Ruby\"\"\",female,9,2,2,W./C. 6608,34.375,,S\r\n149,0,2,\"Navratil, Mr. Michel (\"\"Louis M Hoffman\"\")\",male,36.5,0,2,230080,26,F2,S\r\n150,0,2,\"Byles, Rev. Thomas Roussel Davids\",male,42,0,0,244310,13,,S\r\n151,0,2,\"Bateman, Rev. Robert James\",male,51,0,0,S.O.P. 1166,12.525,,S\r\n152,1,1,\"Pears, Mrs. Thomas (Edith Wearne)\",female,22,1,0,113776,66.6,C2,S\r\n153,0,3,\"Meo, Mr. Alfonzo\",male,55.5,0,0,A.5. 11206,8.05,,S\r\n154,0,3,\"van Billiard, Mr. Austin Blyler\",male,40.5,0,2,A/5. 851,14.5,,S\r\n155,0,3,\"Olsen, Mr. Ole Martin\",male,,0,0,Fa 265302,7.3125,,S\r\n156,0,1,\"Williams, Mr. Charles Duane\",male,51,0,1,PC 17597,61.3792,,C\r\n157,1,3,\"Gilnagh, Miss. Katherine \"\"Katie\"\"\",female,16,0,0,35851,7.7333,,Q\r\n158,0,3,\"Corn, Mr. Harry\",male,30,0,0,SOTON/OQ 392090,8.05,,S\r\n159,0,3,\"Smiljanic, Mr. Mile\",male,,0,0,315037,8.6625,,S\r\n160,0,3,\"Sage, Master. Thomas Henry\",male,,8,2,CA. 2343,69.55,,S\r\n161,0,3,\"Cribb, Mr. John Hatfield\",male,44,0,1,371362,16.1,,S\r\n162,1,2,\"Watt, Mrs. James (Elizabeth \"\"Bessie\"\" Inglis Milne)\",female,40,0,0,C.A. 33595,15.75,,S\r\n163,0,3,\"Bengtsson, Mr. John Viktor\",male,26,0,0,347068,7.775,,S\r\n164,0,3,\"Calic, Mr. Jovo\",male,17,0,0,315093,8.6625,,S\r\n165,0,3,\"Panula, Master. Eino Viljami\",male,1,4,1,3101295,39.6875,,S\r\n166,1,3,\"Goldsmith, Master. Frank John William \"\"Frankie\"\"\",male,9,0,2,363291,20.525,,S\r\n167,1,1,\"Chibnall, Mrs. (Edith Martha Bowerman)\",female,,0,1,113505,55,E33,S\r\n168,0,3,\"Skoog, Mrs. William (Anna Bernhardina Karlsson)\",female,45,1,4,347088,27.9,,S\r\n169,0,1,\"Baumann, Mr. John D\",male,,0,0,PC 17318,25.925,,S\r\n170,0,3,\"Ling, Mr. Lee\",male,28,0,0,1601,56.4958,,S\r\n171,0,1,\"Van der hoef, Mr. Wyckoff\",male,61,0,0,111240,33.5,B19,S\r\n172,0,3,\"Rice, Master. Arthur\",male,4,4,1,382652,29.125,,Q\r\n173,1,3,\"Johnson, Miss. Eleanor Ileen\",female,1,1,1,347742,11.1333,,S\r\n174,0,3,\"Sivola, Mr. Antti Wilhelm\",male,21,0,0,STON/O 2. 3101280,7.925,,S\r\n175,0,1,\"Smith, Mr. 
James Clinch\",male,56,0,0,17764,30.6958,A7,C\r\n176,0,3,\"Klasen, Mr. Klas Albin\",male,18,1,1,350404,7.8542,,S\r\n177,0,3,\"Lefebre, Master. Henry Forbes\",male,,3,1,4133,25.4667,,S\r\n178,0,1,\"Isham, Miss. Ann Elizabeth\",female,50,0,0,PC 17595,28.7125,C49,C\r\n179,0,2,\"Hale, Mr. Reginald\",male,30,0,0,250653,13,,S\r\n180,0,3,\"Leonard, Mr. Lionel\",male,36,0,0,LINE,0,,S\r\n181,0,3,\"Sage, Miss. Constance Gladys\",female,,8,2,CA. 2343,69.55,,S\r\n182,0,2,\"Pernot, Mr. Rene\",male,,0,0,SC/PARIS 2131,15.05,,C\r\n183,0,3,\"Asplund, Master. Clarence Gustaf Hugo\",male,9,4,2,347077,31.3875,,S\r\n184,1,2,\"Becker, Master. Richard F\",male,1,2,1,230136,39,F4,S\r\n185,1,3,\"Kink-Heilmann, Miss. Luise Gretchen\",female,4,0,2,315153,22.025,,S\r\n186,0,1,\"Rood, Mr. Hugh Roscoe\",male,,0,0,113767,50,A32,S\r\n187,1,3,\"O'Brien, Mrs. Thomas (Johanna \"\"Hannah\"\" Godfrey)\",female,,1,0,370365,15.5,,Q\r\n188,1,1,\"Romaine, Mr. Charles Hallace (\"\"Mr C Rolmane\"\")\",male,45,0,0,111428,26.55,,S\r\n189,0,3,\"Bourke, Mr. John\",male,40,1,1,364849,15.5,,Q\r\n190,0,3,\"Turcin, Mr. Stjepan\",male,36,0,0,349247,7.8958,,S\r\n191,1,2,\"Pinsky, Mrs. (Rosa)\",female,32,0,0,234604,13,,S\r\n192,0,2,\"Carbines, Mr. William\",male,19,0,0,28424,13,,S\r\n193,1,3,\"Andersen-Jensen, Miss. Carla Christine Nielsine\",female,19,1,0,350046,7.8542,,S\r\n194,1,2,\"Navratil, Master. Michel M\",male,3,1,1,230080,26,F2,S\r\n195,1,1,\"Brown, Mrs. James Joseph (Margaret Tobin)\",female,44,0,0,PC 17610,27.7208,B4,C\r\n196,1,1,\"Lurette, Miss. Elise\",female,58,0,0,PC 17569,146.5208,B80,C\r\n197,0,3,\"Mernagh, Mr. Robert\",male,,0,0,368703,7.75,,Q\r\n198,0,3,\"Olsen, Mr. Karl Siegwart Andreas\",male,42,0,1,4579,8.4042,,S\r\n199,1,3,\"Madigan, Miss. Margaret \"\"Maggie\"\"\",female,,0,0,370370,7.75,,Q\r\n200,0,2,\"Yrois, Miss. Henriette (\"\"Mrs Harbeck\"\")\",female,24,0,0,248747,13,,S\r\n201,0,3,\"Vande Walle, Mr. Nestor Cyriel\",male,28,0,0,345770,9.5,,S\r\n202,0,3,\"Sage, Mr. Frederick\",male,,8,2,CA. 2343,69.55,,S\r\n203,0,3,\"Johanson, Mr. Jakob Alfred\",male,34,0,0,3101264,6.4958,,S\r\n204,0,3,\"Youseff, Mr. Gerious\",male,45.5,0,0,2628,7.225,,C\r\n205,1,3,\"Cohen, Mr. Gurshon \"\"Gus\"\"\",male,18,0,0,A/5 3540,8.05,,S\r\n206,0,3,\"Strom, Miss. Telma Matilda\",female,2,0,1,347054,10.4625,G6,S\r\n207,0,3,\"Backstrom, Mr. Karl Alfred\",male,32,1,0,3101278,15.85,,S\r\n208,1,3,\"Albimona, Mr. Nassef Cassem\",male,26,0,0,2699,18.7875,,C\r\n209,1,3,\"Carr, Miss. Helen \"\"Ellen\"\"\",female,16,0,0,367231,7.75,,Q\r\n210,1,1,\"Blank, Mr. Henry\",male,40,0,0,112277,31,A31,C\r\n211,0,3,\"Ali, Mr. Ahmed\",male,24,0,0,SOTON/O.Q. 3101311,7.05,,S\r\n212,1,2,\"Cameron, Miss. Clear Annie\",female,35,0,0,F.C.C. 13528,21,,S\r\n213,0,3,\"Perkin, Mr. John Henry\",male,22,0,0,A/5 21174,7.25,,S\r\n214,0,2,\"Givard, Mr. Hans Kristensen\",male,30,0,0,250646,13,,S\r\n215,0,3,\"Kiernan, Mr. Philip\",male,,1,0,367229,7.75,,Q\r\n216,1,1,\"Newell, Miss. Madeleine\",female,31,1,0,35273,113.275,D36,C\r\n217,1,3,\"Honkanen, Miss. Eliina\",female,27,0,0,STON/O2. 3101283,7.925,,S\r\n218,0,2,\"Jacobsohn, Mr. Sidney Samuel\",male,42,1,0,243847,27,,S\r\n219,1,1,\"Bazzani, Miss. Albina\",female,32,0,0,11813,76.2917,D15,C\r\n220,0,2,\"Harris, Mr. Walter\",male,30,0,0,W/C 14208,10.5,,S\r\n221,1,3,\"Sunderland, Mr. Victor Francis\",male,16,0,0,SOTON/OQ 392089,8.05,,S\r\n222,0,2,\"Bracken, Mr. James H\",male,27,0,0,220367,13,,S\r\n223,0,3,\"Green, Mr. George Henry\",male,51,0,0,21440,8.05,,S\r\n224,0,3,\"Nenkoff, Mr. 
Christo\",male,,0,0,349234,7.8958,,S\r\n225,1,1,\"Hoyt, Mr. Frederick Maxfield\",male,38,1,0,19943,90,C93,S\r\n226,0,3,\"Berglund, Mr. Karl Ivar Sven\",male,22,0,0,PP 4348,9.35,,S\r\n227,1,2,\"Mellors, Mr. William John\",male,19,0,0,SW/PP 751,10.5,,S\r\n228,0,3,\"Lovell, Mr. John Hall (\"\"Henry\"\")\",male,20.5,0,0,A/5 21173,7.25,,S\r\n229,0,2,\"Fahlstrom, Mr. Arne Jonas\",male,18,0,0,236171,13,,S\r\n230,0,3,\"Lefebre, Miss. Mathilde\",female,,3,1,4133,25.4667,,S\r\n231,1,1,\"Harris, Mrs. Henry Birkhardt (Irene Wallach)\",female,35,1,0,36973,83.475,C83,S\r\n232,0,3,\"Larsson, Mr. Bengt Edvin\",male,29,0,0,347067,7.775,,S\r\n233,0,2,\"Sjostedt, Mr. Ernst Adolf\",male,59,0,0,237442,13.5,,S\r\n234,1,3,\"Asplund, Miss. Lillian Gertrud\",female,5,4,2,347077,31.3875,,S\r\n235,0,2,\"Leyson, Mr. Robert William Norman\",male,24,0,0,C.A. 29566,10.5,,S\r\n236,0,3,\"Harknett, Miss. Alice Phoebe\",female,,0,0,W./C. 6609,7.55,,S\r\n237,0,2,\"Hold, Mr. Stephen\",male,44,1,0,26707,26,,S\r\n238,1,2,\"Collyer, Miss. Marjorie \"\"Lottie\"\"\",female,8,0,2,C.A. 31921,26.25,,S\r\n239,0,2,\"Pengelly, Mr. Frederick William\",male,19,0,0,28665,10.5,,S\r\n240,0,2,\"Hunt, Mr. George Henry\",male,33,0,0,SCO/W 1585,12.275,,S\r\n241,0,3,\"Zabour, Miss. Thamine\",female,,1,0,2665,14.4542,,C\r\n242,1,3,\"Murphy, Miss. Katherine \"\"Kate\"\"\",female,,1,0,367230,15.5,,Q\r\n243,0,2,\"Coleridge, Mr. Reginald Charles\",male,29,0,0,W./C. 14263,10.5,,S\r\n244,0,3,\"Maenpaa, Mr. Matti Alexanteri\",male,22,0,0,STON/O 2. 3101275,7.125,,S\r\n245,0,3,\"Attalah, Mr. Sleiman\",male,30,0,0,2694,7.225,,C\r\n246,0,1,\"Minahan, Dr. William Edward\",male,44,2,0,19928,90,C78,Q\r\n247,0,3,\"Lindahl, Miss. Agda Thorilda Viktoria\",female,25,0,0,347071,7.775,,S\r\n248,1,2,\"Hamalainen, Mrs. William (Anna)\",female,24,0,2,250649,14.5,,S\r\n249,1,1,\"Beckwith, Mr. Richard Leonard\",male,37,1,1,11751,52.5542,D35,S\r\n250,0,2,\"Carter, Rev. Ernest Courtenay\",male,54,1,0,244252,26,,S\r\n251,0,3,\"Reed, Mr. James George\",male,,0,0,362316,7.25,,S\r\n252,0,3,\"Strom, Mrs. Wilhelm (Elna Matilda Persson)\",female,29,1,1,347054,10.4625,G6,S\r\n253,0,1,\"Stead, Mr. William Thomas\",male,62,0,0,113514,26.55,C87,S\r\n254,0,3,\"Lobb, Mr. William Arthur\",male,30,1,0,A/5. 3336,16.1,,S\r\n255,0,3,\"Rosblom, Mrs. Viktor (Helena Wilhelmina)\",female,41,0,2,370129,20.2125,,S\r\n256,1,3,\"Touma, Mrs. Darwis (Hanne Youssef Razi)\",female,29,0,2,2650,15.2458,,C\r\n257,1,1,\"Thorne, Mrs. Gertrude Maybelle\",female,,0,0,PC 17585,79.2,,C\r\n258,1,1,\"Cherry, Miss. Gladys\",female,30,0,0,110152,86.5,B77,S\r\n259,1,1,\"Ward, Miss. Anna\",female,35,0,0,PC 17755,512.3292,,C\r\n260,1,2,\"Parrish, Mrs. (Lutie Davis)\",female,50,0,1,230433,26,,S\r\n261,0,3,\"Smith, Mr. Thomas\",male,,0,0,384461,7.75,,Q\r\n262,1,3,\"Asplund, Master. Edvin Rojj Felix\",male,3,4,2,347077,31.3875,,S\r\n263,0,1,\"Taussig, Mr. Emil\",male,52,1,1,110413,79.65,E67,S\r\n264,0,1,\"Harrison, Mr. William\",male,40,0,0,112059,0,B94,S\r\n265,0,3,\"Henry, Miss. Delia\",female,,0,0,382649,7.75,,Q\r\n266,0,2,\"Reeves, Mr. David\",male,36,0,0,C.A. 17248,10.5,,S\r\n267,0,3,\"Panula, Mr. Ernesti Arvid\",male,16,4,1,3101295,39.6875,,S\r\n268,1,3,\"Persson, Mr. Ernst Ulrik\",male,25,1,0,347083,7.775,,S\r\n269,1,1,\"Graham, Mrs. William Thompson (Edith Junkins)\",female,58,0,1,PC 17582,153.4625,C125,S\r\n270,1,1,\"Bissette, Miss. Amelia\",female,35,0,0,PC 17760,135.6333,C99,S\r\n271,0,1,\"Cairns, Mr. Alexander\",male,,0,0,113798,31,,S\r\n272,1,3,\"Tornquist, Mr. 
William Henry\",male,25,0,0,LINE,0,,S\r\n273,1,2,\"Mellinger, Mrs. (Elizabeth Anne Maidment)\",female,41,0,1,250644,19.5,,S\r\n274,0,1,\"Natsch, Mr. Charles H\",male,37,0,1,PC 17596,29.7,C118,C\r\n275,1,3,\"Healy, Miss. Hanora \"\"Nora\"\"\",female,,0,0,370375,7.75,,Q\r\n276,1,1,\"Andrews, Miss. Kornelia Theodosia\",female,63,1,0,13502,77.9583,D7,S\r\n277,0,3,\"Lindblom, Miss. Augusta Charlotta\",female,45,0,0,347073,7.75,,S\r\n278,0,2,\"Parkes, Mr. Francis \"\"Frank\"\"\",male,,0,0,239853,0,,S\r\n279,0,3,\"Rice, Master. Eric\",male,7,4,1,382652,29.125,,Q\r\n280,1,3,\"Abbott, Mrs. Stanton (Rosa Hunt)\",female,35,1,1,C.A. 2673,20.25,,S\r\n281,0,3,\"Duane, Mr. Frank\",male,65,0,0,336439,7.75,,Q\r\n282,0,3,\"Olsson, Mr. Nils Johan Goransson\",male,28,0,0,347464,7.8542,,S\r\n283,0,3,\"de Pelsmaeker, Mr. Alfons\",male,16,0,0,345778,9.5,,S\r\n284,1,3,\"Dorking, Mr. Edward Arthur\",male,19,0,0,A/5. 10482,8.05,,S\r\n285,0,1,\"Smith, Mr. Richard William\",male,,0,0,113056,26,A19,S\r\n286,0,3,\"Stankovic, Mr. Ivan\",male,33,0,0,349239,8.6625,,C\r\n287,1,3,\"de Mulder, Mr. Theodore\",male,30,0,0,345774,9.5,,S\r\n288,0,3,\"Naidenoff, Mr. Penko\",male,22,0,0,349206,7.8958,,S\r\n289,1,2,\"Hosono, Mr. Masabumi\",male,42,0,0,237798,13,,S\r\n290,1,3,\"Connolly, Miss. Kate\",female,22,0,0,370373,7.75,,Q\r\n291,1,1,\"Barber, Miss. Ellen \"\"Nellie\"\"\",female,26,0,0,19877,78.85,,S\r\n292,1,1,\"Bishop, Mrs. Dickinson H (Helen Walton)\",female,19,1,0,11967,91.0792,B49,C\r\n293,0,2,\"Levy, Mr. Rene Jacques\",male,36,0,0,SC/Paris 2163,12.875,D,C\r\n294,0,3,\"Haas, Miss. Aloisia\",female,24,0,0,349236,8.85,,S\r\n295,0,3,\"Mineff, Mr. Ivan\",male,24,0,0,349233,7.8958,,S\r\n296,0,1,\"Lewy, Mr. Ervin G\",male,,0,0,PC 17612,27.7208,,C\r\n297,0,3,\"Hanna, Mr. Mansour\",male,23.5,0,0,2693,7.2292,,C\r\n298,0,1,\"Allison, Miss. Helen Loraine\",female,2,1,2,113781,151.55,C22 C26,S\r\n299,1,1,\"Saalfeld, Mr. Adolphe\",male,,0,0,19988,30.5,C106,S\r\n300,1,1,\"Baxter, Mrs. James (Helene DeLaudeniere Chaput)\",female,50,0,1,PC 17558,247.5208,B58 B60,C\r\n301,1,3,\"Kelly, Miss. Anna Katherine \"\"Annie Kate\"\"\",female,,0,0,9234,7.75,,Q\r\n302,1,3,\"McCoy, Mr. Bernard\",male,,2,0,367226,23.25,,Q\r\n303,0,3,\"Johnson, Mr. William Cahoone Jr\",male,19,0,0,LINE,0,,S\r\n304,1,2,\"Keane, Miss. Nora A\",female,,0,0,226593,12.35,E101,Q\r\n305,0,3,\"Williams, Mr. Howard Hugh \"\"Harry\"\"\",male,,0,0,A/5 2466,8.05,,S\r\n306,1,1,\"Allison, Master. Hudson Trevor\",male,0.92,1,2,113781,151.55,C22 C26,S\r\n307,1,1,\"Fleming, Miss. Margaret\",female,,0,0,17421,110.8833,,C\r\n308,1,1,\"Penasco y Castellana, Mrs. Victor de Satode (Maria Josefa Perez de Soto y Vallejo)\",female,17,1,0,PC 17758,108.9,C65,C\r\n309,0,2,\"Abelson, Mr. Samuel\",male,30,1,0,P/PP 3381,24,,C\r\n310,1,1,\"Francatelli, Miss. Laura Mabel\",female,30,0,0,PC 17485,56.9292,E36,C\r\n311,1,1,\"Hays, Miss. Margaret Bechstein\",female,24,0,0,11767,83.1583,C54,C\r\n312,1,1,\"Ryerson, Miss. Emily Borie\",female,18,2,2,PC 17608,262.375,B57 B59 B63 B66,C\r\n313,0,2,\"Lahtinen, Mrs. William (Anna Sylfven)\",female,26,1,1,250651,26,,S\r\n314,0,3,\"Hendekovic, Mr. Ignjac\",male,28,0,0,349243,7.8958,,S\r\n315,0,2,\"Hart, Mr. Benjamin\",male,43,1,1,F.C.C. 13529,26.25,,S\r\n316,1,3,\"Nilsson, Miss. Helmina Josefina\",female,26,0,0,347470,7.8542,,S\r\n317,1,2,\"Kantor, Mrs. Sinai (Miriam Sternin)\",female,24,1,0,244367,26,,S\r\n318,0,2,\"Moraweck, Dr. Ernest\",male,54,0,0,29011,14,,S\r\n319,1,1,\"Wick, Miss. Mary Natalie\",female,31,0,2,36928,164.8667,C7,S\r\n320,1,1,\"Spedden, Mrs. 
Frederic Oakley (Margaretta Corning Stone)\",female,40,1,1,16966,134.5,E34,C\r\n321,0,3,\"Dennis, Mr. Samuel\",male,22,0,0,A/5 21172,7.25,,S\r\n322,0,3,\"Danoff, Mr. Yoto\",male,27,0,0,349219,7.8958,,S\r\n323,1,2,\"Slayter, Miss. Hilda Mary\",female,30,0,0,234818,12.35,,Q\r\n324,1,2,\"Caldwell, Mrs. Albert Francis (Sylvia Mae Harbaugh)\",female,22,1,1,248738,29,,S\r\n325,0,3,\"Sage, Mr. George John Jr\",male,,8,2,CA. 2343,69.55,,S\r\n326,1,1,\"Young, Miss. Marie Grice\",female,36,0,0,PC 17760,135.6333,C32,C\r\n327,0,3,\"Nysveen, Mr. Johan Hansen\",male,61,0,0,345364,6.2375,,S\r\n328,1,2,\"Ball, Mrs. (Ada E Hall)\",female,36,0,0,28551,13,D,S\r\n329,1,3,\"Goldsmith, Mrs. Frank John (Emily Alice Brown)\",female,31,1,1,363291,20.525,,S\r\n330,1,1,\"Hippach, Miss. Jean Gertrude\",female,16,0,1,111361,57.9792,B18,C\r\n331,1,3,\"McCoy, Miss. Agnes\",female,,2,0,367226,23.25,,Q\r\n332,0,1,\"Partner, Mr. Austen\",male,45.5,0,0,113043,28.5,C124,S\r\n333,0,1,\"Graham, Mr. George Edward\",male,38,0,1,PC 17582,153.4625,C91,S\r\n334,0,3,\"Vander Planke, Mr. Leo Edmondus\",male,16,2,0,345764,18,,S\r\n335,1,1,\"Frauenthal, Mrs. Henry William (Clara Heinsheimer)\",female,,1,0,PC 17611,133.65,,S\r\n336,0,3,\"Denkoff, Mr. Mitto\",male,,0,0,349225,7.8958,,S\r\n337,0,1,\"Pears, Mr. Thomas Clinton\",male,29,1,0,113776,66.6,C2,S\r\n338,1,1,\"Burns, Miss. Elizabeth Margaret\",female,41,0,0,16966,134.5,E40,C\r\n339,1,3,\"Dahl, Mr. Karl Edwart\",male,45,0,0,7598,8.05,,S\r\n340,0,1,\"Blackwell, Mr. Stephen Weart\",male,45,0,0,113784,35.5,T,S\r\n341,1,2,\"Navratil, Master. Edmond Roger\",male,2,1,1,230080,26,F2,S\r\n342,1,1,\"Fortune, Miss. Alice Elizabeth\",female,24,3,2,19950,263,C23 C25 C27,S\r\n343,0,2,\"Collander, Mr. Erik Gustaf\",male,28,0,0,248740,13,,S\r\n344,0,2,\"Sedgwick, Mr. Charles Frederick Waddington\",male,25,0,0,244361,13,,S\r\n345,0,2,\"Fox, Mr. Stanley Hubert\",male,36,0,0,229236,13,,S\r\n346,1,2,\"Brown, Miss. Amelia \"\"Mildred\"\"\",female,24,0,0,248733,13,F33,S\r\n347,1,2,\"Smith, Miss. Marion Elsie\",female,40,0,0,31418,13,,S\r\n348,1,3,\"Davison, Mrs. Thomas Henry (Mary E Finck)\",female,,1,0,386525,16.1,,S\r\n349,1,3,\"Coutts, Master. William Loch \"\"William\"\"\",male,3,1,1,C.A. 37671,15.9,,S\r\n350,0,3,\"Dimic, Mr. Jovan\",male,42,0,0,315088,8.6625,,S\r\n351,0,3,\"Odahl, Mr. Nils Martin\",male,23,0,0,7267,9.225,,S\r\n352,0,1,\"Williams-Lambert, Mr. Fletcher Fellows\",male,,0,0,113510,35,C128,S\r\n353,0,3,\"Elias, Mr. Tannous\",male,15,1,1,2695,7.2292,,C\r\n354,0,3,\"Arnold-Franchi, Mr. Josef\",male,25,1,0,349237,17.8,,S\r\n355,0,3,\"Yousif, Mr. Wazli\",male,,0,0,2647,7.225,,C\r\n356,0,3,\"Vanden Steen, Mr. Leo Peter\",male,28,0,0,345783,9.5,,S\r\n357,1,1,\"Bowerman, Miss. Elsie Edith\",female,22,0,1,113505,55,E33,S\r\n358,0,2,\"Funk, Miss. Annie Clemmer\",female,38,0,0,237671,13,,S\r\n359,1,3,\"McGovern, Miss. Mary\",female,,0,0,330931,7.8792,,Q\r\n360,1,3,\"Mockler, Miss. Helen Mary \"\"Ellie\"\"\",female,,0,0,330980,7.8792,,Q\r\n361,0,3,\"Skoog, Mr. Wilhelm\",male,40,1,4,347088,27.9,,S\r\n362,0,2,\"del Carlo, Mr. Sebastiano\",male,29,1,0,SC/PARIS 2167,27.7208,,C\r\n363,0,3,\"Barbara, Mrs. (Catherine David)\",female,45,0,1,2691,14.4542,,C\r\n364,0,3,\"Asim, Mr. Adola\",male,35,0,0,SOTON/O.Q. 3101310,7.05,,S\r\n365,0,3,\"O'Brien, Mr. Thomas\",male,,1,0,370365,15.5,,Q\r\n366,0,3,\"Adahl, Mr. Mauritz Nils Martin\",male,30,0,0,C 7076,7.25,,S\r\n367,1,1,\"Warren, Mrs. Frank Manley (Anna Sophia Atkinson)\",female,60,1,0,110813,75.25,D37,C\r\n368,1,3,\"Moussa, Mrs. 
(Mantoura Boulos)\",female,,0,0,2626,7.2292,,C\r\n369,1,3,\"Jermyn, Miss. Annie\",female,,0,0,14313,7.75,,Q\r\n370,1,1,\"Aubart, Mme. Leontine Pauline\",female,24,0,0,PC 17477,69.3,B35,C\r\n371,1,1,\"Harder, Mr. George Achilles\",male,25,1,0,11765,55.4417,E50,C\r\n372,0,3,\"Wiklund, Mr. Jakob Alfred\",male,18,1,0,3101267,6.4958,,S\r\n373,0,3,\"Beavan, Mr. William Thomas\",male,19,0,0,323951,8.05,,S\r\n374,0,1,\"Ringhini, Mr. Sante\",male,22,0,0,PC 17760,135.6333,,C\r\n375,0,3,\"Palsson, Miss. Stina Viola\",female,3,3,1,349909,21.075,,S\r\n376,1,1,\"Meyer, Mrs. Edgar Joseph (Leila Saks)\",female,,1,0,PC 17604,82.1708,,C\r\n377,1,3,\"Landergren, Miss. Aurora Adelia\",female,22,0,0,C 7077,7.25,,S\r\n378,0,1,\"Widener, Mr. Harry Elkins\",male,27,0,2,113503,211.5,C82,C\r\n379,0,3,\"Betros, Mr. Tannous\",male,20,0,0,2648,4.0125,,C\r\n380,0,3,\"Gustafsson, Mr. Karl Gideon\",male,19,0,0,347069,7.775,,S\r\n381,1,1,\"Bidois, Miss. Rosalie\",female,42,0,0,PC 17757,227.525,,C\r\n382,1,3,\"Nakid, Miss. Maria (\"\"Mary\"\")\",female,1,0,2,2653,15.7417,,C\r\n383,0,3,\"Tikkanen, Mr. Juho\",male,32,0,0,STON/O 2. 3101293,7.925,,S\r\n384,1,1,\"Holverson, Mrs. Alexander Oskar (Mary Aline Towner)\",female,35,1,0,113789,52,,S\r\n385,0,3,\"Plotcharsky, Mr. Vasil\",male,,0,0,349227,7.8958,,S\r\n386,0,2,\"Davies, Mr. Charles Henry\",male,18,0,0,S.O.C. 14879,73.5,,S\r\n387,0,3,\"Goodwin, Master. Sidney Leonard\",male,1,5,2,CA 2144,46.9,,S\r\n388,1,2,\"Buss, Miss. Kate\",female,36,0,0,27849,13,,S\r\n389,0,3,\"Sadlier, Mr. Matthew\",male,,0,0,367655,7.7292,,Q\r\n390,1,2,\"Lehmann, Miss. Bertha\",female,17,0,0,SC 1748,12,,C\r\n391,1,1,\"Carter, Mr. William Ernest\",male,36,1,2,113760,120,B96 B98,S\r\n392,1,3,\"Jansson, Mr. Carl Olof\",male,21,0,0,350034,7.7958,,S\r\n393,0,3,\"Gustafsson, Mr. Johan Birger\",male,28,2,0,3101277,7.925,,S\r\n394,1,1,\"Newell, Miss. Marjorie\",female,23,1,0,35273,113.275,D36,C\r\n395,1,3,\"Sandstrom, Mrs. Hjalmar (Agnes Charlotta Bengtsson)\",female,24,0,2,PP 9549,16.7,G6,S\r\n396,0,3,\"Johansson, Mr. Erik\",male,22,0,0,350052,7.7958,,S\r\n397,0,3,\"Olsson, Miss. Elina\",female,31,0,0,350407,7.8542,,S\r\n398,0,2,\"McKane, Mr. Peter David\",male,46,0,0,28403,26,,S\r\n399,0,2,\"Pain, Dr. Alfred\",male,23,0,0,244278,10.5,,S\r\n400,1,2,\"Trout, Mrs. William H (Jessie L)\",female,28,0,0,240929,12.65,,S\r\n401,1,3,\"Niskanen, Mr. Juha\",male,39,0,0,STON/O 2. 3101289,7.925,,S\r\n402,0,3,\"Adams, Mr. John\",male,26,0,0,341826,8.05,,S\r\n403,0,3,\"Jussila, Miss. Mari Aina\",female,21,1,0,4137,9.825,,S\r\n404,0,3,\"Hakkarainen, Mr. Pekka Pietari\",male,28,1,0,STON/O2. 3101279,15.85,,S\r\n405,0,3,\"Oreskovic, Miss. Marija\",female,20,0,0,315096,8.6625,,S\r\n406,0,2,\"Gale, Mr. Shadrach\",male,34,1,0,28664,21,,S\r\n407,0,3,\"Widegren, Mr. Carl/Charles Peter\",male,51,0,0,347064,7.75,,S\r\n408,1,2,\"Richards, Master. William Rowe\",male,3,1,1,29106,18.75,,S\r\n409,0,3,\"Birkeland, Mr. Hans Martin Monsen\",male,21,0,0,312992,7.775,,S\r\n410,0,3,\"Lefebre, Miss. Ida\",female,,3,1,4133,25.4667,,S\r\n411,0,3,\"Sdycoff, Mr. Todor\",male,,0,0,349222,7.8958,,S\r\n412,0,3,\"Hart, Mr. Henry\",male,,0,0,394140,6.8583,,Q\r\n413,1,1,\"Minahan, Miss. Daisy E\",female,33,1,0,19928,90,C78,Q\r\n414,0,2,\"Cunningham, Mr. Alfred Fleming\",male,,0,0,239853,0,,S\r\n415,1,3,\"Sundman, Mr. Johan Julian\",male,44,0,0,STON/O 2. 3101269,7.925,,S\r\n416,0,3,\"Meek, Mrs. Thomas (Annie Louise Rowley)\",female,,0,0,343095,8.05,,S\r\n417,1,2,\"Drew, Mrs. 
James Vivian (Lulu Thorne Christian)\",female,34,1,1,28220,32.5,,S\r\n418,1,2,\"Silven, Miss. Lyyli Karoliina\",female,18,0,2,250652,13,,S\r\n419,0,2,\"Matthews, Mr. William John\",male,30,0,0,28228,13,,S\r\n420,0,3,\"Van Impe, Miss. Catharina\",female,10,0,2,345773,24.15,,S\r\n421,0,3,\"Gheorgheff, Mr. Stanio\",male,,0,0,349254,7.8958,,C\r\n422,0,3,\"Charters, Mr. David\",male,21,0,0,A/5. 13032,7.7333,,Q\r\n423,0,3,\"Zimmerman, Mr. Leo\",male,29,0,0,315082,7.875,,S\r\n424,0,3,\"Danbom, Mrs. Ernst Gilbert (Anna Sigrid Maria Brogren)\",female,28,1,1,347080,14.4,,S\r\n425,0,3,\"Rosblom, Mr. Viktor Richard\",male,18,1,1,370129,20.2125,,S\r\n426,0,3,\"Wiseman, Mr. Phillippe\",male,,0,0,A/4. 34244,7.25,,S\r\n427,1,2,\"Clarke, Mrs. Charles V (Ada Maria Winfield)\",female,28,1,0,2003,26,,S\r\n428,1,2,\"Phillips, Miss. Kate Florence (\"\"Mrs Kate Louise Phillips Marshall\"\")\",female,19,0,0,250655,26,,S\r\n429,0,3,\"Flynn, Mr. James\",male,,0,0,364851,7.75,,Q\r\n430,1,3,\"Pickard, Mr. Berk (Berk Trembisky)\",male,32,0,0,SOTON/O.Q. 392078,8.05,E10,S\r\n431,1,1,\"Bjornstrom-Steffansson, Mr. Mauritz Hakan\",male,28,0,0,110564,26.55,C52,S\r\n432,1,3,\"Thorneycroft, Mrs. Percival (Florence Kate White)\",female,,1,0,376564,16.1,,S\r\n433,1,2,\"Louch, Mrs. Charles Alexander (Alice Adelaide Slow)\",female,42,1,0,SC/AH 3085,26,,S\r\n434,0,3,\"Kallio, Mr. Nikolai Erland\",male,17,0,0,STON/O 2. 3101274,7.125,,S\r\n435,0,1,\"Silvey, Mr. William Baird\",male,50,1,0,13507,55.9,E44,S\r\n436,1,1,\"Carter, Miss. Lucile Polk\",female,14,1,2,113760,120,B96 B98,S\r\n437,0,3,\"Ford, Miss. Doolina Margaret \"\"Daisy\"\"\",female,21,2,2,W./C. 6608,34.375,,S\r\n438,1,2,\"Richards, Mrs. Sidney (Emily Hocking)\",female,24,2,3,29106,18.75,,S\r\n439,0,1,\"Fortune, Mr. Mark\",male,64,1,4,19950,263,C23 C25 C27,S\r\n440,0,2,\"Kvillner, Mr. Johan Henrik Johannesson\",male,31,0,0,C.A. 18723,10.5,,S\r\n441,1,2,\"Hart, Mrs. Benjamin (Esther Ada Bloomfield)\",female,45,1,1,F.C.C. 13529,26.25,,S\r\n442,0,3,\"Hampe, Mr. Leon\",male,20,0,0,345769,9.5,,S\r\n443,0,3,\"Petterson, Mr. Johan Emil\",male,25,1,0,347076,7.775,,S\r\n444,1,2,\"Reynaldo, Ms. Encarnacion\",female,28,0,0,230434,13,,S\r\n445,1,3,\"Johannesen-Bratthammer, Mr. Bernt\",male,,0,0,65306,8.1125,,S\r\n446,1,1,\"Dodge, Master. Washington\",male,4,0,2,33638,81.8583,A34,S\r\n447,1,2,\"Mellinger, Miss. Madeleine Violet\",female,13,0,1,250644,19.5,,S\r\n448,1,1,\"Seward, Mr. Frederic Kimber\",male,34,0,0,113794,26.55,,S\r\n449,1,3,\"Baclini, Miss. Marie Catherine\",female,5,2,1,2666,19.2583,,C\r\n450,1,1,\"Peuchen, Major. Arthur Godfrey\",male,52,0,0,113786,30.5,C104,S\r\n451,0,2,\"West, Mr. Edwy Arthur\",male,36,1,2,C.A. 34651,27.75,,S\r\n452,0,3,\"Hagland, Mr. Ingvald Olai Olsen\",male,,1,0,65303,19.9667,,S\r\n453,0,1,\"Foreman, Mr. Benjamin Laventall\",male,30,0,0,113051,27.75,C111,C\r\n454,1,1,\"Goldenberg, Mr. Samuel L\",male,49,1,0,17453,89.1042,C92,C\r\n455,0,3,\"Peduzzi, Mr. Joseph\",male,,0,0,A/5 2817,8.05,,S\r\n456,1,3,\"Jalsevac, Mr. Ivan\",male,29,0,0,349240,7.8958,,C\r\n457,0,1,\"Millet, Mr. Francis Davis\",male,65,0,0,13509,26.55,E38,S\r\n458,1,1,\"Kenyon, Mrs. Frederick R (Marion)\",female,,1,0,17464,51.8625,D21,S\r\n459,1,2,\"Toomey, Miss. Ellen\",female,50,0,0,F.C.C. 13531,10.5,,S\r\n460,0,3,\"O'Connor, Mr. Maurice\",male,,0,0,371060,7.75,,Q\r\n461,1,1,\"Anderson, Mr. Harry\",male,48,0,0,19952,26.55,E12,S\r\n462,0,3,\"Morley, Mr. William\",male,34,0,0,364506,8.05,,S\r\n463,0,1,\"Gee, Mr. Arthur H\",male,47,0,0,111320,38.5,E63,S\r\n464,0,2,\"Milling, Mr. 
Jacob Christian\",male,48,0,0,234360,13,,S\r\n465,0,3,\"Maisner, Mr. Simon\",male,,0,0,A/S 2816,8.05,,S\r\n466,0,3,\"Goncalves, Mr. Manuel Estanslas\",male,38,0,0,SOTON/O.Q. 3101306,7.05,,S\r\n467,0,2,\"Campbell, Mr. William\",male,,0,0,239853,0,,S\r\n468,0,1,\"Smart, Mr. John Montgomery\",male,56,0,0,113792,26.55,,S\r\n469,0,3,\"Scanlan, Mr. James\",male,,0,0,36209,7.725,,Q\r\n470,1,3,\"Baclini, Miss. Helene Barbara\",female,0.75,2,1,2666,19.2583,,C\r\n471,0,3,\"Keefe, Mr. Arthur\",male,,0,0,323592,7.25,,S\r\n472,0,3,\"Cacic, Mr. Luka\",male,38,0,0,315089,8.6625,,S\r\n473,1,2,\"West, Mrs. Edwy Arthur (Ada Mary Worth)\",female,33,1,2,C.A. 34651,27.75,,S\r\n474,1,2,\"Jerwan, Mrs. Amin S (Marie Marthe Thuillard)\",female,23,0,0,SC/AH Basle 541,13.7917,D,C\r\n475,0,3,\"Strandberg, Miss. Ida Sofia\",female,22,0,0,7553,9.8375,,S\r\n476,0,1,\"Clifford, Mr. George Quincy\",male,,0,0,110465,52,A14,S\r\n477,0,2,\"Renouf, Mr. Peter Henry\",male,34,1,0,31027,21,,S\r\n478,0,3,\"Braund, Mr. Lewis Richard\",male,29,1,0,3460,7.0458,,S\r\n479,0,3,\"Karlsson, Mr. Nils August\",male,22,0,0,350060,7.5208,,S\r\n480,1,3,\"Hirvonen, Miss. Hildur E\",female,2,0,1,3101298,12.2875,,S\r\n481,0,3,\"Goodwin, Master. Harold Victor\",male,9,5,2,CA 2144,46.9,,S\r\n482,0,2,\"Frost, Mr. Anthony Wood \"\"Archie\"\"\",male,,0,0,239854,0,,S\r\n483,0,3,\"Rouse, Mr. Richard Henry\",male,50,0,0,A/5 3594,8.05,,S\r\n484,1,3,\"Turkula, Mrs. (Hedwig)\",female,63,0,0,4134,9.5875,,S\r\n485,1,1,\"Bishop, Mr. Dickinson H\",male,25,1,0,11967,91.0792,B49,C\r\n486,0,3,\"Lefebre, Miss. Jeannie\",female,,3,1,4133,25.4667,,S\r\n487,1,1,\"Hoyt, Mrs. Frederick Maxfield (Jane Anne Forby)\",female,35,1,0,19943,90,C93,S\r\n488,0,1,\"Kent, Mr. Edward Austin\",male,58,0,0,11771,29.7,B37,C\r\n489,0,3,\"Somerton, Mr. Francis William\",male,30,0,0,A.5. 18509,8.05,,S\r\n490,1,3,\"Coutts, Master. Eden Leslie \"\"Neville\"\"\",male,9,1,1,C.A. 37671,15.9,,S\r\n491,0,3,\"Hagland, Mr. Konrad Mathias Reiersen\",male,,1,0,65304,19.9667,,S\r\n492,0,3,\"Windelov, Mr. Einar\",male,21,0,0,SOTON/OQ 3101317,7.25,,S\r\n493,0,1,\"Molson, Mr. Harry Markland\",male,55,0,0,113787,30.5,C30,S\r\n494,0,1,\"Artagaveytia, Mr. Ramon\",male,71,0,0,PC 17609,49.5042,,C\r\n495,0,3,\"Stanley, Mr. Edward Roland\",male,21,0,0,A/4 45380,8.05,,S\r\n496,0,3,\"Yousseff, Mr. Gerious\",male,,0,0,2627,14.4583,,C\r\n497,1,1,\"Eustis, Miss. Elizabeth Mussey\",female,54,1,0,36947,78.2667,D20,C\r\n498,0,3,\"Shellard, Mr. Frederick William\",male,,0,0,C.A. 6212,15.1,,S\r\n499,0,1,\"Allison, Mrs. Hudson J C (Bessie Waldo Daniels)\",female,25,1,2,113781,151.55,C22 C26,S\r\n500,0,3,\"Svensson, Mr. Olof\",male,24,0,0,350035,7.7958,,S\r\n501,0,3,\"Calic, Mr. Petar\",male,17,0,0,315086,8.6625,,S\r\n502,0,3,\"Canavan, Miss. Mary\",female,21,0,0,364846,7.75,,Q\r\n503,0,3,\"O'Sullivan, Miss. Bridget Mary\",female,,0,0,330909,7.6292,,Q\r\n504,0,3,\"Laitinen, Miss. Kristina Sofia\",female,37,0,0,4135,9.5875,,S\r\n505,1,1,\"Maioni, Miss. Roberta\",female,16,0,0,110152,86.5,B79,S\r\n506,0,1,\"Penasco y Castellana, Mr. Victor de Satode\",male,18,1,0,PC 17758,108.9,C65,C\r\n507,1,2,\"Quick, Mrs. Frederick Charles (Jane Richards)\",female,33,0,2,26360,26,,S\r\n508,1,1,\"Bradley, Mr. George (\"\"George Arthur Brayton\"\")\",male,,0,0,111427,26.55,,S\r\n509,0,3,\"Olsen, Mr. Henry Margido\",male,28,0,0,C 4001,22.525,,S\r\n510,1,3,\"Lang, Mr. Fang\",male,26,0,0,1601,56.4958,,S\r\n511,1,3,\"Daly, Mr. Eugene Patrick\",male,29,0,0,382651,7.75,,Q\r\n512,0,3,\"Webber, Mr. 
James\",male,,0,0,SOTON/OQ 3101316,8.05,,S\r\n513,1,1,\"McGough, Mr. James Robert\",male,36,0,0,PC 17473,26.2875,E25,S\r\n514,1,1,\"Rothschild, Mrs. Martin (Elizabeth L. Barrett)\",female,54,1,0,PC 17603,59.4,,C\r\n515,0,3,\"Coleff, Mr. Satio\",male,24,0,0,349209,7.4958,,S\r\n516,0,1,\"Walker, Mr. William Anderson\",male,47,0,0,36967,34.0208,D46,S\r\n517,1,2,\"Lemore, Mrs. (Amelia Milley)\",female,34,0,0,C.A. 34260,10.5,F33,S\r\n518,0,3,\"Ryan, Mr. Patrick\",male,,0,0,371110,24.15,,Q\r\n519,1,2,\"Angle, Mrs. William A (Florence \"\"Mary\"\" Agnes Hughes)\",female,36,1,0,226875,26,,S\r\n520,0,3,\"Pavlovic, Mr. Stefo\",male,32,0,0,349242,7.8958,,S\r\n521,1,1,\"Perreault, Miss. Anne\",female,30,0,0,12749,93.5,B73,S\r\n522,0,3,\"Vovk, Mr. Janko\",male,22,0,0,349252,7.8958,,S\r\n523,0,3,\"Lahoud, Mr. Sarkis\",male,,0,0,2624,7.225,,C\r\n524,1,1,\"Hippach, Mrs. Louis Albert (Ida Sophia Fischer)\",female,44,0,1,111361,57.9792,B18,C\r\n525,0,3,\"Kassem, Mr. Fared\",male,,0,0,2700,7.2292,,C\r\n526,0,3,\"Farrell, Mr. James\",male,40.5,0,0,367232,7.75,,Q\r\n527,1,2,\"Ridsdale, Miss. Lucy\",female,50,0,0,W./C. 14258,10.5,,S\r\n528,0,1,\"Farthing, Mr. John\",male,,0,0,PC 17483,221.7792,C95,S\r\n529,0,3,\"Salonen, Mr. Johan Werner\",male,39,0,0,3101296,7.925,,S\r\n530,0,2,\"Hocking, Mr. Richard George\",male,23,2,1,29104,11.5,,S\r\n531,1,2,\"Quick, Miss. Phyllis May\",female,2,1,1,26360,26,,S\r\n532,0,3,\"Toufik, Mr. Nakli\",male,,0,0,2641,7.2292,,C\r\n533,0,3,\"Elias, Mr. Joseph Jr\",male,17,1,1,2690,7.2292,,C\r\n534,1,3,\"Peter, Mrs. Catherine (Catherine Rizk)\",female,,0,2,2668,22.3583,,C\r\n535,0,3,\"Cacic, Miss. Marija\",female,30,0,0,315084,8.6625,,S\r\n536,1,2,\"Hart, Miss. Eva Miriam\",female,7,0,2,F.C.C. 13529,26.25,,S\r\n537,0,1,\"Butt, Major. Archibald Willingham\",male,45,0,0,113050,26.55,B38,S\r\n538,1,1,\"LeRoy, Miss. Bertha\",female,30,0,0,PC 17761,106.425,,C\r\n539,0,3,\"Risien, Mr. Samuel Beard\",male,,0,0,364498,14.5,,S\r\n540,1,1,\"Frolicher, Miss. Hedwig Margaritha\",female,22,0,2,13568,49.5,B39,C\r\n541,1,1,\"Crosby, Miss. Harriet R\",female,36,0,2,WE/P 5735,71,B22,S\r\n542,0,3,\"Andersson, Miss. Ingeborg Constanzia\",female,9,4,2,347082,31.275,,S\r\n543,0,3,\"Andersson, Miss. Sigrid Elisabeth\",female,11,4,2,347082,31.275,,S\r\n544,1,2,\"Beane, Mr. Edward\",male,32,1,0,2908,26,,S\r\n545,0,1,\"Douglas, Mr. Walter Donald\",male,50,1,0,PC 17761,106.425,C86,C\r\n546,0,1,\"Nicholson, Mr. Arthur Ernest\",male,64,0,0,693,26,,S\r\n547,1,2,\"Beane, Mrs. Edward (Ethel Clarke)\",female,19,1,0,2908,26,,S\r\n548,1,2,\"Padro y Manent, Mr. Julian\",male,,0,0,SC/PARIS 2146,13.8625,,C\r\n549,0,3,\"Goldsmith, Mr. Frank John\",male,33,1,1,363291,20.525,,S\r\n550,1,2,\"Davies, Master. John Morgan Jr\",male,8,1,1,C.A. 33112,36.75,,S\r\n551,1,1,\"Thayer, Mr. John Borland Jr\",male,17,0,2,17421,110.8833,C70,C\r\n552,0,2,\"Sharp, Mr. Percival James R\",male,27,0,0,244358,26,,S\r\n553,0,3,\"O'Brien, Mr. Timothy\",male,,0,0,330979,7.8292,,Q\r\n554,1,3,\"Leeni, Mr. Fahim (\"\"Philip Zenni\"\")\",male,22,0,0,2620,7.225,,C\r\n555,1,3,\"Ohman, Miss. Velin\",female,22,0,0,347085,7.775,,S\r\n556,0,1,\"Wright, Mr. George\",male,62,0,0,113807,26.55,,S\r\n557,1,1,\"Duff Gordon, Lady. (Lucille Christiana Sutherland) (\"\"Mrs Morgan\"\")\",female,48,1,0,11755,39.6,A16,C\r\n558,0,1,\"Robbins, Mr. Victor\",male,,0,0,PC 17757,227.525,,C\r\n559,1,1,\"Taussig, Mrs. Emil (Tillie Mandelbaum)\",female,39,1,1,110413,79.65,E67,S\r\n560,1,3,\"de Messemaeker, Mrs. 
Guillaume Joseph (Emma)\",female,36,1,0,345572,17.4,,S\r\n561,0,3,\"Morrow, Mr. Thomas Rowan\",male,,0,0,372622,7.75,,Q\r\n562,0,3,\"Sivic, Mr. Husein\",male,40,0,0,349251,7.8958,,S\r\n563,0,2,\"Norman, Mr. Robert Douglas\",male,28,0,0,218629,13.5,,S\r\n564,0,3,\"Simmons, Mr. John\",male,,0,0,SOTON/OQ 392082,8.05,,S\r\n565,0,3,\"Meanwell, Miss. (Marion Ogden)\",female,,0,0,SOTON/O.Q. 392087,8.05,,S\r\n566,0,3,\"Davies, Mr. Alfred J\",male,24,2,0,A/4 48871,24.15,,S\r\n567,0,3,\"Stoytcheff, Mr. Ilia\",male,19,0,0,349205,7.8958,,S\r\n568,0,3,\"Palsson, Mrs. Nils (Alma Cornelia Berglund)\",female,29,0,4,349909,21.075,,S\r\n569,0,3,\"Doharr, Mr. Tannous\",male,,0,0,2686,7.2292,,C\r\n570,1,3,\"Jonsson, Mr. Carl\",male,32,0,0,350417,7.8542,,S\r\n571,1,2,\"Harris, Mr. George\",male,62,0,0,S.W./PP 752,10.5,,S\r\n572,1,1,\"Appleton, Mrs. Edward Dale (Charlotte Lamson)\",female,53,2,0,11769,51.4792,C101,S\r\n573,1,1,\"Flynn, Mr. John Irwin (\"\"Irving\"\")\",male,36,0,0,PC 17474,26.3875,E25,S\r\n574,1,3,\"Kelly, Miss. Mary\",female,,0,0,14312,7.75,,Q\r\n575,0,3,\"Rush, Mr. Alfred George John\",male,16,0,0,A/4. 20589,8.05,,S\r\n576,0,3,\"Patchett, Mr. George\",male,19,0,0,358585,14.5,,S\r\n577,1,2,\"Garside, Miss. Ethel\",female,34,0,0,243880,13,,S\r\n578,1,1,\"Silvey, Mrs. William Baird (Alice Munger)\",female,39,1,0,13507,55.9,E44,S\r\n579,0,3,\"Caram, Mrs. Joseph (Maria Elias)\",female,,1,0,2689,14.4583,,C\r\n580,1,3,\"Jussila, Mr. Eiriik\",male,32,0,0,STON/O 2. 3101286,7.925,,S\r\n581,1,2,\"Christy, Miss. Julie Rachel\",female,25,1,1,237789,30,,S\r\n582,1,1,\"Thayer, Mrs. John Borland (Marian Longstreth Morris)\",female,39,1,1,17421,110.8833,C68,C\r\n583,0,2,\"Downton, Mr. William James\",male,54,0,0,28403,26,,S\r\n584,0,1,\"Ross, Mr. John Hugo\",male,36,0,0,13049,40.125,A10,C\r\n585,0,3,\"Paulner, Mr. Uscher\",male,,0,0,3411,8.7125,,C\r\n586,1,1,\"Taussig, Miss. Ruth\",female,18,0,2,110413,79.65,E68,S\r\n587,0,2,\"Jarvis, Mr. John Denzil\",male,47,0,0,237565,15,,S\r\n588,1,1,\"Frolicher-Stehli, Mr. Maxmillian\",male,60,1,1,13567,79.2,B41,C\r\n589,0,3,\"Gilinski, Mr. Eliezer\",male,22,0,0,14973,8.05,,S\r\n590,0,3,\"Murdlin, Mr. Joseph\",male,,0,0,A./5. 3235,8.05,,S\r\n591,0,3,\"Rintamaki, Mr. Matti\",male,35,0,0,STON/O 2. 3101273,7.125,,S\r\n592,1,1,\"Stephenson, Mrs. Walter Bertram (Martha Eustis)\",female,52,1,0,36947,78.2667,D20,C\r\n593,0,3,\"Elsbury, Mr. William James\",male,47,0,0,A/5 3902,7.25,,S\r\n594,0,3,\"Bourke, Miss. Mary\",female,,0,2,364848,7.75,,Q\r\n595,0,2,\"Chapman, Mr. John Henry\",male,37,1,0,SC/AH 29037,26,,S\r\n596,0,3,\"Van Impe, Mr. Jean Baptiste\",male,36,1,1,345773,24.15,,S\r\n597,1,2,\"Leitch, Miss. Jessie Wills\",female,,0,0,248727,33,,S\r\n598,0,3,\"Johnson, Mr. Alfred\",male,49,0,0,LINE,0,,S\r\n599,0,3,\"Boulos, Mr. Hanna\",male,,0,0,2664,7.225,,C\r\n600,1,1,\"Duff Gordon, Sir. Cosmo Edmund (\"\"Mr Morgan\"\")\",male,49,1,0,PC 17485,56.9292,A20,C\r\n601,1,2,\"Jacobsohn, Mrs. Sidney Samuel (Amy Frances Christy)\",female,24,2,1,243847,27,,S\r\n602,0,3,\"Slabenoff, Mr. Petco\",male,,0,0,349214,7.8958,,S\r\n603,0,1,\"Harrington, Mr. Charles H\",male,,0,0,113796,42.4,,S\r\n604,0,3,\"Torber, Mr. Ernst William\",male,44,0,0,364511,8.05,,S\r\n605,1,1,\"Homer, Mr. Harry (\"\"Mr E Haven\"\")\",male,35,0,0,111426,26.55,,C\r\n606,0,3,\"Lindell, Mr. Edvard Bengtsson\",male,36,1,0,349910,15.55,,S\r\n607,0,3,\"Karaic, Mr. Milan\",male,30,0,0,349246,7.8958,,S\r\n608,1,1,\"Daniel, Mr. Robert Williams\",male,27,0,0,113804,30.5,,S\r\n609,1,2,\"Laroche, Mrs. 
Joseph (Juliette Marie Louise Lafargue)\",female,22,1,2,SC/Paris 2123,41.5792,,C\r\n610,1,1,\"Shutes, Miss. Elizabeth W\",female,40,0,0,PC 17582,153.4625,C125,S\r\n611,0,3,\"Andersson, Mrs. Anders Johan (Alfrida Konstantia Brogren)\",female,39,1,5,347082,31.275,,S\r\n612,0,3,\"Jardin, Mr. Jose Neto\",male,,0,0,SOTON/O.Q. 3101305,7.05,,S\r\n613,1,3,\"Murphy, Miss. Margaret Jane\",female,,1,0,367230,15.5,,Q\r\n614,0,3,\"Horgan, Mr. John\",male,,0,0,370377,7.75,,Q\r\n615,0,3,\"Brocklebank, Mr. William Alfred\",male,35,0,0,364512,8.05,,S\r\n616,1,2,\"Herman, Miss. Alice\",female,24,1,2,220845,65,,S\r\n617,0,3,\"Danbom, Mr. Ernst Gilbert\",male,34,1,1,347080,14.4,,S\r\n618,0,3,\"Lobb, Mrs. William Arthur (Cordelia K Stanlick)\",female,26,1,0,A/5. 3336,16.1,,S\r\n619,1,2,\"Becker, Miss. Marion Louise\",female,4,2,1,230136,39,F4,S\r\n620,0,2,\"Gavey, Mr. Lawrence\",male,26,0,0,31028,10.5,,S\r\n621,0,3,\"Yasbeck, Mr. Antoni\",male,27,1,0,2659,14.4542,,C\r\n622,1,1,\"Kimball, Mr. Edwin Nelson Jr\",male,42,1,0,11753,52.5542,D19,S\r\n623,1,3,\"Nakid, Mr. Sahid\",male,20,1,1,2653,15.7417,,C\r\n624,0,3,\"Hansen, Mr. Henry Damsgaard\",male,21,0,0,350029,7.8542,,S\r\n625,0,3,\"Bowen, Mr. David John \"\"Dai\"\"\",male,21,0,0,54636,16.1,,S\r\n626,0,1,\"Sutton, Mr. Frederick\",male,61,0,0,36963,32.3208,D50,S\r\n627,0,2,\"Kirkland, Rev. Charles Leonard\",male,57,0,0,219533,12.35,,Q\r\n628,1,1,\"Longley, Miss. Gretchen Fiske\",female,21,0,0,13502,77.9583,D9,S\r\n629,0,3,\"Bostandyeff, Mr. Guentcho\",male,26,0,0,349224,7.8958,,S\r\n630,0,3,\"O'Connell, Mr. Patrick D\",male,,0,0,334912,7.7333,,Q\r\n631,1,1,\"Barkworth, Mr. Algernon Henry Wilson\",male,80,0,0,27042,30,A23,S\r\n632,0,3,\"Lundahl, Mr. Johan Svensson\",male,51,0,0,347743,7.0542,,S\r\n633,1,1,\"Stahelin-Maeglin, Dr. Max\",male,32,0,0,13214,30.5,B50,C\r\n634,0,1,\"Parr, Mr. William Henry Marsh\",male,,0,0,112052,0,,S\r\n635,0,3,\"Skoog, Miss. Mabel\",female,9,3,2,347088,27.9,,S\r\n636,1,2,\"Davis, Miss. Mary\",female,28,0,0,237668,13,,S\r\n637,0,3,\"Leinonen, Mr. Antti Gustaf\",male,32,0,0,STON/O 2. 3101292,7.925,,S\r\n638,0,2,\"Collyer, Mr. Harvey\",male,31,1,1,C.A. 31921,26.25,,S\r\n639,0,3,\"Panula, Mrs. Juha (Maria Emilia Ojala)\",female,41,0,5,3101295,39.6875,,S\r\n640,0,3,\"Thorneycroft, Mr. Percival\",male,,1,0,376564,16.1,,S\r\n641,0,3,\"Jensen, Mr. Hans Peder\",male,20,0,0,350050,7.8542,,S\r\n642,1,1,\"Sagesser, Mlle. Emma\",female,24,0,0,PC 17477,69.3,B35,C\r\n643,0,3,\"Skoog, Miss. Margit Elizabeth\",female,2,3,2,347088,27.9,,S\r\n644,1,3,\"Foo, Mr. Choong\",male,,0,0,1601,56.4958,,S\r\n645,1,3,\"Baclini, Miss. Eugenie\",female,0.75,2,1,2666,19.2583,,C\r\n646,1,1,\"Harper, Mr. Henry Sleeper\",male,48,1,0,PC 17572,76.7292,D33,C\r\n647,0,3,\"Cor, Mr. Liudevit\",male,19,0,0,349231,7.8958,,S\r\n648,1,1,\"Simonius-Blumer, Col. Oberst Alfons\",male,56,0,0,13213,35.5,A26,C\r\n649,0,3,\"Willey, Mr. Edward\",male,,0,0,S.O./P.P. 751,7.55,,S\r\n650,1,3,\"Stanley, Miss. Amy Zillah Elsie\",female,23,0,0,CA. 2314,7.55,,S\r\n651,0,3,\"Mitkoff, Mr. Mito\",male,,0,0,349221,7.8958,,S\r\n652,1,2,\"Doling, Miss. Elsie\",female,18,0,1,231919,23,,S\r\n653,0,3,\"Kalvik, Mr. Johannes Halvorsen\",male,21,0,0,8475,8.4333,,S\r\n654,1,3,\"O'Leary, Miss. Hanora \"\"Norah\"\"\",female,,0,0,330919,7.8292,,Q\r\n655,0,3,\"Hegarty, Miss. Hanora \"\"Nora\"\"\",female,18,0,0,365226,6.75,,Q\r\n656,0,2,\"Hickman, Mr. Leonard Mark\",male,24,2,0,S.O.C. 14879,73.5,,S\r\n657,0,3,\"Radeff, Mr. Alexander\",male,,0,0,349223,7.8958,,S\r\n658,0,3,\"Bourke, Mrs. 
John (Catherine)\",female,32,1,1,364849,15.5,,Q\r\n659,0,2,\"Eitemiller, Mr. George Floyd\",male,23,0,0,29751,13,,S\r\n660,0,1,\"Newell, Mr. Arthur Webster\",male,58,0,2,35273,113.275,D48,C\r\n661,1,1,\"Frauenthal, Dr. Henry William\",male,50,2,0,PC 17611,133.65,,S\r\n662,0,3,\"Badt, Mr. Mohamed\",male,40,0,0,2623,7.225,,C\r\n663,0,1,\"Colley, Mr. Edward Pomeroy\",male,47,0,0,5727,25.5875,E58,S\r\n664,0,3,\"Coleff, Mr. Peju\",male,36,0,0,349210,7.4958,,S\r\n665,1,3,\"Lindqvist, Mr. Eino William\",male,20,1,0,STON/O 2. 3101285,7.925,,S\r\n666,0,2,\"Hickman, Mr. Lewis\",male,32,2,0,S.O.C. 14879,73.5,,S\r\n667,0,2,\"Butler, Mr. Reginald Fenton\",male,25,0,0,234686,13,,S\r\n668,0,3,\"Rommetvedt, Mr. Knud Paust\",male,,0,0,312993,7.775,,S\r\n669,0,3,\"Cook, Mr. Jacob\",male,43,0,0,A/5 3536,8.05,,S\r\n670,1,1,\"Taylor, Mrs. Elmer Zebley (Juliet Cummins Wright)\",female,,1,0,19996,52,C126,S\r\n671,1,2,\"Brown, Mrs. Thomas William Solomon (Elizabeth Catherine Ford)\",female,40,1,1,29750,39,,S\r\n672,0,1,\"Davidson, Mr. Thornton\",male,31,1,0,F.C. 12750,52,B71,S\r\n673,0,2,\"Mitchell, Mr. Henry Michael\",male,70,0,0,C.A. 24580,10.5,,S\r\n674,1,2,\"Wilhelms, Mr. Charles\",male,31,0,0,244270,13,,S\r\n675,0,2,\"Watson, Mr. Ennis Hastings\",male,,0,0,239856,0,,S\r\n676,0,3,\"Edvardsson, Mr. Gustaf Hjalmar\",male,18,0,0,349912,7.775,,S\r\n677,0,3,\"Sawyer, Mr. Frederick Charles\",male,24.5,0,0,342826,8.05,,S\r\n678,1,3,\"Turja, Miss. Anna Sofia\",female,18,0,0,4138,9.8417,,S\r\n679,0,3,\"Goodwin, Mrs. Frederick (Augusta Tyler)\",female,43,1,6,CA 2144,46.9,,S\r\n680,1,1,\"Cardeza, Mr. Thomas Drake Martinez\",male,36,0,1,PC 17755,512.3292,B51 B53 B55,C\r\n681,0,3,\"Peters, Miss. Katie\",female,,0,0,330935,8.1375,,Q\r\n682,1,1,\"Hassab, Mr. Hammad\",male,27,0,0,PC 17572,76.7292,D49,C\r\n683,0,3,\"Olsvigen, Mr. Thor Anderson\",male,20,0,0,6563,9.225,,S\r\n684,0,3,\"Goodwin, Mr. Charles Edward\",male,14,5,2,CA 2144,46.9,,S\r\n685,0,2,\"Brown, Mr. Thomas William Solomon\",male,60,1,1,29750,39,,S\r\n686,0,2,\"Laroche, Mr. Joseph Philippe Lemercier\",male,25,1,2,SC/Paris 2123,41.5792,,C\r\n687,0,3,\"Panula, Mr. Jaako Arnold\",male,14,4,1,3101295,39.6875,,S\r\n688,0,3,\"Dakic, Mr. Branko\",male,19,0,0,349228,10.1708,,S\r\n689,0,3,\"Fischer, Mr. Eberhard Thelander\",male,18,0,0,350036,7.7958,,S\r\n690,1,1,\"Madill, Miss. Georgette Alexandra\",female,15,0,1,24160,211.3375,B5,S\r\n691,1,1,\"Dick, Mr. Albert Adrian\",male,31,1,0,17474,57,B20,S\r\n692,1,3,\"Karun, Miss. Manca\",female,4,0,1,349256,13.4167,,C\r\n693,1,3,\"Lam, Mr. Ali\",male,,0,0,1601,56.4958,,S\r\n694,0,3,\"Saad, Mr. Khalil\",male,25,0,0,2672,7.225,,C\r\n695,0,1,\"Weir, Col. John\",male,60,0,0,113800,26.55,,S\r\n696,0,2,\"Chapman, Mr. Charles Henry\",male,52,0,0,248731,13.5,,S\r\n697,0,3,\"Kelly, Mr. James\",male,44,0,0,363592,8.05,,S\r\n698,1,3,\"Mullens, Miss. Katherine \"\"Katie\"\"\",female,,0,0,35852,7.7333,,Q\r\n699,0,1,\"Thayer, Mr. John Borland\",male,49,1,1,17421,110.8833,C68,C\r\n700,0,3,\"Humblen, Mr. Adolf Mathias Nicolai Olsen\",male,42,0,0,348121,7.65,F G63,S\r\n701,1,1,\"Astor, Mrs. John Jacob (Madeleine Talmadge Force)\",female,18,1,0,PC 17757,227.525,C62 C64,C\r\n702,1,1,\"Silverthorne, Mr. Spencer Victor\",male,35,0,0,PC 17475,26.2875,E24,S\r\n703,0,3,\"Barbara, Miss. Saiide\",female,18,0,1,2691,14.4542,,C\r\n704,0,3,\"Gallagher, Mr. Martin\",male,25,0,0,36864,7.7417,,Q\r\n705,0,3,\"Hansen, Mr. Henrik Juul\",male,26,1,0,350025,7.8542,,S\r\n706,0,2,\"Morley, Mr. 
Henry Samuel (\"\"Mr Henry Marshall\"\")\",male,39,0,0,250655,26,,S\r\n707,1,2,\"Kelly, Mrs. Florence \"\"Fannie\"\"\",female,45,0,0,223596,13.5,,S\r\n708,1,1,\"Calderhead, Mr. Edward Pennington\",male,42,0,0,PC 17476,26.2875,E24,S\r\n709,1,1,\"Cleaver, Miss. Alice\",female,22,0,0,113781,151.55,,S\r\n710,1,3,\"Moubarek, Master. Halim Gonios (\"\"William George\"\")\",male,,1,1,2661,15.2458,,C\r\n711,1,1,\"Mayne, Mlle. Berthe Antonine (\"\"Mrs de Villiers\"\")\",female,24,0,0,PC 17482,49.5042,C90,C\r\n712,0,1,\"Klaber, Mr. Herman\",male,,0,0,113028,26.55,C124,S\r\n713,1,1,\"Taylor, Mr. Elmer Zebley\",male,48,1,0,19996,52,C126,S\r\n714,0,3,\"Larsson, Mr. August Viktor\",male,29,0,0,7545,9.4833,,S\r\n715,0,2,\"Greenberg, Mr. Samuel\",male,52,0,0,250647,13,,S\r\n716,0,3,\"Soholt, Mr. Peter Andreas Lauritz Andersen\",male,19,0,0,348124,7.65,F G73,S\r\n717,1,1,\"Endres, Miss. Caroline Louise\",female,38,0,0,PC 17757,227.525,C45,C\r\n718,1,2,\"Troutt, Miss. Edwina Celia \"\"Winnie\"\"\",female,27,0,0,34218,10.5,E101,S\r\n719,0,3,\"McEvoy, Mr. Michael\",male,,0,0,36568,15.5,,Q\r\n720,0,3,\"Johnson, Mr. Malkolm Joackim\",male,33,0,0,347062,7.775,,S\r\n721,1,2,\"Harper, Miss. Annie Jessie \"\"Nina\"\"\",female,6,0,1,248727,33,,S\r\n722,0,3,\"Jensen, Mr. Svend Lauritz\",male,17,1,0,350048,7.0542,,S\r\n723,0,2,\"Gillespie, Mr. William Henry\",male,34,0,0,12233,13,,S\r\n724,0,2,\"Hodges, Mr. Henry Price\",male,50,0,0,250643,13,,S\r\n725,1,1,\"Chambers, Mr. Norman Campbell\",male,27,1,0,113806,53.1,E8,S\r\n726,0,3,\"Oreskovic, Mr. Luka\",male,20,0,0,315094,8.6625,,S\r\n727,1,2,\"Renouf, Mrs. Peter Henry (Lillian Jefferys)\",female,30,3,0,31027,21,,S\r\n728,1,3,\"Mannion, Miss. Margareth\",female,,0,0,36866,7.7375,,Q\r\n729,0,2,\"Bryhl, Mr. Kurt Arnold Gottfrid\",male,25,1,0,236853,26,,S\r\n730,0,3,\"Ilmakangas, Miss. Pieta Sofia\",female,25,1,0,STON/O2. 3101271,7.925,,S\r\n731,1,1,\"Allen, Miss. Elisabeth Walton\",female,29,0,0,24160,211.3375,B5,S\r\n732,0,3,\"Hassan, Mr. Houssein G N\",male,11,0,0,2699,18.7875,,C\r\n733,0,2,\"Knight, Mr. Robert J\",male,,0,0,239855,0,,S\r\n734,0,2,\"Berriman, Mr. William John\",male,23,0,0,28425,13,,S\r\n735,0,2,\"Troupiansky, Mr. Moses Aaron\",male,23,0,0,233639,13,,S\r\n736,0,3,\"Williams, Mr. Leslie\",male,28.5,0,0,54636,16.1,,S\r\n737,0,3,\"Ford, Mrs. Edward (Margaret Ann Watson)\",female,48,1,3,W./C. 6608,34.375,,S\r\n738,1,1,\"Lesurer, Mr. Gustave J\",male,35,0,0,PC 17755,512.3292,B101,C\r\n739,0,3,\"Ivanoff, Mr. Kanio\",male,,0,0,349201,7.8958,,S\r\n740,0,3,\"Nankoff, Mr. Minko\",male,,0,0,349218,7.8958,,S\r\n741,1,1,\"Hawksford, Mr. Walter James\",male,,0,0,16988,30,D45,S\r\n742,0,1,\"Cavendish, Mr. Tyrell William\",male,36,1,0,19877,78.85,C46,S\r\n743,1,1,\"Ryerson, Miss. Susan Parker \"\"Suzette\"\"\",female,21,2,2,PC 17608,262.375,B57 B59 B63 B66,C\r\n744,0,3,\"McNamee, Mr. Neal\",male,24,1,0,376566,16.1,,S\r\n745,1,3,\"Stranden, Mr. Juho\",male,31,0,0,STON/O 2. 3101288,7.925,,S\r\n746,0,1,\"Crosby, Capt. Edward Gifford\",male,70,1,1,WE/P 5735,71,B22,S\r\n747,0,3,\"Abbott, Mr. Rossmore Edward\",male,16,1,1,C.A. 2673,20.25,,S\r\n748,1,2,\"Sinkkonen, Miss. Anna\",female,30,0,0,250648,13,,S\r\n749,0,1,\"Marvin, Mr. Daniel Warner\",male,19,1,0,113773,53.1,D30,S\r\n750,0,3,\"Connaghton, Mr. Michael\",male,31,0,0,335097,7.75,,Q\r\n751,1,2,\"Wells, Miss. Joan\",female,4,1,1,29103,23,,S\r\n752,1,3,\"Moor, Master. Meier\",male,6,0,1,392096,12.475,E121,S\r\n753,0,3,\"Vande Velde, Mr. Johannes Joseph\",male,33,0,0,345780,9.5,,S\r\n754,0,3,\"Jonkoff, Mr. 
Lalio\",male,23,0,0,349204,7.8958,,S\r\n755,1,2,\"Herman, Mrs. Samuel (Jane Laver)\",female,48,1,2,220845,65,,S\r\n756,1,2,\"Hamalainen, Master. Viljo\",male,0.67,1,1,250649,14.5,,S\r\n757,0,3,\"Carlsson, Mr. August Sigfrid\",male,28,0,0,350042,7.7958,,S\r\n758,0,2,\"Bailey, Mr. Percy Andrew\",male,18,0,0,29108,11.5,,S\r\n759,0,3,\"Theobald, Mr. Thomas Leonard\",male,34,0,0,363294,8.05,,S\r\n760,1,1,\"Rothes, the Countess. of (Lucy Noel Martha Dyer-Edwards)\",female,33,0,0,110152,86.5,B77,S\r\n761,0,3,\"Garfirth, Mr. John\",male,,0,0,358585,14.5,,S\r\n762,0,3,\"Nirva, Mr. Iisakki Antino Aijo\",male,41,0,0,SOTON/O2 3101272,7.125,,S\r\n763,1,3,\"Barah, Mr. Hanna Assi\",male,20,0,0,2663,7.2292,,C\r\n764,1,1,\"Carter, Mrs. William Ernest (Lucile Polk)\",female,36,1,2,113760,120,B96 B98,S\r\n765,0,3,\"Eklund, Mr. Hans Linus\",male,16,0,0,347074,7.775,,S\r\n766,1,1,\"Hogeboom, Mrs. John C (Anna Andrews)\",female,51,1,0,13502,77.9583,D11,S\r\n767,0,1,\"Brewe, Dr. Arthur Jackson\",male,,0,0,112379,39.6,,C\r\n768,0,3,\"Mangan, Miss. Mary\",female,30.5,0,0,364850,7.75,,Q\r\n769,0,3,\"Moran, Mr. Daniel J\",male,,1,0,371110,24.15,,Q\r\n770,0,3,\"Gronnestad, Mr. Daniel Danielsen\",male,32,0,0,8471,8.3625,,S\r\n771,0,3,\"Lievens, Mr. Rene Aime\",male,24,0,0,345781,9.5,,S\r\n772,0,3,\"Jensen, Mr. Niels Peder\",male,48,0,0,350047,7.8542,,S\r\n773,0,2,\"Mack, Mrs. (Mary)\",female,57,0,0,S.O./P.P. 3,10.5,E77,S\r\n774,0,3,\"Elias, Mr. Dibo\",male,,0,0,2674,7.225,,C\r\n775,1,2,\"Hocking, Mrs. Elizabeth (Eliza Needs)\",female,54,1,3,29105,23,,S\r\n776,0,3,\"Myhrman, Mr. Pehr Fabian Oliver Malkolm\",male,18,0,0,347078,7.75,,S\r\n777,0,3,\"Tobin, Mr. Roger\",male,,0,0,383121,7.75,F38,Q\r\n778,1,3,\"Emanuel, Miss. Virginia Ethel\",female,5,0,0,364516,12.475,,S\r\n779,0,3,\"Kilgannon, Mr. Thomas J\",male,,0,0,36865,7.7375,,Q\r\n780,1,1,\"Robert, Mrs. Edward Scott (Elisabeth Walton McMillan)\",female,43,0,1,24160,211.3375,B3,S\r\n781,1,3,\"Ayoub, Miss. Banoura\",female,13,0,0,2687,7.2292,,C\r\n782,1,1,\"Dick, Mrs. Albert Adrian (Vera Gillespie)\",female,17,1,0,17474,57,B20,S\r\n783,0,1,\"Long, Mr. Milton Clyde\",male,29,0,0,113501,30,D6,S\r\n784,0,3,\"Johnston, Mr. Andrew G\",male,,1,2,W./C. 6607,23.45,,S\r\n785,0,3,\"Ali, Mr. William\",male,25,0,0,SOTON/O.Q. 3101312,7.05,,S\r\n786,0,3,\"Harmer, Mr. Abraham (David Lishin)\",male,25,0,0,374887,7.25,,S\r\n787,1,3,\"Sjoblom, Miss. Anna Sofia\",female,18,0,0,3101265,7.4958,,S\r\n788,0,3,\"Rice, Master. George Hugh\",male,8,4,1,382652,29.125,,Q\r\n789,1,3,\"Dean, Master. Bertram Vere\",male,1,1,2,C.A. 2315,20.575,,S\r\n790,0,1,\"Guggenheim, Mr. Benjamin\",male,46,0,0,PC 17593,79.2,B82 B84,C\r\n791,0,3,\"Keane, Mr. Andrew \"\"Andy\"\"\",male,,0,0,12460,7.75,,Q\r\n792,0,2,\"Gaskell, Mr. Alfred\",male,16,0,0,239865,26,,S\r\n793,0,3,\"Sage, Miss. Stella Anna\",female,,8,2,CA. 2343,69.55,,S\r\n794,0,1,\"Hoyt, Mr. William Fisher\",male,,0,0,PC 17600,30.6958,,C\r\n795,0,3,\"Dantcheff, Mr. Ristiu\",male,25,0,0,349203,7.8958,,S\r\n796,0,2,\"Otter, Mr. Richard\",male,39,0,0,28213,13,,S\r\n797,1,1,\"Leader, Dr. Alice (Farnham)\",female,49,0,0,17465,25.9292,D17,S\r\n798,1,3,\"Osman, Mrs. Mara\",female,31,0,0,349244,8.6833,,S\r\n799,0,3,\"Ibrahim Shawah, Mr. Yousseff\",male,30,0,0,2685,7.2292,,C\r\n800,0,3,\"Van Impe, Mrs. Jean Baptiste (Rosalie Paula Govaert)\",female,30,1,1,345773,24.15,,S\r\n801,0,2,\"Ponesell, Mr. Martin\",male,34,0,0,250647,13,,S\r\n802,1,2,\"Collyer, Mrs. Harvey (Charlotte Annie Tate)\",female,31,1,1,C.A. 31921,26.25,,S\r\n803,1,1,\"Carter, Master. 
William Thornton II\",male,11,1,2,113760,120,B96 B98,S\r\n804,1,3,\"Thomas, Master. Assad Alexander\",male,0.42,0,1,2625,8.5167,,C\r\n805,1,3,\"Hedman, Mr. Oskar Arvid\",male,27,0,0,347089,6.975,,S\r\n806,0,3,\"Johansson, Mr. Karl Johan\",male,31,0,0,347063,7.775,,S\r\n807,0,1,\"Andrews, Mr. Thomas Jr\",male,39,0,0,112050,0,A36,S\r\n808,0,3,\"Pettersson, Miss. Ellen Natalia\",female,18,0,0,347087,7.775,,S\r\n809,0,2,\"Meyer, Mr. August\",male,39,0,0,248723,13,,S\r\n810,1,1,\"Chambers, Mrs. Norman Campbell (Bertha Griggs)\",female,33,1,0,113806,53.1,E8,S\r\n811,0,3,\"Alexander, Mr. William\",male,26,0,0,3474,7.8875,,S\r\n812,0,3,\"Lester, Mr. James\",male,39,0,0,A/4 48871,24.15,,S\r\n813,0,2,\"Slemen, Mr. Richard James\",male,35,0,0,28206,10.5,,S\r\n814,0,3,\"Andersson, Miss. Ebba Iris Alfrida\",female,6,4,2,347082,31.275,,S\r\n815,0,3,\"Tomlin, Mr. Ernest Portage\",male,30.5,0,0,364499,8.05,,S\r\n816,0,1,\"Fry, Mr. Richard\",male,,0,0,112058,0,B102,S\r\n817,0,3,\"Heininen, Miss. Wendla Maria\",female,23,0,0,STON/O2. 3101290,7.925,,S\r\n818,0,2,\"Mallet, Mr. Albert\",male,31,1,1,S.C./PARIS 2079,37.0042,,C\r\n819,0,3,\"Holm, Mr. John Fredrik Alexander\",male,43,0,0,C 7075,6.45,,S\r\n820,0,3,\"Skoog, Master. Karl Thorsten\",male,10,3,2,347088,27.9,,S\r\n821,1,1,\"Hays, Mrs. Charles Melville (Clara Jennings Gregg)\",female,52,1,1,12749,93.5,B69,S\r\n822,1,3,\"Lulic, Mr. Nikola\",male,27,0,0,315098,8.6625,,S\r\n823,0,1,\"Reuchlin, Jonkheer. John George\",male,38,0,0,19972,0,,S\r\n824,1,3,\"Moor, Mrs. (Beila)\",female,27,0,1,392096,12.475,E121,S\r\n825,0,3,\"Panula, Master. Urho Abraham\",male,2,4,1,3101295,39.6875,,S\r\n826,0,3,\"Flynn, Mr. John\",male,,0,0,368323,6.95,,Q\r\n827,0,3,\"Lam, Mr. Len\",male,,0,0,1601,56.4958,,S\r\n828,1,2,\"Mallet, Master. Andre\",male,1,0,2,S.C./PARIS 2079,37.0042,,C\r\n829,1,3,\"McCormack, Mr. Thomas Joseph\",male,,0,0,367228,7.75,,Q\r\n830,1,1,\"Stone, Mrs. George Nelson (Martha Evelyn)\",female,62,0,0,113572,80,B28,\r\n831,1,3,\"Yasbeck, Mrs. Antoni (Selini Alexander)\",female,15,1,0,2659,14.4542,,C\r\n832,1,2,\"Richards, Master. George Sibley\",male,0.83,1,1,29106,18.75,,S\r\n833,0,3,\"Saad, Mr. Amin\",male,,0,0,2671,7.2292,,C\r\n834,0,3,\"Augustsson, Mr. Albert\",male,23,0,0,347468,7.8542,,S\r\n835,0,3,\"Allum, Mr. Owen George\",male,18,0,0,2223,8.3,,S\r\n836,1,1,\"Compton, Miss. Sara Rebecca\",female,39,1,1,PC 17756,83.1583,E49,C\r\n837,0,3,\"Pasic, Mr. Jakob\",male,21,0,0,315097,8.6625,,S\r\n838,0,3,\"Sirota, Mr. Maurice\",male,,0,0,392092,8.05,,S\r\n839,1,3,\"Chip, Mr. Chang\",male,32,0,0,1601,56.4958,,S\r\n840,1,1,\"Marechal, Mr. Pierre\",male,,0,0,11774,29.7,C47,C\r\n841,0,3,\"Alhomaki, Mr. Ilmari Rudolf\",male,20,0,0,SOTON/O2 3101287,7.925,,S\r\n842,0,2,\"Mudd, Mr. Thomas Charles\",male,16,0,0,S.O./P.P. 3,10.5,,S\r\n843,1,1,\"Serepeca, Miss. Augusta\",female,30,0,0,113798,31,,C\r\n844,0,3,\"Lemberopolous, Mr. Peter L\",male,34.5,0,0,2683,6.4375,,C\r\n845,0,3,\"Culumovic, Mr. Jeso\",male,17,0,0,315090,8.6625,,S\r\n846,0,3,\"Abbing, Mr. Anthony\",male,42,0,0,C.A. 5547,7.55,,S\r\n847,0,3,\"Sage, Mr. Douglas Bullen\",male,,8,2,CA. 2343,69.55,,S\r\n848,0,3,\"Markoff, Mr. Marin\",male,35,0,0,349213,7.8958,,C\r\n849,0,2,\"Harper, Rev. John\",male,28,0,1,248727,33,,S\r\n850,1,1,\"Goldenberg, Mrs. Samuel L (Edwiga Grabowska)\",female,,1,0,17453,89.1042,C92,C\r\n851,0,3,\"Andersson, Master. Sigvard Harald Elias\",male,4,4,2,347082,31.275,,S\r\n852,0,3,\"Svensson, Mr. Johan\",male,74,0,0,347060,7.775,,S\r\n853,0,3,\"Boulos, Miss. 
Nourelain\",female,9,1,1,2678,15.2458,,C\r\n854,1,1,\"Lines, Miss. Mary Conover\",female,16,0,1,PC 17592,39.4,D28,S\r\n855,0,2,\"Carter, Mrs. Ernest Courtenay (Lilian Hughes)\",female,44,1,0,244252,26,,S\r\n856,1,3,\"Aks, Mrs. Sam (Leah Rosen)\",female,18,0,1,392091,9.35,,S\r\n857,1,1,\"Wick, Mrs. George Dennick (Mary Hitchcock)\",female,45,1,1,36928,164.8667,,S\r\n858,1,1,\"Daly, Mr. Peter Denis \",male,51,0,0,113055,26.55,E17,S\r\n859,1,3,\"Baclini, Mrs. Solomon (Latifa Qurban)\",female,24,0,3,2666,19.2583,,C\r\n860,0,3,\"Razi, Mr. Raihed\",male,,0,0,2629,7.2292,,C\r\n861,0,3,\"Hansen, Mr. Claus Peter\",male,41,2,0,350026,14.1083,,S\r\n862,0,2,\"Giles, Mr. Frederick Edward\",male,21,1,0,28134,11.5,,S\r\n863,1,1,\"Swift, Mrs. Frederick Joel (Margaret Welles Barron)\",female,48,0,0,17466,25.9292,D17,S\r\n864,0,3,\"Sage, Miss. Dorothy Edith \"\"Dolly\"\"\",female,,8,2,CA. 2343,69.55,,S\r\n865,0,2,\"Gill, Mr. John William\",male,24,0,0,233866,13,,S\r\n866,1,2,\"Bystrom, Mrs. (Karolina)\",female,42,0,0,236852,13,,S\r\n867,1,2,\"Duran y More, Miss. Asuncion\",female,27,1,0,SC/PARIS 2149,13.8583,,C\r\n868,0,1,\"Roebling, Mr. Washington Augustus II\",male,31,0,0,PC 17590,50.4958,A24,S\r\n869,0,3,\"van Melkebeke, Mr. Philemon\",male,,0,0,345777,9.5,,S\r\n870,1,3,\"Johnson, Master. Harold Theodor\",male,4,1,1,347742,11.1333,,S\r\n871,0,3,\"Balkic, Mr. Cerin\",male,26,0,0,349248,7.8958,,S\r\n872,1,1,\"Beckwith, Mrs. Richard Leonard (Sallie Monypeny)\",female,47,1,1,11751,52.5542,D35,S\r\n873,0,1,\"Carlsson, Mr. Frans Olof\",male,33,0,0,695,5,B51 B53 B55,S\r\n874,0,3,\"Vander Cruyssen, Mr. Victor\",male,47,0,0,345765,9,,S\r\n875,1,2,\"Abelson, Mrs. Samuel (Hannah Wizosky)\",female,28,1,0,P/PP 3381,24,,C\r\n876,1,3,\"Najib, Miss. Adele Kiamie \"\"Jane\"\"\",female,15,0,0,2667,7.225,,C\r\n877,0,3,\"Gustafsson, Mr. Alfred Ossian\",male,20,0,0,7534,9.8458,,S\r\n878,0,3,\"Petroff, Mr. Nedelio\",male,19,0,0,349212,7.8958,,S\r\n879,0,3,\"Laleff, Mr. Kristo\",male,,0,0,349217,7.8958,,S\r\n880,1,1,\"Potter, Mrs. Thomas Jr (Lily Alexenia Wilson)\",female,56,0,1,11767,83.1583,C50,C\r\n881,1,2,\"Shelley, Mrs. William (Imanita Parrish Hall)\",female,25,0,1,230433,26,,S\r\n882,0,3,\"Markun, Mr. Johann\",male,33,0,0,349257,7.8958,,S\r\n883,0,3,\"Dahlberg, Miss. Gerda Ulrika\",female,22,0,0,7552,10.5167,,S\r\n884,0,2,\"Banfield, Mr. Frederick James\",male,28,0,0,C.A./SOTON 34068,10.5,,S\r\n885,0,3,\"Sutehall, Mr. Henry Jr\",male,25,0,0,SOTON/OQ 392076,7.05,,S\r\n886,0,3,\"Rice, Mrs. William (Margaret Norton)\",female,39,0,5,382652,29.125,,Q\r\n887,0,2,\"Montvila, Rev. Juozas\",male,27,0,0,211536,13,,S\r\n888,1,1,\"Graham, Miss. Margaret Edith\",female,19,0,0,112053,30,B42,S\r\n889,0,3,\"Johnston, Miss. Catherine Helen \"\"Carrie\"\"\",female,,1,2,W./C. 6607,23.45,,S\r\n890,1,1,\"Behr, Mr. Karl Howell\",male,26,0,0,111369,30,C148,C\r\n891,0,3,\"Dooley, Mr. 
Patrick\",male,32,0,0,370376,7.75,,Q\r\n\n" ], [ "from requests import session\n# payload\npayload = {\n 'action': 'login',\n 'username': os.environ.get(\"KAGGLE_USERNAME\"),\n 'password': os.environ.get(\"KAGGLE_PASSWORD\")\n}\n\n\ndef extract_data(url, file_path):\n '''\n extract data from kaggle\n '''\n # setup session\n with session() as c:\n c.post('https://www.kaggle.com/account/login', data=payload)\n # oppen file to write\n with open(file_path, 'w') as handle:\n response = c.get(url, stream=True)\n for block in response.iter_content(1024):\n handle.write(block)\n \n", "_____no_output_____" ], [ "# urls\ntrain_url = 'https://www.kaggle.com/c/titanic/download/train.csv'\ntest_url = 'https://www.kaggle.com/c/titanic/download/test.csv'\n\n# file paths\nraw_data_path = os.path.join(os.path.pardir,'data','raw')\ntrain_data_path = os.path.join(raw_data_path, 'train.csv')\ntest_data_path = os.path.join(raw_data_path, 'test.csv')\n\n# extract data\nextract_data(train_url,train_data_path)\nextract_data(test_url,test_data_path)", "_____no_output_____" ], [ "!ls -l ../data/raw", "'ls' is not recognized as an internal or external command,\noperable program or batch file.\n" ] ], [ [ "### Builiding the file script", "_____no_output_____" ] ], [ [ "get_raw_data_script_file = os.path.join(os.path.pardir,'src','data','get_raw_data.py')", "_____no_output_____" ], [ "%%writefile $get_raw_data_script_file\n# -*- coding: utf-8 -*-\nimport os\nfrom dotenv import find_dotenv, load_dotenv\nfrom requests import session\nimport logging\n\n\n# payload for login to kaggle\npayload = {\n 'action': 'login',\n 'username': os.environ.get(\"KAGGLE_USERNAME\"),\n 'password': os.environ.get(\"KAGGLE_PASSWORD\")\n}\n\n\ndef extract_data(url, file_path):\n '''\n method to extract data\n '''\n with session() as c:\n c.post('https://www.kaggle.com/account/login', data=payload)\n with open(file_path, 'w') as handle:\n response = c.get(url, stream=True)\n for block in response.iter_content(1024):\n handle.write(block)\n\n\n \ndef main(project_dir):\n '''\n main method\n '''\n # get logger\n logger = logging.getLogger(__name__)\n logger.info('getting raw data')\n \n # urls\n train_url = 'https://www.kaggle.com/c/titanic/download/train.csv'\n test_url = 'https://www.kaggle.com/c/titanic/download/test.csv'\n\n # file paths\n raw_data_path = os.path.join(project_dir,'data','raw')\n train_data_path = os.path.join(raw_data_path, 'train.csv')\n test_data_path = os.path.join(raw_data_path, 'test.csv')\n\n # extract data\n extract_data(train_url,train_data_path)\n extract_data(test_url,test_data_path)\n logger.info('downloaded raw training and test data')\n\n\nif __name__ == '__main__':\n # getting root directory\n project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)\n \n # setup logger\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # find .env automatically by walking up directories until it's found\n dotenv_path = find_dotenv()\n # load up the entries as environment variables\n load_dotenv(dotenv_path)\n\n # call the main\n main(project_dir)\n", "Overwriting ..\\src\\data\\get_raw_data.py\n" ], [ "!python $get_raw_data_script_file", "2018-03-08 13:42:50,661 - __main__ - INFO - getting raw data\n2018-03-08 13:43:01,605 - __main__ - INFO - downloaded raw training and test data\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
c5227e0b60882aa7b6e61ac7a6caa806c3c88793
108,622
ipynb
Jupyter Notebook
examples/example-02.ipynb
millen1m/pysra
4a6919fa41241c3b7201adbc2190db5a10b35386
[ "MIT" ]
null
null
null
examples/example-02.ipynb
millen1m/pysra
4a6919fa41241c3b7201adbc2190db5a10b35386
[ "MIT" ]
null
null
null
examples/example-02.ipynb
millen1m/pysra
4a6919fa41241c3b7201adbc2190db5a10b35386
[ "MIT" ]
null
null
null
374.558621
37,828
0.942544
[ [ [ "# Example 1: Random vibration theory SRA\n\nRandom vibration theory analysis to compute surface response spectrum and site \namplification functions.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n\nimport pysra\n\n%matplotlib inline", "_____no_output_____" ], [ "# Increased figure sizes\nplt.rcParams['figure.dpi'] = 120", "_____no_output_____" ] ], [ [ "## Create a point source theory RVT motion", "_____no_output_____" ] ], [ [ "m = pysra.motion.SourceTheoryRvtMotion(6.0, 30, 'wna')\nm.calc_fourier_amps()", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nax.plot(m.freqs, m.fourier_amps)\nax.set(xlabel='Frequency (Hz)', xscale='log', ylabel='Fourier Ampl. (g-s)', yscale='log')\nfig.tight_layout();", "_____no_output_____" ] ], [ [ "## Create site profile\n\nThis is about the simplest profile that we can create. Linear-elastic soil and rock.", "_____no_output_____" ] ], [ [ "profile = pysra.site.Profile([\n pysra.site.Layer(\n pysra.site.SoilType(\n 'Soil', 18., None, 0.05\n ),\n 30, 400\n ),\n pysra.site.Layer(\n pysra.site.SoilType(\n 'Rock', 24., None, 0.01\n ),\n 0, 1200\n ),\n])", "_____no_output_____" ] ], [ [ "## Create the site response calculator", "_____no_output_____" ] ], [ [ "calc = pysra.propagation.LinearElasticCalculator()", "_____no_output_____" ] ], [ [ "## Specify the output", "_____no_output_____" ] ], [ [ "freqs = np.logspace(-1, 2, num=500)\n\noutputs = pysra.output.OutputCollection([\n pysra.output.ResponseSpectrumOutput(\n # Frequency\n freqs,\n # Location of the output\n pysra.output.OutputLocation('outcrop', index=0),\n # Damping\n 0.05),\n pysra.output.ResponseSpectrumRatioOutput(\n # Frequency\n freqs,\n # Location in (denominator),\n pysra.output.OutputLocation('outcrop', index=-1),\n # Location out (numerator)\n pysra.output.OutputLocation('outcrop', index=0),\n # Damping\n 0.05)\n])", "_____no_output_____" ] ], [ [ "## Perform the calculation", "_____no_output_____" ], [ "Compute the response of the site, and store the state within the calculation object. Nothing is provided.", "_____no_output_____" ] ], [ [ "calc(m, profile, profile.location('outcrop', index=-1))", "_____no_output_____" ] ], [ [ "Calculate all of the outputs from the calculation object.", "_____no_output_____" ] ], [ [ "outputs(calc)", "_____no_output_____" ] ], [ [ "## Plot the outputs\n\nCreate a few plots of the output.", "_____no_output_____" ] ], [ [ "for o in outputs:\n fig, ax = plt.subplots()\n ax.plot(o.refs, o.values)\n ax.set(xlabel=o.xlabel, xscale='log', ylabel=o.ylabel)\n fig.tight_layout();", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c5229455baaa1ed3d2f7d073dcd7c850bf1db90d
757,672
ipynb
Jupyter Notebook
Main/Singles.ipynb
mathiassunesen/Speciale_retirement
9db901a3791b9b75f228d1cec6c180e917be93e8
[ "MIT" ]
1
2020-01-14T22:19:42.000Z
2020-01-14T22:19:42.000Z
Main/Singles.ipynb
mathiassunesen/Speciale_retirement
9db901a3791b9b75f228d1cec6c180e917be93e8
[ "MIT" ]
null
null
null
Main/Singles.ipynb
mathiassunesen/Speciale_retirement
9db901a3791b9b75f228d1cec6c180e917be93e8
[ "MIT" ]
1
2020-01-14T22:19:46.000Z
2020-01-14T22:19:46.000Z
763.013092
56,060
0.953995
[ [ [ "# Single model", "_____no_output_____" ] ], [ [ "from consav import runtools\nruntools.write_numba_config(disable=0,threads=4)\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\n# Local modules\nfrom Model import RetirementClass\nimport SimulatedMinimumDistance as SMD\nimport figs\nimport funs\n\n# Global modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time", "_____no_output_____" ] ], [ [ "### Solve and simulate model", "_____no_output_____" ] ], [ [ "tic1 = time.time()\nSingle = RetirementClass()\ntic2 = time.time()\nSingle.recompute()\ntic3 = time.time()\nSingle.solve()\ntic4 = time.time()\nSingle.simulate(accuracy=True,tax=True)\ntic5 = time.time()\nprint('Class :', round(tic2-tic1,2))\nprint('Precompute:', round(tic3-tic2,2))\nprint('Solve :', round(tic4-tic3,2))\nprint('Simulate :', round(tic5-tic4,2))", "Class : 49.64\nPrecompute: 1.45\nSolve : 41.68\nSimulate : 66.98\n" ], [ "tic1 = time.time()\nSingle.solve()\ntic2 = time.time()\nSingle.simulate(accuracy=True,tax=True)\ntic3 = time.time()\nprint('Solve :', round(tic2-tic1,2))\nprint('Simulate :', round(tic3-tic2,2))", "Solve : 0.13\nSimulate : 2.46\n" ] ], [ [ "### Retirement probabilities from solution", "_____no_output_____" ], [ "Women", "_____no_output_____" ] ], [ [ "G = figs.choice_probs(Single,ma=0)\nG['legendsize'] = 12\nG['marker'] = 'o'\nfigs.MyPlot(G,linewidth=3).savefig('figs/Model/Single_ChoiceProb_Women.png')", "_____no_output_____" ] ], [ [ "Men", "_____no_output_____" ] ], [ [ "G = figs.choice_probs(Single,ma=1)\nG['legendsize'] = 12\nG['marker'] = 'o'\nfigs.MyPlot(G,linewidth=3).savefig('figs/Model/Single_ChoiceProb_Men.png')", "_____no_output_____" ] ], [ [ "### Simulation", "_____no_output_____" ] ], [ [ "def rename_gender(G_lst):\n G_lst[0]['label'] = ['Women']\n G_lst[1]['label'] = ['Men']", "_____no_output_____" ], [ "936092.2561647706 - np.nansum(Single.sol.c)", "_____no_output_____" ], [ "37833823.081779644 - np.nansum(Single.sol.v)", "_____no_output_____" ], [ "print(np.nansum(Single.par.labor))\nprint(np.nansum(Single.par.erp))\nprint(np.nansum(Single.par.oap))", "1433.3479374437254\n68.5162239335012\n41.58541459199998\n" ], [ "Single.par.T_erp", "_____no_output_____" ], [ "68.51622393567519 - np.nansum(Single.par.erp)", "_____no_output_____" ], [ "Single.par.pension_male = np.array([10.8277686, 18.94859504])\nSingle.par.pension_female = np.array([ 6.6438835, 11.62679612])\ntransitions.precompute_inc_single(Single.par)\nSingle.solve()\nSingle.simulate()", "_____no_output_____" ], [ "Single.par.start_T = 53\nSingle.par.simT = Single.par.end_T - Single.par.start_T + 1\nSingle.par.var = np.array([0.202, 0.161])\nSingle.par.reg_labor_male = np.array((1.166, 0.360, 0.432, -0.406))\nSingle.par.reg_labor_female = np.array((4.261, 0.326, 0.303, -0.289))\nSingle.par.priv_pension_female = 728*1000/Single.par.denom\nSingle.par.priv_pension_male = 1236*1000/Single.par.denom \nSingle.solve(recompute=True)\nSingle.simulate()", "_____no_output_____" ], [ "np.nanmean(Single.sim.m[:,0])", "_____no_output_____" ], [ "Single.sim.m[:,0] = 20\nSingle.simulate()", "_____no_output_____" ], [ "Gw = figs.retirement_probs(Single,MA=[0])\nGm = figs.retirement_probs(Single,MA=[1])\nrename_gender([Gw,Gm])\nfigs.MyPlot([Gw,Gm],linewidth=3).savefig('figs/Model/SimSingleProbs')", "_____no_output_____" ], [ "Gw = figs.retirement_probs(Single,MA=[0])\nGm = 
figs.retirement_probs(Single,MA=[1])\nrename_gender([Gw,Gm])\nfigs.MyPlot([Gw,Gm],linewidth=3).savefig('figs/Model/SimSingleProbs')", "_____no_output_____" ], [ "Gw = figs.retirement_probs(Single,MA=[0])\nGm = figs.retirement_probs(Single,MA=[1])\nrename_gender([Gw,Gm])\nfigs.MyPlot([Gw,Gm],linewidth=3).savefig('figs/Model/SimSingleProbs')", "_____no_output_____" ], [ "Gw = figs.lifecycle(Single,var='m',MA=[0],ages=[57,80])\nGm = figs.lifecycle(Single,var='m',MA=[1],ages=[57,80])\nrename_gender([Gw,Gm])\nfigs.MyPlot([Gw,Gm],linewidth=3,save=False)", "_____no_output_____" ], [ "Gw = figs.lifecycle(Single,var='c',MA=[0],ages=[57,80])\nGm = figs.lifecycle(Single,var='c',MA=[1],ages=[57,80])\nrename_gender([Gw,Gm])\nfigs.MyPlot([Gw,Gm],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "### Consumption functions", "_____no_output_____" ], [ "Retired", "_____no_output_____" ] ], [ [ "G = figs.policy(Single,var='c',T=list(range(77,87))[::2],MA=[0],ST=[3],RA=[0],D=[0],label=['t'])\nG['legendsize'] = 12\nfigs.MyPlot(G,ylim=[0,12],save=False)", "_____no_output_____" ], [ "G = figs.policy(Single,var='c',T=list(range(97,111))[::2],MA=[0],ST=[3],RA=[0],D=[0],label=['t'])\nG['legendsize'] = 12\nfigs.MyPlot(G,ylim=[0,16],save=False)", "_____no_output_____" ] ], [ [ "Working", "_____no_output_____" ] ], [ [ "G = figs.policy(Single,var='c',T=list(range(57,67))[::2],MA=[0],ST=[3],RA=[0],D=[1],label=['t'])\nG['legendsize'] = 12\nfigs.MyPlot(G,ylim=[0,8],save=False)", "_____no_output_____" ], [ "G = figs.policy(Single,var='c',T=list(range(67,75))[::2],MA=[0],ST=[3],RA=[0],D=[1],label=['t'])\nG['legendsize'] = 12\nfigs.MyPlot(G,ylim=[0,10],save=False)", "_____no_output_____" ] ], [ [ "### Simulation - Retirement", "_____no_output_____" ] ], [ [ "def rename(G_lst):\n G_lst[0]['label'] = ['High skilled']\n G_lst[1]['label'] = ['Base']\n G_lst[2]['label'] = ['Low skilled']", "_____no_output_____" ] ], [ [ "Women", "_____no_output_____" ] ], [ [ "G_hs = figs.retirement_probs(Single,MA=[0],ST=[1,3])\nG_base = figs.retirement_probs(Single,MA=[0])\nG_ls = figs.retirement_probs(Single,MA=[0],ST=[0,2])\nrename([G_hs,G_base,G_ls])\nfigs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "Men", "_____no_output_____" ] ], [ [ "G_hs = figs.retirement_probs(Single,MA=[1],ST=[1,3])\nG_base = figs.retirement_probs(Single,MA=[1])\nG_ls = figs.retirement_probs(Single,MA=[1],ST=[0,2])\nrename([G_hs,G_base,G_ls])\nfigs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "### Simulation - Consumption", "_____no_output_____" ], [ "Women", "_____no_output_____" ] ], [ [ "G_hs = figs.lifecycle(Single,var='c',MA=[0],ST=[1,3],ages=[57,80])\nG_base = figs.lifecycle(Single,var='c',MA=[0],ages=[57,80])\nG_ls = figs.lifecycle(Single,var='c',MA=[0],ST=[0,2],ages=[57,80])\nrename([G_hs,G_base,G_ls])\nfigs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "Men", "_____no_output_____" ] ], [ [ "G_hs = figs.lifecycle(Single,var='c',MA=[1],ST=[1,3],ages=[57,80])\nG_base = figs.lifecycle(Single,var='c',MA=[1],ages=[57,80])\nG_ls = figs.lifecycle(Single,var='c',MA=[1],ST=[0,2],ages=[57,80])\nrename([G_hs,G_base,G_ls])\nfigs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "### Simulation - Wealth", "_____no_output_____" ], [ "Women", "_____no_output_____" ] ], [ [ "G_hs = figs.lifecycle(Single,var='m',MA=[0],ST=[1,3],ages=[57,68])\nG_base = figs.lifecycle(Single,var='m',MA=[0],ages=[57,68])\nG_ls = 
figs.lifecycle(Single,var='m',MA=[0],ST=[0,2],ages=[57,68])\nrename([G_hs,G_base,G_ls])\nfigs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "Men", "_____no_output_____" ] ], [ [ "G_hs = figs.lifecycle(Single,var='m',MA=[1],ST=[1,3],ages=[57,68])\nG_base = figs.lifecycle(Single,var='m',MA=[1],ages=[57,68])\nG_ls = figs.lifecycle(Single,var='m',MA=[1],ST=[0,2],ages=[57,68])\nrename([G_hs,G_base,G_ls])\nfigs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)", "_____no_output_____" ] ], [ [ "### Euler errors", "_____no_output_____" ] ], [ [ "MA = [0,1]\nST = [0,1,2,3]\nages = [Single.par.start_T,Single.par.end_T-1]\nfor ma in MA:\n for st in ST:\n funs.log_euler(Single,MA=[ma],ST=[st],ages=ages,plot=True)\nprint('Total:',funs.log_euler(Single,ages=ages)[0])", "Total: -5.38451274155166\n" ], [ "MA = [0,1]\nST = [0,1,2,3]\nages = [Single.par.start_T,Single.par.end_T-1]\nfor ma in MA:\n for st in ST:\n funs.log_euler(Single,MA=[ma],ST=[st],ages=ages,plot=True)\nprint('Total:',funs.log_euler(Single,ages=ages)[0])", "Total: -5.384790359150737\n" ], [ "Na = Single.par.Na\nfuns.resolve(Single,Na=np.linspace(50,1000))\nSingle.par.Na = Na\nSingle.recompute() # reset", "_____no_output_____" ], [ "a_phi = test.par.a_phi\nfuns.resolve(test,a_phi = np.linspace(1.0,2.0,num=10))\ntest.par.a_phi = a_phi\ntest.solve(recompute=True) # reset", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
c5229b3247d718d7f2b6c50217955451fdac26fe
891
ipynb
Jupyter Notebook
pset_pandas_ext/101problems/solutions/nb/p49.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
5
2019-04-08T20:05:37.000Z
2019-12-04T20:48:45.000Z
pset_pandas_ext/101problems/solutions/nb/p49.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
8
2019-04-15T15:16:05.000Z
2022-02-12T10:33:32.000Z
pset_pandas_ext/101problems/solutions/nb/p49.ipynb
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
2
2019-04-10T00:14:42.000Z
2020-02-26T20:35:21.000Z
24.081081
109
0.491582
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
c5229cec2c46a67757f9b5a2fc9e973a61f370f9
82,445
ipynb
Jupyter Notebook
text_classification/logistic.ipynb
rebecadieb/machine-learning
882e66a26c84afac295428db852da4917be5cf3c
[ "MIT" ]
null
null
null
text_classification/logistic.ipynb
rebecadieb/machine-learning
882e66a26c84afac295428db852da4917be5cf3c
[ "MIT" ]
null
null
null
text_classification/logistic.ipynb
rebecadieb/machine-learning
882e66a26c84afac295428db852da4917be5cf3c
[ "MIT" ]
1
2020-10-21T17:25:15.000Z
2020-10-21T17:25:15.000Z
67.577869
29,048
0.696307
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Logistic-Regression\" data-toc-modified-id=\"Logistic-Regression-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Logistic Regression</a></span><ul class=\"toc-item\"><li><span><a href=\"#Logistic-Function\" data-toc-modified-id=\"Logistic-Function-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Logistic Function</a></span></li><li><span><a href=\"#Interpreting-the-Intercept\" data-toc-modified-id=\"Interpreting-the-Intercept-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Interpreting the Intercept</a></span></li><li><span><a href=\"#Defining-The-Cost-Function\" data-toc-modified-id=\"Defining-The-Cost-Function-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>Defining The Cost Function</a></span></li><li><span><a href=\"#Gradient\" data-toc-modified-id=\"Gradient-1.4\"><span class=\"toc-item-num\">1.4&nbsp;&nbsp;</span>Gradient</a></span></li><li><span><a href=\"#Stochastic/Mini-batch-Gradient\" data-toc-modified-id=\"Stochastic/Mini-batch-Gradient-1.5\"><span class=\"toc-item-num\">1.5&nbsp;&nbsp;</span>Stochastic/Mini-batch Gradient</a></span></li><li><span><a href=\"#Implementation\" data-toc-modified-id=\"Implementation-1.6\"><span class=\"toc-item-num\">1.6&nbsp;&nbsp;</span>Implementation</a></span></li><li><span><a href=\"#Comparing-Result-and-Convergence-Behavior\" data-toc-modified-id=\"Comparing-Result-and-Convergence-Behavior-1.7\"><span class=\"toc-item-num\">1.7&nbsp;&nbsp;</span>Comparing Result and Convergence Behavior</a></span></li><li><span><a href=\"#Pros-and-Cons-of-Logistic-Regression\" data-toc-modified-id=\"Pros-and-Cons-of-Logistic-Regression-1.8\"><span class=\"toc-item-num\">1.8&nbsp;&nbsp;</span>Pros and Cons of Logistic Regression</a></span></li></ul></li><li><span><a href=\"#Reference\" data-toc-modified-id=\"Reference-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Reference</a></span></li></ul></div>", "_____no_output_____" ] ], [ [ "# code for loading the format for the notebook\nimport os\n\n# path : store the current path to convert back to it later\npath = os.getcwd()\nos.chdir(os.path.join('..', 'notebook_format'))\n\nfrom formats import load_style\nload_style(plot_style = False)", "_____no_output_____" ], [ "os.chdir(path)\n\n# 1. magic for inline plot\n# 2. magic to print version\n# 3. magic so that the notebook will reload external python modules\n# 4. magic to enable retina (high resolution) plots\n# https://gist.github.com/minrk/3301035\n%matplotlib inline\n%load_ext watermark\n%load_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\n\n%watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,sklearn", "Ethen 2018-09-15 15:50:42 \n\nCPython 3.6.4\nIPython 6.4.0\n\nnumpy 1.14.1\npandas 0.23.0\nmatplotlib 2.2.2\nsklearn 0.19.1\n" ] ], [ [ "# Logistic Regression", "_____no_output_____" ], [ "**Logistic regression** is an excellent tool to know for classification problems, which are problems where the output value that we wish to predict only takes on only a small number of discrete values. Here we'll focus on the binary classification problem, where the output can take on only two distinct classes. 
To make our examples more concrete, we will consider the Glass dataset.", "_____no_output_____" ] ], [ [ "url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'\ncol_names = ['id', 'ri', 'na', 'mg', 'al', 'si', 'k', 'ca', 'ba', 'fe', 'glass_type']\nglass = pd.read_csv(url, names = col_names, index_col = 'id')\nglass.sort_values('al', inplace = True)\n\n# convert the glass type into binary outcome\n# types 1, 2, 3 are window glass\n# types 5, 6, 7 are household glass\nglass['household'] = glass['glass_type'].map({1: 0, 2: 0, 3: 0, 5: 1, 6: 1, 7: 1})\nglass.head()", "_____no_output_____" ] ], [ [ "Our task is to predict the `household` column using the `al` column. Let's visualize the relationship between the input and output and also train the logsitic regression to see the outcome that it produces.", "_____no_output_____" ] ], [ [ "logreg = LogisticRegression(C = 1e9)\nX = glass['al'].reshape(-1, 1) # sklearn doesn't accept 1d-array, convert it to 2d\ny = np.array(glass['household'])\nlogreg.fit(X, y)\n\n# predict the probability that each observation belongs to class 1\n# The first column indicates the predicted probability of class 0, \n# and the second column indicates the predicted probability of class 1\nglass['household_pred_prob'] = logreg.predict_proba(X)[:, 1]", "_____no_output_____" ], [ "# plot the predicted probability (familiarize yourself with the S-shape)\n# change default figure and font size\nplt.rcParams['figure.figsize'] = 8, 6 \nplt.rcParams['font.size'] = 12\n\nplt.scatter(glass['al'], glass['household'])\nplt.plot(glass['al'], glass['household_pred_prob'])\nplt.xlabel('al')\nplt.ylabel('household')\nplt.show()", "_____no_output_____" ] ], [ [ "As we can see, logistic regression can output the probabilities of observation belonging to a specific class and these probabilities can be converted into class predictions by choosing a cutoff value (e.g. probability higher than 0.5 is classified as class 1).", "_____no_output_____" ], [ "## Logistic Function", "_____no_output_____" ], [ "In **Logistic Regression**, the log-odds of a categorical response being \"true\" (1) is modeled as a linear combination of the features:\n\n\\begin{align*}\n \\log \\left({p\\over 1-p}\\right) &= w_0 + w_1x_1, ..., w_jx_j \\nonumber \\\\\n &= w^Tx \\nonumber\n\\end{align*}\n\nWhere:\n\n- $w_{0}$ is the intercept term, and $w_1$ to $w_j$ represents the parameters for all the other features (a total of j features).\n- By convention of we can assume that $x_0 = 1$, so that we can re-write the whole thing using the matrix notation $w^Tx$.\n\nThis is called the **logit function**. The equation can be re-arranged into the **logistic function**:\n\n$$p = \\frac{e^{w^Tx}} {1 + e^{w^Tx}}$$\n\nOr in the more commonly seen form:\n\n$$h_w(x) = \\frac{1}{ 1 + e^{-w^Tx} }$$ \n\nLet's take a look at the plot of the function:", "_____no_output_____" ] ], [ [ "x_values = np.linspace(-5, 5, 100)\ny_values = [1 / (1 + np.exp(-x)) for x in x_values]\nplt.plot(x_values, y_values)\nplt.title('Logsitic Function')\nplt.show()", "_____no_output_____" ] ], [ [ "The **logistic function** has some nice properties. The y-value represents the probability and it is always bounded between 0 and 1, which is want we wanted for probabilities. For an x value of 0 you get a 0.5 probability. 
Also as you get more positive x value you get a higher probability, on the other hand, a more negative x value results in a lower probability.\n\nToy sample code of how to predict the probability given the data and the weight is provided below.", "_____no_output_____" ] ], [ [ "def predict_probability(data, weights):\n \"\"\"probability predicted by the logistic regression\"\"\"\n score = np.dot(data, weights)\n predictions = 1 / (1 + np.exp(-score))\n return predictions", "_____no_output_____" ] ], [ [ "## Interpreting the Intercept", "_____no_output_____" ], [ "We can check logistic regression's coefficient does in fact generate the log-odds.", "_____no_output_____" ] ], [ [ "# compute predicted log-odds for al = 2 using the equation\n# convert log-odds to odds\n# convert odds to probability\nlogodds = logreg.intercept_ + logreg.coef_[0] * 2\nodds = np.exp(logodds)\nprob = odds / (1 + odds)\nprint(prob)\n\nlogreg.predict_proba(2)[:, 1]", "_____no_output_____" ], [ "# examine the coefficient for al\nprint('a1', logreg.coef_[0])", "_____no_output_____" ] ], [ [ "**Interpretation:** 1 unit increase in `al` is associated with a 4.18 unit increase in the log-odds of the observation being classified as `household 1`. We can confirm that again by doing the calculation ourselves.", "_____no_output_____" ] ], [ [ "# increasing al by 1 (so that al now becomes 3)\n# increases the log-odds by 4.18\nlogodds = logodds + logreg.coef_[0]\nodds = np.exp(logodds)\nprob = odds / (1 + odds)\nprint(prob)\n\nlogreg.predict_proba(3)[:, 1]", "_____no_output_____" ] ], [ [ "## Defining The Cost Function", "_____no_output_____" ], [ "When utilizing logistic regression, we are trying to learn the $w$ values in order to maximize the probability of correctly classifying our glasses. Let's say someone did give us some $w$ values of the logistic regression model, how would we determine if they were good values or not? What we would hope is that for the household of class 1, the probability values are close to 1 and for the household of class 0 the probability is close to 0.\n\nBut we don't care about getting the correct probability for just one observation, we want to correctly classify all our observations. If we assume our data are independent and identically distributed (think of it as all of them are treated equally), we can just take the product of all our individually calculated probabilities and that becomes the objective function we want to maximize. So in math: \n\n$$\\prod_{class1}h_w(x)\\prod_{class0}1 - h_w(x)$$ \n\nThe $\\prod$ symbol means take the product of the $h_w(x)$ for the observations that are classified as that class. You will notice that for observations that are labeled as class 0, we are taking 1 minus the logistic function. That is because we are trying to find a value to maximize, and since observations that are labeled as class 0 should have a probability close to zero, 1 minus the probability should be close to 1. This procedure is also known as the **maximum likelihood estimation** and the following link contains a nice discussion of maximum likelihood using linear regression as an example. 
[Blog: The Principle of Maximum Likelihood](http://suriyadeepan.github.io/2017-01-22-mle-linear-regression/)\n\nNext we will re-write the original cost function as:\n\n$$\\ell(w) = \\sum_{i=1}^{N}y_{i}log(h_w(x_{i})) + (1-y_{i})log(1-h_w(x_{i}))$$\n\nWhere:\n\n- We define $y_{i}$ to be 1 when the $i_{th}$ observation is labeled class 1 and 0 when labeled as class 0, then we only compute $h_w(x_{i})$ for observations that are labeled class 1 and $1 - h_w(x_{i})$ for observations that are labeled class 0, which is still the same idea as the original function.\n- Next we'll transform the original $h_w(x_{i})$ by taking the log. As we'll later see this logarithm transformation will make our cost function more convenient to work with, and because the logarithm is a monotonically increasing function, the logarithm of a function achieves its maximum value at the same points as the function itself. When we take the log, our product across all data points, it becomes a sum. See [log rules](http://www.mathwords.com/l/logarithm_rules.htm) for more details (Hint: log(ab) = log(a) + log(b)).\n- The $N$ simply represents the total number of the data.\n\nOften times you'll also see the notation above be simplified in the form of a maximum likelihood estimator:\n\n$$ \\ell(w) = \\sum_{i=1}^{N} log \\big( P( y_i \\mid x_i, w ) \\big) $$\n\nThe equation above simply denotes the idea that , $\\mathbf{w}$ represents the parameters we would like to estimate the parameters $w$ by maximizing conditional probability of $y_i$ given $x_i$.\n\nNow by definition of probability in the logistic regression model: $h_w(x_{i}) = 1 \\big/ 1 + e^{-w^T x_i}$ and $1- h_w(x_{i}) = e^{ -w^T x_i } \\big/ ( 1 + e^{ -w^T x_i } )$. By substituting these expressions into our $\\ell(w)$ equation and simplifying it further we can obtain a simpler expression.", "_____no_output_____" ], [ "$$\n\\begin{align}\n\\ell(w)\n&= \\sum_{i=1}^{N}y_{i}log(h_w(x_{i})) + (1-y_{i})log(1-h_w(x_{i})) \\nonumber \\\\\n&= \\sum_{i=1}^{N} y_{i} log( \\frac{1}{ 1 + e^{ -w^T x_i } } ) + ( 1 - y_{i} )\nlog( \\frac{ e^{ -w^T x_i } }{ 1 + e^{ -w^T x_i } } ) \\nonumber \\\\\n&= \\sum_{i=1}^{N} -y_{i} log( 1 + e^{ -w^T x_i } ) + ( 1 - y_{i} )\n( -w^T x_i - log( 1 + e^{ -w^T x_i } ) ) \\nonumber \\\\\n&= \\sum_{i=1}^{N} ( y_{i} - 1 ) ( w^T x_i ) - log( 1 + e^{ -w^T x_i } ) \\nonumber\n\\end{align}\n$$", "_____no_output_____" ], [ "We'll use the formula above to compute the log likelihood for the entire dataset, which is used to assess the convergence of the algorithm. Toy code provided below.", "_____no_output_____" ] ], [ [ "def compute_avg_log_likelihood(data, label, weights):\n \"\"\"\n the function uses a simple check to prevent overflow problem, \n where numbers gets too large to represent and is converted to inf\n an example of overflow is provided below, when this problem occurs,\n simply use the original score (without taking the exponential)\n \n scores = np.array( [ -10000, 200, 300 ] )\n logexp = np.log( 1 + np.exp(-scores) )\n logexp \n \"\"\"\n scores = np.dot(data, weights)\n logexp = np.log(1 + np.exp(-scores))\n \n # simple check to prevent overflow\n mask = np.isinf(logexp)\n logexp[mask] = -scores[mask]\n \n log_likelihood = np.sum((label - 1) * scores - logexp) / data.shape[0] \n return log_likelihood", "_____no_output_____" ] ], [ [ "**Note:** We made one tiny modification to the log likelihood function We added a ${1/N}$ term which averages the log likelihood across all data points. 
The ${1/N}$ term will make it easier for us to compare stochastic gradient ascent with batch gradient ascent later.", "_____no_output_____" ], [ "## Gradient", "_____no_output_____" ], [ "Now that we obtain the formula to assess our algorithm, we'll dive into the meat of the algorithm, which is to derive the gradient for the formula (the derivative of the formula with respect to each coefficient):\n\n$$\\ell(w) = \\sum_{i=1}^{N} ( y_{i} - 1 ) ( w^T x_i ) - log( 1 + e^{ -w^T x_i } )$$\n\nAnd it turns out the derivative of log likelihood with respect to to a single coefficient $w_j$ is as follows (the form is the same for all coefficients):\n\n$$\n\\frac{\\partial\\ell(w)}{\\partial w_j} = \\sum_{i=1}^N (x_{ij})\\left( y_i - \\frac{1}{ 1 + e^{-w^Tx_i} } \\right )\n$$\n\nTo compute it, you simply need the following two terms:\n\n- $\\left( y_i - \\frac{1}{ 1 + e^{-w^Tx_i} } \\right )$ is the vector containing the difference between the predicted probability and the original label.\n- $x_{ij}$ is the vector containing the $j_{th}$ feature's value.\n \nFor a step by step derivation, consider going through the following link. [Blog: Maximum likelihood and gradient descent demonstration](https://zlatankr.github.io/posts/2017/03/06/mle-gradient-descent), it uses a slightly different notation, but the walkthrough should still be pretty clear.", "_____no_output_____" ], [ "## Stochastic/Mini-batch Gradient", "_____no_output_____" ], [ "The problem with computing the gradient (or so called batched gradient) is the term $\\sum_{i=1}^{N}$. This means that we must sum the contributions over all the data points to calculate the gradient, and this can be problematic if the dataset we're studying is extremely large. Thus, in stochastic gradient, we can use a single point as an approximation to the gradient:\n\n$$\n\\frac{\\partial\\ell_i(w)}{\\partial w_j} = (x_{ij})\\left( y_i - \\frac{1}{ 1 + e^{-w^Tx_i} } \\right )\n$$\n\n**Note1:** Because the **Stochastic Gradient** algorithm uses each row of data in turn to update the gradient, if our data has some sort of implicit ordering, this will negatively affect the convergence of the algorithm. At an extreme, what if we had the data sorted so that all positive reviews came before negative reviews? In that case, even if most reviews are negative, we might converge on an answer of +1 because we never get to see the other data. To avoid this, one practical trick is to shuffle the data before we begin so the rows are in random order.\n\n**Note2:** Stochastic gradient compute the gradient using only 1 data point to update the the parameters, while batch gradient uses all $N$ data points. An alternative to these two extremes is a simple change that allows us to use a **mini-batch** of $B \\leq N$ data points to calculate the gradient. This simple approach is faster than batch gradient but less noisy than stochastic gradient that uses only 1 data point. Given a mini-batch (or a set of data points) $\\mathbf{x}_{i}, \\mathbf{x}_{i+1} \\ldots \\mathbf{x}_{i+B}$, the gradient function for this mini-batch of data points is given by:\n\n$$\n\\sum_{s = i}^{i+B} \\frac{\\partial\\ell_s(w)}{\\partial w_j} = \\frac{1}{B} \\sum_{s = i}^{i+B} (x_{sj})\\left( y_i - \\frac{1}{ 1 + e^{-w^Tx_i} } \\right )\n$$\n\nHere, the $\\frac{1}{B}$ means that we are normalizing the gradient update rule by the batch size $B$. In other words, we update the coefficients using the **average gradient over data points** (instead of using a pure summation). 
By using the average gradient, we ensure that the magnitude of the gradient is approximately the same for all batch sizes. This way, we can more easily compare various batch sizes and study the effect it has on the algorithm.", "_____no_output_____" ], [ "## Implementation", "_____no_output_____" ], [ "Recall our task is to find the optimal value for each individual weight to lower the cost. This requires taking the partial derivative of the cost/error function with respect to a single weight, and then running gradient descent for each individual weight to update them. Thus, for any individual weight $w_j$, we'll compute the following:\n\n$$ w_j^{(t + 1)} = w_j^{(t)} + \\alpha * \\sum_{s = i}^{i+B} \\frac{\\partial\\ell_s(w)}{\\partial w_j}$$ \n\nWhere:\n\n- $\\alpha$ denotes the the learning rate or so called step size, in other places you'll see it denoted as $\\eta$.\n- $w_j^{(t)}$ denotes the weight of the $j_{th}$ feature at iteration $t$.\n\nAnd we'll do this iteratively for each weight, many times, until the whole network's cost function is minimized.", "_____no_output_____" ] ], [ [ "# put the code together into one cell\n\ndef predict_probability(data, weights):\n \"\"\"probability predicted by the logistic regression\"\"\"\n score = np.dot(data, weights)\n predictions = 1 / (1 + np.exp(-score))\n return predictions\n\ndef compute_avg_log_likelihood(data, label, weights):\n \"\"\"\n the function uses a simple check to prevent overflow problem, \n where numbers gets too large to represent and is converted to inf\n an example of overflow is provided below, when this problem occurs,\n simply use the original score (without taking the exponential)\n \n scores = np.array([-10000, 200, 300])\n logexp = np.log(1 + np.exp(-scores))\n logexp \n \"\"\"\n scores = np.dot(data, weights)\n logexp = np.log(1 + np.exp(-scores))\n \n # simple check to prevent overflow\n mask = np.isinf(logexp)\n logexp[mask] = -scores[mask]\n \n log_likelihood = np.sum((label - 1) * scores - logexp) / data.shape[0] \n return log_likelihood", "_____no_output_____" ], [ "def logistic_regression(data, label, step_size, batch_size, max_iter):\n \n # weights of the model are initialized as zero\n data_num = data.shape[0]\n feature_num = data.shape[1] \n weights = np.zeros(data.shape[1])\n \n # `i` keeps track of the starting index of current batch\n # and shuffle the data before starting\n i = 0 \n permutation = np.random.permutation(data_num)\n data, label = data[permutation], label[permutation]\n \n # do a linear scan over data, for each iteration update the weight using \n # batches of data, and store the log likelihood record to visualize convergence\n log_likelihood_record = []\n for _ in range(max_iter):\n \n # extract the batched data and label use it to compute\n # the predicted probability using the current weight and the errors\n batch = slice(i, i + batch_size)\n batch_data, batch_label = data[batch], label[batch]\n predictions = predict_probability(batch_data, weights)\n errors = batch_label - predictions\n \n # loop over each coefficient to compute the derivative and update the weight\n for j in range(feature_num): \n derivative = np.dot(errors, batch_data[:, j]) \n weights[j] += step_size * derivative / batch_size\n \n # track whether log likelihood is increasing after\n # each weight update\n log_likelihood = compute_avg_log_likelihood(\n data = batch_data, \n label = batch_label,\n weights = weights\n )\n log_likelihood_record.append(log_likelihood)\n \n # update starting index of for the batches\n # 
and if we made a complete pass over data, shuffle again \n # and refresh the index that keeps track of the batch\n i += batch_size\n if i + batch_size > data_num:\n permutation = np.random.permutation(data_num)\n data, label = data[permutation], label[permutation]\n i = 0\n \n # We return the list of log likelihoods for plotting purposes.\n return weights, log_likelihood_record", "_____no_output_____" ] ], [ [ "## Comparing Result and Convergence Behavior", "_____no_output_____" ], [ "We'll use the logistic regression code that we've implemented and compare the predicted auc score with scikit-learn's implementation. This only serves to check that the predicted results are similar and that our toy code is correctly implemented. Then we'll also explore the convergence difference between batch gradient descent and stochastic gradient descent.", "_____no_output_____" ] ], [ [ "# manually append the coefficient term,\n# every good open-source library does not\n# require this additional step from the user\ndata = np.c_[np.ones(X.shape[0]), X]\n\n# using our logistic regression code\nweights_batch, log_likelihood_batch = logistic_regression(\n data = data,\n label = np.array(y),\n step_size = 5e-1, \n batch_size = X.shape[0], # batch gradient descent\n max_iter = 200\n)", "_____no_output_____" ], [ "# compare both logistic regression's auc score\nlogreg = LogisticRegression(C = 1e9)\nlogreg.fit(X, y)\npred_prob = logreg.predict_proba(X)[:, 1]\n\nproba = predict_probability(data, weights_batch)\n\n# check that the auc score is similar\nauc1 = metrics.roc_auc_score(y, pred_prob)\nauc2 = metrics.roc_auc_score(y, proba)\nprint('auc', auc1, auc2)", "_____no_output_____" ], [ "weights_sgd, log_likelihood_sgd = logistic_regression(\n data = data,\n label = y,\n step_size = 5e-1, \n batch_size = 30, # stochastic gradient descent\n max_iter = 200\n)", "_____no_output_____" ], [ "weights_minibatch, log_likelihood_minibatch = logistic_regression(\n data = data,\n label = y,\n step_size = 5e-1, \n batch_size = 100, # mini-batch gradient descent\n max_iter = 200\n)", "_____no_output_____" ], [ "plt.figure(figsize = (10, 7))\nplt.plot(log_likelihood_sgd, label = 'stochastic gradient descent')\nplt.plot(log_likelihood_batch, label = 'batch gradient descent')\nplt.plot(log_likelihood_minibatch, label = 'mini-batch gradient descent')\nplt.legend(loc = 'best')\nplt.xlabel('# of iterations')\nplt.ylabel('Average log likelihood')\nplt.title('Convergence Plot')\nplt.show()", "_____no_output_____" ] ], [ [ "Based on the convergence plot above, we can see that the it's a good idea to use mini-batch gradient descent since it strikes a good balance between batch gradient, which convergences steadily but can be computationly too expensive when the dataset is too large, and stochastic gradient, which is faster to train, but the result can be too noisy.", "_____no_output_____" ], [ "## Pros and Cons of Logistic Regression", "_____no_output_____" ], [ "We'll end this notebook listing out some pros and cons of this method.\n\n**Pros:**\n\n- Highly interpretable (if you remember how).\n- Model training and prediction are fast. 
Thus can be desirable in large-scale applications when we're dealing with millions of parameters.\n- Almost no parameter tuning is required (excluding regularization).\n- Outputs well-calibrated predicted probabilities.\n\n**Cons:**\n\n- Presumes a linear relationship between the features\n- Performance is (generally) not competitive with the best supervised learning methods.\n- Can't automatically learn feature interactions.", "_____no_output_____" ], [ "# Reference", "_____no_output_____" ], [ "- [Notebook: Logistic Regression](http://nbviewer.jupyter.org/github/justmarkham/DAT8/blob/master/notebooks/12_logistic_regression.ipynb)\n- [Coursersa: Washington Classification](https://www.coursera.org/learn/ml-classification)\n- [Blog: Maximum likelihood and gradient descent demonstration](https://zlatankr.github.io/posts/2017/03/06/mle-gradient-descent)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
c5229d1fa531ce20d1327be84a7d4c418a73fb4c
111,502
ipynb
Jupyter Notebook
docs/_build/html/notebooks/api_low_level.ipynb
RichardScottOZ/AIQC
f654e23d0db824aee518c7f98681ca5da76ed10d
[ "BSD-3-Clause" ]
null
null
null
docs/_build/html/notebooks/api_low_level.ipynb
RichardScottOZ/AIQC
f654e23d0db824aee518c7f98681ca5da76ed10d
[ "BSD-3-Clause" ]
null
null
null
docs/_build/html/notebooks/api_low_level.ipynb
RichardScottOZ/AIQC
f654e23d0db824aee518c7f98681ca5da76ed10d
[ "BSD-3-Clause" ]
null
null
null
34.056811
15,768
0.585254
[ [ [ "# Low-Level API", "_____no_output_____" ], [ "## Prerequisites", "_____no_output_____" ], [ "If you've already completed the instructions on the **Installation** page, then let's get started.", "_____no_output_____" ] ], [ [ "import aiqc\nfrom aiqc import datum", "_____no_output_____" ] ], [ [ "## 1. Ingest a `Dataset`", "_____no_output_____" ], [ "### Object Relational Model (ORM)", "_____no_output_____" ], [ "At the moment, AIQC supports the following types of Datasets:\n\n* Single-file tabular/ flat/ delimited datasets.\n* Multi-file image datasets.", "_____no_output_____" ], [ "End users only need to worry about passing the right inputs to the Dataset class, but there are a few objects doing the legwork beneath the hood:\n\n* `Dataset` ORM class with subclasses of either `Tabular` or `Image`.\n * `File` ORM class one or more files with subclasses of either `Tabular` or `Image`.\n * Dedicated `Tabular` and `Image` ORM classes for attributes specific to those data types (e.g. dtype mappings for flat files and colorscale for images).", "_____no_output_____" ], [ "> Considering these types in the future: Sequence/ time series: multi-file tabular (e.g. 3D numpy, HDF5). Graph: multi-file nodes and multi-file edges (e.g. DGL).", "_____no_output_____" ], [ "### Persisting and Compressing Structured Data", "_____no_output_____" ], [ "By default the actual bytes of the file are persisted to the SQLite `BlobField`. It gets gzip compressed, reducing the size by up to 90%. Maximum BlobField size is 2.147 GB, but once you factor in compression, your bottleneck is more likely to be memory beyond that size. The bytes themselves are Parquet (single-partitioned) because, using the PyArrow engine, it preserves every dtype except certain datetimes (which are honestly better off parsed into floats/ ordinal ints). Parquet is also integrated nicely into both Spark and Dask; frameworks for distributed, in-memory computation.\n\nPersisting the file ensures reproducibility by: (a) keeping the data packaged alongside the experiment, and (b) helping entry-level users move away from relying on mutable dataframes they have had in-memory for extended periods of time or floating around on shared file systems.\n\n> *However, we realize that a different approach will be required at scale, so the `source_path` of the file is recorded whenever possible. In the future we could just read the data from that path (e.g. NFS, RDBMS, HDFS, S3) if the BlobField is none. 
Or just switch our data fetching/ filtering to Dask because it uses the Pandas API and Parquet.*", "_____no_output_____" ], [ "### Data Sources", "_____no_output_____" ], [ "You can make a dataset from either:\n\n* `Dataset.Tabular`\n\n * In-memory data structures (pandas dataframe, numpy array).\n \n * Flat files (csv, tsv, parquet).\n \n * Accepts urls.\n\n\n* `Dataset.Image`\n\n * Any image file format that can be read by the Pillow library.\n \n * Accepts urls.", "_____no_output_____" ], [ "#### `Dataset.Tabular.from_pandas()`", "_____no_output_____" ] ], [ [ "df = datum.to_pandas('iris.tsv')\n\ndataset = aiqc.Dataset.Tabular.from_pandas(\n\tdataframe = df\n , name = 'tab separated plants'\n , dtype = None #passed to pd.Dataframe(dtype)/ inferred\n , column_names = None #passed to pd.Dataframe(columns)\n)", "_____no_output_____" ] ], [ [ "> Optionally, `dtype`, as seen in the `pandas.DataFrame.astype(dtype)` [docs](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.astype.html), can be specified as either a single type for all columns, or as a dictionary that maps a specific type to each column name. This encodes features for analysis. We read NumPy into Pandas before persisting it, so `columns` and `dtype` are read directly by `pd.DataFrame()`.", "_____no_output_____" ], [ "#### `Dataset.Tabular.from_numpy()`", "_____no_output_____" ], [ "Must be a 2D NumPy N-Dimensional Array.\n\n> *In the future, we may add support for ingesting 3D arrays as multi-file sequences.*\n\nRegular *ndarrays* don't have column names, and I didn't like the API for *structured arrays* so you have to pass in columns names as a list. If you don't then column names will be numerically assigned in ascending order (zero-based index), but I didn't like the range object, so I stringified numerically assigned columns to string-based numbers.", "_____no_output_____" ] ], [ [ "arr = df.to_numpy()\ncols = list(df.columns)\n\nother_dataset = aiqc.Dataset.Tabular.from_numpy(\n\tndarray = arr\n , name = None\n , dtype = None #passed to pd.Dataframe(dtype)/ inferred \n , column_names = cols #passed to pd.Dataframe(columns)\n)", "_____no_output_____" ] ], [ [ "#### `Dataset.Tabular.from_path`", "_____no_output_____" ], [ "Intended for flat files, delimited text, and structured tabular data. It's read in via Pandas, so it supports URLs to raw data and bytes as well.", "_____no_output_____" ] ], [ [ "file_path = datum.get_path('iris_10x.tsv')\n\n# We'll keep this larger dataset handy for `Foldset` creation later.\nbig_dataset = aiqc.Dataset.Tabular.from_path(\n file_path = file_path\n , source_file_format = 'tsv'\n , name = None\n , dtype = None\n , column_names = None\n , skip_header_rows = 'infer' #passed to `pd.read_csv(header)`. Incompatible w Parquet.\n)", "_____no_output_____" ] ], [ [ "> If you leave `name` blank, it will default to a human-readble timestamp with the appropriate file extension (e.g. '2020_10_13-01_28_13_PM.tsv').", "_____no_output_____" ], [ "#### Image Datasets", "_____no_output_____" ], [ "Image datasets are somewhat multi-modal in that, in order to perform supervised learning on them, they require a loosely coupled `Dataset.Tabular` that contains their labels.", "_____no_output_____" ] ], [ [ "df = datum.to_pandas(name='brain_tumor.csv')\ndf.head()", "_____no_output_____" ] ], [ [ "The `['status']` column of this dataframe serves as the Label of that sample. 
We'll construct a `Dataset.Tabular` from this.", "_____no_output_____" ] ], [ [ "tabular_dataset = aiqc.Dataset.Tabular.from_pandas(dataframe=df)", "_____no_output_____" ], [ "tabular_label = tabular_dataset.make_label(columns=['status'])", "_____no_output_____" ] ], [ [ "#### `Dataset.Image.from_urls()`", "_____no_output_____" ], [ "During ingestion, all image files must have the same `Image.mode` and `Image.size` according to the Pillow library.\n\n> https://pillow.readthedocs.io/en/stable/handbook/concepts.html", "_____no_output_____" ], [ "`from_urls(urls:list)` needs a list of urls. In order to perform supervised learning, the order of this list must line up with the samples in your Tabular dataset.\n> We happen to have this list prepared in the `['url']` column of the dataframe above. acts as a manifest in that it contains the URL of the image file for that sample, solely for the purposes of initial ingestion. We'll construct a `Dataset.Image` from this.", "_____no_output_____" ] ], [ [ "image_urls = datum.get_remote_urls(manifest_name='brain_tumor.csv')", "_____no_output_____" ], [ "image_dataset = aiqc.Dataset.Image.from_urls(urls=image_urls)", "🖼️ Validating Images 🖼️: 100%|███████████████████████| 80/80 [00:34<00:00, 2.32it/s]\n🖼️ Ingesting Images 🖼️: 100%|████████████████████████| 80/80 [00:24<00:00, 3.29it/s]\n" ], [ "image_featureset = image_dataset.make_featureset()", "_____no_output_____" ] ], [ [ "Skipping forward a bit, we bring the heterogenous `Featureset` and `Label` together in the `Splitset`, and they can be used as normal. You can even construct a `Foldset` from this splitset.", "_____no_output_____" ] ], [ [ "image_splitset = image_featureset.make_splitset(\n label_id = tabular_label.id\n , size_test = 0.24\n , size_validation = 0.12\n)", "_____no_output_____" ] ], [ [ "#### `Dataset.Image.from_folder()`", "_____no_output_____" ], [ "When reading images from a locally accessible folder, the fantastic `natsort.natsorted` library is used as the source of truth for the order of the files.\n> Python reads files by insertion order rather than alpha-numerically, which isn't intuitive for humans. So make sure your tabular manifest has the same order as `natsorted`. 
https://natsort.readthedocs.io/en/master/api.html#natsort.natsorted", "_____no_output_____" ] ], [ [ "image_dataset = aiqc.Dataset.Image.from_folder(\"/Users/layne/desktop/brain_tumor_preprocessed\")", "🖼️ Validating Images 🖼️: 100%|█████████████████████| 80/80 [00:00<00:00, 2670.23it/s]\n🖼️ Ingesting Images 🖼️: 100%|███████████████████████| 80/80 [00:00<00:00, 232.15it/s]\n" ] ], [ [ "Here you can see the first 3 files that comprise that dataset.", "_____no_output_____" ] ], [ [ "image_dataset.files[:3]", "_____no_output_____" ], [ "image_featureset = image_dataset.make_featureset()", "_____no_output_____" ], [ "image_splitset = image_featureset.make_splitset(\n label_id = tabular_label.id\n , size_test = 0.24\n , size_validation = 0.12\n)", "_____no_output_____" ] ], [ [ "### Reading Datasets", "_____no_output_____" ], [ "All of the sample-related objects in the API have `to_numpy()` and `to_pandas()` methods that accept the following arguments:\n\n* `samples=[]` list of indices to fetch.\n* `columns=[]` list of columns to fetch.\n* In some cases you can specify a `split`/ `fold` name.\n\nFor structured data, since the `Dataset` itself is fairly removed from the `File.Tabular` it creates, you can get that tabular file with `Dataset.Tabular.get_main_tabular(dataset_id)` to inspect attributes like `dtypes` and `columns`.", "_____no_output_____" ], [ "Later, we'll see how these arguments allow downstream objects like `Splitset` and `Foldset` to slice up the data.", "_____no_output_____" ], [ "### `Dataset.Tabular.to_pandas()`", "_____no_output_____" ] ], [ [ "df = dataset.to_pandas()\ndf.head()", "_____no_output_____" ], [ "df = aiqc.Dataset.to_pandas(\n id = dataset.id \n , samples = [0,13,29,79]\n , columns = ['sepal_length', 'sepal_width']\n)\ndf.tail()", "_____no_output_____" ] ], [ [ "### `Dataset.Tabular.to_numpy()`", "_____no_output_____" ] ], [ [ "arr = dataset.to_numpy(\n samples = [0,13,29,79] \n , columns = ['petal_length', 'petal_width']\n)\narr[:4]", "_____no_output_____" ], [ "arr = aiqc.Dataset.to_numpy(id=dataset.id)\narr[:4]", "_____no_output_____" ] ], [ [ "### `Dataset.Image.to_pillow()`", "_____no_output_____" ], [ "Returns a list of `PIL.Image`'s. You can actually see the image when you call them. ", "_____no_output_____" ] ], [ [ "images_pillow = aiqc.Dataset.Image.to_pillow(id=image_dataset.id, samples=[60,61,62])\nimages_pillow[1]", "_____no_output_____" ] ], [ [ "### `Dataset.Image.to_numpy()`", "_____no_output_____" ], [ "This simply performs `np.array(Pillow.Image)`. Returns an N-dimensional array where the dimensions vary based on the `mode` aka colorscale of the image. For example, it returns '3D of 2Ds for black and white' or '4D of 3Ds for colored' - which would change the class of convultional layer you would use (`Conv1D`:`Conv3D`).\n\n", "_____no_output_____" ] ], [ [ "images_pillow = aiqc.Dataset.Image.to_numpy(id=image_dataset.id, samples=[60,61,62])\nimages_pillow[1]", "_____no_output_____" ] ], [ [ "> At the moment, we haven't found it necessary to provide a `to_pandas` method for images as they have no need for column names, the dtypes are homogenous, images are used as a whole so there is no filtering, Pandas isn't great with 3D data, and Pillow is integrated with NumPy.", "_____no_output_____" ], [ "## 2. Select the `Label` column(s).", "_____no_output_____" ], [ "### ORM", "_____no_output_____" ], [ "From a Dataset, pick the column(s) that you want to predict/ train against. Creating a `Label` won't duplicate your data! 
It simply marks the Dataset `columns` to be used for supervised learning. ", "_____no_output_____" ], [ "Later, we'll see that a `Label` triggers:\n\n* The `supervision` attribute of a `Splitset` to be either `'unsupervised'`/`'supervised'`.\n\n* Approval/ rejection of the `Algorithm.analysis_type`. For example, you wouldn't perform regression on a string label.", "_____no_output_____" ], [ "Part of the magic of this library is that it prevents you from making silly mistakes like these so that you aren't faced with some obscure NumPy/ Tensor, dtype/ dimensionality error on the nth layer of your neural network.", "_____no_output_____" ], [ "For categorical labels, but not for continuous/float labels, the `Label.unique_classes` are recorded.", "_____no_output_____" ], [ "### Deriving Labels", "_____no_output_____" ], [ "Keep the name of the label column handy as you may want to re-use it later when excluding features.", "_____no_output_____" ] ], [ [ "label_column = 'species'", "_____no_output_____" ] ], [ [ "Implicit IDs", "_____no_output_____" ] ], [ [ "label = dataset.make_label(columns=[label_column])", "_____no_output_____" ] ], [ [ "> `columns=[label_column]` is a list in case users have already OneHotEncoded (OHEd) their label. If multiple columns are provided, then they must already be in OHE format. I'm not keen on supporting multi-label/ simultaneous analysis, but that could changed based on feasibility and user demand.", "_____no_output_____" ], [ "Explicit IDs", "_____no_output_____" ] ], [ [ "other_label = aiqc.Label.from_dataset(\n\tdataset_id=other_dataset.id\n\t, columns=[label_column]\n)", "_____no_output_____" ] ], [ [ "### Reading Labels", "_____no_output_____" ], [ "The `Label` comes in handy when we need to fetch what is traditionally referred to as '*Y*' in tutorials. It also accepts a `samples` argument, so that `Splitset` can subset it.", "_____no_output_____" ] ], [ [ "label.to_pandas().tail()", "_____no_output_____" ], [ "label.to_numpy(samples=[0,33,66,99,132])[:5]", "_____no_output_____" ] ], [ [ "## 3. Select the `Featureset` column(s).", "_____no_output_____" ], [ "### ORM", "_____no_output_____" ], [ "Creating a Featureset won't duplicate your data! It simply records the Dataset `columns` to be used as features during training. \n\nThere are three ways to define which columns you want to use as features:\n\n- `exclude_columns=[]` e.g. use all columns except the label column.\n- `include_columns=[]` e.g. only use these columns that I think are informative.\n- Leave both of the above blank and all columns will be used (e.g. images or unsupervised leanring).", "_____no_output_____" ], [ "For structured data, since the Featureset is far removed from the `File.Tabular` that it is derived from, there is a `Featureset.get_dtypes()` method. 
This will come in handy when we are selecting dtypes/columns to include/ exclude in our `Featurecoder`(s).", "_____no_output_____" ], [ "### Deriving Labels", "_____no_output_____" ], [ "Via `include_columns=[]`", "_____no_output_____" ] ], [ [ "include_columns = [\n 'sepal_length',\n 'petal_length',\n 'petal_width'\n]", "_____no_output_____" ], [ "featureset = dataset.make_featureset(include_columns=include_columns)", "_____no_output_____" ] ], [ [ "Via `exclude_columns=[]`", "_____no_output_____" ] ], [ [ "featureset = dataset.make_featureset(exclude_columns=[label_column])", "_____no_output_____" ], [ "featureset.columns", "_____no_output_____" ] ], [ [ "Either way, any excluded columns will be recorded since they are used for dropping.", "_____no_output_____" ] ], [ [ "featureset.columns_excluded", "_____no_output_____" ] ], [ [ "Again, for images, just perform `Dataset.Image.make_featureset()` since you'll likely want to include all pixels and your label column is in a separate, coupled Dataset.", "_____no_output_____" ], [ "### Reading Featuresets", "_____no_output_____" ] ], [ [ "featureset.to_numpy()[:4]", "_____no_output_____" ], [ "featureset.to_pandas(samples=[0,16,32,64]).tail()", "_____no_output_____" ] ], [ [ "## 4. Slice samples with a `Splitset`.", "_____no_output_____" ], [ "A `Splitset` divides a the samples of the Dataset into the following *splits* in the table below. It is the central object of the data preparation side of the ORM in that it touches `Label`, `Featureset`, `Foldset`, and `Encoderset`. It is the only mandatory data preparation object required by the training `Batch`.", "_____no_output_____" ], [ "Both contiuous and categorical `Labels` are automatically stratified.", "_____no_output_____" ], [ "| Split | Description |\n|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| train | The samples that the model will be trained upon. <br/>Later, we’ll see how we can make *cross-folds from our training split*. <br/>Unsupervised learning will only have a training split. |\n| validation (optional) | The samples used for training evaluation. <br/>Ensures that the test set is not revealed to the model during training. |\n| test (optional) | The samples the model has never seen during training. <br/>Used to assess how well the model will perform on unobserved, natural data when it is applied in the real world aka how generalizable it is. |", "_____no_output_____" ], [ "Again, creating a Splitset won't duplicate your data. 
It simply denotes the sample indices (aka rows) to be used in the splits that you specify!", "_____no_output_____" ], [ "### Split Strategies", "_____no_output_____" ], [ "#### a) Default supervised 70-30 split.", "_____no_output_____" ], [ "If you only provide a Label, then 70:30 train:test splits will be generated.", "_____no_output_____" ] ], [ [ "splitset = featureset.make_splitset(label_id=label.id)", "_____no_output_____" ] ], [ [ "#### b) Specifying test size.", "_____no_output_____" ] ], [ [ "splitset = featureset.make_splitset(\n\tlabel_id = label.id\n\t, size_test = 0.30\n)", "_____no_output_____" ] ], [ [ "#### c) Specifying validation size.", "_____no_output_____" ] ], [ [ "splitset = featureset.make_splitset(\n\tlabel_id = label.id\n\t, size_test = 0.20\n\t, size_validation = 0.12\n)", "_____no_output_____" ] ], [ [ "#### d) Taking the whole dataset as a training split.", "_____no_output_____" ] ], [ [ "splitset_unsupervised = featureset.make_splitset()", "_____no_output_____" ] ], [ [ "> Label-based stratification is used to ensure equally distributed label classes for both categorical and continuous data.\n>\n> If you want more control over stratification of continuous splits, specify the number of `continuous_bin_count` for grouping.", "_____no_output_____" ], [ "#### e) Stratification of continuous labels.", "_____no_output_____" ], [ "All splits are stratified by default in that they contain similar distributions of unique label classes so that each split is a statistically accurate representation of the population as a whole.\n\nIn order to support this process for continuous labels, binning/ discretization is utilized. For example, if 4 bins are used, values from *0.0 to 1.0* would be binned as *[0.0-0.25, 0.25-0.50, 0.50-0.75, 0.75-1.0]*. This is controlled by the `make_splitset(bin_count:int)` argument. \n\n> Reference the handy `Pandas.qcut()` and the source code `pd.qcut(x=array_to_bin, q=bin_count, labels=False, duplicates='drop')` for more detail.", "_____no_output_____" ], [ "### Reading Splitsets", "_____no_output_____" ] ], [ [ "splitset.samples.keys()", "_____no_output_____" ] ], [ [ "`.keys()` of 1st layer are referred to as \"split_name\" in the source code: e.g. 'train' as well as, optionally, 'validation' and 'test'.\n \n`Splitset.samples` on disk:\n```\n {\n 'train': [<sample_indices>],\n 'validation': [<sample_indices>],\n 'test': [<sample_indices>]\n }\n```", "_____no_output_____" ], [ "You can also verify the actual size of your splits.", "_____no_output_____" ] ], [ [ "splitset.sizes", "_____no_output_____" ] ], [ [ "The main attribute of the splitset is the `samples` dictionary. Again, on-disk this only contains sample indices. 
The dictionary is structured like so:", "_____no_output_____" ], [ "### `Splitset.to_numpy()`", "_____no_output_____" ], [ "When fetched to memory, the `.keys()` of the 2nd layer are: 'features' and, optionally, 'labels'.\n\nNote that if you do not specified neither a `size_validation` nor `size_test`, then your dictionary will contain neither a `['validation']` nor `['test']` split.", "_____no_output_____" ] ], [ [ "splitset.to_numpy()['train']['features'][:4]", "_____no_output_____" ] ], [ [ "### `Splitset.to_pandas()`", "_____no_output_____" ], [ "Getting more fine-tuned, both the numpy and pandas methods support a few optional filters for the sake of memory-efficiency when fetching larger splits.", "_____no_output_____" ], [ "For example, imagine you are fetching data to specifically encode the only float column in the featureset of the test split. You don't need the labels and you don't need the other columns.", "_____no_output_____" ] ], [ [ "splitset.to_pandas(\n\tsplits = ['test']\n\t, include_label = False\n\t, include_featureset = True\n\t, feature_columns = ['sepal_width']\n)['test']['features'].head()", "_____no_output_____" ] ], [ [ "## 5. Optionally, create a `Foldset` for cross-validation.", "_____no_output_____" ], [ "### ORM", "_____no_output_____" ], [ "*Reference the [scikit-learn documentation](https://scikit-learn.org/stable/modules/cross_validation.html) to learn more about folding.*\n\n![Cross Folds](../images/cross_fold.png)", "_____no_output_____" ], [ "We refer to the left out fold (blue) as the `fold_validation` and the remaining training data as the `folds_train_combined` (green).", "_____no_output_____" ], [ "> *In the future, we may introduce more folding `strategies` aside from leave-one-out.*", "_____no_output_____" ], [ "#### `Fold` objects", "_____no_output_____" ], [ "For the sake of determining which samples get trained upon, the only thing that matters is the slice of data that gets left out.", "_____no_output_____" ], [ "> Tip - DO NOT use a `Foldset` unless your *(total sample count / fold_count)* still gives you an accurate representation of your sample population. If you are ignoring that advice and stretching to perform cross-validation, then at least ensure that *(total sample count / fold_count)* is evenly divisible. Both of these tips help avoid poorly stratified/ undersized folds that perform either too well (only most common label class present) or poorly (handful of samples and a few inaccurate prediction on a normally good model).\n>\n> Tip - The sample indices of the validation fold are not discarded. In fact, `fold_validation` can actually be used alongside a split `validation` for double validation 🤘. However, it's more sensible to skip the validation split when cross-validating because you'll want each `fold_validation` to be as large (representative of the population) as possible. Folds naturally have fewer samples, so a handful of incorrect predictions have the potential to offset your aggregate metrics.\n> \n> Candidly, if you've ever performed cross-validation manually, let alone systematically, you'll know that, barring stratification of continuous labels, it's easy enough to construct the folds, but then it's a pain to generate performance metrics (e.g. `zero_division`, absent OHE classes) due to the absence of outlying classes and bins. Time has been invested to handle these scenarios elegantly so that folds can be treated as first-class-citizens alongside splits. 
That being said, if you try to do something undersized like \"150 samples in their dataset and a `fold_count` > 3 with `unique_classes` > 4,\" then you may run into edge cases.", "_____no_output_____" ], [ "Similar to `Splitset.samples`, there is a `Fold.samples` dictionary of sample indices with the following `.keys()`:\n* `samples['folds_train_combined']` - all the included folds.\n* `samples['fold_validation']` - the fold that got left out.", "_____no_output_____" ], [ "![cross fold objects](../images/cross_fold_objects.png)", "_____no_output_____" ], [ "### Deriving Foldsets", "_____no_output_____" ] ], [ [ "big_label = big_dataset.make_label(columns=[label_column])\nbig_fset = big_dataset.make_featureset(exclude_columns=[label_column])\nbig_splits = big_fset.make_splitset(\n\tlabel_id = big_label.id\n\t, size_test = 0.30\n , bin_count=3\n)", "_____no_output_____" ] ], [ [ "Now we are ready to generate 5 `Fold` objects that belong to the `Foldset`.", "_____no_output_____" ] ], [ [ "foldset = big_splits.make_foldset(fold_count=5, bin_count=3)", "_____no_output_____" ], [ "list(foldset.folds)", "_____no_output_____" ] ], [ [ "### Reading Foldsets", "_____no_output_____" ], [ "##### Sample indices of each Fold:", "_____no_output_____" ] ], [ [ "foldset.folds[0].samples['folds_train_combined'][:10]", "_____no_output_____" ], [ "foldset.folds[0].samples['fold_validation'][:10]", "_____no_output_____" ] ], [ [ "### `Foldset.to_numpy()`", "_____no_output_____" ], [ "In order to reduce memory footprint the `to_numpy()` and `to_pandas()` methods introduce the `fold_index` argument.", "_____no_output_____" ], [ "If no fold_index is specified, then it will fetch all folds and give each fold a numeric key according to its index.", "_____no_output_____" ], [ "So you need to specify the `fold_index` as the first key when accessing the dictionary.", "_____no_output_____" ] ], [ [ "foldset.to_numpy(fold_index=0)[0]['fold_validation']['features'][:4]", "_____no_output_____" ] ], [ [ "### `Foldset.to_pandas()`", "_____no_output_____" ], [ "Similar to `splitset.to_numpy(splits:list)`, the `foldset.to_numpy(fold_names:list)` argument allows you to pluck the `['folds_train_combined]` and `['fold_validation]` slices. Just make sure you remember to specific all 3 levels of keys when accessing the result.", "_____no_output_____" ] ], [ [ "foldset.to_pandas(\n fold_index = 0\n\t, fold_names = ['folds_train_combined']\n\t, include_label = True\n\t, include_featureset = False\n)[0]['folds_train_combined']['labels'].tail()", "_____no_output_____" ] ], [ [ "## 6. Optionally, stage an `Encoderset` for encoding.", "_____no_output_____" ], [ "### Background", "_____no_output_____" ], [ "Certain algorithms either (a) require features and/ or labels formatted a certain way, or (b) perform MUCH better when their values are normalized. For example:\n\n* Converting ordinal or categorical string data `[dog, cat, fish]` into one-hot encoded format `[[1,0,0][0,1,0][0,0,1]]`.\n* Scaling continuous features from (-1 to 1) or (0.0 to 1.0). Or transforming them to resemble a more Gaussian distribution.\n\nThere are two phases of encoding:\n1. `fit` - where the encoder learns about the values of the samples made available to it. Ideally, you only want to `fit` aka learn from your training split so that you are not *\"leaking\"* information from your validation and test spits into your encoder!\n2. 
`transform` - where the encoder transforms all of the samples in the population.\n\nAIQC has solved the following challenges related to encoding:\n\n* How does one dynamically `fit` on only the training samples in advanced scenarios like cross-validation where a different fold is used for validation each time?\n\n* For certain encoders, especially categorical ones, there is arguably no leakage. If an encoder is arbitrarilly assigning values/ tags to a sample through a process that is not aggregate-informed, then the information that is reveal to the `fit` is largely irrelevant. As an analogy, if we are examining swan color and all of a sudden there is a black swan... it's clearly not white, so slap a non-white label on it and move on. In fact, the prediction process and performance metric calucatlion may fail if it doesn't know how to handle the previously unseen category.\n\n* Certain encoders only accept certain dtypes. Certain encoders only accept certain dimensionality (e.g. 1D, 2D, 3D) or shape patterns (odd-by-odd square). Unfortunately, there is not much uniformity here.\n\n* Certain encoders output extraneous objects that don't work with deep learning libraries.\n\n> *For now, only `sklearn.preprocessing` methods are supported. That may change as we add support for more low-level tensor-based frameworks like PyTorch.*", "_____no_output_____" ], [ "Keeping this in mind, we create an `Encoderset` for our `Splitset`. We can attach a `Labelcoder` and/ or `Featurecoder`(s).", "_____no_output_____" ] ], [ [ "encoderset = splitset.make_encoderset()", "_____no_output_____" ] ], [ [ "And then import any scikit-learn encoders that you need. AIQC only supports the uppercase methods (e.g. `RobustScaler`, but not `robust_scale`) because the lowercase methods do not separate the `fit` and `transform` steps. FYI, most of the uppercase methods have a combined `fit_transform` method if you need them. \n\n> https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import *", "_____no_output_____" ] ], [ [ "## 7. Optionally, set a single `Labelcoder`.", "_____no_output_____" ], [ "### Background", "_____no_output_____" ], [ "The simplistic `Labelcoder` is a good warmup for the moe advanced `Featurecoder`.", "_____no_output_____" ], [ "Of course, you cannot encode Labels if your `Splitset` does not have labels in the first place.", "_____no_output_____" ], [ "The process is straightforward. You provide an instantiated encoder [e.g. `StandardScaler()` not `StandardScaler`], and then AIQC will:\n\n* Verify that the encoder works with your `Label`'s dtype, sample values, and figure out what dimensionality it needs in order to succeed.\n\n* Validate the attributes of your encoder to smooth out any common errors they would cause.\n\n* Determine whether the encoder should be `fit` either (a) exclusively on the train split, or (b) if it is not prone to leakage, inclusively on the entire dataset thereby reducing the chance of errors arising.", "_____no_output_____" ] ], [ [ "labelcoder = encoderset.make_labelcoder(\n sklearn_preprocess = OneHotEncoder(sparse=False)\n)", "_____no_output_____" ] ], [ [ "## 8. Optionally, determine a sequence of `Featurecoder`(s).", "_____no_output_____" ], [ "### Background", "_____no_output_____" ], [ "The `Featurecoder` has the same validation process as the `Labelcoder`. 
However, it is not without its own challenges:\n\n* We want to be able to apply different encoders to features of different dtypes. So it's likely that the same encoder will neither be applied to all columns, nor will all encoders be applied at the same exact time.\n\n* Additionally, even within the same dtype (e.g. float/ continuous), different distributions call for different encoders.\n\n* Commonly used encoders such a `OneHotEncoder` can ouput multiple columns from a single column input. Therefore, the structure of the features columns is not fixed during encoding.\n\n* And finally, throughout this entire process, we need to avoid data leakage.\n\nFor these reasons, `Featurecoder`'s are applied sequentially; in an ordered chain, one after the other. After an encoder is applied, its columns are removed from the raw featureset and placed into an intermediary cache specific to each split/ fold. ", "_____no_output_____" ], [ "> Right now, `Featurecoder` cannot be created for `Dataset.Image.Featureset`. I'm not opposed to changing this, but I would just have to account for 3D arrays.", "_____no_output_____" ], [ "### Filtering feature columns", "_____no_output_____" ], [ "The filtering mode is either:\n\n* Inclusive (`include=True`) encode columns that match the filter.\n\n* Exclusive (`include=False`) encode columns outside of the filter.\n\nThen you can select:\n\n1. An optional list of `dtypes`.\n\n2. An optional list of `columns` name.\n\n * The column filter is applied after the dtype filter. \n \n> You can create a filter for all columns by setting `include=False` and then seting both `dtypes` and `columns` to `None`.", "_____no_output_____" ], [ "After submitting your encoder, if `verbose=True` is enabled:\n* The validation rules help determine why it may have failed.\n* The print statements help determine which columns your current filter matched, and which raw columns remain. ", "_____no_output_____" ] ], [ [ "featurecoder = encoderset.make_featurecoder(\n sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False)\n , include = True\n , dtypes = ['float64']\n , columns = None\n , verbose = True\n)", "\n___/ featurecoder_index: 0 \\_________\n\n=> The column(s) below matched your filter(s) and were ran through a test-encoding successfully.\n['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\n\n=> Nice! Now all feature column(s) have encoder(s) associated with them.\nNo more Featurecoders can be added to this Encoderset.\n\n" ] ], [ [ "You can also view this information via the following attributes: `matching_columns`, `leftover_dtypes`, and `leftover_columns`.", "_____no_output_____" ], [ "## 9. 
Create an `Algorithm` aka model.", "_____no_output_____" ], [ "### ORM", "_____no_output_____" ], [ "Now that our data has been prepared, we transition to the other half of the ORM where the focus is the logic that will be applied to that data.", "_____no_output_____" ], [ "> An `Algorithm` is the ORM's codename for a machine learning model since *Model* is the most important *reserved word* for ORMs.", "_____no_output_____" ], [ "The following attributes tell AIQC how to handle the Algorithm behind the scenes:\n\n* `library` - right now, only 'keras' is supported.\n\n * Each library's model object and callbacks (history, early stopping) need to be handled differently.\n \n \n* `analysis_type` - right now, these types are supported:\n\n * `'classification_multi'`, `'classification_binary'`, `'regression'`.\n \n * Used to determine which performance metrics to run.\n \n * Must be compatible with the type of label fed to it.", "_____no_output_____" ], [ "### Model Definition", "_____no_output_____" ], [ "The `Algorithm` is composed of the functions:\n\n* `function_model_build`.\n\n* `function_model_train`.\n\n* `function_model_evaluate` (optional, inferred by `analyis_type`). \n\n* `function_model_predict` (optional, inferred by `analyis_type`).\n\n> May provide overridable defaults for build and train in the future.", "_____no_output_____" ], [ "You can name the functions whatever you want, but do not change the predetermined arguments (e.g. `input_shape`,`**hyperparameters`, `model`, etc.).\n\nAs we define these functions, we'll see that we can pass *hyperparameters* into these function like so: `hyperparameters['<some_variable_name>']` using the `**hyperparameters` kwarg. Later, we'll provide a list of values for each entry in the hyperparameters dictionary.", "_____no_output_____" ], [ "Let's import the modules that we need.", "_____no_output_____" ] ], [ [ "import keras\nfrom keras import metrics\nfrom keras.models import Sequential\nfrom keras.callbacks import History\nfrom keras.layers import Dense, Dropout", "_____no_output_____" ] ], [ [ "> Later, when running your `Job`'s, if you receive a \"module not found\" error, then you can try troubleshooting by importing that module directly within the function where it is used.", "_____no_output_____" ], [ "#### Function to build model", "_____no_output_____" ], [ "If you normally don't wrap the building of your model in a function, don't be scared, all you have to do is add `return model` to the bottom of it, and AIQC will handle the rest. Also, if using `hyperparameters` feels intimidating, you can skip them.", "_____no_output_____" ], [ "The automatically provided `features_shape` and `label_shape` are handy because:\n\n* The number of feature/ label columns is mutable due to encoders (e.g. OHE). \n\n* Shapes can be less obvious in multi-dimensional scenarios like colored images.", "_____no_output_____" ], [ "> You can customize the metrics if you so desire (e.g. change the loss or accuracy), but they will only be applied to the training process/ `History` callback. 
We'll see later that AIQC will calculate metrics for you automatically.", "_____no_output_____" ] ], [ [ "def function_model_build(features_shape, label_shape, **hyperparameters):\n    \n    model = Sequential()\n    model.add(Dense(units=hyperparameters['neuron_count'], input_shape=features_shape, activation='relu', kernel_initializer='he_uniform'))\n    model.add(Dropout(0.2))\n    model.add(Dense(units=hyperparameters['neuron_count'], activation='relu', kernel_initializer='he_uniform'))\n    model.add(Dense(units=label_shape[0], activation='softmax'))\n\n    opt = keras.optimizers.Adamax(hyperparameters['learning_rate'])\n    model.compile(\n        loss = 'categorical_crossentropy'\n        , optimizer = opt\n        , metrics = ['accuracy']\n    )\n    return model", "_____no_output_____" ] ], [ [ "#### Function to train model", "_____no_output_____" ], [ "* `samples_train` - the appropriate data will be fed into the training cycle. For example, `Foldset.samples[fold_index]['folds_train_combined']` or `Splitset.samples['train']`.\n\n* `samples_evaluate` - the appropriate data is made available for evaluation. For example, `Foldset.samples[fold_index]['fold_validation']`, `Splitset.samples['validation']`, or `Splitset.samples['test']`.", "_____no_output_____" ] ], [ [ "def function_model_train(model, samples_train, samples_evaluate, **hyperparameters):\n    \n\tmodel.fit(\n\t\tsamples_train[\"features\"]\n\t\t, samples_train[\"labels\"]\n\t\t, validation_data = (\n\t\t\tsamples_evaluate[\"features\"]\n\t\t\t, samples_evaluate[\"labels\"]\n\t\t)\n\t\t, verbose = 0\n\t\t, batch_size = 3\n\t\t, epochs = hyperparameters['epoch_count']\n\t\t, callbacks=[History()]\n\t)\n    \n\treturn model", "_____no_output_____" ] ], [ [ "##### Optional, callback to stop training early.", "_____no_output_____" ], [ "*Early stopping* isn't just about efficiency in reducing the number of `epochs`. If you've specified 300 epochs, there's a chance your model catches on to the underlying patterns early, say around 75-125 epochs. At this point, there's also a good chance that what it learns in the remaining epochs will cause it to overfit on patterns that are specific to the training data, and thereby lose its simplicity/ generalizability.\n\n> The `val_` prefix refers to the evaluation samples.\n>\n> Remember, regression does not have accuracy metrics.\n>\n> `TrainingCallback.Keras.MetricCutoff` is a custom class we wrote to make multi-metric cutoffs easier, so you won't find information about it in the official Keras documentation.", "_____no_output_____" ] ], [ [ "def function_model_train(model, samples_train, samples_evaluate, **hyperparameters):\n    # Define one or more metrics to monitor.\n    metrics_cutoffs = [\n        {\"metric\":\"val_accuracy\", \"cutoff\":0.9, \"above_or_below\":\"above\"},\n        {\"metric\":\"val_loss\", \"cutoff\":0.2, \"above_or_below\":\"below\"}\n    ]\n    cutoffs = aiqc.TrainingCallback.Keras.MetricCutoff(metrics_cutoffs)\n    # Remember to append `cutoffs` to the list of callbacks.\n    callbacks = [History(), cutoffs]\n    \n    # No changes here.\n    model.fit(\n        samples_train[\"features\"]\n        , samples_train[\"labels\"]\n        , validation_data = (\n            samples_evaluate[\"features\"]\n            , samples_evaluate[\"labels\"]\n        )\n        , verbose = 0\n        , batch_size = 3\n        , epochs = hyperparameters['epoch_count']\n        , callbacks = callbacks\n    )\n\n    return model", "_____no_output_____" ] ], [ [ "#### Optional, function to predict samples", "_____no_output_____" ], [ "`function_model_predict` will be generated for you automatically if set to `None`. 
The `analysis_type` and `library` of the Algorithm help determine how to handle the predictions.", "_____no_output_____" ], [ "##### a) Regression default.", "_____no_output_____" ] ], [ [ "def function_model_predict(model, samples_predict):\n predictions = model.predict(samples_predict['features'])\n return predictions", "_____no_output_____" ] ], [ [ "##### b) Classification binary default.", "_____no_output_____" ], [ "All classification `predictions`, both mutliclass and binary, must be returned in ordinal format. \n\n> For most libraries, classification algorithms output *probabilities* as opposed to actual predictions when running `model.predict()`. We want to return both of these object `predictions, probabilities` (the order matters) to generate performance metrics behind the scenes.", "_____no_output_____" ] ], [ [ "def function_model_predict(model, samples_predict):\n probabilities = model.predict(samples_predict['features'])\n # This is the official keras replacement for binary classes `.predict_classes()`\n # It returns one array per sample: `[[0][1][0][1]]` \n predictions = (probabilities > 0.5).astype(\"int32\")\n \n return predictions, probabilities", "_____no_output_____" ] ], [ [ "##### c) Classification multiclass default.", "_____no_output_____" ] ], [ [ "def function_model_predict(model, samples_predict):\n import numpy as np\n probabilities = model.predict(samples_predict['features'])\n # This is the official keras replacement for multiclass `.predict_classes()`\n # It returns one ordinal array per sample: `[[0][2][1][2]]` \n predictions = np.argmax(probabilities, axis=-1)\n \n return predictions, probabilities", "_____no_output_____" ] ], [ [ "#### Optional, function to calculate loss", "_____no_output_____" ], [ "When creating an `Algorithm`, the evaluate function will be generated for you automatically if set to `None`. The `analysis_type` and `library` of the Algorithm help determine how to handle the predictions.", "_____no_output_____" ], [ "The only trick thing here is when `keras.metrics` returns multiple metrics, like *accuracy* and/ or *R^2*. All we are after in this case is the loss for the split/ fold in question.", "_____no_output_____" ] ], [ [ "def function_model_loss(model, samples_evaluate):\n metrics = model.evaluate(samples_evaluate['features'], samples_evaluate['labels'], verbose=0)\n if (isinstance(metrics, list)):\n loss = metrics[0]\n elif (isinstance(metrics, float)):\n loss = metrics\n else:\n raise ValueError(f\"\\nYikes - The 'metrics' returned are neither a list nor a float:\\n{metrics}\\n\")\n return loss", "_____no_output_____" ] ], [ [ "> In contrast to openly specifying a loss function, for example `keras.losses.<loss_fn>()`, the use of `.evaluate()` is consistent because it comes from the compiled model. Also, although `model.compiled_loss` would be more efficient, it requires making encoded `y_true` and `y_pred` available to the user, whereas `.evaluate()` can be called with the same arugments as the other `function_model_*` and many deep learning libraries support this approach. 
", "_____no_output_____" ], [ "#### Group the functions together in an `Algorithm`!", "_____no_output_____" ] ], [ [ "algorithm = aiqc.Algorithm.make(\n library = \"keras\"\n\t, analysis_type = \"classification_multi\"\n\t, function_model_build = function_model_build\n\t, function_model_train = function_model_train\n\t, function_model_predict = function_model_predict # Optional\n\t, function_model_loss = function_model_loss # Optional\n)", "_____no_output_____" ] ], [ [ "> <!> Remember to use `make` and not `create`. Deceptively, `create` runs because it is a standard, built-in ORM method. However, it does so without any validation logic.", "_____no_output_____" ], [ "## 8. Optional, associate a `Hyperparamset` with your model.", "_____no_output_____" ], [ "The `hyperparameters` below will be automatically fed into the functions above as `**kwargs` via the `**hyperparameters` argument we saw earlier.\n\nFor example, wherever you see `hyperparameters['neuron_count']`, it will pull from the *key:value* pair `\"neuron_count\": [9, 12]` seen below. Where \"model A\" would have 9 neurons and \"model B\" would have 12 neurons.", "_____no_output_____" ] ], [ [ "hyperparameters = {\n\t\"neuron_count\": [12]\n\t, \"epoch_count\": [30, 60]\n , \"learning_rate\": [0.01, 0.03]\n}\n\nhyperparamset = aiqc.Hyperparamset.from_algorithm(\n\talgorithm_id = algorithm.id\n\t, hyperparameters = hyperparameters\n)", "_____no_output_____" ] ], [ [ "> The number of unique combinations escalates quickly, so in the future, we will provide different strategies for generating and selecting parameters to experiment with. ", "_____no_output_____" ], [ "#### `Hyperparamcombo` objects.", "_____no_output_____" ], [ "Each unique combination of hyperparameters is recorded as a `Hyperparamcombo`.\n\nUltimately, a training `Job` will be constructed for each unique combinanation of hyperparameters aka `Hyperparamcombo`.", "_____no_output_____" ] ], [ [ "hyperparamset.hyperparamcombo_count", "_____no_output_____" ], [ "hyperparamcombos = hyperparamset.hyperparamcombos\n\nfor h in hyperparamcombos:\n print(h.hyperparameters)", "{'neuron_count': 12, 'epoch_count': 30, 'learning_rate': 0.01}\n{'neuron_count': 12, 'epoch_count': 30, 'learning_rate': 0.03}\n{'neuron_count': 12, 'epoch_count': 60, 'learning_rate': 0.01}\n{'neuron_count': 12, 'epoch_count': 60, 'learning_rate': 0.03}\n" ], [ "hyperparamcombos[0].get_hyperparameters(as_pandas=True)", "_____no_output_____" ] ], [ [ "## 9. Create a `Batch` of training `Jobs`.", "_____no_output_____" ], [ "The `Batch` is the central object of the \"logic side\" of the ORM. It ties together everything we need for training and hyperparameter tuning.", "_____no_output_____" ] ], [ [ "batch = aiqc.Batch.from_algorithm(\n\talgorithm_id = algorithm.id\n\t, splitset_id = splitset.id\n\t, hyperparamset_id = hyperparamset.id # Optional.\n\t, foldset_id = None # Optional.\n\t, encoderset_id = encoderset.id # Optional.\n , repeat_count = 3\n)", "_____no_output_____" ] ], [ [ "* `repeat_count:int` allows us to run the same `Job` multiple times. Normally, each `Job` has 1 `Result` object associated with it upon completion. However, when `repeat_count` (> 1 of course) is used, a single `Job` will have multiple `Results`.\n\n> Due to the fact that training is a *nondeterministic* process, we are likely to get different results each time we train a model, even if we use the same set of parameters. 
Perhaps you've have the right topology and parameters, but, this time around, the model just didn't recgonize the patterns. Similar to flipping a coin, there is a degree of chance in it, but the real trend averages out upon repetition. ", "_____no_output_____" ], [ "* `hide_test:bool` excludes the test split from the performance metrics and visualizations. This avoids data leakage by forcing the user to make decisions based on the performance on their model on the training and evaluation samples.", "_____no_output_____" ], [ "### `Job` objects.", "_____no_output_____" ], [ "Each `Job` in the Batch represents a `Hyperparamcombo` that needs to be trained.\n\n> If a `Foldset` is used during `Batch` creation, then (a) the number of jobs is multiplied by the `hyperparamcombo_count` and the `fold_count`, (b) each Job will have a `Fold`. Additionally, a superficial `Jobset` will be used to keep track of all Jobs related to that Foldset.", "_____no_output_____" ], [ "`poll_statuses(as_pandas:bool=False)` is used to determine which Job-repeats have been completed.", "_____no_output_____" ] ], [ [ "batch.poll_statuses(as_pandas=True)", "_____no_output_____" ] ], [ [ "### Execute all `Jobs`.\n\nThere are two ways to execute a Batch of Jobs:\n\n#### 1. `batch.run_jobs(in_background=False)`\n\n* Jobs are simply ran on a loop on the main *Process*.\n\n* Stop the Jobs with a keyboard interrupt e.g. `ctrl+Z/D/C` in Python shell or `i,i` in Jupyter.\n\n* It is the more reliable approach on Win/Mac/Lin.\n\n* Although this locks your main process (can't write more code) while models train, you can still fire up a second shell session or notebook.\n\n* Prototype your training jobs in this method so that you can see any errors that arise in the console.\n\n\n#### 2. `batch.run_jobs(in_background=True)`; experimental\n\n* The Jobs loop is executed on a separate, parallel `multiprocessing.Process`\n\n* Stop the Jobs with `batch.stop_jobs()`, which kills the parallel *Process* unless it already failed.\n\n* The benefit is that you can continue to code while your models are trained. There is no performance boost.\n\n* On Mac and Linux (Unix), `'fork'` multiprocessing is used (`force=True`), which allows us to display the progress bar. FYI, even in 'fork' mode, Python multiprocessing is much more fragile in Python 3.8, which seems to be caused by how pickling is handled in passing variables to the child process.\n\n* On Windows, `'spawn'` multiprocessing is used, which requires polling:\n\n * `batch.poll_statuses()`\n \n * `batch.poll_progress(raw:bool=False, loop:bool=False, loop_delay:int=3)` where `raw=True` is just a float, `loop=True` won't stop checking jobs until they are all complete, and `loop_delay=3` checks the progress every 3 seconds. \n \n* It is a known bug that the `aiqc.TrainingCallbacks.Keras.MetricCutoff` class does not work with `in_background=True` as of Python 3.8.\n\n* Also, during stress tests, I observed that when running multiple batches at the same time, the SQLite database would lock when simultaneous writes were attempted.\n\n#### 3. Future, distributed cloud execution.\n\n* In the future, we look to provide options for horizontal and vertical scale via either AWS or Azure.", "_____no_output_____" ] ], [ [ "batch.run_jobs(in_background=False)", "🔮 Training Models 🔮: 100%|████████████████████████████████████████| 12/12 [00:49<00:00, 4.14s/it]\n" ] ], [ [ "The queue is interuptable. 
You can stop the execution of a batch and resume it later.\n\n> This also comes in handy if either your machine or your Python kernel crashes or is interrupted by accident. Whatever the reason, rest easy, just `run_jobs()` again to pick up where you left off. Be aware that the `tqdm` iteration time in the progress bar will be wrong because it will be divided by the jobs already run.", "_____no_output_____" ], [ "## 10. Assess the `Results`.", "_____no_output_____" ], [ "Each `Job` has a `Result`. The following attributes are automatically written to the `Result` after training.\n \n* `model_file`: HDF5 bytes of the model.\n\n* `history`: per-epoch metrics recorded during training.\n\n* `predictions`: dictionary of predictions per split/ fold.\n\n* `probabilities`: dictionary of prediction probabilities per split/ fold.\n\n* `metrics`: dictionary of single-value metrics depending on the analysis_type.\n\n* `plot_data`: metrics readily formatted for plotting.", "_____no_output_____" ], [ "> The dictionary attributes use split/ fold-based keys.", "_____no_output_____" ], [ "### Fetching the trained model.", "_____no_output_____" ] ], [ [ "compiled_model = batch.jobs[0].results[0].get_model()\ncompiled_model", "_____no_output_____" ] ], [ [ "### Fetching metrics.", "_____no_output_____" ] ], [ [ "batch.jobs[0].results[0].metrics", "_____no_output_____" ] ], [ [ "## 11. Metrics & Visualization", "_____no_output_____" ], [ "For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation.", "_____no_output_____" ] ] ]
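Since `metrics` is a split/ fold-keyed dictionary of single-value metrics, one convenient way to compare hyperparameter combinations is to flatten every job-repeat into a single dataframe. The sketch below is illustrative only, not an official AIQC helper: it assumes the split-keyed layout described above and that an `accuracy` key exists (classification); swap the sort key for other `analysis_type`s.

```python
# Illustrative sketch: flatten per-split metrics across all jobs and repeats
# so that hyperparameter combinations can be compared side by side.
import pandas as pd

rows = []
for job in batch.jobs:
    for result in job.results:
        for split_name, split_metrics in result.metrics.items():
            row = {'job_id': job.id, 'split': split_name}
            row.update(split_metrics)  # e.g. accuracy, loss ... depends on analysis_type
            rows.append(row)

metrics_df = pd.DataFrame(rows)
# 'accuracy' is an assumption here; use the metric that matches your analysis_type.
metrics_df.sort_values(['split', 'accuracy'], ascending=[True, False]).head()
```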
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c522a5be8dba583b61b8ab4e4d02ead975a86215
15,354
ipynb
Jupyter Notebook
python/appyters/TCRD_Intro/data/output/a4d89936fa2a7dc0b5d62088a2145a58c460deb5/TCRD_Intro_appyter.ipynb
JessBinder/idg-cfde
efbbe03c2cdd9fd9fef662fe3b9ff3849aa679a4
[ "BSD-2-Clause" ]
null
null
null
python/appyters/TCRD_Intro/data/output/a4d89936fa2a7dc0b5d62088a2145a58c460deb5/TCRD_Intro_appyter.ipynb
JessBinder/idg-cfde
efbbe03c2cdd9fd9fef662fe3b9ff3849aa679a4
[ "BSD-2-Clause" ]
null
null
null
python/appyters/TCRD_Intro/data/output/a4d89936fa2a7dc0b5d62088a2145a58c460deb5/TCRD_Intro_appyter.ipynb
JessBinder/idg-cfde
efbbe03c2cdd9fd9fef662fe3b9ff3849aa679a4
[ "BSD-2-Clause" ]
1
2022-01-06T22:27:36.000Z
2022-01-06T22:27:36.000Z
52.047458
1,339
0.622769
[ [ [ "<img align=\"right\" src=\"https://druggablegenome.net/IDG_Images_Index/IDGLOGO.png\" width=\"400\">\n\n# TCRD Introduction\n\n* http://juniper.health.unm.edu/tcrd/\n* http://juniper.health.unm.edu/tcrd/api.html\n* https://pharos.nih.gov/idg/api", "_____no_output_____" ], [ "### Imports and installs", "_____no_output_____" ] ], [ [ "import sys, json, urllib.request\nimport pandas as pd\nimport matplotlib.pyplot\nimport mysql.connector as mysql", "_____no_output_____" ] ], [ [ "### DB Specs", "_____no_output_____" ] ], [ [ "DBHOST = \"tcrd.kmc.io\"\nDBNAME = \"tcrd6110\"\nDBUSR = \"tcrd\"", "_____no_output_____" ] ], [ [ "### Connect", "_____no_output_____" ] ], [ [ "dbcon = mysql.connect(host=DBHOST, db=DBNAME, user=DBUSR)", "_____no_output_____" ] ], [ [ "### Db Info", "_____no_output_____" ] ], [ [ "pd.read_sql(\"SELECT * FROM dbinfo\", dbcon)", "_____no_output_____" ] ], [ [ "### Select target", "_____no_output_____" ] ], [ [ "sql = \"\"\"SELECT * FROM target\nLEFT OUTER JOIN t2tc ON t2tc.target_id = target.id\nLEFT OUTER JOIN protein ON protein.id = t2tc.protein_id\nWHERE p.uniprot = 'P06493'\"\"\"\ndf = pd.read_sql(sql, dbcon)\ndf", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
c522a6f3a505f1383063725aca8c6c7fa900ea00
3,667
ipynb
Jupyter Notebook
_build/html/_sources/GettingStarted/MarkdownIntro.ipynb
ChristopherDavisUCI/UCI-Math-10-S22
148b209c92c804202430b022d9de5ba1d7b72b91
[ "BSD-3-Clause" ]
null
null
null
_build/html/_sources/GettingStarted/MarkdownIntro.ipynb
ChristopherDavisUCI/UCI-Math-10-S22
148b209c92c804202430b022d9de5ba1d7b72b91
[ "BSD-3-Clause" ]
null
null
null
_build/html/_sources/GettingStarted/MarkdownIntro.ipynb
ChristopherDavisUCI/UCI-Math-10-S22
148b209c92c804202430b022d9de5ba1d7b72b91
[ "BSD-3-Clause" ]
null
null
null
31.886957
306
0.614399
[ [ [ "# Markdown Introduction\n\nThis notebook is meant to introduce some basic functionality of markdown in Deepnote.\n\nIf you want to make edits in this notebook, click the *Duplicate* button at the top-right.\n\nThere are two basic types of cells, *code cells* and *markdown cells*. This content is being written in a markdown cell, whereas the next part, starting with `a = 5`, is an example of a code cell.", "_____no_output_____" ] ], [ [ "a = 5\nprint(a+2)", "7\n" ] ], [ [ "To execute a code cell, put your cursor in it (the cursor doesn't have to be at the end of the cell) and hit `shift+return`. It might take about 30 seconds to start the hardware. For this reason, I often run a short cell like `2+2` right when I open a notebook, even before I've decided what to do.", "_____no_output_____" ], [ "Here is a basic [guide to markdown](https://www.markdownguide.org/basic-syntax). (**Warning**: there are many different flavors of markdown, and while the basic functionality is the same, things that work on GitHub, for example, might not work on Deepnote.)\n\nAs an example, to make the above text, I used the following:\n```\nHere is a basic [guide to markdown](https://www.markdownguide.org/basic-syntax). (**Warning**: there are many ...\n```\n<br>\n\nTo make a short code example, like `pd.read_csv(\"penguins.csv\")`, surround it in backticks `. On my keyboard, the backtick symbol is above the tab key. (Backtick is not the same as apostrophe.) To make a longer code block, surround it by triple backticks. \n\n```\nfor i in range(6):\n print(i)\n```\n\nEven though the for loop looks like a code cell, it was written in markdown, so you can't execute it. If you want to see how it was made, double-click anywhere in this markdown cell.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
c522aa39fe55bc8959fa2ecb95ab50b7e4f3daaf
23,123
ipynb
Jupyter Notebook
understanding.ipynb
joshy/aorta
a38d749832ba42562ca891ec89ffa9215c352242
[ "MIT" ]
null
null
null
understanding.ipynb
joshy/aorta
a38d749832ba42562ca891ec89ffa9215c352242
[ "MIT" ]
null
null
null
understanding.ipynb
joshy/aorta
a38d749832ba42562ca891ec89ffa9215c352242
[ "MIT" ]
null
null
null
57.5199
5,432
0.805432
[ [ [ "This is to understand the output of the unet. It give a 2 Channel output meaning, one is the probability given the pixel is background the other is the probability given the pixel is foreground. Therefore the ground truth needs also to be inputted in that way.", "_____no_output_____" ] ], [ [ "import unet\nfrom torch.utils.data import DataLoader\nfrom AortaDataset import AortaDataset\n", "/home/joshy/anaconda3/envs/deep/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ], [ "aorta_dataset = AortaDataset()\naorta_loader = DataLoader(aorta_dataset, batch_size=3, shuffle=False)", "_____no_output_____" ], [ "type(aorta_loader)", "_____no_output_____" ], [ "image, mask = aorta_dataset.__getitem__(0)", "/home/joshy/anaconda3/envs/deep/lib/python3.6/site-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15.\n warn(\"The default mode, 'constant', will be changed to 'reflect' in \"\npixdim[1,2,3] should be non-zero; setting 0 dims to 1\n" ], [ "mask", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "mask.nonzero()[0].sum()", "_____no_output_____" ], [ "mask.shape", "_____no_output_____" ], [ "388*388", "_____no_output_____" ], [ "plt.imshow(mask, cmap='Greys')", "_____no_output_____" ], [ "foreground = np.where(mask > 0, 1, 0)", "_____no_output_____" ], [ "plt.imshow(foreground, cmap='Greys')", "_____no_output_____" ], [ "foreground[0][0]", "_____no_output_____" ], [ "foreground[100][200]", "_____no_output_____" ], [ "foreground[101][201]", "_____no_output_____" ], [ "background = np.where(mask == 0, 1, 0)", "_____no_output_____" ], [ "plt.imshow(background, cmap='Greys')", "_____no_output_____" ], [ "combined = np.stack((foreground, background))", "_____no_output_____" ], [ "combined.shape", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c522b85bda593970aa8861257e9ecf2de53ef449
12,635
ipynb
Jupyter Notebook
Tutorials/CNTK_201A_CIFAR-10_DataLoader.ipynb
vschs007/CNTK
894d9e1a5d65d30cd33803c06a988844bb87fcb7
[ "RSA-MD" ]
5
2017-08-28T08:27:18.000Z
2021-04-20T21:12:52.000Z
Tutorials/CNTK_201A_CIFAR-10_DataLoader.ipynb
vschs007/CNTK
894d9e1a5d65d30cd33803c06a988844bb87fcb7
[ "RSA-MD" ]
null
null
null
Tutorials/CNTK_201A_CIFAR-10_DataLoader.ipynb
vschs007/CNTK
894d9e1a5d65d30cd33803c06a988844bb87fcb7
[ "RSA-MD" ]
3
2019-08-23T11:42:14.000Z
2022-01-06T08:41:32.000Z
37.052786
400
0.539612
[ [ [ "# CNTK 201A Part A: CIFAR-10 Data Loader\n\nThis tutorial will show how to prepare image data sets for use with deep learning algorithms in CNTK. The CIFAR-10 dataset (http://www.cs.toronto.edu/~kriz/cifar.html) is a popular dataset for image classification, collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. It is a labeled subset of the [80 million tiny images](http://people.csail.mit.edu/torralba/tinyimages/) dataset.\n\nThe CIFAR-10 dataset is not included in the CNTK distribution but can be easily downloaded and converted to CNTK-supported format \n\nCNTK 201A tutorial is divided into two parts:\n- Part A: Familiarizes you with the CIFAR-10 data and converts them into CNTK supported format. This data will be used later in the tutorial for image classification tasks.\n- Part B: We will introduce image understanding tutorials.\n\nIf you are curious about how well computers can perform on CIFAR-10 today, Rodrigo Benenson maintains a [blog](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130) on the state-of-the-art performance of various algorithms.\n", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nfrom PIL import Image\nimport getopt\nimport numpy as np\nimport pickle as cp\nimport os\nimport shutil\nimport struct\nimport sys\nimport tarfile\nimport xml.etree.cElementTree as et\nimport xml.dom.minidom\n\ntry: \n from urllib.request import urlretrieve \nexcept ImportError: \n from urllib import urlretrieve\n\n# Config matplotlib for inline plotting\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Data download\n\nThe CIFAR-10 dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class. \nThere are 50,000 training images and 10,000 test images. The 10 classes are: airplane, automobile, bird, \ncat, deer, dog, frog, horse, ship, and truck.", "_____no_output_____" ] ], [ [ "# CIFAR Image data\nimgSize = 32\nnumFeature = imgSize * imgSize * 3", "_____no_output_____" ] ], [ [ "We first setup a few helper functions to download the CIFAR data. The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch. Each of these files is a Python \"pickled\" object produced with cPickle. To prepare the input data for use in CNTK we use three oprations:\n> `readBatch`: Unpack the pickle files\n\n> `loadData`: Compose the data into single train and test objects\n\n> `saveTxt`: As the name suggests, saves the label and the features into text files for both training and testing. 
\n ", "_____no_output_____" ] ], [ [ "def readBatch(src):\n with open(src, 'rb') as f:\n if sys.version_info[0] < 3: \n d = cp.load(f) \n else:\n d = cp.load(f, encoding='latin1')\n data = d['data']\n feat = data\n res = np.hstack((feat, np.reshape(d['labels'], (len(d['labels']), 1))))\n return res.astype(np.int)\n\ndef loadData(src):\n print ('Downloading ' + src)\n fname, h = urlretrieve(src, './delete.me')\n print ('Done.')\n try:\n print ('Extracting files...')\n with tarfile.open(fname) as tar:\n tar.extractall()\n print ('Done.')\n print ('Preparing train set...')\n trn = np.empty((0, numFeature + 1), dtype=np.int)\n for i in range(5):\n batchName = './cifar-10-batches-py/data_batch_{0}'.format(i + 1)\n trn = np.vstack((trn, readBatch(batchName)))\n print ('Done.')\n print ('Preparing test set...')\n tst = readBatch('./cifar-10-batches-py/test_batch')\n print ('Done.')\n finally:\n os.remove(fname)\n return (trn, tst)\n\ndef saveTxt(filename, ndarray):\n with open(filename, 'w') as f:\n labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))\n for row in ndarray:\n row_str = row.astype(str)\n label_str = labels[row[-1]]\n feature_str = ' '.join(row_str[:-1])\n f.write('|labels {} |features {}\\n'.format(label_str, feature_str))", "_____no_output_____" ] ], [ [ "In addition to saving the images in the text format, we would save the images in PNG format. In addition we also compute the mean of the image. `saveImage` and `saveMean` are two functions used for this purpose.", "_____no_output_____" ] ], [ [ "def saveImage(fname, data, label, mapFile, regrFile, pad, **key_parms):\n # data in CIFAR-10 dataset is in CHW format.\n pixData = data.reshape((3, imgSize, imgSize))\n if ('mean' in key_parms):\n key_parms['mean'] += pixData\n\n if pad > 0:\n pixData = np.pad(pixData, ((0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=128) \n\n img = Image.new('RGB', (imgSize + 2 * pad, imgSize + 2 * pad))\n pixels = img.load()\n for x in range(img.size[0]):\n for y in range(img.size[1]):\n pixels[x, y] = (pixData[0][y][x], pixData[1][y][x], pixData[2][y][x])\n img.save(fname)\n mapFile.write(\"%s\\t%d\\n\" % (fname, label))\n \n # compute per channel mean and store for regression example\n channelMean = np.mean(pixData, axis=(1,2))\n regrFile.write(\"|regrLabels\\t%f\\t%f\\t%f\\n\" % (channelMean[0]/255.0, channelMean[1]/255.0, channelMean[2]/255.0))\n \ndef saveMean(fname, data):\n root = et.Element('opencv_storage')\n et.SubElement(root, 'Channel').text = '3'\n et.SubElement(root, 'Row').text = str(imgSize)\n et.SubElement(root, 'Col').text = str(imgSize)\n meanImg = et.SubElement(root, 'MeanImg', type_id='opencv-matrix')\n et.SubElement(meanImg, 'rows').text = '1'\n et.SubElement(meanImg, 'cols').text = str(imgSize * imgSize * 3)\n et.SubElement(meanImg, 'dt').text = 'f'\n et.SubElement(meanImg, 'data').text = ' '.join(['%e' % n for n in np.reshape(data, (imgSize * imgSize * 3))])\n\n tree = et.ElementTree(root)\n tree.write(fname)\n x = xml.dom.minidom.parse(fname)\n with open(fname, 'w') as f:\n f.write(x.toprettyxml(indent = ' '))\n", "_____no_output_____" ] ], [ [ "`saveTrainImages` and `saveTestImages` are simple wrapper functions to iterate through the data set.", "_____no_output_____" ] ], [ [ "def saveTrainImages(filename, foldername):\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n data = {}\n dataMean = np.zeros((3, imgSize, imgSize)) # mean is in CHW format.\n with open('train_map.txt', 'w') as mapFile:\n with open('train_regrLabels.txt', 
'w') as regrFile:\n for ifile in range(1, 6):\n with open(os.path.join('./cifar-10-batches-py', 'data_batch_' + str(ifile)), 'rb') as f:\n if sys.version_info[0] < 3: \n data = cp.load(f)\n else: \n data = cp.load(f, encoding='latin1')\n for i in range(10000):\n fname = os.path.join(os.path.abspath(foldername), ('%05d.png' % (i + (ifile - 1) * 10000)))\n saveImage(fname, data['data'][i, :], data['labels'][i], mapFile, regrFile, 4, mean=dataMean)\n dataMean = dataMean / (50 * 1000)\n saveMean('CIFAR-10_mean.xml', dataMean)\n\ndef saveTestImages(filename, foldername):\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n with open('test_map.txt', 'w') as mapFile:\n with open('test_regrLabels.txt', 'w') as regrFile:\n with open(os.path.join('./cifar-10-batches-py', 'test_batch'), 'rb') as f:\n if sys.version_info[0] < 3: \n data = cp.load(f)\n else: \n data = cp.load(f, encoding='latin1')\n for i in range(10000):\n fname = os.path.join(os.path.abspath(foldername), ('%05d.png' % i))\n saveImage(fname, data['data'][i, :], data['labels'][i], mapFile, regrFile, 0)", "_____no_output_____" ], [ "# URLs for the train image and labels data\nurl_cifar_data = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n\n# Paths for saving the text files\ndata_dir = './data/CIFAR-10/'\ntrain_filename = data_dir + '/Train_cntk_text.txt'\ntest_filename = data_dir + '/Test_cntk_text.txt'\n\ntrain_img_directory = data_dir + '/Train'\ntest_img_directory = data_dir + '/Test'\n\nroot_dir = os.getcwd()", "_____no_output_____" ], [ "if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\ntry:\n os.chdir(data_dir) \n trn, tst= loadData('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')\n print ('Writing train text file...')\n saveTxt(r'./Train_cntk_text.txt', trn)\n print ('Done.')\n print ('Writing test text file...')\n saveTxt(r'./Test_cntk_text.txt', tst)\n print ('Done.')\n print ('Converting train data to png images...')\n saveTrainImages(r'./Train_cntk_text.txt', 'train')\n print ('Done.')\n print ('Converting test data to png images...')\n saveTestImages(r'./Test_cntk_text.txt', 'test')\n print ('Done.')\nfinally:\n os.chdir(\"../..\")", "Downloading http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\nDone.\nExtracting files...\nDone.\nPreparing train set...\nDone.\nPreparing test set...\nDone.\nWriting train text file...\nDone.\nWriting test text file...\nDone.\nConverting train data to png images...\nDone.\nConverting test data to png images...\nDone.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
c522b891040d98d6423bcaa6c85ec07b0ff9f627
31,442
ipynb
Jupyter Notebook
Math/Ellipse Perimeter.ipynb
AlephEleven/awesome-projects
871c9cd6ef12ad7b0ee9f1bf4b296e2d1ff78493
[ "MIT" ]
null
null
null
Math/Ellipse Perimeter.ipynb
AlephEleven/awesome-projects
871c9cd6ef12ad7b0ee9f1bf4b296e2d1ff78493
[ "MIT" ]
null
null
null
Math/Ellipse Perimeter.ipynb
AlephEleven/awesome-projects
871c9cd6ef12ad7b0ee9f1bf4b296e2d1ff78493
[ "MIT" ]
null
null
null
80.209184
5,717
0.574741
[ [ [ "from math import *\n#Semi-Major Axis\na = 10424.1\n#Semi-Minor Axis\nb = 9579.42\n#Eccentricity\necc = sqrt(1 - (b**2)/(a**2))\nprint(ecc)", "0.39433084466244295\n" ], [ "#Circumference\ndef ellipse_circum(acc=5):\n temp = []\n S_a = 2*pi*a\n S_b = 1\n for x in range(2, acc):\n S_n_a = 2/sqrt(pi)\n S_n_b = (ecc**(2*x-2) * x)/(4*x**2 - 8*x + 3)\n S_n_c = (gamma(0.5 + x))/(factorial(x))\n temp.append(-(S_n_a*S_n_b*S_n_c)/2)\n \n print(temp)\n \n return S_a*(S_b + sum(temp))\n ", "_____no_output_____" ], [ "ellipse_circum(10)", "[-0.03887420376304893, -0.0015112037182110469, -0.00011749368253842683, -1.1418683389674473e-05, -1.242898229428654e-06, -1.4495003708262662e-07, -1.7709425727750508e-08, -2.237429428868252e-09]\n" ], [ "from sympy import *", "_____no_output_____" ], [ "n, A, B, E = Symbol('n'),Symbol('A'),Symbol('B'),Symbol('E')", "_____no_output_____" ], [ "#Circumference\ndef ellipse_circum_sympy(acc=5):\n temp = []\n S_a = 2*pi*A\n S_b = 1\n for x in range(2, acc):\n S_n_a = 2/sqrt(pi)\n S_n_b = (sqrt(1 - (B**2)/(A**2))**(2*x-2) * x)/(4*x**2 - 8*x + 3)\n S_n_c = (gamma(0.5 + x))/(factorial(x))\n temp.append(-(S_n_a*S_n_b*S_n_c)/2)\n \n print(temp)\n \n return S_a*(S_b + sum(temp))", "_____no_output_____" ], [ "#N=3\nfactor(ellipse_circum_sympy(3))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi)]\n" ], [ "simplify(ellipse_circum_sympy(3))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi)]\n" ], [ "#N=4\nfactor(ellipse_circum_sympy(4))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi), -0.110778365681595*(1 - B**2/A**2)**2/sqrt(pi)]\n" ], [ "simplify(ellipse_circum_sympy(4))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi), -0.110778365681595*(1 - B**2/A**2)**2/sqrt(pi)]\n" ], [ "#N=5\nfactor(ellipse_circum_sympy(5))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi), -0.110778365681595*(1 - B**2/A**2)**2/sqrt(pi), -0.0553891828407974*(1 - B**2/A**2)**3/sqrt(pi)]\n" ], [ "simplify(ellipse_circum_sympy(5))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi), -0.110778365681595*(1 - B**2/A**2)**2/sqrt(pi), -0.0553891828407974*(1 - B**2/A**2)**3/sqrt(pi)]\n" ], [ "simplify(ellipse_circum_sympy(50))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi), -0.110778365681595*(1 - B**2/A**2)**2/sqrt(pi), -0.0553891828407974*(1 - B**2/A**2)**3/sqrt(pi), -0.0346182392754984*(1 - B**2/A**2)**4/sqrt(pi), -0.0242327674928489*(1 - B**2/A**2)**5/sqrt(pi), -0.0181745756196366*(1 - B**2/A**2)**6/sqrt(pi), -0.0142800237011431*(1 - B**2/A**2)**7/sqrt(pi), -0.0116025192571787*(1 - B**2/A**2)**8/sqrt(pi), -0.00966876604764896*(1 - B**2/A**2)**9/sqrt(pi), -0.00821845114050161*(1 - B**2/A**2)**10/sqrt(pi), -0.00709775325770594*(1 - B**2/A**2)**11/sqrt(pi), -0.0062105341004927*(1 - B**2/A**2)**12/sqrt(pi), -0.00549393401197431*(1 - B**2/A**2)**13/sqrt(pi), -0.00490529822497706*(1 - B**2/A**2)**14/sqrt(pi), -0.00441476840247935*(1 - B**2/A**2)**15/sqrt(pi), -0.00400088386474691*(1 - B**2/A**2)**16/sqrt(pi), -0.00364786470021042*(1 - B**2/A**2)**17/sqrt(pi), -0.00334387597519289*(1 - B**2/A**2)**18/sqrt(pi), -0.00307988576662503*(1 - B**2/A**2)**19/sqrt(pi), -0.00284889433412815*(1 - B**2/A**2)**20/sqrt(pi), -0.00264540188169043*(1 - B**2/A**2)**21/sqrt(pi), -0.00246503357157517*(1 - B**2/A**2)**22/sqrt(pi), -0.0023042705125594*(1 - B**2/A**2)**23/sqrt(pi), -0.00216025360552443*(1 - B**2/A**2)**24/sqrt(pi), -0.00203063838919297*(1 - B**2/A**2)**25/sqrt(pi), -0.00191348617443184*(1 - B**2/A**2)**26/sqrt(pi), -0.0018071813869634*(1 - B**2/A**2)**27/sqrt(pi), 
-0.00171036809837608*(1 - B**2/A**2)**28/sqrt(pi), -0.00162190078294283*(1 - B**2/A**2)**29/sqrt(pi), -0.00154080574379569*(1 - B**2/A**2)**30/sqrt(pi), -0.00146625062716041*(1 - B**2/A**2)**31/sqrt(pi), -0.00139752012901227*(1 - B**2/A**2)**32/sqrt(pi), -0.00133399648678444*(1 - B**2/A**2)**33/sqrt(pi), -0.00127514370060277*(1 - B**2/A**2)**34/sqrt(pi), -0.00122049468486265*(1 - B**2/A**2)**35/sqrt(pi), -0.00116964073966004*(1 - B**2/A**2)**36/sqrt(pi), -0.00112222287183599*(1 - B**2/A**2)**37/sqrt(pi), -0.0010779246005793*(1 - B**2/A**2)**38/sqrt(pi), -0.00103646596209548*(1 - B**2/A**2)**39/sqrt(pi), -0.000997598488516904*(1 - B**2/A**2)**40/sqrt(pi), -0.000961100982839456*(1 - B**2/A**2)**41/sqrt(pi), -0.000926775947738047*(1 - B**2/A**2)**42/sqrt(pi), -0.000894446554212301*(1 - B**2/A**2)**43/sqrt(pi), -0.000863954058045972*(1 - B**2/A**2)**44/sqrt(pi), -0.00083515558944444*(1 - B**2/A**2)**45/sqrt(pi), -0.000807922255006034*(1 - B**2/A**2)**46/sqrt(pi), -0.000782137502186693*(1 - B**2/A**2)**47/sqrt(pi), -0.000757695705243359*(1 - B**2/A**2)**48/sqrt(pi)]\n" ], [ "factor(simplify(ellipse_circum_sympy(100)))", "[-0.664670194089569*(2/3 - 2*B**2/(3*A**2))/sqrt(pi), -0.110778365681595*(1 - B**2/A**2)**2/sqrt(pi), -0.0553891828407974*(1 - B**2/A**2)**3/sqrt(pi), -0.0346182392754984*(1 - B**2/A**2)**4/sqrt(pi), -0.0242327674928489*(1 - B**2/A**2)**5/sqrt(pi), -0.0181745756196366*(1 - B**2/A**2)**6/sqrt(pi), -0.0142800237011431*(1 - B**2/A**2)**7/sqrt(pi), -0.0116025192571787*(1 - B**2/A**2)**8/sqrt(pi), -0.00966876604764896*(1 - B**2/A**2)**9/sqrt(pi), -0.00821845114050161*(1 - B**2/A**2)**10/sqrt(pi), -0.00709775325770594*(1 - B**2/A**2)**11/sqrt(pi), -0.0062105341004927*(1 - B**2/A**2)**12/sqrt(pi), -0.00549393401197431*(1 - B**2/A**2)**13/sqrt(pi), -0.00490529822497706*(1 - B**2/A**2)**14/sqrt(pi), -0.00441476840247935*(1 - B**2/A**2)**15/sqrt(pi), -0.00400088386474691*(1 - B**2/A**2)**16/sqrt(pi), -0.00364786470021042*(1 - B**2/A**2)**17/sqrt(pi), -0.00334387597519289*(1 - B**2/A**2)**18/sqrt(pi), -0.00307988576662503*(1 - B**2/A**2)**19/sqrt(pi), -0.00284889433412815*(1 - B**2/A**2)**20/sqrt(pi), -0.00264540188169043*(1 - B**2/A**2)**21/sqrt(pi), -0.00246503357157517*(1 - B**2/A**2)**22/sqrt(pi), -0.0023042705125594*(1 - B**2/A**2)**23/sqrt(pi), -0.00216025360552443*(1 - B**2/A**2)**24/sqrt(pi), -0.00203063838919297*(1 - B**2/A**2)**25/sqrt(pi), -0.00191348617443184*(1 - B**2/A**2)**26/sqrt(pi), -0.0018071813869634*(1 - B**2/A**2)**27/sqrt(pi), -0.00171036809837608*(1 - B**2/A**2)**28/sqrt(pi), -0.00162190078294283*(1 - B**2/A**2)**29/sqrt(pi), -0.00154080574379569*(1 - B**2/A**2)**30/sqrt(pi), -0.00146625062716041*(1 - B**2/A**2)**31/sqrt(pi), -0.00139752012901227*(1 - B**2/A**2)**32/sqrt(pi), -0.00133399648678444*(1 - B**2/A**2)**33/sqrt(pi), -0.00127514370060277*(1 - B**2/A**2)**34/sqrt(pi), -0.00122049468486265*(1 - B**2/A**2)**35/sqrt(pi), -0.00116964073966004*(1 - B**2/A**2)**36/sqrt(pi), -0.00112222287183599*(1 - B**2/A**2)**37/sqrt(pi), -0.0010779246005793*(1 - B**2/A**2)**38/sqrt(pi), -0.00103646596209548*(1 - B**2/A**2)**39/sqrt(pi), -0.000997598488516904*(1 - B**2/A**2)**40/sqrt(pi), -0.000961100982839456*(1 - B**2/A**2)**41/sqrt(pi), -0.000926775947738047*(1 - B**2/A**2)**42/sqrt(pi), -0.000894446554212301*(1 - B**2/A**2)**43/sqrt(pi), -0.000863954058045972*(1 - B**2/A**2)**44/sqrt(pi), -0.00083515558944444*(1 - B**2/A**2)**45/sqrt(pi), -0.000807922255006034*(1 - B**2/A**2)**46/sqrt(pi), -0.000782137502186693*(1 - B**2/A**2)**47/sqrt(pi), -0.000757695705243359*(1 - 
B**2/A**2)**48/sqrt(pi), -0.000734500938756317*(1 - B**2/A**2)**49/sqrt(pi), -0.000712465910593628*(1 - B**2/A**2)**50/sqrt(pi), -0.000691511030870286*(1 - B**2/A**2)**51/sqrt(pi), -0.000671563597287489*(1 - B**2/A**2)**52/sqrt(pi), -0.000652557080383126*(1 - B**2/A**2)**53/sqrt(pi), -0.000634430494816928*(1 - B**2/A**2)**54/sqrt(pi), -0.000617127844958285*(1 - B**2/A**2)**55/sqrt(pi), -0.000600597634825473*(1 - B**2/A**2)**56/sqrt(pi), -0.000584792433909014*(1 - B**2/A**2)**57/sqrt(pi), -0.000569668491652746*(1 - B**2/A**2)**58/sqrt(pi), -0.000555185394407337*(1 - B**2/A**2)**59/sqrt(pi), -0.000541305759547154*(1 - B**2/A**2)**60/sqrt(pi), -0.00052799496218124*(1 - B**2/A**2)**61/sqrt(pi), -0.000515220890515565*(1 - B**2/A**2)**62/sqrt(pi), -0.000502953726455671*(1 - B**2/A**2)**63/sqrt(pi), -0.000491165748491866*(1 - B**2/A**2)**64/sqrt(pi), -0.0004798311542959*(1 - B**2/A**2)**65/sqrt(pi), -0.000468925900789175*(1 - B**2/A**2)**66/sqrt(pi), -0.00045842755972673*(1 - B**2/A**2)**67/sqrt(pi), -0.0004483151870857*(1 - B**2/A**2)**68/sqrt(pi), -0.00043856920475775*(1 - B**2/A**2)**69/sqrt(pi), -0.000429171293227226*(1 - B**2/A**2)**70/sqrt(pi), -0.000420104294074539*(1 - B**2/A**2)**71/sqrt(pi), -0.000411352121281319*(1 - B**2/A**2)**72/sqrt(pi), -0.000402899680433073*(1 - B**2/A**2)**73/sqrt(pi), -0.000394732795018889*(1 - B**2/A**2)**74/sqrt(pi), -0.000386838139118511*(1 - B**2/A**2)**75/sqrt(pi), -0.000379203175846435*(1 - B**2/A**2)**76/sqrt(pi), -0.000371816100992284*(1 - B**2/A**2)**77/sqrt(pi), -0.000364665791357817*(1 - B**2/A**2)**78/sqrt(pi), -0.000357741757344694*(1 - B**2/A**2)**79/sqrt(pi), -0.000351034099394481*(1 - B**2/A**2)**80/sqrt(pi), -0.000344533467924213*(1 - B**2/A**2)**81/sqrt(pi), -0.000338231026437794*(1 - B**2/A**2)**82/sqrt(pi), -0.000332118417526268*(1 - B**2/A**2)**83/sqrt(pi), -0.000326187731499013*(1 - B**2/A**2)**84/sqrt(pi), -0.000320431477413736*(1 - B**2/A**2)**85/sqrt(pi), -0.000314842556296055*(1 - B**2/A**2)**86/sqrt(pi), -0.000309414236359916*(1 - B**2/A**2)**87/sqrt(pi), -0.000304140130058326*(1 - B**2/A**2)**88/sqrt(pi), -0.000299014172810152*(1 - B**2/A**2)**89/sqrt(pi), -0.000294030603263316*(1 - B**2/A**2)**90/sqrt(pi), -0.000289183944967767*(1 - B**2/A**2)**91/sqrt(pi), -0.000284468989343293*(1 - B**2/A**2)**92/sqrt(pi), -0.000279880779837756*(1 - B**2/A**2)**93/sqrt(pi), -0.00027541459718077*(1 - B**2/A**2)**94/sqrt(pi), -0.000271065945646337*(1 - B**2/A**2)**95/sqrt(pi), -0.000266830540245613*(1 - B**2/A**2)**96/sqrt(pi), -0.000262704294777898*(1 - B**2/A**2)**97/sqrt(pi), -0.000258683310674154*(1 - B**2/A**2)**98/sqrt(pi)]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
c522cd8395cb7b8354ee9418f4bd224842784c11
40,993
ipynb
Jupyter Notebook
site/en/guide/eager.ipynb
ADBalici/tf-docs
cc5b583c780906354f8422a66b542ccff195db45
[ "Apache-2.0" ]
1
2020-01-12T10:42:48.000Z
2020-01-12T10:42:48.000Z
site/en/guide/eager.ipynb
ADBalici/tf-docs
cc5b583c780906354f8422a66b542ccff195db45
[ "Apache-2.0" ]
null
null
null
site/en/guide/eager.ipynb
ADBalici/tf-docs
cc5b583c780906354f8422a66b542ccff195db45
[ "Apache-2.0" ]
null
null
null
29.769789
343
0.49206
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Eager execution\n", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/eager\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/eager.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/eager.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/eager.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "\n\nTensorFlow's eager execution is an imperative programming environment that\nevaluates operations immediately, without building graphs: operations return\nconcrete values instead of constructing a computational graph to run later. This\nmakes it easy to get started with TensorFlow and debug models, and it\nreduces boilerplate as well. To follow along with this guide, run the code\nsamples below in an interactive `python` interpreter.\n\nEager execution is a flexible machine learning platform for research and\nexperimentation, providing:\n\n* *An intuitive interface*—Structure your code naturally and use Python data\n structures. Quickly iterate on small models and small data.\n* *Easier debugging*—Call ops directly to inspect running models and test\n changes. Use standard Python debugging tools for immediate error reporting.\n* *Natural control flow*—Use Python control flow instead of graph control\n flow, simplifying the specification of dynamic models.\n\nEager execution supports most TensorFlow operations and GPU acceleration.\n\nNote: Some models may experience increased overhead with eager execution\nenabled. 
Performance improvements are ongoing, but please\n[file a bug](https://github.com/tensorflow/tensorflow/issues) if you find a\nproblem and share your benchmarks.", "_____no_output_____" ], [ "## Setup and basic usage", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\nimport os\n\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x #gpu\nexcept Exception:\n pass\nimport tensorflow as tf\n\nimport cProfile", "_____no_output_____" ] ], [ [ "In Tensorflow 2.0, eager execution is enabled by default.", "_____no_output_____" ] ], [ [ "tf.executing_eagerly()", "_____no_output_____" ] ], [ [ "Now you can run TensorFlow operations and the results will return immediately:", "_____no_output_____" ] ], [ [ "x = [[2.]]\nm = tf.matmul(x, x)\nprint(\"hello, {}\".format(m))", "_____no_output_____" ] ], [ [ "Enabling eager execution changes how TensorFlow operations behave—now they\nimmediately evaluate and return their values to Python. `tf.Tensor` objects\nreference concrete values instead of symbolic handles to nodes in a computational\ngraph. Since there isn't a computational graph to build and run later in a\nsession, it's easy to inspect results using `print()` or a debugger. Evaluating,\nprinting, and checking tensor values does not break the flow for computing\ngradients.\n\nEager execution works nicely with [NumPy](http://www.numpy.org/). NumPy\noperations accept `tf.Tensor` arguments. TensorFlow\n[math operations](https://www.tensorflow.org/api_guides/python/math_ops) convert\nPython objects and NumPy arrays to `tf.Tensor` objects. The\n`tf.Tensor.numpy` method returns the object's value as a NumPy `ndarray`.", "_____no_output_____" ] ], [ [ "a = tf.constant([[1, 2],\n [3, 4]])\nprint(a)", "_____no_output_____" ], [ "# Broadcasting support\nb = tf.add(a, 1)\nprint(b)", "_____no_output_____" ], [ "# Operator overloading is supported\nprint(a * b)", "_____no_output_____" ], [ "# Use NumPy values\nimport numpy as np\n\nc = np.multiply(a, b)\nprint(c)", "_____no_output_____" ], [ "# Obtain numpy value from a tensor:\nprint(a.numpy())\n# => [[1 2]\n# [3 4]]", "_____no_output_____" ] ], [ [ "## Dynamic control flow\n\nA major benefit of eager execution is that all the functionality of the host\nlanguage is available while your model is executing. So, for example,\nit is easy to write [fizzbuzz](https://en.wikipedia.org/wiki/Fizz_buzz):", "_____no_output_____" ] ], [ [ "def fizzbuzz(max_num):\n counter = tf.constant(0)\n max_num = tf.convert_to_tensor(max_num)\n for num in range(1, max_num.numpy()+1):\n num = tf.constant(num)\n if int(num % 3) == 0 and int(num % 5) == 0:\n print('FizzBuzz')\n elif int(num % 3) == 0:\n print('Fizz')\n elif int(num % 5) == 0:\n print('Buzz')\n else:\n print(num.numpy())\n counter += 1", "_____no_output_____" ], [ "fizzbuzz(15)", "_____no_output_____" ] ], [ [ "This has conditionals that depend on tensor values and it prints these values\nat runtime.", "_____no_output_____" ], [ "## Eager training", "_____no_output_____" ], [ "### Computing gradients\n\n[Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation)\nis useful for implementing machine learning algorithms such as\n[backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for training\nneural networks. During eager execution, use `tf.GradientTape` to trace\noperations for computing gradients later.\n\nYou can use `tf.GradientTape` to train and/or compute gradients in eager. 
It is especially useful for complicated training loops. \n\nSince different operations can occur during each call, all\nforward-pass operations get recorded to a \"tape\". To compute the gradient, play\nthe tape backwards and then discard. A particular `tf.GradientTape` can only\ncompute one gradient; subsequent calls throw a runtime error.", "_____no_output_____" ] ], [ [ "w = tf.Variable([[1.0]])\nwith tf.GradientTape() as tape:\n loss = w * w\n\ngrad = tape.gradient(loss, w)\nprint(grad) # => tf.Tensor([[ 2.]], shape=(1, 1), dtype=float32)", "_____no_output_____" ] ], [ [ "### Train a model\n\nThe following example creates a multi-layer model that classifies the standard\nMNIST handwritten digits. It demonstrates the optimizer and layer APIs to build\ntrainable graphs in an eager execution environment.", "_____no_output_____" ] ], [ [ "# Fetch and format the mnist data\n(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()\n\ndataset = tf.data.Dataset.from_tensor_slices(\n (tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),\n tf.cast(mnist_labels,tf.int64)))\ndataset = dataset.shuffle(1000).batch(32)", "_____no_output_____" ], [ "# Build the model\nmnist_model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(16,[3,3], activation='relu',\n input_shape=(None, None, 1)),\n tf.keras.layers.Conv2D(16,[3,3], activation='relu'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense(10)\n])", "_____no_output_____" ] ], [ [ "\nEven without training, call the model and inspect the output in eager execution:", "_____no_output_____" ] ], [ [ "for images,labels in dataset.take(1):\n print(\"Logits: \", mnist_model(images[0:1]).numpy())", "_____no_output_____" ] ], [ [ "While keras models have a builtin training loop (using the `fit` method), sometimes you need more customization. Here's an example, of a training loop implemented with eager:", "_____no_output_____" ] ], [ [ "optimizer = tf.keras.optimizers.Adam()\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\nloss_history = []", "_____no_output_____" ] ], [ [ "Note: Use the assert functions in `tf.debugging` to check if a condition holds up. This works in eager and graph execution.", "_____no_output_____" ] ], [ [ "def train_step(images, labels):\n with tf.GradientTape() as tape:\n logits = mnist_model(images, training=True)\n \n # Add asserts to check the shape of the output.\n tf.debugging.assert_equal(logits.shape, (32, 10))\n \n loss_value = loss_object(labels, logits)\n\n loss_history.append(loss_value.numpy().mean())\n grads = tape.gradient(loss_value, mnist_model.trainable_variables)\n optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))", "_____no_output_____" ], [ "def train(epochs):\n for epoch in range(epochs):\n for (batch, (images, labels)) in enumerate(dataset):\n train_step(images, labels)\n print ('Epoch {} finished'.format(epoch))", "_____no_output_____" ], [ "train(epochs = 3)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.plot(loss_history)\nplt.xlabel('Batch #')\nplt.ylabel('Loss [entropy]')", "_____no_output_____" ] ], [ [ "### Variables and optimizers\n\n`tf.Variable` objects store mutable `tf.Tensor`-like values accessed during\ntraining to make automatic differentiation easier. \n\nThe collections of variables can be encapsulated into layers or models, along with methods that operate on them. See [Custom Keras layers and models](./keras/custom_layers_and_models.ipynb) for details. 
The main difference between layers and models is that models add methods like `Model.fit`, `Model.evaluate`, and `Model.save`.\n\nFor example, the automatic differentiation example above\ncan be rewritten:", "_____no_output_____" ] ], [ [ "class Linear(tf.keras.Model):\n def __init__(self):\n super(Linear, self).__init__()\n self.W = tf.Variable(5., name='weight')\n self.B = tf.Variable(10., name='bias')\n def call(self, inputs):\n return inputs * self.W + self.B", "_____no_output_____" ], [ "# A toy dataset of points around 3 * x + 2\nNUM_EXAMPLES = 2000\ntraining_inputs = tf.random.normal([NUM_EXAMPLES])\nnoise = tf.random.normal([NUM_EXAMPLES])\ntraining_outputs = training_inputs * 3 + 2 + noise\n\n# The loss function to be optimized\ndef loss(model, inputs, targets):\n error = model(inputs) - targets\n return tf.reduce_mean(tf.square(error))\n\ndef grad(model, inputs, targets):\n with tf.GradientTape() as tape:\n loss_value = loss(model, inputs, targets)\n return tape.gradient(loss_value, [model.W, model.B])", "_____no_output_____" ] ], [ [ "Next:\n\n1. Create the model.\n2. The Derivatives of a loss function with respect to model parameters.\n3. A strategy for updating the variables based on the derivatives.", "_____no_output_____" ] ], [ [ "model = Linear()\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\n\nprint(\"Initial loss: {:.3f}\".format(loss(model, training_inputs, training_outputs)))\n\nsteps = 300\nfor i in range(steps):\n grads = grad(model, training_inputs, training_outputs)\n optimizer.apply_gradients(zip(grads, [model.W, model.B]))\n if i % 20 == 0:\n print(\"Loss at step {:03d}: {:.3f}\".format(i, loss(model, training_inputs, training_outputs)))", "_____no_output_____" ], [ "print(\"Final loss: {:.3f}\".format(loss(model, training_inputs, training_outputs)))", "_____no_output_____" ], [ "print(\"W = {}, B = {}\".format(model.W.numpy(), model.B.numpy()))", "_____no_output_____" ] ], [ [ "\nNote: Variables persist until the last reference to the python object\nis removed, and is the variable is deleted.", "_____no_output_____" ], [ "### Object-based saving\n\n\n\n", "_____no_output_____" ], [ "A `tf.keras.Model` includes a covienient `save_weights` method allowing you to easily create a checkpoint: ", "_____no_output_____" ] ], [ [ "model.save_weights('weights')\nstatus = model.load_weights('weights')", "_____no_output_____" ] ], [ [ "Using `tf.train.Checkpoint` you can take full control over this process.\n\nThis section is an abbreviated version of the [guide to training checkpoints](./checkpoint.ipynb).\n", "_____no_output_____" ] ], [ [ "x = tf.Variable(10.)\ncheckpoint = tf.train.Checkpoint(x=x)", "_____no_output_____" ], [ "x.assign(2.) # Assign a new value to the variables and save.\ncheckpoint_path = './ckpt/'\ncheckpoint.save('./ckpt/')", "_____no_output_____" ], [ "x.assign(11.) # Change the variable after saving.\n\n# Restore values from the checkpoint\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_path))\n\nprint(x) # => 2.0", "_____no_output_____" ] ], [ [ "To save and load models, `tf.train.Checkpoint` stores the internal state of objects,\nwithout requiring hidden variables. 
To record the state of a `model`,\nan `optimizer`, and a global step, pass them to a `tf.train.Checkpoint`:", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(16,[3,3], activation='relu'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Dense(10)\n])\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\ncheckpoint_dir = 'path/to/model_dir'\nif not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\nroot = tf.train.Checkpoint(optimizer=optimizer,\n model=model)\n\nroot.save(checkpoint_prefix)\nroot.restore(tf.train.latest_checkpoint(checkpoint_dir))", "_____no_output_____" ] ], [ [ "Note: In many training loops, variables are created after `tf.train.Checkpoint.restore` is called. These variables will be restored as soon as they are created, and assertions are available to ensure that a checkpoint has been fully loaded. See the [guide to training checkpoints](./checkpoint.ipynb) for details.", "_____no_output_____" ], [ "### Object-oriented metrics\n\n`tf.keras.metrics` are stored as objects. Update a metric by passing the new data to\nthe callable, and retrieve the result using the `tf.keras.metrics.result` method,\nfor example:", "_____no_output_____" ] ], [ [ "m = tf.keras.metrics.Mean(\"loss\")\nm(0)\nm(5)\nm.result() # => 2.5\nm([8, 9])\nm.result() # => 5.5", "_____no_output_____" ] ], [ [ "### Summaries and TensorBoard\n\n[TensorBoard](https://tensorflow.org/tensorboard) is a visualization tool for\nunderstanding, debugging and optimizing the model training process. It uses\nsummary events that are written while executing the program.\n\nYou can use `tf.summary` to record summaries of variable in eager execution.\nFor example, to record summaries of `loss` once every 100 training steps:", "_____no_output_____" ] ], [ [ "logdir = \"./tb/\"\nwriter = tf.summary.create_file_writer(logdir)\n\nsteps = 1000\nwith writer.as_default(): # or call writer.set_as_default() before the loop.\n for i in range(steps):\n step = i + 1\n # Calculate loss with your real train function.\n loss = 1 - 0.001 * step\n if step % 100 == 0:\n tf.summary.scalar('loss', loss, step=step)", "_____no_output_____" ], [ "!ls tb/", "_____no_output_____" ] ], [ [ "## Advanced automatic differentiation topics\n\n### Dynamic models\n\n`tf.GradientTape` can also be used in dynamic models. This example for a\n[backtracking line search](https://wikipedia.org/wiki/Backtracking_line_search)\nalgorithm looks like normal NumPy code, except there are gradients and is\ndifferentiable, despite the complex control flow:", "_____no_output_____" ] ], [ [ "def line_search_step(fn, init_x, rate=1.0):\n with tf.GradientTape() as tape:\n # Variables are automatically tracked.\n # But to calculate a gradient from a tensor, you must `watch` it.\n tape.watch(init_x)\n value = fn(init_x)\n grad = tape.gradient(value, init_x)\n grad_norm = tf.reduce_sum(grad * grad)\n init_value = value\n while value > init_value - rate * grad_norm:\n x = init_x - rate * grad\n value = fn(x)\n rate /= 2.0\n return x, value", "_____no_output_____" ] ], [ [ "### Custom gradients\n\nCustom gradients are an easy way to override gradients. Within the forward function, define the gradient with respect to the\ninputs, outputs, or intermediate results. 
For example, here's an easy way to clip\nthe norm of the gradients in the backward pass:", "_____no_output_____" ] ], [ [ "@tf.custom_gradient\ndef clip_gradient_by_norm(x, norm):\n y = tf.identity(x)\n def grad_fn(dresult):\n return [tf.clip_by_norm(dresult, norm), None]\n return y, grad_fn", "_____no_output_____" ] ], [ [ "Custom gradients are commonly used to provide a numerically stable gradient for a\nsequence of operations:", "_____no_output_____" ] ], [ [ "def log1pexp(x):\n return tf.math.log(1 + tf.exp(x))\n\ndef grad_log1pexp(x):\n with tf.GradientTape() as tape:\n tape.watch(x)\n value = log1pexp(x)\n return tape.gradient(value, x)\n", "_____no_output_____" ], [ "# The gradient computation works fine at x = 0.\ngrad_log1pexp(tf.constant(0.)).numpy()", "_____no_output_____" ], [ "# However, x = 100 fails because of numerical instability.\ngrad_log1pexp(tf.constant(100.)).numpy()", "_____no_output_____" ] ], [ [ "Here, the `log1pexp` function can be analytically simplified with a custom\ngradient. The implementation below reuses the value for `tf.exp(x)` that is\ncomputed during the forward pass—making it more efficient by eliminating\nredundant calculations:", "_____no_output_____" ] ], [ [ "@tf.custom_gradient\ndef log1pexp(x):\n e = tf.exp(x)\n def grad(dy):\n return dy * (1 - 1 / (1 + e))\n return tf.math.log(1 + e), grad\n\ndef grad_log1pexp(x):\n with tf.GradientTape() as tape:\n tape.watch(x)\n value = log1pexp(x)\n return tape.gradient(value, x)\n", "_____no_output_____" ], [ "# As before, the gradient computation works fine at x = 0.\ngrad_log1pexp(tf.constant(0.)).numpy()", "_____no_output_____" ], [ "# And the gradient computation also works at x = 100.\ngrad_log1pexp(tf.constant(100.)).numpy()", "_____no_output_____" ] ], [ [ "## Performance\n\nComputation is automatically offloaded to GPUs during eager execution. 
If you\nwant control over where a computation runs you can enclose it in a\n`tf.device('/gpu:0')` block (or the CPU equivalent):", "_____no_output_____" ] ], [ [ "import time\n\ndef measure(x, steps):\n # TensorFlow initializes a GPU the first time it's used, exclude from timing.\n tf.matmul(x, x)\n start = time.time()\n for i in range(steps):\n x = tf.matmul(x, x)\n # tf.matmul can return before completing the matrix multiplication\n # (e.g., can return after enqueing the operation on a CUDA stream).\n # The x.numpy() call below will ensure that all enqueued operations\n # have completed (and will also copy the result to host memory,\n # so we're including a little more than just the matmul operation\n # time).\n _ = x.numpy()\n end = time.time()\n return end - start\n\nshape = (1000, 1000)\nsteps = 200\nprint(\"Time to multiply a {} matrix by itself {} times:\".format(shape, steps))\n\n# Run on CPU:\nwith tf.device(\"/cpu:0\"):\n print(\"CPU: {} secs\".format(measure(tf.random.normal(shape), steps)))\n\n# Run on GPU, if available:\nif tf.config.experimental.list_physical_devices(\"GPU\"):\n with tf.device(\"/gpu:0\"):\n print(\"GPU: {} secs\".format(measure(tf.random.normal(shape), steps)))\nelse:\n print(\"GPU: not found\")", "_____no_output_____" ] ], [ [ "A `tf.Tensor` object can be copied to a different device to execute its\noperations:", "_____no_output_____" ] ], [ [ "if tf.config.experimental.list_physical_devices(\"GPU\"):\n x = tf.random.normal([10, 10])\n\n x_gpu0 = x.gpu()\n x_cpu = x.cpu()\n\n _ = tf.matmul(x_cpu, x_cpu) # Runs on CPU\n _ = tf.matmul(x_gpu0, x_gpu0) # Runs on GPU:0", "_____no_output_____" ] ], [ [ "### Benchmarks\n\nFor compute-heavy models, such as\n[ResNet50](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/resnet50)\ntraining on a GPU, eager execution performance is comparable to `tf.function` execution.\nBut this gap grows larger for models with less computation and there is work to\nbe done for optimizing hot code paths for models with lots of small operations.\n\n## Work with functions\n\nWhile eager execution makes development and debugging more interactive,\nTensorFlow 1.x style graph execution has advantages for distributed training, performance\noptimizations, and production deployment. To bridge this gap, TensorFlow 2.0 introduces `function`s via the `tf.function` API. For more information, see the [tf.function](./function.ipynb) guide.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
c522e38e9d080cfcfea92754cf41f790f4ccb10f
20,437
ipynb
Jupyter Notebook
dlaicourse-master/TensorFlow Deployment/Course 4 - TensorFlow Serving/Week 1/Exercises/TFServing_Week1_Exercise_Answer.ipynb
flight505/Google_TF_Certificate
b0ff1605b5159ea3dec33dbf2c9d112e5b30dd28
[ "MIT" ]
5
2020-04-19T02:47:41.000Z
2021-09-05T10:39:37.000Z
dlaicourse-master/TensorFlow Deployment/Course 4 - TensorFlow Serving/Week 1/Exercises/TFServing_Week1_Exercise_Answer.ipynb
flight505/Google_TF_Certificate
b0ff1605b5159ea3dec33dbf2c9d112e5b30dd28
[ "MIT" ]
13
2021-06-08T21:46:47.000Z
2022-03-12T00:35:22.000Z
dlaicourse-master/TensorFlow Deployment/Course 4 - TensorFlow Serving/Week 1/Exercises/TFServing_Week1_Exercise_Answer.ipynb
flight505/Google_TF_Certificate
b0ff1605b5159ea3dec33dbf2c9d112e5b30dd28
[ "MIT" ]
2
2021-01-19T03:00:07.000Z
2021-10-15T14:01:11.000Z
30.187592
286
0.505456
[ [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Train Your Own Model and Serve It With TensorFlow Serving\n\nIn this notebook, you will train a neural network to classify images of handwritten digits from the [MNIST](http://yann.lecun.com/exdb/mnist/) dataset. You will then save the trained model, and serve it using [TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving).", "_____no_output_____" ], [ "**Warning: This notebook is designed to be run in a Google Colab only**. It installs packages on the system and requires root access. If you want to run it in a local Jupyter notebook, please proceed with caution.", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%204%20-%20TensorFlow%20Serving/Week%201/Exercises/TFServing_Week1_Exercise_Answer.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%204%20-%20TensorFlow%20Serving/Week%201/Exercises/TFServing_Week1_Exercise_Answer.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "try:\n %tensorflow_version 2.x\nexcept:\n pass", "_____no_output_____" ], [ "import os\nimport json\nimport tempfile\nimport requests\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\n\nprint(\"\\u2022 Using TensorFlow Version:\", tf.__version__)", "_____no_output_____" ] ], [ [ "## Import the MNIST Dataset\n\nThe [MNIST](http://yann.lecun.com/exdb/mnist/) dataset contains 70,000 grayscale images of the digits 0 through 9. The images show individual digits at a low resolution (28 by 28 pixels). 
\n\nEven though these are really images, we will load them as NumPy arrays and not as binary image objects.", "_____no_output_____" ] ], [ [ "mnist = tf.keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()", "_____no_output_____" ], [ "# EXERCISE: Scale the values of the arrays below to be between 0.0 and 1.0.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0", "_____no_output_____" ] ], [ [ "In the cell below use the `.reshape` method to resize the arrays to the following sizes:\n\n```python\ntrain_images.shape: (60000, 28, 28, 1)\ntest_images.shape: (10000, 28, 28, 1)\n```", "_____no_output_____" ] ], [ [ "# EXERCISE: Reshape the arrays below.\ntrain_images = train_images.reshape(train_images.shape[0], 28, 28, 1)\ntest_images = test_images.reshape(test_images.shape[0], 28, 28, 1)", "_____no_output_____" ], [ "print('\\ntrain_images.shape: {}, of {}'.format(train_images.shape, train_images.dtype))\nprint('test_images.shape: {}, of {}'.format(test_images.shape, test_images.dtype))", "_____no_output_____" ] ], [ [ "## Look at a Sample Image", "_____no_output_____" ] ], [ [ "idx = 42\n\nplt.imshow(test_images[idx].reshape(28,28), cmap=plt.cm.binary)\nplt.title('True Label: {}'.format(test_labels[idx]), fontdict={'size': 16})\nplt.show()", "_____no_output_____" ] ], [ [ "## Build a Model\n\nIn the cell below build a `tf.keras.Sequential` model that can be used to classify the images of the MNIST dataset. Feel free to use the simplest possible CNN. Make sure your model has the correct `input_shape` and the correct number of output units.", "_____no_output_____" ] ], [ [ "# EXERCISE: Create a model.\nmodel = tf.keras.Sequential([\n tf.keras.layers.Conv2D(input_shape=(28,28,1), filters=8, kernel_size=3,\n strides=2, activation='relu', name='Conv1'),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax, name='Softmax')\n])\n\nmodel.summary()", "_____no_output_____" ] ], [ [ "## Train the Model\n\nIn the cell below configure your model for training using the `adam` optimizer, `sparse_categorical_crossentropy` as the loss, and `accuracy` for your metrics. 
Then train the model for the given number of epochs, using the `train_images` array.", "_____no_output_____" ] ], [ [ "# EXERCISE: Configure the model for training.\nmodel.compile(optimizer='adam', \n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nepochs = 5\n\n# EXERCISE: Train the model.\nhistory = model.fit(train_images, train_labels, epochs=epochs)", "_____no_output_____" ] ], [ [ "## Evaluate the Model", "_____no_output_____" ] ], [ [ "# EXERCISE: Evaluate the model on the test images.\nresults_eval = model.evaluate(test_images, test_labels, verbose=0)\n\nfor metric, value in zip(model.metrics_names, results_eval):\n print(metric + ': {:.3}'.format(value))", "_____no_output_____" ] ], [ [ "## Save the Model", "_____no_output_____" ] ], [ [ "MODEL_DIR = tempfile.gettempdir()\n\nversion = 1\n\nexport_path = os.path.join(MODEL_DIR, str(version))\n\nif os.path.isdir(export_path):\n print('\\nAlready saved a model, cleaning up\\n')\n !rm -r {export_path}\n\nmodel.save(export_path, save_format=\"tf\")\n\nprint('\\nexport_path = {}'.format(export_path))\n!ls -l {export_path}", "_____no_output_____" ] ], [ [ "## Examine Your Saved Model", "_____no_output_____" ] ], [ [ "!saved_model_cli show --dir {export_path} --all", "_____no_output_____" ] ], [ [ "## Add TensorFlow Serving Distribution URI as a Package Source", "_____no_output_____" ] ], [ [ "# This is the same as you would do from your command line, but without the [arch=amd64], and no sudo\n# You would instead do:\n# echo \"deb [arch=amd64] http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal\" | sudo tee /etc/apt/sources.list.d/tensorflow-serving.list && \\\n# curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | sudo apt-key add -\n\n!echo \"deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal\" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \\\ncurl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -\n!apt update", "_____no_output_____" ] ], [ [ "## Install TensorFlow Serving", "_____no_output_____" ] ], [ [ "!apt-get install tensorflow-model-server", "_____no_output_____" ] ], [ [ "## Run the TensorFlow Model Server\n\nYou will now launch the TensorFlow model server with a bash script. In the cell below use the following parameters when running the TensorFlow model server:\n\n* `rest_api_port`: Use port `8501` for your requests.\n\n\n* `model_name`: Use `digits_model` as your model name. 
\n\n\n* `model_base_path`: Use the environment variable `MODEL_DIR` defined below as the base path to the saved model.", "_____no_output_____" ] ], [ [ "os.environ[\"MODEL_DIR\"] = MODEL_DIR", "_____no_output_____" ], [ "# EXERCISE: Fill in the missing code below.\n%%bash --bg \nnohup tensorflow_model_server \\\n --rest_api_port=8501 \\\n --model_name=digits_model \\\n --model_base_path=\"${MODEL_DIR}\" >server.log 2>&1", "_____no_output_____" ], [ "!tail server.log", "_____no_output_____" ] ], [ [ "## Create JSON Object with Test Images\n\nIn the cell below construct a JSON object and use the first three images of the testing set (`test_images`) as your data.", "_____no_output_____" ] ], [ [ "# EXERCISE: Create JSON Object\ndata = json.dumps({\"signature_name\": \"serving_default\", \"instances\": test_images[0:3].tolist()})", "_____no_output_____" ] ], [ [ "## Make Inference Request\n\nIn the cell below, send a predict request as a POST to the server's REST endpoint, and pass it your test data. You should ask the server to give you the latest version of your model.", "_____no_output_____" ] ], [ [ "# EXERCISE: Fill in the code below\nheaders = {\"content-type\": \"application/json\"}\njson_response = requests.post('http://localhost:8501/v1/models/digits_model:predict', data=data, headers=headers)\n\npredictions = json.loads(json_response.text)['predictions']", "_____no_output_____" ] ], [ [ "## Plot Predictions", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,15))\n\nfor i in range(3):\n plt.subplot(1,3,i+1)\n plt.imshow(test_images[i].reshape(28,28), cmap = plt.cm.binary)\n plt.axis('off')\n color = 'green' if np.argmax(predictions[i]) == test_labels[i] else 'red'\n plt.title('Prediction: {}\\nTrue Label: {}'.format(np.argmax(predictions[i]), test_labels[i]), color=color)\n \nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]