Dataset schema (column: dtype, observed value range or class count):
hexsha: stringlengths, 40-40
size: int64, 6-14.9M
ext: stringclasses, 1 value
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 6-260
max_stars_repo_name: stringlengths, 6-119
max_stars_repo_head_hexsha: stringlengths, 40-41
max_stars_repo_licenses: list
max_stars_count: int64, 1-191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24-24
max_stars_repo_stars_event_max_datetime: stringlengths, 24-24
max_issues_repo_path: stringlengths, 6-260
max_issues_repo_name: stringlengths, 6-119
max_issues_repo_head_hexsha: stringlengths, 40-41
max_issues_repo_licenses: list
max_issues_count: int64, 1-67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24-24
max_issues_repo_issues_event_max_datetime: stringlengths, 24-24
max_forks_repo_path: stringlengths, 6-260
max_forks_repo_name: stringlengths, 6-119
max_forks_repo_head_hexsha: stringlengths, 40-41
max_forks_repo_licenses: list
max_forks_count: int64, 1-105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24-24
max_forks_repo_forks_event_max_datetime: stringlengths, 24-24
avg_line_length: float64, 2-1.04M
max_line_length: int64, 2-11.2M
alphanum_fraction: float64, 0-1
cells: list
cell_types: list
cell_type_groups: list
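The records that follow are sample rows in this schema. As a minimal sketch of how such a dataset could be loaded and inspected with the Hugging Face datasets library (assuming the data is published there; the identifier "user/jupyter-notebook-corpus" is a placeholder, not the actual dataset name, and the column access assumes the schema listed above):

```python
# Minimal sketch: load a dataset with the schema above and peek at one row.
# "user/jupyter-notebook-corpus" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/jupyter-notebook-corpus", split="train")

row = ds[0]
# Scalar metadata columns from the schema (file hash, size, repo and popularity signals).
print(row["hexsha"], row["size"], row["max_stars_repo_name"], row["max_stars_count"])

# The notebook content itself lives in the list-valued columns.
for cell, cell_type in zip(row["cells"], row["cell_types"]):
    preview = " ".join(str(cell).split())[:80]  # flatten whitespace for a short preview
    print(f"{cell_type}: {preview}")
```

The scalar columns carry per-file repository metadata (paths, head hashes, licenses, star/issue/fork counts and event dates), while cells, cell_types, and cell_type_groups hold the notebook content.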
hexsha: 4a3e373f6560f68e708189725f7ee707ddb63b43 | size: 1,586 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: Determinant_of_Matrix.ipynb | max_stars_repo_name: joeusebio/Linear-Algebra-58020 | max_stars_repo_head_hexsha: 77ab5d6981335e0adca7b75ffca772b6b5dd1053
max_stars_repo_licenses: [ "Apache-2.0" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Determinant_of_Matrix.ipynb | max_issues_repo_name: joeusebio/Linear-Algebra-58020 | max_issues_repo_head_hexsha: 77ab5d6981335e0adca7b75ffca772b6b5dd1053
max_issues_repo_licenses: [ "Apache-2.0" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Determinant_of_Matrix.ipynb | max_forks_repo_name: joeusebio/Linear-Algebra-58020 | max_forks_repo_head_hexsha: 77ab5d6981335e0adca7b75ffca772b6b5dd1053
max_forks_repo_licenses: [ "Apache-2.0" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 25.174603 | max_line_length: 248 | alphanum_fraction: 0.455864
cells, cell_types, cell_type_groups:
[ [ [ "<a href=\"https://colab.research.google.com/github/joeusebio/Linear-Algebra-58020/blob/main/Determinant_of_Matrix.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\n\nA = np.array([[1,2,-1],[4,6,-2],[-1,3,3]])\nprint(A)\nprint(np.linalg.det(A))\nprint(round(np.linalg.det(A)))", "[[ 1 2 -1]\n [ 4 6 -2]\n [-1 3 3]]\n-13.999999999999996\n-14\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
hexsha: 4a3e7d751e1d1fd7beac902792e0455a36b80517 | size: 20,629 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: tests/Untitled1.ipynb | max_stars_repo_name: pabloreyesrobles/py-hyperneat | max_stars_repo_head_hexsha: 3a651b5955fe5d5b4abe2d6abeb161a4d1e6845a
max_stars_repo_licenses: [ "MIT" ] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-02-28T23:20:16.000Z | max_stars_repo_stars_event_max_datetime: 2020-02-28T23:20:16.000Z
max_issues_repo_path: tests/Untitled1.ipynb | max_issues_repo_name: pabloreyesrobles/py-hyperneat | max_issues_repo_head_hexsha: 3a651b5955fe5d5b4abe2d6abeb161a4d1e6845a
max_issues_repo_licenses: [ "MIT" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/Untitled1.ipynb | max_forks_repo_name: pabloreyesrobles/py-hyperneat | max_forks_repo_head_hexsha: 3a651b5955fe5d5b4abe2d6abeb161a4d1e6845a
max_forks_repo_licenses: [ "MIT" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 64.065217 | max_line_length: 12,712 | alphanum_fraction: 0.764603
cells, cell_types, cell_type_groups:
[ [ [ "from hyperneat.spatial_node import SpatialNode, SpatialNodeType\nfrom hyperneat.substrate import Substrate\nfrom hyperneat.evolution import Hyperneat\n\nfrom neat.genes import ConnectionGene, NodeGene, NodeType\nfrom neat.genome import Genome\nfrom neat.activation_functions import ActivationFunction\nfrom neat.neural_network import NeuralNetwork\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\n\n# Genome\ngenome = Genome(num_layers=15, weights_range=[-3.0, 3.0])\ngenome.create_genome_by_size(8, 3)\nnet = genome.build_phenotype()\n\n# Substrate setting\n# Init substrate set\nsubstrate_set = []\nfor i in range(2):\n\ts = Substrate()\n\ts.activation_function = ActivationFunction().get('TANH')\n\n\t# Must create new objects or deep copies\n\ts.input_nodes = [SpatialNode(0, SpatialNodeType.INPUT, [0.0, -0.5], ActivationFunction().get('TANH'), 0)]\n\ts.output_nodes = [SpatialNode(1, SpatialNodeType.OUTPUT, [-0.5, 0.5], ActivationFunction().get('TANH'), 2),\n\t\t\t\t\t SpatialNode(2, SpatialNodeType.OUTPUT, [0.5, 0.5], ActivationFunction().get('TANH'), 2)]\n\ts.hidden_nodes = [SpatialNode(3, SpatialNodeType.HIDDEN, [-0.5, 0.0], ActivationFunction().get('TANH'), 1),\n\t\t\t\t\t SpatialNode(4, SpatialNodeType.HIDDEN, [0.5, 0.0], ActivationFunction().get('TANH'), 1)]\n\n\ts.input_count = 1\n\ts.output_count = 2\n\ts.hidden_count = 2\n\n\ts.extend_nodes_list()\n\tsubstrate_set.append(s)\n\nsubstrate_set[0].coordinates = (-0.5, 0.5)\nsubstrate_set[1].coordinates = (0.5, 0.5)\n\nintra_substrate_conn = [[0, 1], [0, 2], [0, 3], [0, 4], [3, 1], [3, 2], [3, 4], [4, 1], [4, 2], [4, 3]]\ninter_substrate_conn = [[0, 4, 1, 3], [1, 3, 0, 4]]\n\nea = Hyperneat()\nea.connection_threshold = 0.05\nea.max_connection_weight = 0.5\nea.max_bias = 0.06\nea.max_delay = 0.2\n\nnet = ea.build_modular_substrate(genome, substrate_set, intra_substrate_conn, inter_substrate_conn)\nnet.reset_values()\n\ntime = np.linspace(0, 20, 20 / 0.05)\nsignal_1 = np.sin(time)\nsignal_2 = np.cos(time)", "C:\\Users\\pablo\\AppData\\Roaming\\Python\\Python37\\site-packages\\ipykernel_launcher.py:56: DeprecationWarning: object of type <class 'float'> cannot be safely interpreted as an integer.\n" ], [ "output_signal = np.zeros([4, time.shape[0]])\nout_id = net.out_neurons\n\nfor t, _ in enumerate(time):\n net.input([signal_1[t], signal_2[t]])\n net.activate_net(0.05)\n for o, oid in enumerate(out_id):\n output_signal[o, t] = net.neurons[oid].output", "_____no_output_____" ], [ "fig, ax = plt.subplots(2, 2)\nax[0, 0].plot(output_signal[0])\nax[0, 1].plot(output_signal[1])\nax[1, 0].plot(output_signal[2])\nax[1, 1].plot(output_signal[3])\nplt.tight_layout()", "_____no_output_____" ], [ "np.linspace(0, 1, int(6 / 0.05), endpoint=False).shape[0]", "_____no_output_____" ], [ "inter_substrate_conn = [[0, 1], [0, 2], [0, 3], [0, 4], [3, 1], [3, 2], [3, 4], [4, 1], [4, 2], [4, 3]]\ninter_substrate_conn = inter_substrate_conn * 4, [[1, 0], [2, 0], [3, 0], [4, 0]]\ninter_substrate_conn[0]", "_____no_output_____" ], [ "arr = []\nfor _ in range(4):\n arr.append(inter_substrate_conn)\narr.append([[1, 0], [2, 0], [3, 0], [4, 0]])\narr", "_____no_output_____" ], [ "for item in arr[0]:\n print(item)", "[0, 1]\n[0, 2]\n[0, 3]\n[0, 4]\n[3, 1]\n[3, 2]\n[3, 4]\n[4, 1]\n[4, 2]\n[4, 3]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: 4a3e80409c968ddd11987ce0956edd27f143fab3 | size: 26,426 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: docs/tutorials/06-find_label_errors.ipynb | max_stars_repo_name: issam9/rubrix | max_stars_repo_head_hexsha: 16cafb10ca60bb75dd716a33eb8e5149bdbedb7b
max_stars_repo_licenses: [ "Apache-2.0" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: docs/tutorials/06-find_label_errors.ipynb | max_issues_repo_name: issam9/rubrix | max_issues_repo_head_hexsha: 16cafb10ca60bb75dd716a33eb8e5149bdbedb7b
max_issues_repo_licenses: [ "Apache-2.0" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: docs/tutorials/06-find_label_errors.ipynb | max_forks_repo_name: issam9/rubrix | max_forks_repo_head_hexsha: 16cafb10ca60bb75dd716a33eb8e5149bdbedb7b
max_forks_repo_licenses: [ "Apache-2.0" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 35.281709 | max_line_length: 355 | alphanum_fraction: 0.544804
cells, cell_types, cell_type_groups:
[ [ [ "# 🧐 Find label errors with cleanlab", "_____no_output_____" ], [ "In this tutorial, we will show you how you can find possible labeling errors in your data set with the help of [*cleanlab*](https://github.com/cgnorthcutt/cleanlab) and *Rubrix*.", "_____no_output_____" ], [ "## Introduction", "_____no_output_____" ], [ "As shown recently by [Curtis G. Northcutt et al.](https://arxiv.org/abs/2103.14749) label errors are pervasive even in the most-cited test sets used to benchmark the progress of the field of machine learning.\nIn the worst-case scenario, these label errors can destabilize benchmarks and tend to favor more complex models with a higher capacity over lower capacity models.\n\nThey introduce a new principled framework to “identify label errors, characterize label noise, and learn with noisy labels” called **confident learning**. It is open-sourced as the [cleanlab Python package](https://github.com/cgnorthcutt/cleanlab) that supports finding, quantifying, and learning with label errors in data sets.\n\nThis tutorial walks you through 5 basic steps to find and correct label errors in your data set:\n\n1. 💾 Load the data set you want to check, and a model trained on it;\n2. 💻 Make predictions for the test split of your data set;\n3. 🧐 Get label error candidates with *cleanlab*;\n4. 🔦 Uncover label errors with *Rubrix*;\n5. 🖍 Correct label errors and load the corrected data set;", "_____no_output_____" ], [ "## Setup Rubrix", "_____no_output_____" ], [ "If you are new to Rubrix, visit and star Rubrix for updates: ⭐ [Github repository](https://github.com/recognai/rubrix)\n\nIf you have not installed and launched Rubrix, check the [Setup and Installation guide](../getting_started/setup&installation.rst).\n\nOnce installed, you only need to import Rubrix:", "_____no_output_____" ] ], [ [ "import rubrix as rb", "_____no_output_____" ] ], [ [ "### Install tutorial dependencies", "_____no_output_____" ], [ "Apart from [cleanlab](https://github.com/cgnorthcutt/cleanlab), we will also install the Hugging Face libraries [transformers](https://github.com/huggingface/transformers) and [datasets](https://github.com/huggingface/datasets), as well as [PyTorch](https://pytorch.org/), that provide us with the model and the data set we are going to investigate.", "_____no_output_____" ] ], [ [ "!pip install cleanlab torch transformers datasets\nexit(0)", "_____no_output_____" ] ], [ [ "### Imports", "_____no_output_____" ], [ "Let us import all the necessary stuff in the beginning.", "_____no_output_____" ] ], [ [ "import rubrix as rb\nfrom cleanlab.pruning import get_noise_indices\n\nimport torch\nimport datasets\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification", "_____no_output_____" ] ], [ [ "## 1. Load model and data set", "_____no_output_____" ], [ "For this tutorial we will use the well studied [Microsoft Research Paraphrase Corpus](https://microsoft.com/en-us/download/details.aspx?id=52398) (MRPC) data set that forms part of the [GLUE benchmark](https://gluebenchmark.com/), and a pre-trained model from the Hugging Face Hub that was fine-tuned on this specific data set.\n\nLet us first get the model and its corresponding tokenizer to be able to make predictions. 
For a detailed guide on how to use the 🤗 *transformers* library, please refer to their excellent [documentation](https://huggingface.co/transformers/task_summary.html#sequence-classification).", "_____no_output_____" ] ], [ [ "model_name = \"textattack/roberta-base-MRPC\"\n\ntokenizer = AutoTokenizer.from_pretrained(model_name)\nmodel = AutoModelForSequenceClassification.from_pretrained(model_name)", "_____no_output_____" ] ], [ [ "We then get the test split of the MRPC data set, that we will scan for label errors.", "_____no_output_____" ] ], [ [ "dataset = datasets.load_dataset(\"glue\", \"mrpc\", split=\"test\")", "_____no_output_____" ] ], [ [ "Let us have a quick look at the format of the data set. Label `1` means that both `sentence1` and `sentence2` are *semantically equivalent*, a `0` as label implies that the sentence pair is *not equivalent*. ", "_____no_output_____" ] ], [ [ "dataset.to_pandas().head()", "_____no_output_____" ] ], [ [ "## 2. Make predictions", "_____no_output_____" ], [ "Now let us use the model to get predictions for our data set, and add those to our dataset instance. We will use the `.map` functionality of the *datasets* library to process our data batch-wise.", "_____no_output_____" ] ], [ [ "def get_model_predictions(batch):\n # batch is a dictionary of lists\n tokenized_input = tokenizer(\n batch[\"sentence1\"], batch[\"sentence2\"], padding=True, return_tensors=\"pt\"\n )\n # get logits of the model prediction\n logits = model(**tokenized_input).logits\n # convert logits to probabilities\n probabilities = torch.softmax(logits, dim=1).detach().numpy()\n \n return {\"probabilities\": probabilities}\n \n# Apply predictions batch-wise\ndataset = dataset.map(\n get_model_predictions,\n batched=True,\n batch_size=16,\n)", "_____no_output_____" ] ], [ [ "## 3. Get label error candidates", "_____no_output_____" ], [ "To identify label error candidates the cleanlab framework simply needs the probability matrix of our predictions (`n x m`, where `n` is the number of examples and `m` the number of labels), and the potentially noisy labels.", "_____no_output_____" ] ], [ [ "# Output the data as numpy arrays\ndataset.set_format(\"numpy\")\n\n# Get a boolean array of label error candidates\nlabel_error_candidates = get_noise_indices(\n s=dataset[\"label\"],\n psx=dataset[\"probabilities\"],\n)", "_____no_output_____" ] ], [ [ "This one line of code provides us with a boolean array of label error candidates that we can investigate further. \nOut of the **1725 sentence pairs** present in the test data set we obtain **129 candidates** (7.5%) for possible label errors.", "_____no_output_____" ] ], [ [ "frac = label_error_candidates.sum()/len(dataset)\nprint(\n f\"Total: {len(dataset)}\\n\"\n f\"Candidates: {label_error_candidates.sum()} ({100*frac:0.1f}%)\"\n)", "Total: 1725\nCandidates: 129 (7.5%)\n" ] ], [ [ "## 4. 
Uncover label errors in Rubrix", "_____no_output_____" ], [ "Now that we have a list of potential candidates, let us log them to *Rubrix* to uncover and correct the label errors.\nFirst we switch to a pandas DataFrame to filter out our candidates.", "_____no_output_____" ] ], [ [ "candidates = dataset.to_pandas()[label_error_candidates]", "_____no_output_____" ] ], [ [ "Then we will turn those candidates into [TextClassificationRecords](../reference/python_client_api.rst#rubrix.client.models.TextClassificationRecord) that we will log to *Rubrix*.", "_____no_output_____" ] ], [ [ "def make_record(row):\n prediction = list(zip([\"Not equivalent\", \"Equivalent\"], row.probabilities))\n annotation = \"Not equivalent\"\n if row.label == 1:\n annotation = \"Equivalent\"\n \n return rb.TextClassificationRecord(\n inputs={\"sentence1\": row.sentence1, \"sentence2\": row.sentence2}, \n prediction=prediction, \n prediction_agent=\"textattack/roberta-base-MRPC\", \n annotation=annotation, \n annotation_agent=\"MRPC\"\n )\n \nrecords = candidates.apply(make_record, axis=1)", "_____no_output_____" ] ], [ [ "Having our records at hand we can now log them to *Rubrix* and save them in a dataset that we call `\"mrpc_label_error\"`. ", "_____no_output_____" ] ], [ [ "rb.log(records, name=\"mrpc_label_error\")", "_____no_output_____" ] ], [ [ "Scanning through the records in the [*Explore Mode*](../reference/rubrix_webapp_reference.rst#explore-mode) of *Rubrix*, we were able to find at least **30 clear cases** of label errors. \nA couple of examples are shown below, in which the noisy labels are shown in the upper right corner of each example.\nThe predictions of the model together with their probabilities are shown below each sentence pair.", "_____no_output_____" ], [ "![Examples of label errors in the test set uncovered with Rubrix](./img/find_label_errors/test_sample_examples.png \"Examples of label errors in the test set uncovered with Rubrix\")", "_____no_output_____" ], [ "If your model is not terribly over-fitted, you can also try to run the candidate search over your training data to find very obvious label errors. \nIf we repeat the steps above on the training split of the MRPC data set (3668 examples), we obtain **9 candidates** (this low number is expected) out of which **5 examples** were clear cases of label errors.\nA couple of examples are shown below.", "_____no_output_____" ], [ "![Examples of label errors in the training set uncovered with Rubrix](./img/find_label_errors/train_sample_examples.png \"Examples of label errors in the training set uncovered with Rubrix\")", "_____no_output_____" ], [ "## 5. 
Correct label errors\n\nWith *Rubrix* it is very easy to correct those label errors.\nJust switch on the [*Annotation Mode*](../reference/rubrix_webapp_reference.rst#annotation-mode), correct the noisy labels and load the dataset back into your notebook.", "_____no_output_____" ] ], [ [ "# Load the dataset into a pandas DataFrame\ndataset_with_corrected_labels = rb.load(\"mrpc_label_error\")\n\ndataset_with_corrected_labels.head()", "_____no_output_____" ] ], [ [ "Now you can use the corrected data set to repeat your benchmarks and measure your model's \"real-word performance\" you care about in practice.", "_____no_output_____" ], [ "## Summary\n\nIn this tutorial we saw how to leverage *cleanlab* and *Rubrix* to uncover label errors in your data set.\nIn just a few steps you can quickly check if your test data set is seriously affected by label errors and if your benchmarks are really meaningful in practice.\nMaybe your less complex models turns out to beat your resource hungry super model, and the deployment process just got a little bit easier 😀.\n\n*Cleanlab* and *Rubrix* do not care about the model architecture or the framework you are working with.\nThey just care about the underlying data and allow you to put more humans in the loop of your AI Lifecycle. ", "_____no_output_____" ], [ "## Next steps\n\n### 📚 [Rubrix documentation](https://docs.rubrix.ml) for more guides and tutorials.\n\n### 🙋‍♀️ Join the Rubrix community! A good place to start is the [discussion forum](https://github.com/recognai/rubrix/discussions).\n\n### ⭐ Rubrix [Github repo](https://github.com/recognai/rubrix) to stay updated.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
hexsha: 4a3e84e81f78d5d54d71274ee895cdf343b5cd9f | size: 9,429 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: Solutions + Numpy Basics/task_3_videos.ipynb | max_stars_repo_name: DeepConnectAI/challenge-week-1 | max_stars_repo_head_hexsha: 33c3fc296c28dfb75b1c47baef61ebfa877a3668
max_stars_repo_licenses: [ "MIT" ] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-08-11T05:03:36.000Z | max_stars_repo_stars_event_max_datetime: 2020-08-11T05:03:36.000Z
max_issues_repo_path: Solutions + Numpy Basics/task_3_videos.ipynb | max_issues_repo_name: DeepConnectAI/challenge-week-1 | max_issues_repo_head_hexsha: 33c3fc296c28dfb75b1c47baef61ebfa877a3668
max_issues_repo_licenses: [ "MIT" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Solutions + Numpy Basics/task_3_videos.ipynb | max_forks_repo_name: DeepConnectAI/challenge-week-1 | max_forks_repo_head_hexsha: 33c3fc296c28dfb75b1c47baef61ebfa877a3668
max_forks_repo_licenses: [ "MIT" ] | max_forks_count: 15 | max_forks_repo_forks_event_min_datetime: 2020-08-10T15:59:25.000Z | max_forks_repo_forks_event_max_datetime: 2020-08-20T09:14:00.000Z
avg_line_length: 27.489796 | max_line_length: 261 | alphanum_fraction: 0.561353
cells, cell_types, cell_type_groups:
[ [ [ "### Task Video :\n\n#### Dataset Link:\nDataset can be found at \" /data/videos/ \" in the respective challenge's repo.\n\n#### Description:\nVideo series is just a sequence of images arranged in a specific order. Images of that sequence are called frames. Therefore, in video intelligence tasks, we take advantage of the temporal nature of video and semantic content in consecutive frames.\n\n#### Objective:\nHow to read video data and convert it into useable format for machine learning\n\n#### Tasks:\n- Load dataset from provided link. Videos are in “.mp4” format.\n- Extract frames from video at fps=10 (opencv’s VideoCapture Class)\n- Plot 4th frame of 'VID_2.mp4' (matplotlib or Pillow library)\n- Print dimensions of any single frame of 'VID_6.mp4'\n- Print all pixel values of 10th frame of 'VID_14.mp4'\n- Perform sanity check for each video whether all frames have same dimensions or not\n\n#### Further fun (will not be evaluated):\n_Prerequisites: CNN and image processing_\n\n- We will perform video classification for fun on this sample dataset. You can download labels here: _(Link to be added soon or self-annotation for small dataset is also possible)_\n- Train image classifier on all frames extracted at fps=10 from all videos.\n- The naive approach to do video classification would be to classify each frame and save results in sequential format, and that is it !! Obviously there are much better ways of doing video classification taking advantage of the temporal nature of data.\n\n#### Helpful Links:\n- Detailed description of how to process video frames: https://www.youtube.com/watch?v=tQetgoLy70s\n- Nice tutorial on video classification: https://www.analyticsvidhya.com/blog/2018/09/deep-learning-video-classification-python/\n- Used .avi format but the idea is same: https://www.analyticsvidhya.com/blog/2019/09/step-by-step-deep-learning-tutorial-video-classification-python/\n- Line-by-Line explanation of video classification code: https://www.pyimagesearch.com/2019/07/15/video-classification-with-keras-and-deep-learning/", "_____no_output_____" ] ], [ [ "import cv2 # For handling videos\nimport matplotlib.pyplot as plt # For plotting images, you can use pillow library as well\nimport numpy as np # For mathematical operations ", "_____no_output_____" ], [ "# Capture the video from a file\nvideoFile = 'data/videos/VID_2.mp4'\ncap = cv2.VideoCapture(videoFile)", "_____no_output_____" ], [ "# Get frame rate of video\nframeRate = cap.get(5)\nprint(\"Frame rate of video:\", frameRate)", "_____no_output_____" ], [ "# Get time length of video\ntotal_frames = cap.get(7)\nprint(\"Total frames:\", total_frames)\nprint(\"Length of video: %.2f seconds\" % (total_frames/frameRate))\n# https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get", "_____no_output_____" ], [ "# Get frame width and height\nwidth = cap.get(3)\nheight = cap.get(4)\nprint(\"(width, height) = \", (width,height))", "_____no_output_____" ], [ "# Defining desired fps\ndesired_fps = 10\nframe_skipping_rate = int(np.ceil(frameRate / desired_fps))\nprint(\"Frame skipping rate:\", frame_skipping_rate, \"frames\")", "_____no_output_____" ], [ "# Store frames\nframes = []\n# Start extracting frames till we reach the end of the loop\nwhile(cap.isOpened()):\n # Get the current frame number\n frameId = cap.get(1)\n # Reads the next incoming frame\n ret, frame = cap.read()\n # If we reached the end of the video, then ret returns true\n if (ret != True):\n break\n \n if (frameId % 
frame_skipping_rate == 0):\n frames.append(frame)\n\ncap.release()\nprint (\"Done!\")", "_____no_output_____" ], [ "# NHWC\nsingle_video = np.array(frames)\nprint(\"NHWC format:\", single_video.shape)", "_____no_output_____" ], [ "print(\"Plotted 4th frame of 2nd video\")\nplt.imshow(single_video[3,:,:,:])\nplt.show()", "_____no_output_____" ], [ "print(\"Dimensions of 5th frame of 6th video\")\nsingle_video[4,:,:,:].shape", "_____no_output_____" ], [ "single_video[13,:,:,:]", "_____no_output_____" ] ], [ [ "### Here's the solution now", "_____no_output_____" ] ], [ [ "import glob\nfilenames = glob.glob('data/videos/*.mp4')\nprint(filenames)", "_____no_output_____" ], [ "videos = {}\nfor file in filenames:\n cap = cv2.VideoCapture(file)\n frameRate = cap.get(5)\n desired_fps = 10\n frame_skipping_rate = int(np.ceil(frameRate / desired_fps))\n # Store frames\n frames = []\n # Start extracting frames till we reach the end of the loop\n while(cap.isOpened()):\n # Get the current frame number\n frameId = cap.get(1)\n # Reads the next incoming frame\n ret, frame = cap.read()\n # If we reached the end of the video, then ret returns true\n if (ret != True):\n break\n\n if (frameId % frame_skipping_rate == 0):\n frames.append(frame)\n\n cap.release()\n \n frames = np.array(frames)\n videos[file] = frames\n", "_____no_output_____" ], [ "print(\"Number of videos:\", len(videos))", "_____no_output_____" ], [ "plt.imshow(videos[\"data/videos\\\\VID_2.mp4\"][3,:,:,:])\nplt.show()", "_____no_output_____" ], [ "videos[\"data/videos\\\\VID_6.mp4\"][4,:,:,:].shape", "_____no_output_____" ], [ "videos[\"data/videos\\\\VID_14.mp4\"][13,:,:,:]", "_____no_output_____" ], [ "sanity_check = True\ndim_set = set()\nfor video in videos.values():\n dim_set.add(video[0].shape) # Get dimensions of first frame and add it in set\nif len(dim_set)>1:\n sanity_check = False\nprint(\"Sanity check:\", sanity_check)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: 4a3e869054fb3895272978ee1902f9f9b732c708 | size: 4,849 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: Gaurav/Emotion_Detection_Models/pickle.ipynb | max_stars_repo_name: GrayFlash/TUNEX | max_stars_repo_head_hexsha: 9ef0f592eda0dea447b823f9e02e819bb0bf4d01
max_stars_repo_licenses: [ "MIT" ] | max_stars_count: 7 | max_stars_repo_stars_event_min_datetime: 2020-11-21T03:39:47.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-14T16:59:44.000Z
max_issues_repo_path: Gaurav/Emotion_Detection_Models/pickle.ipynb | max_issues_repo_name: GrayFlash/TUNEX | max_issues_repo_head_hexsha: 9ef0f592eda0dea447b823f9e02e819bb0bf4d01
max_issues_repo_licenses: [ "MIT" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Gaurav/Emotion_Detection_Models/pickle.ipynb | max_forks_repo_name: GrayFlash/TUNEX | max_forks_repo_head_hexsha: 9ef0f592eda0dea447b823f9e02e819bb0bf4d01
max_forks_repo_licenses: [ "MIT" ] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2021-01-02T19:19:02.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-13T17:20:51.000Z
avg_line_length: 21.082609 | max_line_length: 101 | alphanum_fraction: 0.479893
cells, cell_types, cell_type_groups:
[ [ [ "import numpy as np\nimport pandas as pd\nimport pickle\nimport cv2\nimport tqdm\nimport os\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "base_dir = 'images/'\nli = [str(i) for i in range(0,7)]\nIMG_SIZE = 48", "_____no_output_____" ], [ "training_data = []\nvalidation_data = []", "_____no_output_____" ], [ "def create_dataset(arr, directory):\n for i in li:\n curr_dir = os.path.join(directory, i)\n \n label = int(i)\n print(curr_dir)\n flag = 0\n for x,y,z in os.walk(curr_dir):\n if(flag == 0):\n flag = 1\n for img in z:\n# print(img)\n img_array = cv2.imread(os.path.join(curr_dir,img) ,cv2.IMREAD_GRAYSCALE)\n img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) \n arr.append([img_array, label])\n return arr", "_____no_output_____" ], [ "training_data = create_dataset(training_data, base_dir+'train')\nvalidation_data = create_dataset(validation_data, base_dir+'validation')", "images/train/0\nimages/train/1\nimages/train/2\nimages/train/3\nimages/train/4\nimages/train/5\nimages/train/6\nimages/validation/0\nimages/validation/1\nimages/validation/2\nimages/validation/3\nimages/validation/4\nimages/validation/5\nimages/validation/6\n" ], [ "len(training_data)", "_____no_output_____" ], [ "len(validation_data)", "_____no_output_____" ], [ "import random\nrandom.shuffle(training_data)\nrandom.shuffle(validation_data)", "_____no_output_____" ], [ "def X_y_split(dataset):\n X = []\n y = []\n \n for img, label in dataset:\n X.append(img)\n y.append(label)\n \n return(X, y)", "_____no_output_____" ], [ "X_train, y_train = X_y_split(training_data)\nX_val, y_val = X_y_split(validation_data)", "_____no_output_____" ], [ "def pickling(data, file_name):\n try:\n f = open(file_name,\"wb\")\n pickle.dump(data, f)\n f.close()\n except Exception as e:\n print(e)\n pass", "_____no_output_____" ], [ "pickling(X_train,'X_Train.pickle')\npickling(y_train,'y_Train.pickle')\npickling(X_val,'X_val.pickle')\npickling(y_val,'y_val.pickle')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: 4a3e90324c8663b4873fc1fdf1ec2ec8ba911925 | size: 12,623 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: notebooks/top_2000_spotify_api.ipynb | max_stars_repo_name: AzucenaMV/top2000-dashboard | max_stars_repo_head_hexsha: d1fa465469024e7b97d8db8160ae85199b8f5642
max_stars_repo_licenses: [ "MIT" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: notebooks/top_2000_spotify_api.ipynb | max_issues_repo_name: AzucenaMV/top2000-dashboard | max_issues_repo_head_hexsha: d1fa465469024e7b97d8db8160ae85199b8f5642
max_issues_repo_licenses: [ "MIT" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: notebooks/top_2000_spotify_api.ipynb | max_forks_repo_name: AzucenaMV/top2000-dashboard | max_forks_repo_head_hexsha: d1fa465469024e7b97d8db8160ae85199b8f5642
max_forks_repo_licenses: [ "MIT" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 27.865342 | max_line_length: 244 | alphanum_fraction: 0.47049
cells, cell_types, cell_type_groups:
[ [ [ "<a href=\"https://colab.research.google.com/github/AzucenaMV/top2000-dashboard/blob/main/top_2000_spotify_api.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport requests\nimport os", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "path = 'drive/MyDrive/JADS/DataVizProject/Code/'\nfile_top = \"top2000.csv\"\nfile_features = 'song_features_v2.csv'\nfile_id = 'song_id.csv'\nfile_artist = 'artist_features.csv'", "_____no_output_____" ], [ "df = pd.read_csv(os.path.join(path,file_top))", "_____no_output_____" ], [ "def clean_name(df, new_col = 'song_clean', old_col = 'song'):\n df.dropna(subset = [old_col], inplace = True)\n df[new_col] = df[old_col].str.lower()\n df[new_col] = df[new_col].str.lstrip()\n df[new_col] = df[new_col].str.replace(\"'\",\"\")\n df[new_col] = df[new_col].str.normalize('NFKD').str.encode('ascii',errors='ignore').str.decode('utf-8')\n return df", "_____no_output_____" ], [ "df = clean_name(df)\ndf = clean_name(df, new_col = 'artist_clean', old_col = 'artist')", "_____no_output_____" ], [ "# manually cleaning song names\ndf.loc[232,'song_clean'] = 'kronenburg park'\ndf.loc[2278,'song_clean'] = \"rainy day woman 12\"\ndf.loc[2817,'song_clean'] = 'everlong acoustic'\ndf.loc[3456,'song_clean'] = 'abergavernny'\ndf.loc[3482,'song_clean'] = \"dont you write her off\"\ndf.loc[3614,'song_clean'] = 'ein bisschen frieden'\ndf.loc[4205,'song_clean'] = 'everybody knows'", "_____no_output_____" ], [ "TOKEN = \"\"", "_____no_output_____" ], [ "# getting song id by name of the song (and artist)\ndata_list = []\n\nfor ind, (artist, song) in enumerate(zip(df['artist_clean'],df['song_clean'])):\n r = requests.get(f'https://api.spotify.com/v1/search?q=track:{song}%20artist:{artist}&type=track&limit=1', headers={'Authorization': f'Bearer {TOKEN}'})\n try: \n json = r.json()['tracks']['items'][0]\n data_list.append([ind, json['id'],json['name'],json['artists'][0]['name'],json['artists'][0]['id'],json['album']['name'],json['album']['release_date'], json['popularity'],json['duration_ms'],1])\n except:\n r = requests.get(f'https://api.spotify.com/v1/search?q=track:{song}&type=track&limit=1', headers={'Authorization': f'Bearer {TOKEN}'})\n \n try:\n json = r.json()['tracks']['items'][0]\n data_list.append([ind, json['id'],json['name'],json['artists'][0]['name'],json['artists'][0]['id'],json['album']['name'],json['album']['release_date'], json['popularity'],json['duration_ms'],0])\n except:\n print(ind)\n data_list.append([ind] + [''] * 9)", "1386\n1918\n2320\n2501\n3358\n3456\n3660\n4320\n4528\n4529\n" ], [ "df_id = pd.DataFrame (data_list, columns = ['index', 'song_id','song_name','artist_name','artist_id','album_name','album_date','song_popularity','duration_ms','search_includes_artist'])", "_____no_output_____" ], [ "# saving file\ndf_id.to_csv(os.path.join(path,file_id), index = False)", "_____no_output_____" ], [ "df_id = pd.read_csv(os.path.join(path,file_id))", "_____no_output_____" ], [ "TOKEN = \"\"", "_____no_output_____" ], [ "# getting audio features with song id\nimport time\n\nfeatures_list = []\nsong_ids = df_id[df_id.song_id.notna()].song_id\n\nfor id in song_ids:\n if id != '':\n time.sleep(.5)\n x = 
requests.get(f'https://api.spotify.com/v1/audio-features/{id}', \n headers={'Authorization': f'Bearer {TOKEN}'})\n features_list.append(x.json())", "_____no_output_____" ], [ "df_features = pd.DataFrame(features_list)", "_____no_output_____" ], [ "df_features.shape", "_____no_output_____" ], [ "# saving file\ndf_features.dropna(subset = ['id'], inplace = True)\ndf_features.to_csv(os.path.join(path,file_features), index = False)", "_____no_output_____" ], [ "TOKEN = \"\"", "_____no_output_____" ], [ "import time\nartist_list = []\nartist_ids = df_id.artist_id.unique()\n\nfor id in artist_ids:\n if id != '':\n time.sleep(.5)\n x = requests.get(f'https://api.spotify.com/v1/artists/{id}', \n headers={'Authorization': f'Bearer {TOKEN}'})\n response = x.json()\n artist_list.append([id,response['genres'],response['popularity'],response['name']])\n\n", "_____no_output_____" ], [ "df_artist = pd.DataFrame (artist_list, columns = ['artist_id', 'artist_genre','artist_popularity','artist_name'])", "_____no_output_____" ], [ "# saving artist features file\ndf_artist.to_csv(os.path.join(path,file_artist), index = False)", "_____no_output_____" ], [ "df_id = pd.read_csv(os.path.join(path,file_id))\ndf_features = pd.read_csv(os.path.join(path,file_features))\ndf_artist = pd.read_csv(os.path.join(path,file_artist))", "_____no_output_____" ], [ "# Removing possible duplicates\ndf_id = df_id.drop_duplicates(subset = ['song_id'], keep = 'first')\ndf_features = df_features.drop_duplicates(subset = ['id'], keep = 'first')", "_____no_output_____" ], [ "# Merging all dfs\ndf_merged = df_id.merge(df_features, how = 'left', left_on = 'song_id', right_on = 'id', suffixes = (\"\",\"_feature\"))\ndf_final = df_merged.merge(df_artist, how = 'left', on = 'artist_id', suffixes = (\"\",\"_artist\"))", "_____no_output_____" ], [ "df_final.shape", "_____no_output_____" ], [ "# Saving final df\nfile_spotify = \"spotify_features.csv\"\ndf_final.to_csv(os.path.join(path,file_spotify), index = False)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: 4a3e939c8b534243654de22601400ed1202cb22c | size: 12,759 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: Untitled.ipynb | max_stars_repo_name: raffettealston/dsc-mod-1-project-v2-1-onl01-dtsc-pt-041320 | max_stars_repo_head_hexsha: 83da87868904a5d67555c62b502da10bdd23dcf3
max_stars_repo_licenses: [ "RSA-MD" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Untitled.ipynb | max_issues_repo_name: raffettealston/dsc-mod-1-project-v2-1-onl01-dtsc-pt-041320 | max_issues_repo_head_hexsha: 83da87868904a5d67555c62b502da10bdd23dcf3
max_issues_repo_licenses: [ "RSA-MD" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Untitled.ipynb | max_forks_repo_name: raffettealston/dsc-mod-1-project-v2-1-onl01-dtsc-pt-041320 | max_forks_repo_head_hexsha: 83da87868904a5d67555c62b502da10bdd23dcf3
max_forks_repo_licenses: [ "RSA-MD" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 66.453125 | max_line_length: 2,023 | alphanum_fraction: 0.633984
cells, cell_types, cell_type_groups:
[ [ [ "# import the required libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# some editing on the display form \nplt.style.use('seaborn-notebook')\n\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n", "_____no_output_____" ], [ "df = pd.read_csv('/Users/raffette/datasets_95290_221883_blockbusters.csv')\nprint(df.head())", " datasets_95290_221883_blockbusters\nMain_Genre Genre_2 Genre_3 imdb_rating length rank_in_year rating studio title worldwide_gross year\nAction Adventure Drama 7.4 135 1 PG-13 Walt Disney Pictures Black Panther $700,059,566.00 2018\n Sci-Fi 8.5 156 2 PG-13 Walt Disney Pictures Avengers: Infinity War $678,815,482.00 2018\nAnimation Action Adventure 7.8 118 3 PG Pixar Incredibles 2 $608,581,744.00 2018\nAction Adventure Drama 6.2 129 4 PG-13 Universal Pictures Jurassic World: Fallen Kingdom $416,769,345.00 2018\n" ], [ "null = pd.DataFrame(df.isnull().sum() / len(df) * 100).transpose()\nprint(null)", " datasets_95290_221883_blockbusters\n0 0.0\n" ], [ "df.fillna('None', axis=0, inplace=True)", "_____no_output_____" ], [ "# First let's add the year that the film was made to every title for better look (:\ndf[\"title\"] = df[\"title\"] + ' (' + df[\"year\"].astype(str) + ')'\n\ntop_10 = df[['title', 'imdb_rating', 'year', 'length', 'worldwide_gross']].sort_values(by='imdb_rating', ascending=False)\nprint(top_10.head())", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
hexsha: 4a3e9abf99f06a10b40481dcab5cbdad75aa1fbc | size: 7,630 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: Vector_Representation.ipynb | max_stars_repo_name: AloysiusButacAdu/Linear-Algebra-58020 | max_stars_repo_head_hexsha: e606f3f6712fce5a2ba2aa449ad3bed1ed0c76de
max_stars_repo_licenses: [ "Apache-2.0" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Vector_Representation.ipynb | max_issues_repo_name: AloysiusButacAdu/Linear-Algebra-58020 | max_issues_repo_head_hexsha: e606f3f6712fce5a2ba2aa449ad3bed1ed0c76de
max_issues_repo_licenses: [ "Apache-2.0" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Vector_Representation.ipynb | max_forks_repo_name: AloysiusButacAdu/Linear-Algebra-58020 | max_forks_repo_head_hexsha: e606f3f6712fce5a2ba2aa449ad3bed1ed0c76de
max_forks_repo_licenses: [ "Apache-2.0" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 23.549383 | max_line_length: 82 | alphanum_fraction: 0.379423
cells, cell_types, cell_type_groups:
[ [ [ "# Matrix and its Operations\n", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "## Creating a 1x3 array (matrix)\na = np.array([1,1,1])\nprint(a)", "[1 1 1]\n" ], [ "## Creating a 2x3 matrix\nb = np.array([[1,1,1],[2,2,2]]) ## <-- Rectangular array (matrix)\nprint(b)", "[[1 1 1]\n [2 2 2]]\n" ], [ "## Creating a 3x3 matrix\nc = np.array([[1,1,1],[2,2,2],[3,3,3]])\nprint(c)", "[[1 1 1]\n [2 2 2]\n [3 3 3]]\n" ], [ "## Creating a constant array with all values are constant\nd =np.full([3,3],8) ## can use []\nd_2 =np.full((3,3),0) ## <-- Square Matrix\nprint(d)\nprint(d_2)", "[[8 8 8]\n [8 8 8]\n [8 8 8]]\n[[0 0 0]\n [0 0 0]\n [0 0 0]]\n" ], [ "## Creating a diagonal matrix\ne = np.diagonal(c)\nprint(c)\nprint(e)", "[[1 1 1]\n [2 2 2]\n [3 3 3]]\n[1 2 3]\n" ], [ "## Creating an identity matrix\nf = np.eye(6,dtype=int)\nprint(f)", "[[1 0 0 0 0 0]\n [0 1 0 0 0 0]\n [0 0 1 0 0 0]\n [0 0 0 1 0 0]\n [0 0 0 0 1 0]\n [0 0 0 0 0 1]]\n" ], [ "## Creating a zero matrix\ng = np.zeros((4,8),dtype=int)\nprint(g)", "[[0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0 0]]\n" ], [ "## Creating an empty matrix\nh = np.empty((0,99))\nh_2 = np.empty((5,2)) ## ??????\nprint(h)\nprint(h_2)", "[]\n[[0.00000000e+000 1.82804289e-322]\n [0.00000000e+000 0.00000000e+000]\n [0.00000000e+000 1.16096346e-028]\n [9.82205649e+252 1.11789342e+253]\n [3.00312108e-067 2.65534875e-312]]\n" ], [ "## Addition of Matrices\ni = np.array([[1,2,3],[4,5,6],[7,8,9]])\nj = np.zeros((3,3),dtype=int) ## float + int = float; int + int = int\nprint(i+j)", "[[1 2 3]\n [4 5 6]\n [7 8 9]]\n" ], [ "## Subtraction of Matrices\nj = np.array([[-1,-2,-3],[-4,-5,-6],[-7,-8,-9]])\nprint(i-j)\n", "[[ 2 4 6]\n [ 8 10 12]\n [14 16 18]]\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
hexsha: 4a3ec51da28eedb6c56f34104ce194089536beb8 | size: 117,561 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: assets/data/Work/Presentations/2021-03-26 - How to Simulate Crowds/Figures/PedestrianStreamSimulations/Validation/UncertaintyQuantification-Forward-Histograms.ipynb | max_stars_repo_name: Schwefelsaeure/portfolio | max_stars_repo_head_hexsha: e5455b83045dc683f752160f96d5eaa0a69727ed
max_stars_repo_licenses: [ "MIT" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: assets/data/Work/Presentations/2021-03-26 - How to Simulate Crowds/Figures/PedestrianStreamSimulations/Validation/UncertaintyQuantification-Forward-Histograms.ipynb | max_issues_repo_name: Schwefelsaeure/portfolio | max_issues_repo_head_hexsha: e5455b83045dc683f752160f96d5eaa0a69727ed
max_issues_repo_licenses: [ "MIT" ] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: assets/data/Work/Presentations/2021-03-26 - How to Simulate Crowds/Figures/PedestrianStreamSimulations/Validation/UncertaintyQuantification-Forward-Histograms.ipynb | max_forks_repo_name: Schwefelsaeure/portfolio | max_forks_repo_head_hexsha: e5455b83045dc683f752160f96d5eaa0a69727ed
max_forks_repo_licenses: [ "MIT" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 481.807377 | max_line_length: 20,784 | alphanum_fraction: 0.940057
cells, cell_types, cell_type_groups:
[ [ [ "# Uncertainty Quantification (UQ)\n\nApproach:\n\n1. Select some parameters to vary (e.g., the mean speed of pedestrians).\n2. Use different distributions to estimate selected parameters.\n3. Test effect on a so called quantity of intereset (e.g., the density).\n\nThat is, you feed different input distributions, simulate and check output. Create a figure of this idea by producing multiple input distributions represented as histograms. Then, use Inkscape or LibreOffice Draw to combine the three steps here:\n\n```\n+------------+ +------------+ +------------+\n| Different | | | | Distribut- |\n| input | | | | ion of |\n| distribut- | ---> | Vadere | ---> | quantity |\n| ions / hi- | | | | of |\n| stograms | | | | interest |\n+------------+ +------------+ +------------+\n```", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy import stats", "_____no_output_____" ], [ "def use_custom_plot_settings(font_weight=\"normal\"):\n font_size_extra_small = 12\n font_size_small = 16\n font_size_medium = 18\n font_size_big = 20\n\n plt.style.use(\"default\")\n \n plt.rc(\"font\", size=font_size_small, weight=font_weight)\n plt.rc(\"axes\", titlesize=font_size_big, titleweight=font_weight)\n plt.rc(\"axes\", labelsize=font_size_medium, labelweight=font_weight)\n plt.rc(\"xtick\", labelsize=font_size_small)\n plt.rc(\"ytick\", labelsize=font_size_small)\n plt.rc(\"legend\", fontsize=font_size_extra_small)\n plt.rc(\"figure\", titlesize=font_size_big, titleweight=font_weight)\n\ndef use_default_plot_settings():\n plt.rcdefaults()\n \nuse_custom_plot_settings(font_weight=\"normal\")\nprint(plt.style.available)\nsns.set_style(style=\"white\")", "['seaborn-poster', 'seaborn-dark', 'seaborn-colorblind', 'seaborn-notebook', 'seaborn-darkgrid', 'Solarize_Light2', 'grayscale', '_classic_test', 'bmh', 'seaborn-deep', 'tableau-colorblind10', 'fast', 'seaborn', 'seaborn-pastel', 'seaborn-muted', 'seaborn-ticks', 'dark_background', 'seaborn-bright', 'classic', 'seaborn-talk', 'seaborn-white', 'ggplot', 'seaborn-paper', 'seaborn-dark-palette', 'seaborn-whitegrid', 'fivethirtyeight']\n" ], [ "input_distributions = [\n (np.random.normal, {}),\n (np.random.uniform, {\"low\": -3, \"high\": 3}),\n (np.random.triangular, {\"left\": 0, \"mode\": 0, \"right\": 3}),\n]\n\nsample_size = 200\nfor i, distribution in enumerate(input_distributions):\n plt.figure(i)\n \n f = distribution[0]\n params = distribution[1]\n x = f(**params, size=sample_size)\n \n color=\"black\" # color=\"C\" + str(i)\n kde_kws={\"lw\": 4}\n ax = sns.distplot(x, bins=5, color=color, kde_kws=kde_kws, label=f.__name__)\n\n plt.xticks([]) # labels \n plt.yticks([])\n ax.xaxis.set_ticks_position('none') # tick markers\n ax.yaxis.set_ticks_position('none')\n\n fig = ax.get_figure()\n filename = \"Input-Distribution-{}.pdf\".format(f.__name__.capitalize())\n fig.savefig(filename, bbox_inches=\"tight\")\n \nplt.show()", "/usr/local/lib/python3.6/dist-packages/scipy/stats/stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. 
In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n" ], [ "output_distribution = [\n (np.random.poisson, {\"lam\": 1}),\n (np.random.normal, {}),\n (np.random.exponential, {}),\n]\n\nsample_size = 200\nfor i, distribution in enumerate(output_distribution):\n plt.figure(i)\n \n f = distribution[0]\n params = distribution[1]\n x = f(**params, size=sample_size)\n \n color=\"red\" # color=\"C\" + str(i)\n kde_kws={\"lw\": 4}\n ax = sns.distplot(x, bins=5, color=color, kde_kws=kde_kws, label=f.__name__)\n\n plt.xticks([]) # labels \n plt.yticks([])\n ax.xaxis.set_ticks_position('none') # tick markers\n ax.yaxis.set_ticks_position('none')\n\n fig = ax.get_figure()\n filename = \"Output-Distribution-{}.pdf\".format(f.__name__.capitalize())\n fig.savefig(filename, bbox_inches=\"tight\")\n \nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
hexsha: 4a3ec77bb9de9390d2f869a31376d46187692edb | size: 154,088 | ext: ipynb | lang: Jupyter Notebook
max_stars_repo_path: digits.ipynb | max_stars_repo_name: jeremyrcouch/digitrecognition | max_stars_repo_head_hexsha: 6d39483ed4cc6dd505fddb9d3058c915e04c14f0
max_stars_repo_licenses: [ "MIT" ] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: digits.ipynb | max_issues_repo_name: jeremyrcouch/digitrecognition | max_issues_repo_head_hexsha: 6d39483ed4cc6dd505fddb9d3058c915e04c14f0
max_issues_repo_licenses: [ "MIT" ] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-11-13T18:44:08.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-10T01:18:10.000Z
max_forks_repo_path: digits.ipynb | max_forks_repo_name: jeremyrcouch/digitrecognition | max_forks_repo_head_hexsha: 6d39483ed4cc6dd505fddb9d3058c915e04c14f0
max_forks_repo_licenses: [ "MIT" ] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
avg_line_length: 191.651741 | max_line_length: 41,804 | alphanum_fraction: 0.885208
cells:
[ [ [ "# Handwritten Digit Recognition With Deep Learning\n#### A classic image recognition problem. Exploratory project - [repo here.](https://github.com/jeremyrcouch/digitrecognition)\n---\nThe [MNIST](http://yann.lecun.com/exdb/mnist/) database is a collection of 70,000 handwritten digits (0 to 9). The goal is to build a model capable of recognizing a digit given only the image information. Deep learning is well suited to this task, so we're going to build a neural network to classify the digits.", "_____no_output_____" ] ], [ [ "import math\nfrom typing import List\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom keras import layers, backend\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.utils import np_utils\nfrom keras.callbacks import Callback, LearningRateScheduler, EarlyStopping", "Using TensorFlow backend.\n" ], [ "RANDOM_SEED = 17\nVAL_SET_RATIO = 0.125\n\nFONTSIZE = 20\nLINEWIDTH = 5\nMARKERSIZE = 12", "_____no_output_____" ], [ "np.random.seed(RANDOM_SEED) # for repeatability\nbackend.set_image_data_format('channels_first')", "_____no_output_____" ], [ "def reshape_input_data(data: np.ndarray) -> np.ndarray:\n \"\"\"Reshape and scale input (image) data for use.\n \n Args:\n data: numpy array, input data\n \n Returns:\n _: numpy array, reshaped and scaled input data\n \"\"\"\n \n return data.reshape(data.shape[0], 1, data.shape[1], data.shape[2])/255\n \n \ndef split_val_set_from_train(X_train_in: np.ndarray, y_train_in: np.ndarray,\n val_ratio: float = 0.125):\n \"\"\"Split a validation set out of the training set.\n\n Args:\n X_train_in: numpy array, training set images\n y_train_in: numpy array, training set labels\n val_ratio: float, ratio of training set to use as validation set\n\n Returns:\n (X_train, y_train), (X_val, y_val): numpy arrays of images and labels\n \"\"\"\n \n val_size = int(val_ratio*X_train_in.shape[0])\n X_val = X_train_in[-val_size:, ...]\n y_val = y_train_in[-val_size:, ...]\n X_train = X_train_in[:X_train_in.shape[0] - val_size, ...]\n y_train = y_train_in[:y_train_in.shape[0] - val_size, ...]\n\n return (X_train, y_train), (X_val, y_val)\n\n\ndef visualize_fit(metrics):\n \"\"\"Visualize fitting process.\"\"\"\n \n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(9, 9))\n ax1.plot(metrics.acc, 'g', linewidth=LINEWIDTH,\n markersize=MARKERSIZE, label='train')\n ax1.plot(metrics.val_acc, 'b', linewidth=LINEWIDTH,\n markersize=MARKERSIZE, label='val')\n ax1.legend(fontsize=FONTSIZE-4)\n ax1.grid()\n ax1.set_ylabel('Accuracy', fontsize=FONTSIZE)\n ax1.tick_params(labelsize=FONTSIZE-4)\n ax2.plot(metrics.lr, 'r', linewidth=LINEWIDTH,\n markersize=MARKERSIZE)\n ax2.grid()\n ax2.set_xlabel('Epoch', fontsize=FONTSIZE)\n ax2.set_ylabel('Learning Rate', fontsize=FONTSIZE)\n ax2.tick_params(labelsize=FONTSIZE-4)\n fig.tight_layout()\n\n\ndef lr_function(epoch: int) -> float:\n \"\"\"Learning rate function.\n \n Args:\n epoch: int, epoch number\n \n Returns:\n _: float, learning rate\n \"\"\"\n \n init = 0.001\n drop = 0.5\n epochs_drop = 5.0\n \n return init * math.pow(drop, math.floor((1 + epoch)/epochs_drop))\n\n\nclass recordMetrics(Callback):\n \"\"\"Class to record metrics during model training process.\"\"\"\n\n def on_train_begin(self, logs={}):\n self.lr = []\n self.losses = []\n self.acc = []\n self.val_losses = []\n self.val_acc = []\n\n def on_epoch_end(self, batch, logs={}):\n self.lr.append(self.model.optimizer.get_config()['lr'])\n 
self.losses.append(logs.get('loss'))\n self.acc.append(logs.get('acc'))\n self.val_losses.append(logs.get('val_loss'))\n self.val_acc.append(logs.get('val_acc'))\n\n\ndef make_mosaic(imgs: np.ndarray, nrows: int, ncols: int, border: int = 1) -> np.ndarray:\n \"\"\"Given a set of images with all the same shape, makes a mosaic.\n \n Args:\n imgs: array of floats, image data\n nrows: int, number of rows of mosaic\n ncols: int, number of cols of mosaic\n border: int, border pixels\n \n Returns:\n mosaic: masked array, image mosaic\n \"\"\"\n\n nimgs = imgs.shape[0]\n imshape = imgs.shape[1:]\n\n mosaic = np.ma.masked_all((nrows * imshape[0] + (nrows - 1) * border,\n ncols * imshape[1] + (ncols - 1) * border),\n dtype=np.float32)\n\n paddedh = imshape[0] + border\n paddedw = imshape[1] + border\n for i in range(nimgs):\n row = int(np.floor(i / ncols))\n col = i % ncols\n\n mosaic[row*paddedh:row*paddedh + imshape[0],\n col*paddedw:col*paddedw + imshape[1]] = imgs[i]\n\n return mosaic\n\n\ndef classification_heat_map(y_test: np.ndarray, y_pred: np.ndarray) -> np.ndarray:\n \"\"\"Builds classification heat map.\n \n Args:\n y_test: array of ints, true labels\n y_pred: array of floats, predicted labels\n \n Returns:\n heat: array of ints, true vs predicted counts\n \"\"\"\n\n cat_num = len(np.unique(y_test))\n heat = np.zeros((cat_num, cat_num), dtype='int')\n for test, pred in zip(y_test, y_pred):\n heat[test, pred] += 1\n\n return heat\n\n\ndef plot_heat(zero_heat):\n \"\"\"Plot heat map.\"\"\"\n\n fig = plt.figure(figsize=(9, 9))\n img = plt.imshow(zero_heat, cmap='viridis')\n plt.xlabel('Predicted', fontsize=FONTSIZE)\n plt.ylabel('True', fontsize=FONTSIZE)\n plt.xticks([i for i in range(10)])\n plt.yticks([i for i in range(10)])\n plt.tick_params(axis='both', labelsize=FONTSIZE)\n ax = plt.gca()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)\n cbar = plt.colorbar(img, cax=cax)\n cbar.ax.tick_params(labelsize=FONTSIZE-4)\n cbar.ax.get_xaxis().labelpad = 7\n cbar.ax.set_xlabel('Count', fontsize=FONTSIZE-4)\n cbar.outline.set_visible(False)\n fig.tight_layout()\n\n\ndef plot_weights(weights):\n \"\"\"Plot weights.\"\"\"\n\n fig = plt.figure(figsize=(9, 9))\n img = plt.imshow(weights, cmap='viridis')\n plt.tick_params(axis='both', labelsize=FONTSIZE)\n ax = fig.gca()\n ax.axis('off')\n fig.tight_layout()\n\n\ndef plot_layer_outputs(model, layer, X_test: np.ndarray, test_ind: int):\n \"\"\"Plot layer outputs.\n \n Args:\n model: keras model\n layer: keras layer\n X_test: numpy array, test images\n test_ind: int, input index\n \n Returns:\n image of layer output\n \"\"\"\n \n functor = backend.function([model.input] + [backend.learning_phase()], [layer.output])\n X = X_test[test_ind, ...][np.newaxis, ...]\n layer_result = np.squeeze(functor([X, 1])[0])\n\n mosaic_dim = int(np.ceil(np.sqrt(layer_result.shape[0])))\n mosaic = make_mosaic(layer_result, mosaic_dim, mosaic_dim)\n plot_weights(mosaic)\n\n\ndef plot_single_input(ax, X: np.ndarray, y: int, y_pred: int = None, cmap: str = 'gray_r'):\n \"\"\"Plot single input image.\n \n Args:\n ax: axis\n X: numpy array, image\n y: int, label\n y_pred: int, predicted label\n cmap: str, colormap to use\n \n Returns:\n input image\n \"\"\"\n \n plt.imshow(np.squeeze(X), cmap=cmap)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = 'True {}'.format(y)\n title += ', Predicted {}'.format(y_pred) if y_pred is not None else ''\n plt.title(title, fontsize=FONTSIZE)\n \n\ndef 
plot_input_images(X: np.ndarray, y: np.ndarray, inds: List[int],\n y_pred: np.ndarray = None, cmap: str = 'gray_r'):\n \"\"\"Plot input image(s).\n \n Args:\n X: numpy array, input data\n y: numpy array, label data\n inds: list of int, indices to plot\n y_pred: numpy array, predicted labels\n cmap: str, color map to use\n \n Returns:\n input images\n \"\"\"\n \n ncols = min(3, len(inds))\n nrows = np.ceil(len(inds)/ncols)\n fig = plt.figure(figsize=(4*ncols, 4*nrows))\n for i, ind in enumerate(inds):\n ax = plt.subplot(nrows, ncols, i+1)\n pred = y_pred[ind] if y_pred is not None else None\n plot_single_input(ax, X[ind, ...], y[ind], pred, cmap)\n\n\ndef get_worst_predictions(y_test: np.ndarray, y_pred: np.ndarray, num: int = 5) -> np.ndarray:\n \"\"\"Get worst prediction indices.\n \n Args:\n y_test: numpy array, test input data\n y_pred: numpy array, test label data\n num: int, number of indices to return\n \n Returns:\n _: numpy array, worst indices by confidence\n \"\"\"\n\n pred_scores = np.array([(1-y_pred[i, y_test[i]]) for i in range(len(y_pred))])\n return np.argsort(pred_scores)[-num:]", "_____no_output_____" ], [ "(X_train_raw, y_train_raw), (X_test_raw, y_test_raw) = mnist.load_data()\nX_train_prep = reshape_input_data(X_train_raw)\nX_test = reshape_input_data(X_test_raw)\ny_train_prep = np_utils.to_categorical(y_train_raw).astype(int)\ny_test = np_utils.to_categorical(y_test_raw).astype(int)\n(X_train, y_train), (X_val, y_val) = split_val_set_from_train(X_train_prep, y_train_prep, VAL_SET_RATIO)", "_____no_output_____" ], [ "# look at a couple random digits from the training set\nrand_inds = list(np.random.randint(0, high=len(X_train), size=6))\nplot_input_images(X_train, np.array([np.argmax(y) for y in y_train]), rand_inds)", "_____no_output_____" ] ], [ [ "Above are some examples of the handwritten digits. For each, we have 28x28 pixels with one color channel (greyscale). I separated the data into training, validation and test sets - taking a portion of the samples from the training set for the validation set to use for model architecture selection and parameter tuning. The samples in the test set will be held out for final model evaluation.\n\nAs this is just an exploratory project, I wanted to manually explore the model architecture space rather than use any sort of automated search. 
I monitored the accuracy on my validation set while making changes.", "_____no_output_____" ] ], [ [ "# model architecture\nDENSE_LAYERS = 1\nDENSE_NODES = 256 # generally keep between input and output sizes\nACTIVATION = 'relu'\nCONV_LAYERS = 2\nFILTERS = 16\nPATCHES = 3\n\nimg_dim = X_train.shape[-1]", "_____no_output_____" ], [ "inputs = layers.Input(shape=(1, img_dim, img_dim), name='inputs')\n\n# convolutional layers with max pooling after each\nfor lay_i in range(CONV_LAYERS):\n layer_in = inputs if lay_i == 0 else x\n x = layers.Conv2D(FILTERS, (PATCHES, PATCHES), activation=ACTIVATION,\n data_format='channels_first', name='conv{}'.format(lay_i))(layer_in)\n x = layers.MaxPooling2D(pool_size=(2, 2), name='pool{}'.format(lay_i))(x)\n\n# dropout if there was a convolutional layer before flatten for dense input\nif CONV_LAYERS > 0:\n x = layers.Dropout(0.2, name='conv_dropout')(x)\n x = layers.Flatten(name='flatten')(x)\nelse:\n x = layers.Flatten(name='flatten')(inputs)\n\n# dense layers with dropout after each\nfor lay_i in range(DENSE_LAYERS):\n dense = int(DENSE_NODES/(lay_i+1))\n x = layers.Dense(dense, activation=ACTIVATION, name='dense{}'.format(lay_i))(x)\n x = layers.Dropout(0.25, name='dense_dropout{}'.format(lay_i))(x)\n\noutput = layers.Dense(10, activation='softmax', name='output')(x)\n\nmodel = Model(inputs=inputs, outputs=output)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "I used Keras' functional API for this neural network because it provides a straightforward way to build up a model in a dynamic way. First, let's take a look at the model I landed on.", "_____no_output_____" ] ], [ [ "model.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninputs (InputLayer) (None, 1, 28, 28) 0 \n_________________________________________________________________\nconv0 (Conv2D) (None, 16, 26, 26) 160 \n_________________________________________________________________\npool0 (MaxPooling2D) (None, 16, 13, 13) 0 \n_________________________________________________________________\nconv1 (Conv2D) (None, 16, 11, 11) 2320 \n_________________________________________________________________\npool1 (MaxPooling2D) (None, 16, 5, 5) 0 \n_________________________________________________________________\nconv_dropout (Dropout) (None, 16, 5, 5) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 400) 0 \n_________________________________________________________________\ndense0 (Dense) (None, 256) 102656 \n_________________________________________________________________\ndense_dropout0 (Dropout) (None, 256) 0 \n_________________________________________________________________\noutput (Dense) (None, 10) 2570 \n=================================================================\nTotal params: 107,706\nTrainable params: 107,706\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "There's a total of 10 layers, summarized below:\n1. Input image\n2. First convolutional layer, detects low level shapes\n3. Max pooling layer down samples and provides invariance to translation, rotation and scale\n4. Second convolutional layer, detects higher level features\n5. Max pooling layer down samples and provides invariance to translation, rotation and scale\n6. Dropout layer mitigates overfitting\n7. 
Flatten layer gets the data in the right shape for dense layers\n8. Dense layer\n9. Dropout layer mitigates overfitting\n10. Output layer with softmax activation acts as a classifier\n\nDeciding on a model structure and tuning the parameters is an iterative process. I set up an early stopping callback for both the training and validation data, so that if the accuracy of either one stalled it would stop the fit. I also defined a simple step style learning rate scheduler that cuts the learning rate in half every couple epochs. This helps the optimizer continue to improve the model's accuracy as the fitting process wears on. I recorded the training and validation data losses and accuracy along with the learning rate every epoch, and at the end of the fit I plotted a simple visualization to see how training went.\n\nThe main thing I was looking for after fitting the model is how well the validation set accuracy tracked the training set accuracy. If the training accuracy is well above the validation accuracy, then we know we're likely overfitting.", "_____no_output_____" ] ], [ [ "early_stop_train = EarlyStopping(monitor='acc', min_delta=0.001, patience=3,\n verbose=0, mode='auto')\nearly_stop_test = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=3,\n verbose=0, mode='auto')\nlr_scheduler = LearningRateScheduler(lr_function)\nmetrics = recordMetrics()\n\nEPOCHS = 20\nhistory = model.fit(X_train, y_train, epochs=EPOCHS, verbose=1,\n callbacks=[metrics, early_stop_train, early_stop_test, lr_scheduler],\n validation_data=(X_val, y_val))", "Train on 52500 samples, validate on 7500 samples\nEpoch 1/20\n52500/52500 [==============================] - 55s 1ms/step - loss: 0.2220 - acc: 0.9319 - val_loss: 0.0596 - val_acc: 0.9827\nEpoch 2/20\n52500/52500 [==============================] - 49s 942us/step - loss: 0.0824 - acc: 0.9744 - val_loss: 0.0523 - val_acc: 0.9852\nEpoch 3/20\n52500/52500 [==============================] - 52s 984us/step - loss: 0.0620 - acc: 0.9804 - val_loss: 0.0418 - val_acc: 0.9883\nEpoch 4/20\n52500/52500 [==============================] - 50s 946us/step - loss: 0.0520 - acc: 0.9837 - val_loss: 0.0372 - val_acc: 0.9900\nEpoch 5/20\n52500/52500 [==============================] - 48s 910us/step - loss: 0.0368 - acc: 0.9886 - val_loss: 0.0337 - val_acc: 0.9896\nEpoch 6/20\n52500/52500 [==============================] - 48s 906us/step - loss: 0.0322 - acc: 0.9899 - val_loss: 0.0326 - val_acc: 0.9909\nEpoch 7/20\n52500/52500 [==============================] - 48s 915us/step - loss: 0.0291 - acc: 0.9909 - val_loss: 0.0329 - val_acc: 0.9913\nEpoch 8/20\n52500/52500 [==============================] - 47s 896us/step - loss: 0.0278 - acc: 0.9910 - val_loss: 0.0278 - val_acc: 0.9933\nEpoch 9/20\n52500/52500 [==============================] - 53s 1ms/step - loss: 0.0246 - acc: 0.9922 - val_loss: 0.0307 - val_acc: 0.9923\nEpoch 10/20\n52500/52500 [==============================] - 44s 847us/step - loss: 0.0210 - acc: 0.9931 - val_loss: 0.0292 - val_acc: 0.9927\nEpoch 11/20\n52500/52500 [==============================] - 43s 817us/step - loss: 0.0174 - acc: 0.9945 - val_loss: 0.0280 - val_acc: 0.9929\n" ], [ "visualize_fit(metrics)", "_____no_output_____" ] ], [ [ "As you can see, it only took a couple epochs to get above 99% accuracy on both the training and validation sets. By lowering the learning rate every so often, we're helping the model get incrementally better without bouncing around. Importantly, the validation accuracy tracked with the training accuracy. 
I'm happy with the final model here - 99.33% accuracy on the held-out test set isn't too shabby.\n\nLet's take a closer look at the model's performance.", "_____no_output_____" ] ], [ [ "y_pred = model.predict(X_test)\ny_pred_max = np.array([np.argmax(pred) for pred in y_pred])\nscores = model.evaluate(x=X_test, y=y_test)\nprint('Accuracy = {:.2%}'.format(scores[1]))", "10000/10000 [==============================] - 3s 333us/step\nAccuracy = 99.33%\n" ], [ "heat = classification_heat_map(y_test_raw, y_pred_max)\nmiss_heat = heat.copy()\nnp.fill_diagonal(miss_heat, val=0)\nplot_heat(miss_heat)", "_____no_output_____" ] ], [ [ "The y-axis of the heat map is the true digit and the x-axis is the predicted digit, with the color of the square corresponding to the number of samples for that pairing. Clearly, there were quite a few instances where we thought `9`s were `4`s, `7`s were `2`s, and `3`s were `5`s. These mixups are not unexpected, so that's a good sign the model isn't confusing numbers with significantly different shapes.\n\nLet's look at the digits we predicted incorrectly **and** were highly confident about.", "_____no_output_____" ] ], [ [ "worst_inds = get_worst_predictions(y_test_raw, y_pred, num=6)\nplot_input_images(X_test, y_test_raw, list(worst_inds), y_pred_max, cmap='viridis')", "_____no_output_____" ] ], [ [ "I mean, some of these are pretty poorly written digits. Other misses are harder to excuse: the lower row, middle column `6` does not look like the `5` that was predicted (even though it's a better `G` than a `6`). Depending on the application, missing digits like these may or may not be acceptable.\n\nTime to take a closer look at what the model is doing.", "_____no_output_____" ] ], [ [ "test_ind = 0", "_____no_output_____" ], [ "layer_info = {layer.name: layer for layer in model.layers}\nlayer = layer_info['conv0']\nlayer_weights = layer.get_weights()\nfilter_weights = np.array([np.squeeze(layer_weights[0][..., i]) for i in range(FILTERS)])\n\nmosaic_dim = int(np.ceil(np.sqrt(filter_weights.shape[0])))\nweights_mosaic = make_mosaic(filter_weights, mosaic_dim, mosaic_dim)\nplot_weights(weights_mosaic)", "_____no_output_____" ] ], [ [ "These are the weights of the filters for the first convolutional layer (I left out the axes and colorbar because only the relative values matter for right now). See [this great article](http://colah.github.io/posts/2014-07-Understanding-Convolutions/) for an explanation of convolutions, but in simple terms, imagine each of these filters sliding over each spot on the image. Where the filter and the image match up well, the output of the layer will \"light up.\" Technically, \"light up\" is a garbage way to describe it, but you'll see why I said that coming up. Next, we'll show a selected digit (the input to our model) and then the output of our convolutional layer.", "_____no_output_____" ] ], [ [ "# show test image and conv filter outputs\nplot_input_images(X_test, y_test_raw, [test_ind], y_pred_max, cmap='viridis')\nplot_layer_outputs(model, layer, X_test, test_ind)", "_____no_output_____" ] ], [ [ "The filters activate the input image in different ways. Although each input image will activate the filters differently, *similar inputs will activate the same filters in similar locations*. Although I am aware of the flaws of max pooling[^1], it is, for better or worse, effective for making the neural network invariant to image translation, scaling and orientation.\n\nThe techniques used for this problem can be applied to a range of image recognition tasks. 
Experimenting with model architectures is often necessary to get good results, but there are a variety of resources out there to suggest architectures that are well suited to certain problem types.\n\n[^1]: Geoffrey Hinton argues \"The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster.\"", "_____no_output_____" ] ] ]
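The step-style learning rate schedule described above is passed to `LearningRateScheduler` as `lr_function`, which is defined earlier in the notebook and not shown here. A minimal, self-contained sketch of a schedule matching that description (halve the rate every couple of epochs) might look like this; the starting rate and step length are illustrative assumptions, not the values actually used:

```python
# Hypothetical step-style schedule: halve the learning rate every few epochs.
# The notebook's real lr_function is defined earlier and may differ; base_lr
# and epochs_per_drop here are illustrative guesses.
def step_schedule(epoch, base_lr=1e-3, drop=0.5, epochs_per_drop=2):
    return base_lr * (drop ** (epoch // epochs_per_drop))

# Inspect the schedule over the first few epochs.
for epoch in range(6):
    print(epoch, step_schedule(epoch))
```

Lowering the rate in steps like this is what lets the optimizer keep nudging the accuracy upward late in training instead of bouncing around.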
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a3ed2fac72042c59a782657c5cd19b42ff56dc7
196,640
ipynb
Jupyter Notebook
notebooks/TensorComputing.ipynb
patricks1/MachineLearningStatistics
dbc4e55b2c9638b6b2814a34b87bc2c9d5fdddd4
[ "BSD-3-Clause" ]
null
null
null
notebooks/TensorComputing.ipynb
patricks1/MachineLearningStatistics
dbc4e55b2c9638b6b2814a34b87bc2c9d5fdddd4
[ "BSD-3-Clause" ]
null
null
null
notebooks/TensorComputing.ipynb
patricks1/MachineLearningStatistics
dbc4e55b2c9638b6b2814a34b87bc2c9d5fdddd4
[ "BSD-3-Clause" ]
null
null
null
126.701031
96,800
0.87949
[ [ [ "# Machine Learning and Statistics for Physicists", "_____no_output_____" ], [ "Material for a [UC Irvine](https://uci.edu/) course offered by the [Department of Physics and Astronomy](https://www.physics.uci.edu/).\n\nContent is maintained on [github](github.com/dkirkby/MachineLearningStatistics) and distributed under a [BSD3 license](https://opensource.org/licenses/BSD-3-Clause).\n\n##### &#9658; [View table of contents](Contents.ipynb)", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Tensor Computing", "_____no_output_____" ], [ "Most practical algorithms of ML can be decomposed into small steps where the calculations are expressed with linear algebra, i.e., linear combinations of scalars, vectors and matrices.\n\nFor example, a neural network can be built from layers that each calculate\n$$\n\\mathbf{x}_\\text{out} = \\max(0, W \\mathbf{x}_\\text{in} + \\mathbf{b}) \\; ,\n$$\nwhere $W$ is a matrix, and boldface symbols represent vectors. In typical applications, $\\mathbf{x}_\\text{out}$ and $\\mathbf{x}_\\text{in}$ are derived from **data** while $W$ and $\\mathbf{b}$ are considered **model parameters**. (This expression is not strictly linear: why?)\n\nThe python numeric and list types can represent arbitrary scalars, vectors, and matrices, but are designed for flexibility instead of efficiency.\n\nNumpy is instead optimized for the special case where all list elements are numeric values of the same type, which can be organized and accessed very efficiently in memory, with a specialized array type with lots of nice features. One downside of this approach is that most of builtin math functions are duplicated (e.g., `math.sin` and `np.sin`) to work with numpy arrays.", "_____no_output_____" ], [ "**EXERCISE:** Complete the function below using numpy to evaluate the neural-network layer defined above:", "_____no_output_____" ] ], [ [ "def xout(W, xin, b):\n return np.maximum(0, W.dot(xin) + b)\n#maximum takes two vectors and will component-wise find the maximum\n#max looks at all the numbers and finds the scalar max.", "_____no_output_____" ] ], [ [ "### Terminology", "_____no_output_____" ], [ "We frequently use $\\mathbf{r} = (x, y, z)$ in physics to represent an *arbitrary* position in three (continuous) dimensions.\n\nIn numpy, we cannot represent an *arbitrary* position but can easily represent a *specific* position, for example:", "_____no_output_____" ] ], [ [ "rvec = np.array([0.1, -0.2, 0.3])", "_____no_output_____" ] ], [ [ "However, `rvec` has only one (discrete) dimension, which we use to access its three array elements with indices 0,1,2:", "_____no_output_____" ] ], [ [ "rvec[0], rvec[1], rvec[2]", "_____no_output_____" ] ], [ [ "Note how we use the term **dimension** differently in these two cases!\n\nAll numpy arrays have a `shape` property that specifies the range of indices allowed for each of their (discrete) dimensions:", "_____no_output_____" ] ], [ [ "rvec.shape", "_____no_output_____" ], [ "rvec.ndim", "_____no_output_____" ] ], [ [ "Compare with a matrix represented in numpy:", "_____no_output_____" ] ], [ [ "matrix = np.identity(3)\nprint(matrix)", "[[1. 0. 0.]\n [0. 1. 0.]\n [0. 0. 
1.]]\n" ], [ "matrix[1, 0], matrix[1, 1]", "_____no_output_____" ], [ "matrix.shape", "_____no_output_____" ], [ "matrix.ndim", "_____no_output_____" ] ], [ [ "Numpy supports arrays with any (finite) number of (discrete) dimensions. The general name for these arrays is a **tensor** (so, scalars, vectors and matrices are tensors). For example:", "_____no_output_____" ] ], [ [ "tensor = np.ones((2, 3, 4))\nprint(tensor)", "[[[1. 1. 1. 1.]\n [1. 1. 1. 1.]\n [1. 1. 1. 1.]]\n\n [[1. 1. 1. 1.]\n [1. 1. 1. 1.]\n [1. 1. 1. 1.]]]\n" ], [ "tensor[0, 0, 0], tensor[1, 2, 3]", "_____no_output_____" ], [ "tensor.shape", "_____no_output_____" ], [ "tensor.ndim", "_____no_output_____" ] ], [ [ "Tensors are used in physics also: for example, the tensor expression $g^{il} \\Gamma^m_{ki} x^k$ arises in [contravariant derivatives in General Relativity](https://en.wikipedia.org/wiki/Christoffel_symbols#Covariant_derivatives_of_tensors). What are the **dimensions** of $g$, $\\Gamma$ and $x$ in this expression? Note that numpy tensors do not make any distinction between upper or lower indices.", "_____no_output_____" ], [ "The numpy dimension is sometimes also referred to as the **rank**, but note that [array rank](https://en.wikipedia.org/wiki/Rank_(computer_programming)) is similar to but subtly different from [linear algebra rank](https://en.wikipedia.org/wiki/Rank_(linear_algebra)).", "_____no_output_____" ], [ "### Fundamental Operations", "_____no_output_____" ], [ "#### Tensor Creation\n\nThe most common ways you will create new arrays are:\n- Filled with a simple sequence of constant values\n- Filled with (reproducible) random values\n- Calculated as a mathematical function of existing arrays.", "_____no_output_____" ] ], [ [ "# Regular sequence of values\nshape = (3, 4)\nc1 = np.zeros(shape)\nc2 = np.ones(shape)\nc3 = np.full(shape, -1)\nc4 = np.arange(12)", "_____no_output_____" ], [ "# Reproducible \"random\" numbers\ngen = np.random.RandomState(seed=123)\nr1 = gen.uniform(size=shape)\nr2 = gen.normal(loc=-1, scale=2, size=shape)", "_____no_output_____" ], [ "# Calculated as function of existing array.\nf1 = r1 * np.sin(r2) ** c3", "_____no_output_____" ] ], [ [ "All the values contained within a tensors have the same [data type](https://docs.scipy.org/doc/numpy-1.15.0/user/basics.types.html), which you can inspect:", "_____no_output_____" ] ], [ [ "c1.dtype, c4.dtype", "_____no_output_____" ] ], [ [ "**EXERCISE:** Try to guess the `dtype` of `c3`, `r2` and `f1`, then check your answer. Deep learning often uses smaller (32 or 16 bit) float data types: what advantages and disadvantages might this have?", "_____no_output_____" ] ], [ [ "print(c3.dtype=='int')\nprint(r2.dtype=='float')\nprint(f1.dtype=='float')", "True\nTrue\nTrue\n" ] ], [ [ "<span style=\"color:blue\">\nGPUs have a special memory architecture that is expensive, so we ARE memory constrained.\n<br/><br/>\nAlso, by making the data more coarse, we can insert some stochasticity, the usefulness of which the lecture slides point to.\n</span>", "_____no_output_____" ], [ "**SOLUTION:** The `zeros` and `ones` functions default to `float64`, but `full` uses the type of the provided constant value. Integers are automatically promoted to floats in mixed expressions.", "_____no_output_____" ] ], [ [ "c3.dtype, r2.dtype, f1.dtype", "_____no_output_____" ] ], [ [ "Smaller floats allow more efficient use of limited (GPU) memory and faster calculations, at the cost of some accuracy. 
Since the training of a deep neural network is inherently noisy, this is generally a good tradeoff.", "_____no_output_____" ], [ "#### Tensor Reshaping\n\nIt is often useful to [reshape](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.reshape.html) a tensor without changing its total size, which can be done very efficiently since the layout of the tensor values in memory does not need to be changed:", "_____no_output_____" ] ], [ [ "c4.reshape(c1.shape)", "_____no_output_____" ] ], [ [ "**EXERCISE:** Predict the result of `c4.reshape(2, 3, 2)` then check your answer.", "_____no_output_____" ] ], [ [ "c4.reshape(2, 3, 2)", "_____no_output_____" ] ], [ [ "#### Tensor Broadcasting\n\nThe real power of tensor computing comes from expressions like this:", "_____no_output_____" ] ], [ [ "# Add one to each element.\nc4 + 1", "_____no_output_____" ], [ "# Scale each column of the 3x4 ones matrix by a different value.\nnp.ones(shape=(3, 4)) * np.arange(4)", "_____no_output_____" ] ], [ [ "The results are not surprising in these examples, but something non-trivial is going on behind the scenes to make this work since we are combining tensors with different shapes. This is called [broadcasting](https://docs.scipy.org/doc/numpy-1.15.0/user/basics.broadcasting.html) and has specific rules for how to handle less obvious cases.\n\nBroadcasting serves two purposes:\n - It allows more compact and easier to understand \"vectorized\" expressions, where loops over elements in each dimension are implicit.\n - It enables automatic optimizations to take advantage of the available hardware, since explicit python loops are generally a bottleneck.\n \nNot all expressions can be automatically broadcast, even if they seem to make sense. For example:", "_____no_output_____" ] ], [ [ "# Scale each row of the 3x4 ones matrix by a different value.\ntry:\n np.ones(shape=(3, 4)) * np.arange(3)\nexcept ValueError as e:\n print(e)", "operands could not be broadcast together with shapes (3,4) (3,) \n" ] ], [ [ "However, you can usually reshape the inputs to get the desired result:", "_____no_output_____" ] ], [ [ "np.ones(shape=(3, 4)) * np.arange(3).reshape(3, 1)", "_____no_output_____" ] ], [ [ "Another useful trick is to use `keepdims=True` with reducing functions, e.g.", "_____no_output_____" ] ], [ [ "print(np.ones((4, 3)).sum(axis=1))\nprint(np.ones((4, 3)).sum(axis=1, keepdims=True))", "[3. 3. 3. 
3.]\n[[3.]\n [3.]\n [3.]\n [3.]]\n" ] ], [ [ "To experiment with broadcasting rules, define a function to try broadcasting two arbitrary tensor shapes:", "_____no_output_____" ] ], [ [ "def broadcast(shape1, shape2):\n array1 = np.ones(shape1)\n array2 = np.ones(shape2)\n try:\n array12 = array1 + array2\n print('shapes {} {} broadcast to {}'.format(shape1, shape2, array12.shape))\n except ValueError as e:\n print(e)", "_____no_output_____" ], [ "broadcast((1, 3), (3,))", "shapes (1, 3) (3,) broadcast to (1, 3)\n" ], [ "broadcast((1, 2), (3,))", "operands could not be broadcast together with shapes (1,2) (3,) \n" ] ], [ [ "**EXERCISE:** Predict the results of the following then check your answers:\n```\nbroadcast((3, 1, 2), (3, 2))\nbroadcast((2, 1, 3), (3, 2))\nbroadcast((3,), (2, 1))\nbroadcast((3,), (1, 2))\nbroadcast((3,), (1, 3))\n```", "_____no_output_____" ] ], [ [ "broadcast((3, 1, 2), (3, 2))\nbroadcast((2, 1, 3), (3, 2))\nbroadcast((3,), (2, 1))\nbroadcast((3,), (1, 2))\nbroadcast((3,), (1, 3))", "shapes (3, 1, 2) (3, 2) broadcast to (3, 3, 2)\noperands could not be broadcast together with shapes (2,1,3) (3,2) \nshapes (3,) (2, 1) broadcast to (2, 3)\noperands could not be broadcast together with shapes (3,) (1,2) \nshapes (3,) (1, 3) broadcast to (1, 3)\n" ] ], [ [ "### Tensor Frameworks", "_____no_output_____" ], [ "#### Numpy\n\nNumpy is an example of a framework for tensor computing that is widely supported and requires no special hardware. However, it still offers significant performance improvements by eliminating explicit python loops and using memory efficiently.\n\nFor example, let's calculate the opening angle separation between two unit vectors, each specified with (lat, lon) angles in radians (or RA,DEC for astronomers, as implemented [here](https://desisurvey.readthedocs.io/en/latest/api.html#desisurvey.utils.separation_matrix)). 
The [Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula) is a good way to calculate this quantity.", "_____no_output_____" ], [ "Generate a large number of random unit vectors for benchmarking (are these uniformly distributed on the sphere?)", "_____no_output_____" ] ], [ [ "def generate(N, seed=123):\n gen = np.random.RandomState(seed=123)\n lats = gen.uniform(low=-np.pi / 2, high=+np.pi / 2, size=N)\n lons = gen.uniform(low=0, high=2 * np.pi, size=N)\n plt.plot(lons, lats, '.')\n return lats, lons\n\nlats, lons = generate(N=1000)", "_____no_output_____" ], [ "lats.shape", "_____no_output_____" ] ], [ [ "Use explicit python loops to calculate the (square) matrix of separation angles between all pairs of unit vectors:", "_____no_output_____" ] ], [ [ "def separation_matrix_loops():\n # Allocate memory for the matrix.\n N = len(lats)\n matrix = np.empty((N, N))\n for i, (lat1, lon1) in enumerate(zip(lats, lons)):\n for j, (lat2, lon2) in enumerate(zip(lats, lons)):\n # Evaluate the Haversine formula for matrix element [i, j].\n matrix[i, j] = 2 * np.arcsin(np.sqrt(\n np.sin(0.5 * (lat2 - lat1)) ** 2 +\n np.cos(lat1) * np.cos(lat2) * np.sin(0.5 * (lon2 - lon1)) ** 2))\n return matrix", "_____no_output_____" ], [ "%time S1 = separation_matrix_loops()", "CPU times: user 10 s, sys: 67.4 ms, total: 10.1 s\nWall time: 10.3 s\n" ] ], [ [ "Now calculate the same separations using numpy implicit loops:", "_____no_output_____" ] ], [ [ "def separation_matrix_numpy():\n lat1, lat2 = lats, lats.reshape(-1, 1)\n lon1, lon2 = lons, lons.reshape(-1, 1)\n return 2 * np.arcsin(np.sqrt(\n np.sin(0.5 * (lat2 - lat1)) ** 2 +\n np.cos(lat1) * np.cos(lat2) * np.sin(0.5 * (lon2 - lon1)) ** 2))", "_____no_output_____" ] ], [ [ "Check that both calculations give the same results:", "_____no_output_____" ] ], [ [ "np.allclose(S1, S2)", "_____no_output_____" ] ], [ [ "Since this is so much faster, increase the amount of computation (and memory) 100x for a better benchmark:", "_____no_output_____" ] ], [ [ "lats, lons = generate(N=10000)", "_____no_output_____" ], [ "%time S2 = separation_matrix_numpy()", "CPU times: user 9.21 s, sys: 516 ms, total: 9.73 s\nWall time: 9.87 s\n" ] ], [ [ "Therefore using implicit numpy loops speeds up the calculation by a factor of about 6.8 / 0.02 = 340. Since we are using the efficient numpy arrays in both cases, the speed up is entirely due to the loops!", "_____no_output_____" ], [ "#### Other Frameworks: PyTorch and TensorFlow\n\nMachine learning relies heavily on frameworks that copy the successful numpy design for tensor computing, while adding some important new features:\n - Automatic hardware acceleration.\n - Automatic calculation of derivatives.\n - Efficient deployment to other platforms (mobile, cloud).\n \nUnlike numpy, the default type in these frameworks is usually a 32-bit float, rather than a 64-bit float.\n\nThe two most popular tensor computing frameworks for machine learning today are [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/). Both are large open-source projects, primarily developed by facebook (pytorch) and google (tensorflow). These frameworks were originally quite different, with pytorch preferred for research and tensorflow preferred for large-scale deployment, but they are gradually converging towards similar a feature set.\n\nBelow, we repeat our calculation of the separation matrix with both of these frameworks. 
You will notice that the new features come with some additional complexity.", "_____no_output_____" ], [ "#### PyTorch Example", "_____no_output_____" ] ], [ [ "import torch", "_____no_output_____" ], [ "device = torch.device(\"cuda\") if torch.cuda.is_available() else \"cpu\"\nprint(f'Using device: {device}.')\nlons_pt = torch.tensor(lons, device=device)\nlats_pt = torch.tensor(lats, device=device)", "Using device: cpu.\n" ], [ "def separation_matrix_torch():\n lat1, lat2 = lats_pt, lats_pt.reshape(-1, 1)\n lon1, lon2 = lons_pt, lons_pt.reshape(-1, 1)\n return 2 * torch.asin(torch.sqrt(\n torch.sin(0.5 * (lat2 - lat1)) ** 2 +\n torch.cos(lat1) * torch.cos(lat2) * torch.sin(0.5 * (lon2 - lon1)) ** 2))", "_____no_output_____" ], [ "%time S3 = separation_matrix_torch()", "CPU times: user 5.25 s, sys: 4.56 s, total: 9.82 s\nWall time: 5.31 s\n" ], [ "np.allclose(S2, S3.numpy())", "_____no_output_____" ] ], [ [ "#### TensorFlow Example", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "INFO:tensorflow:Enabling eager execution\nINFO:tensorflow:Enabling v2 tensorshape\nINFO:tensorflow:Enabling resource variables\nINFO:tensorflow:Enabling tensor equality\nINFO:tensorflow:Enabling control flow v2\n" ], [ "device = 'GPU:0' if tf.config.list_physical_devices('GPU') else 'CPU:0'\nprint(f'Using device: {device}.')\nwith tf.device(device):\n lons_tf = tf.constant(lons)\n lats_tf = tf.constant(lats)", "WARNING:tensorflow:From <ipython-input-54-da08d9e0227e>:1: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.config.list_physical_devices('GPU')` instead.\nUsing device: CPU:0.\n" ], [ "def separation_matrix_tensorflow():\n lat1, lat2 = lats_tf, tf.reshape(lats_tf, [-1, 1])\n lon1, lon2 = lons_tf, tf.reshape(lons_tf, [-1, 1])\n return 2 * tf.asin(tf.sqrt(\n tf.sin(0.5 * (lat2 - lat1)) ** 2 +\n tf.cos(lat1) * tf.cos(lat2) * tf.sin(0.5 * (lon2 - lon1)) ** 2))", "_____no_output_____" ], [ "%time S4 = separation_matrix_tensorflow()", "CPU times: user 20.4 s, sys: 9.31 s, total: 29.7 s\nWall time: 3.54 s\n" ], [ "np.allclose(S2, S4.numpy())", "_____no_output_____" ] ], [ [ "#### Hardware Acceleration", "_____no_output_____" ], [ "Tensor computing can be sped up significantly (10-100x) using hardware that is optimized to perform tensor computing by distributing simple calculations (\"kernels\") across many independent processors (\"cores\") running in parallel.\n\nThe original driver for such hardware was to accelerate the 3D geometry calculations required to render real time 3D graphics, leading to the first [Graphics Processing Units (GPUs)](https://en.wikipedia.org/wiki/Graphics_processing_unit) in the 1990s. More recently, GPUs have been adopted for purely numerical calculations, with no display attached, leading to the development of specialized programming languages such as [CUDA](https://en.wikipedia.org/wiki/CUDA) and [OpenCL](https://en.wikipedia.org/wiki/OpenCL).\n\nCurrently, one vendor (Nvidia) dominates the use of GPUs for ML with its proprietary CUDA language. Google has also introduced an even more specialized [TPU](https://en.wikipedia.org/wiki/Tensor_processing_unit) architecture.", "_____no_output_____" ], [ "The table below shows some benchmarks for the separation matrix problem, running on different hardware with different frameworks. The speed ups obtained using PyTorch and TensorFlow with a GPU are typical. 
The two frameworks provide comparable GPU performance overall, but can differ on specific problems.\n\n\n| Test | Laptop |Server(GPU) | Collab(CPU) | Collab(GPU) |\n|------------|--------|------------|-------------|-------------|\n| numpy | 2.08s | 1.17s | 10.5s | 10.3s |\n| torch | 7.32s | 48.7ms | --- | --- |\n| tensorflow | --- | --- | 9.11s | 246ms | \n| ratio | 3.5 | 1 / 24 | 0.87 | 1 / 41 |", "_____no_output_____" ], [ "To benefit from this hardware, you can either add a GPU to a linux server, or use a cloud computing platform.\n\nCloud computing is the easiest way to get started. There are some free options, but generally you have to \"pay as you go\" to do a useful amount of work. Some good starting points are:\n - [Google Collaboratory](https://colab.research.google.com/): free research tool with a jupyter notebook front end.\n - [PaperSpace](https://www.paperspace.com/): reasonably priced and simple to get started.\n - [Amazon Web Services](https://aws.amazon.com/ec2/): free to try, very flexible and relatively complex.\n - [Google Cloud](https://cloud.google.com/): comparable to AWS.\n \n**Note: this is not a complete list, and pricing and capabilities are rapidly changing.**\n\nIf you are considering building your own GPU server, start [here](http://timdettmers.com/2018/11/05/which-gpu-for-deep-learning/). A single server can host 4 GPUs. Here is a single water-cooled [RTX 2080 Ti](https://www.nvidia.com/en-us/geforce/graphics-cards/rtx-2080-ti/) GPU installed in my office:\n\n![GPU server](img/TensorComputing/GPU-server.jpg)", "_____no_output_____" ], [ "### Automatic Derivatives", "_____no_output_____" ], [ "In addition to hardware acceleration, a key feature of tensor computing frameworks for ML is their ability to automate the calculation of derivatives, which then enable efficient and accurate gradient-based optimization algorithms.\n\nIn general, a derivate can be implemented in software three ways:\n - Analytically (using paper or mathematica) then copied into code: this is the most efficient and accurate but least generalizable.\n - Numerically, with [finite difference equations](https://en.wikipedia.org/wiki/Finite_difference): this is the least efficient and accurate, but most generalizable.\n - [Automatically](https://en.wikipedia.org/wiki/Automatic_differentiation): a hybrid approach where a small set of primitive functions (sin, cos, log, ...) are handled analytically, then the derivatives of expressions using these primitives are computed on the fly using the chain rule, product rule, etc. This is efficient and accurate, but requires that expressions are built entirely from primitives that support AD.", "_____no_output_____" ], [ "As a concrete example calculate the (un-normalized) Gaussian distribution\n$$\ny(x) = e^{-x^2}\n$$\nin PyTorch:", "_____no_output_____" ] ], [ [ "x = torch.linspace(-5, 5, 20, requires_grad=True)\ny = torch.exp(-x ** 2)", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "y", "_____no_output_____" ] ], [ [ "We specify `requires_grad=True` to enable AD for all tensors that depend on `x` (so just `y` in this case). 
To calculate partial derivatives (\"gradients\") of `y` wrt `x`, use:", "_____no_output_____" ] ], [ [ "y.backward(torch.ones_like(y))", "_____no_output_____" ] ], [ [ "The tensor `x.grad` now contains $y'(x)$ at each value of `x`:", "_____no_output_____" ] ], [ [ "x.grad", "_____no_output_____" ], [ "x_n = x.detach().numpy()\nyp_n = x.grad.detach().numpy()\ny_n = y.detach().numpy()\nplt.plot(x_n, y_n, 'o--', label='$y(x)$')\nplt.plot(x_n, yp_n, 'o:', label='$y^\\prime(x)$')\nplt.legend();", "_____no_output_____" ] ], [ [ "Note that these derivatives are calculated to full machine precision and not affected by the coarse spacing in $x$.", "_____no_output_____" ], [ "[Jax](https://github.com/google/jax) is a relatively new framework for automatic differentiation (developed by google but independent of tensorflow) that relies on \"just-in-time\" compilation and is designed for ML research.", "_____no_output_____" ], [ "### Higher-Level APIs for Tensor Computing", "_____no_output_____" ], [ "Although TensorFlow and PyTorch are both similar to numpy, they have different APIs so you are forced to choose one to take advantage of their unique features. However, for many calculations they are interchangeable, and a new ecosystem of higher-level APIs is growing to support this. For example, check out:\n - [Tensorly](http://tensorly.org/stable/index.html): \"*Tensor learning in python*\". Includes powerful [decomposition](https://arxiv.org/abs/1711.10781) (generalized PCA) and regression algorithms.\n - [einops](https://github.com/arogozhnikov/einops): \"*Deep learning operations reinvented*\". Supports compact expressions for complex indexing operations ([np.einsum](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html) on steroids).\n \nNeither of these packages are included in the MLS conda environment, but I encourage you to experiment with them if you want to write framework-independent tensor code.", "_____no_output_____" ] ] ]
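A handy sanity check on the automatic derivatives above is to compare them with the analytic result: for $y(x) = e^{-x^2}$, the derivative is $y'(x) = -2 x \, e^{-x^2}$. A short sketch of that comparison, reusing the same PyTorch setup (the `np.allclose` tolerance check is just one reasonable way to compare):

```python
import numpy as np
import torch

# Recompute y = exp(-x**2) with gradient tracking, as in the cells above.
x = torch.linspace(-5, 5, 20, requires_grad=True)
y = torch.exp(-x ** 2)
y.backward(torch.ones_like(y))

# Analytic derivative: y'(x) = -2 x exp(-x**2).
x_d = x.detach()
analytic = -2 * x_d * torch.exp(-x_d ** 2)

# The autograd gradients should agree with the analytic values to
# floating-point precision.
print(np.allclose(x.grad.numpy(), analytic.numpy()))
```

A finite-difference estimate would pass a much looser version of this check, with the agreement degrading as the grid spacing grows, which is exactly the accuracy advantage of automatic differentiation noted above.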
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
4a3ed5b86e2a965d3b5b1a03012614deb9c16dea
368,283
ipynb
Jupyter Notebook
AAM_Plotting_20CR.ipynb
andrewsm240/20CR
b62164941443a0c85ae66cf7a1e1debf022e149e
[ "MIT" ]
null
null
null
AAM_Plotting_20CR.ipynb
andrewsm240/20CR
b62164941443a0c85ae66cf7a1e1debf022e149e
[ "MIT" ]
null
null
null
AAM_Plotting_20CR.ipynb
andrewsm240/20CR
b62164941443a0c85ae66cf7a1e1debf022e149e
[ "MIT" ]
null
null
null
506.579092
165,520
0.929052
[ [ [ "# Plot Earth-Relative Atmospheric Angular Momentum\n#### This notebook plots daily earth-relative atmospheric angular momentum (AAM) calculated using data from the 20th Century Reanalysis Project Version 3 (see AAM_Calculation_20CR.ipynb).", "_____no_output_____" ], [ "#### Import the necessary libraries.", "_____no_output_____" ] ], [ [ "import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport ipywidgets\nfrom ipywidgets import interact, interactive\nfrom datetime import datetime, timedelta", "_____no_output_____" ] ], [ [ "#### Read in the 1836-2015 AAM dataset using Xarray.", "_____no_output_____" ] ], [ [ "ds = xr.open_dataset('/home/scratch/20CR_v3/daily_aam_1836_2015.nc')\nds", "_____no_output_____" ] ], [ [ "### Plot the full time series of daily global Earth-relative AAM (1836-2015).", "_____no_output_____" ] ], [ [ "# Plot total AAM for the full 20CR period using the Xarray plotting function\nds.Mr.plot(figsize=(16,10))\nplt.title('Global Earth-Relative AAM: 1836-2015 ', fontsize=20)\nplt.xlabel('Date', fontsize=15)\nplt.ylabel(\"$M_R$\"+' '+\"$(kg \\cdot m^2 \\cdot s^{-1})$\", fontsize=15)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Create a plot of global earth-relative AAM for a selected time period. Select desired start and end times between 01-01-1836 and 12-31-2015. ", "_____no_output_____" ] ], [ [ "start = ipywidgets.DatePicker(description = 'Start Date')\nend = ipywidgets.DatePicker(description = 'End Date')\n\ndisplay(start, end)", "_____no_output_____" ], [ "def Plot_Mr():\n \n # Converts start and end time to strings for file selection\n start_time = f'{start.value}T00:00:00.000000000'\n end_time = f'{end.value}T00:00:00.000000000'\n \n # Selects Mr for the desired date range\n Mr = ds.Mr.sel(time = slice(start_time, end_time))\n \n # Create an array of the selected dates in the date range\n date_generated = pd.date_range(start_time, end_time)\n\n # Plot daily Mr for the selected date range\n plt.figure(figsize=(12,8))\n plt.plot(date_generated, Mr)\n plt.title(f'Global Earth-Relative AAM: {start.value} to {end.value}', fontsize=20)\n plt.xlabel('Date', fontsize=15)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.ylabel(\"$M_R$\"+' '+\"$(kg \\cdot m^2 \\cdot s^{-1})$\", fontsize=15)\n plt.show()\n \n# Call the Plot_Mr function to create the time series of Mr\nPlot_Mr()", "_____no_output_____" ] ], [ [ "### Plot the standardized anomalies of daily global Earth-relative AAM (1836-2015).", "_____no_output_____" ] ], [ [ "# Plot the standardized anomalies of global Mr for the full 20CR period\nds.Mr_stdanom.plot(figsize=(12,8), linewidth=0.5)\nplt.title('Standardized Anomalies of Global Earth-Relative AAM: 1836-2015', fontsize=20)\nplt.axhline(y=0.0, color='black', linestyle='-', linewidth=0.5)\nplt.xticks(fontsize=12)\nplt.xlabel('Date', fontsize=15)\nplt.yticks(fontsize=12)\nplt.ylabel(\"$M_R$ Standardized Anomalies\"+' '+\"$(kg \\cdot m^2 \\cdot s^{-1})$\", fontsize=15)\nplt.show()", "_____no_output_____" ] ], [ [ "### Plot Earth-relative AAM by latitude. ", "_____no_output_____" ] ], [ [ "# Get all the dates in the dataset\ndate = ds.time.values\n\n# The plotter function plots the relative AAM by latitude (Mr_by_lat) for each day in the dataset\ndef Plotter(date=date):\n var = ds.sel(time=date)\n fig = plt.figure(figsize=(12, 8))\n plt.plot(var.lat, var.Mr_by_lat, linewidth=3) # x-axis= latitude, y-axis= rel. 
AAM by latitude\n plt.title('Daily $M_R$ by Latitude', fontsize=20)\n plt.xlim((-90., 90.))\n plt.xticks(np.arange(-90, 100, 20))\n plt.xlabel('$\\phi$', fontsize=14)\n plt.ylim(ds.Mr_by_lat.min(), ds.Mr_by_lat.max())\n plt.ylabel(\"$M_R$\"+' '+\"$(kg \\cdot m^2 \\cdot s^{-1})$\", fontsize=14)", "_____no_output_____" ] ], [ [ "#### Create an IPython Widget to make an interactive plot. Use the drop down menu to select any date from 1836 to 2015.", "_____no_output_____" ] ], [ [ "# ipywidgets interactive function enables an interactive user interface; display the widget\nw = interactive(Plotter)\ndisplay(w)", "_____no_output_____" ] ] ]
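The `Mr_stdanom` variable plotted earlier ships with the file, so nothing has to be recomputed. For reference, here is a minimal sketch of how a standardized-anomaly series could be derived from the raw `Mr` values; it assumes a simple whole-record mean and standard deviation, which may not match how the file's `Mr_stdanom` was actually produced:

```python
import xarray as xr

# Open the same daily AAM file used above.
ds = xr.open_dataset('/home/scratch/20CR_v3/daily_aam_1836_2015.nc')

# One simple definition of a standardized anomaly: subtract the long-term
# mean and divide by the long-term standard deviation over the full record.
mr = ds.Mr
mr_stdanom_estimate = (mr - mr.mean(dim='time')) / mr.std(dim='time')

print(mr_stdanom_estimate)
```

A climatology-based version, standardizing each calendar day against its own long-term statistics, is another common choice and would also remove the seasonal cycle.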
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a3ed791479087305b147b6806ce0956b2b60a58
884,647
ipynb
Jupyter Notebook
week-8_Stats-and-Visualization_done.ipynb
pcda17/pcda
3ba949d5eb90dd48a276cabc6ac38303aa2b6843
[ "CC0-1.0" ]
7
2017-09-20T15:31:41.000Z
2020-10-10T03:55:10.000Z
week-8_Stats-and-Visualization_done.ipynb
pcda17/pcda
3ba949d5eb90dd48a276cabc6ac38303aa2b6843
[ "CC0-1.0" ]
1
2017-09-22T16:14:16.000Z
2017-09-22T16:14:16.000Z
week-8_Stats-and-Visualization_done.ipynb
pcda17/pcda
3ba949d5eb90dd48a276cabc6ac38303aa2b6843
[ "CC0-1.0" ]
7
2017-09-22T15:14:18.000Z
2021-11-26T04:31:19.000Z
375.965576
204,190
0.923747
[ [ [ "# *Data Visualization and Statistics*\n\nGallery of Matplotlib examples: [https://matplotlib.org/gallery.html](https://matplotlib.org/gallery.html)", "_____no_output_____" ] ], [ [ "## First, let's import some packages.\n\nimport os\nfrom pprint import pprint\nfrom textblob import TextBlob\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n# The line above tells Jupyter to display Matplotlib graphics within the notebook.", "_____no_output_____" ], [ "## Download sample text corpora from GitHub, then unzip.\n\nos.chdir('/sharedfolder/')\n\n!wget -N https://github.com/pcda17/pcda17.github.io/blob/master/week/8/Sample_corpora.zip?raw=true -O Sample_corpora.zip\n!unzip -o Sample_corpora.zip", "WARNING: timestamping does nothing in combination with -O. See the manual\nfor details.\n\n--2017-11-03 14:51:07-- https://github.com/pcda17/pcda17.github.io/blob/master/week/8/Sample_corpora.zip?raw=true\nResolving github.com (github.com)... 192.30.253.112, 192.30.253.113\nConnecting to github.com (github.com)|192.30.253.112|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://github.com/pcda17/pcda17.github.io/raw/master/week/8/Sample_corpora.zip [following]\n--2017-11-03 14:51:08-- https://github.com/pcda17/pcda17.github.io/raw/master/week/8/Sample_corpora.zip\nReusing existing connection to github.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/pcda17/pcda17.github.io/master/week/8/Sample_corpora.zip [following]\n--2017-11-03 14:51:08-- https://raw.githubusercontent.com/pcda17/pcda17.github.io/master/week/8/Sample_corpora.zip\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.48.133\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.48.133|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 13170081 (13M) [application/zip]\nSaving to: ‘Sample_corpora.zip’\n\nSample_corpora.zip 100%[===================>] 12.56M 1.60MB/s in 9.2s \n\n2017-11-03 14:51:18 (1.37 MB/s) - ‘Sample_corpora.zip’ saved [13170081/13170081]\n\nArchive: Sample_corpora.zip\n creating: Sample_corpora/\n creating: Sample_corpora/Australian_Broadcasting_Commission_2006/\n inflating: Sample_corpora/Australian_Broadcasting_Commission_2006/rural.txt \n inflating: Sample_corpora/Australian_Broadcasting_Commission_2006/science.txt \n creating: Sample_corpora/George_Eliot/\n inflating: Sample_corpora/George_Eliot/Adam_Bede.txt \n inflating: Sample_corpora/George_Eliot/Daniel_Deronda.txt \n inflating: Sample_corpora/George_Eliot/Felix_Holt_the_Radical.txt \n inflating: Sample_corpora/George_Eliot/Middlemarch.txt \n inflating: Sample_corpora/George_Eliot/Romola.txt \n inflating: Sample_corpora/George_Eliot/Silas_Marner.txt \n inflating: Sample_corpora/George_Eliot/The_Mill_on_the_Floss.txt \n creating: Sample_corpora/Herman_Melville/\n inflating: Sample_corpora/Herman_Melville/Bartleby_The_Scrivener.txt \n inflating: Sample_corpora/Herman_Melville/Battle-Pieces_and_Aspects_of_the_War.txt \n inflating: Sample_corpora/Herman_Melville/Billy_Budd.txt \n inflating: Sample_corpora/Herman_Melville/Israel_Potter.txt \n inflating: Sample_corpora/Herman_Melville/Mardi_vol_1.txt \n inflating: Sample_corpora/Herman_Melville/Mardi_vol_2.txt \n inflating: Sample_corpora/Herman_Melville/Moby_Dick.txt \n inflating: Sample_corpora/Herman_Melville/Omoo_Adventures_in_the_South_Seas.txt \n inflating: Sample_corpora/Herman_Melville/Pierre_or_the_Ambiguities.txt \n inflating: Sample_corpora/Herman_Melville/Redburn_His_First_Voyage.txt \n inflating: Sample_corpora/Herman_Melville/The_Confidence-Man.txt \n inflating: Sample_corpora/Herman_Melville/Typee.txt \n inflating: Sample_corpora/Herman_Melville/White_Jacket.txt \n creating: Sample_corpora/Inaugural_Speeches/\n inflating: Sample_corpora/Inaugural_Speeches/01_washington_1789.txt \n inflating: Sample_corpora/Inaugural_Speeches/02_washington_1793.txt \n inflating: Sample_corpora/Inaugural_Speeches/03_adams_john_1797.txt \n inflating: Sample_corpora/Inaugural_Speeches/04_jefferson_1801.txt \n inflating: Sample_corpora/Inaugural_Speeches/05_jefferson_1805.txt \n inflating: Sample_corpora/Inaugural_Speeches/06_madison_1809.txt \n inflating: Sample_corpora/Inaugural_Speeches/07_madison_1813.txt \n inflating: Sample_corpora/Inaugural_Speeches/08_monroe_1817.txt \n inflating: Sample_corpora/Inaugural_Speeches/09_monroe_1821.txt \n inflating: Sample_corpora/Inaugural_Speeches/10_adams_john_quincy_1825.txt \n inflating: Sample_corpora/Inaugural_Speeches/11_jackson_1829.txt \n inflating: Sample_corpora/Inaugural_Speeches/12_jackson_1833.txt \n inflating: Sample_corpora/Inaugural_Speeches/13_van_buren_1837.txt \n inflating: Sample_corpora/Inaugural_Speeches/14_harrison_1841.txt \n inflating: Sample_corpora/Inaugural_Speeches/15_polk_1845.txt \n inflating: Sample_corpora/Inaugural_Speeches/16_taylor_1849.txt \n inflating: Sample_corpora/Inaugural_Speeches/17_pierce_1853.txt \n inflating: Sample_corpora/Inaugural_Speeches/18_buchanan_1857.txt \n inflating: Sample_corpora/Inaugural_Speeches/19_lincoln_1861.txt \n inflating: Sample_corpora/Inaugural_Speeches/20_lincoln_1865.txt \n inflating: Sample_corpora/Inaugural_Speeches/21_grant_1869.txt \n inflating: Sample_corpora/Inaugural_Speeches/22_grant_1873.txt \n inflating: Sample_corpora/Inaugural_Speeches/23_hayes_1877.txt \n inflating: 
Sample_corpora/Inaugural_Speeches/24_garfield_1881.txt \n inflating: Sample_corpora/Inaugural_Speeches/25_cleveland_1885.txt \n inflating: Sample_corpora/Inaugural_Speeches/26_harrison_1889.txt \n inflating: Sample_corpora/Inaugural_Speeches/27_cleveland_1893.txt \n inflating: Sample_corpora/Inaugural_Speeches/28_mckinley_1897.txt \n inflating: Sample_corpora/Inaugural_Speeches/29_mckinley_1901.txt \n inflating: Sample_corpora/Inaugural_Speeches/30_roosevelt_theodore_1905.txt \n inflating: Sample_corpora/Inaugural_Speeches/31_taft_1909.txt \n inflating: Sample_corpora/Inaugural_Speeches/32_wilson_1913.txt \n inflating: Sample_corpora/Inaugural_Speeches/33_wilson_1917.txt \n inflating: Sample_corpora/Inaugural_Speeches/34_harding_1921.txt \n inflating: Sample_corpora/Inaugural_Speeches/35_coolidge_1925.txt \n inflating: Sample_corpora/Inaugural_Speeches/36_hoover_1929.txt \n inflating: Sample_corpora/Inaugural_Speeches/37_roosevelt_franklin_1933.txt \n inflating: Sample_corpora/Inaugural_Speeches/38_roosevelt_franklin_1937.txt \n inflating: Sample_corpora/Inaugural_Speeches/39_roosevelt_franklin_1941.txt \n inflating: Sample_corpora/Inaugural_Speeches/40_roosevelt_franklin_1945.txt \n inflating: Sample_corpora/Inaugural_Speeches/41_truman_1949.txt \n inflating: Sample_corpora/Inaugural_Speeches/42_eisenhower_1953.txt \n inflating: Sample_corpora/Inaugural_Speeches/43_eisenhower_1957.txt \n inflating: Sample_corpora/Inaugural_Speeches/44_kennedy_1961.txt \n inflating: Sample_corpora/Inaugural_Speeches/45_johnson_1965.txt \n inflating: Sample_corpora/Inaugural_Speeches/46_nixon_1969.txt \n inflating: Sample_corpora/Inaugural_Speeches/47_nixon_1973.txt \n inflating: Sample_corpora/Inaugural_Speeches/48_carter_1977.txt \n inflating: Sample_corpora/Inaugural_Speeches/49_reagan_1981.txt \n inflating: Sample_corpora/Inaugural_Speeches/50_reagan_1985.txt \n inflating: Sample_corpora/Inaugural_Speeches/51_bush_george_h_w_1989.txt \n inflating: Sample_corpora/Inaugural_Speeches/52_clinton_1993.txt \n inflating: Sample_corpora/Inaugural_Speeches/53_clinton_1997.txt \n inflating: Sample_corpora/Inaugural_Speeches/54_bush_george_w_2001.txt \n inflating: Sample_corpora/Inaugural_Speeches/55_bush_george_w_2005.txt \n inflating: Sample_corpora/Inaugural_Speeches/56_obama_2009.txt \n inflating: Sample_corpora/Inaugural_Speeches/57_obama_2013.txt \n inflating: Sample_corpora/Inaugural_Speeches/58_trump_2017.txt \n creating: Sample_corpora/Jane_Austen/\n inflating: Sample_corpora/Jane_Austen/Emma.txt \n inflating: Sample_corpora/Jane_Austen/Mansfield_Park.txt \n inflating: Sample_corpora/Jane_Austen/Northanger_Abbey.txt \n inflating: Sample_corpora/Jane_Austen/Persuasion.txt \n inflating: Sample_corpora/Jane_Austen/Pride_and_Prejudice.txt \n inflating: Sample_corpora/Jane_Austen/Sense_and_Sensibility.txt \n creating: Sample_corpora/Joseph_Conrad/\n inflating: Sample_corpora/Joseph_Conrad/Almayer's_Folly.txt \n inflating: Sample_corpora/Joseph_Conrad/Chance.txt \n inflating: Sample_corpora/Joseph_Conrad/Heart_of_Darkness.txt \n inflating: Sample_corpora/Joseph_Conrad/Lord_Jim.txt \n inflating: Sample_corpora/Joseph_Conrad/Nostromo.txt \n inflating: Sample_corpora/Joseph_Conrad/Romance.txt \n inflating: Sample_corpora/Joseph_Conrad/The_Arrow_of_Gold.txt \n inflating: Sample_corpora/Joseph_Conrad/The_Inheritors.txt \n inflating: Sample_corpora/Joseph_Conrad/The_Nature_of_a_Crime.txt \n" ], [ "os.chdir('/sharedfolder/Sample_corpora')\n\nos.listdir('./')", "_____no_output_____" ], [ "!ls Jane_Austen", "Emma.txt\t 
Northanger_Abbey.txt Pride_and_Prejudice.txt\r\nMansfield_Park.txt Persuasion.txt\t Sense_and_Sensibility.txt\r\n" ], [ "!ls Herman_Melville", "Bartleby_The_Scrivener.txt\t\t Omoo_Adventures_in_the_South_Seas.txt\r\nBattle-Pieces_and_Aspects_of_the_War.txt Pierre_or_the_Ambiguities.txt\r\nBilly_Budd.txt\t\t\t\t Redburn_His_First_Voyage.txt\r\nIsrael_Potter.txt\t\t\t The_Confidence-Man.txt\r\nMardi_vol_1.txt\t\t\t\t Typee.txt\r\nMardi_vol_2.txt\t\t\t\t White_Jacket.txt\r\nMoby_Dick.txt\r\n" ], [ "## Loading a Melville novel as a TextBlob object\n\nmelville_path = 'Herman_Melville/Moby_Dick.txt'\n\nmelville_blob = TextBlob(open(melville_path).read().replace('\\n', ' '))", "_____no_output_____" ], [ "## Loading an Austen novel as a TextBlob object\n\nausten_path = 'Jane_Austen/Pride_and_Prejudice.txt'\n\nausten_blob = TextBlob(open(austen_path).read().replace('\\n', ' '))", "_____no_output_____" ], [ "## Recall that 'some_textblob_object.words' is a WordList object ...\n\nmelville_blob.words[5100:5140]", "_____no_output_____" ], [ "# ... which we can cast to an ordinary list.\n\nlist(melville_blob.words[5100:5140])", "_____no_output_____" ], [ "## And 'some_textblob_object.sentences' is a list of Sentence objects ...\n\nausten_blob.sentences[100:105]", "_____no_output_____" ], [ "# ... which we can convert to a list of strings using a list comprehension.\n\n[str(item) for item in austen_blob.sentences[100:105]]", "_____no_output_____" ], [ "## For reference, here's another example of a list comprehension:\n\nword_list = ['Call', 'me', 'Ishmael.']\n\nuppercase_list = [word.upper() for word in word_list]\n\nuppercase_list", "_____no_output_____" ], [ "## And one more for good measure:\n\nstring_nums = [str(i) for i in range(12)]\n\nstring_nums", "_____no_output_____" ] ], [ [ "### ▷ Sentiment analysis with TextBlob\n\nDetails on the training data that NLTK (via TextBlob) uses to measure polarity:\n[http://www.cs.cornell.edu/people/pabo/movie-review-data/](http://www.cs.cornell.edu/people/pabo/movie-review-data/)", "_____no_output_____" ] ], [ [ "## Negative sentiment polarity example\n# (result between -1 and +1)\n\nfrom textblob import TextBlob\n\ntext = \"This is a very mean and nasty sentence.\"\n\nblob = TextBlob(text)\n\nsentiment_score = blob.sentiment.polarity\n\nprint(sentiment_score)", "-0.703125\n" ], [ "## Positive sentiment polarity example\n# (result between -1 and +1)\n\ntext = \"This is a very nice and positive sentence.\"\n\nblob = TextBlob(text)\n\nsentiment_score = blob.sentiment.polarity\n\nprint(sentiment_score)", "0.5036363636363637\n" ], [ "## Neutral polarity / not enough information\n\ntext = \"What is this?\"\n\nblob = TextBlob(text)\n\nsentiment_score = blob.sentiment.polarity\n\nprint(sentiment_score)", "0.0\n" ], [ "## High subjectivity example\n# result between 0 and 1\n\ntext=\"This is a very mean and nasty sentence.\"\n\nblob = TextBlob(text)\n\nsentiment_score = blob.sentiment.subjectivity\n\nprint(sentiment_score)", "0.946875\n" ], [ "## Low subjectivity example\n# result between 0 and 1\n\ntext=\"This sentence states a fact, with an apparently objective adjective.\"\n\nblob = TextBlob(text)\n\nsentiment_score=blob.sentiment.subjectivity\n\nprint(sentiment_score)", "0.1\n" ] ], [ [ "### ▷ Plotting Sentiment Values\n\nLet's map sentiment polarity values across the course of a full novel.", "_____no_output_____" ] ], [ [ "## Viewing Pyplot style templates\n\npprint(plt.style.available)", "['fast',\n 'seaborn',\n 'seaborn-whitegrid',\n 'seaborn-pastel',\n 
'seaborn-muted',\n 'seaborn-colorblind',\n 'seaborn-deep',\n 'seaborn-notebook',\n 'seaborn-dark-palette',\n 'seaborn-ticks',\n '_classic_test',\n 'grayscale',\n 'seaborn-darkgrid',\n 'classic',\n 'seaborn-white',\n 'Solarize_Light2',\n 'seaborn-poster',\n 'dark_background',\n 'seaborn-dark',\n 'ggplot',\n 'bmh',\n 'seaborn-talk',\n 'seaborn-bright',\n 'seaborn-paper',\n 'fivethirtyeight']\n" ], [ "## Selecting a Pyplot style\n\nplt.style.use('ggplot')\n\n# The 'ggplot' style imitates the R graphing package 'ggplot2.' (http://ggplot2.org)", "_____no_output_____" ], [ "austen_sentiments = [item.sentiment.polarity for item in austen_blob.sentences]\n\nausten_sentiments[:15]", "_____no_output_____" ], [ "## Austen sentiment values for first 60 sentences\n\nplt.figure(figsize=(18,8))\nplt.plot(austen_sentiments[:60])", "_____no_output_____" ], [ "austen_blob.sentences[30]", "_____no_output_____" ], [ "austen_blob.sentences[37]", "_____no_output_____" ], [ "## Plotting 'Pride and Prejudice' sentence sentiment values over full novel\n\nplt.figure(figsize=(18,8))\n\nplt.plot(austen_sentiments)\n\nplt.show()", "_____no_output_____" ], [ "## Finding the most 'positive' sentences in 'Pride and Prejudice' and printing them\n\nmax_sentiment = max(austen_sentiments)\n\nprint(max_sentiment) # max sentiment polarity value\nprint()\n\nfor sentence in austen_blob.sentences:\n if sentence.sentiment.polarity == max_sentiment:\n print(sentence)\n print()", "1.0\n\n\"What an excellent father you have, girls,\" said she, when the door was shut.\n\nHe walked here, and he walked there, fancying himself so very great!\n\nElizabeth assured him that she could suit herself perfectly with those in the room.\n\nHer performance on the piano-forte is exquisite.\"\n\nyes--I understand you perfectly.\"\n\n\"I am perfectly convinced by it that Mr. Darcy has no defect.\n\n\"It _is_ wonderful,\"--replied Wickham,--\"for almost all his actions may be traced to pride;--and pride has often been his best friend.\n\nFamily pride, and _filial_ pride, for he is very proud of what his father was, have done this.\n\nHow wonderfully these sort of things occur!\n\nShe owed her greatest relief to her friend Miss Lucas, who often joined them, and good-naturedly engaged Mr. Collins's conversation to herself.\n\n\"An excellent consolation in its way,\" said Elizabeth, \"but it will not do for _us_.\n\nThe improvement of spending a night in London was added in time, and the plan became perfect as plan could be.\n\nIt is the greatest of favours when Miss De Bourgh comes in.\"\n\nAnne would have been a delightful performer, had her health allowed her to learn.\"\n\n\"Perfectly so--I thank you.\"\n\nShe is a very great favourite with some ladies of my acquaintance, Mrs. Hurst and Miss Bingley.\n\ncried Elizabeth, with the greatest satisfaction.\n\nCharlotte is an excellent manager, I dare say.\n\n\"His father was an excellent man,\" said Mrs. 
Gardiner.\n\n\"He is the best landlord, and the best master,\" said she, \"that ever lived.\n\n\"He is perfectly well behaved, polite, and unassuming,\" said her uncle.\n\nOn reaching the house, they were shewn through the hall into the saloon, whose northern aspect rendered it delightful for summer.\n\nOur distress, my dear Lizzy, is very great.\n\nAnd tell my dear Lydia, not to give any directions about her clothes, till she has seen me, for she does not know which are the best warehouses.\n\nWe acted with the best intentions.\"\n\nIt now occurred to the girls that their mother was in all likelihood perfectly ignorant of what had happened.\n\nBut, however, he is very welcome to come to Netherfield, if he likes it.\n\nHappy shall I be, when his stay at Netherfield is over!\"\n\nYou will be a very happy woman.\"\n\nmy dear, dear Jane, I am so happy!\n\nIf I could but see _you_ as happy!\n\nHe is perfectly amiable.\n\nYour idea of the ponies is delightful.\n\n" ], [ "## Finding the most 'negative' sentences in 'Pride and Prejudice' and printing them\n\nmin_sentiment = min(austen_sentiments)\n\nprint(min_sentiment) # max sentiment polarity value\nprint()\n\nfor sentence in austen_blob.sentences:\n if sentence.sentiment.polarity == min_sentiment:\n print(sentence)\n print()", "-1.0\n\nshocking!\"\n\nEvery body is disgusted with his pride.\n\n\"But what,\" said she, after a pause, \"can have been his motive?--what can have induced him to behave so cruelly?\"\n\nHis disposition must be dreadful.\"\n\n\"You shall hear then--but prepare yourself for something very dreadful.\n\nThe pause was to Elizabeth's feelings dreadful.\n\n\"Wickham so very bad!\n\nThe separation between her and her family was rather noisy than pathetic.\n\nIt would be dreadful!\n\nIt is every way horrible!\"\n\n\"Oh, yes!--that, that is the worst of all.\n\n\"She is so fond of Mrs. 
Forster,\" said she, \"it will be quite shocking to send her away!\n\nIt was all over before I arrived; so my curiosity was not so dreadfully racked as _your's_ seems to have been.\n\nHe called it, therefore, his duty to step forward, and endeavour to remedy an evil, which had been brought on by himself.\n\n\"Hate you!\n\nYou were disgusted with the women who were always speaking and looking, and thinking for _your_ approbation alone.\n\n" ], [ "## Example: smoothing a list of numbers using the 'pandas' package\n\nsome_values = [5, 4, 5, 6, 6, 7, 6, 19, 4, 4, 3, 3, 3, 1, 5, 5, 6, 7, 0]\n\npandas_series = pd.Series(some_values)\n\nlist(pandas_series.rolling(window=4).mean())", "_____no_output_____" ], [ "## Smoothing our data before plotting\n\nausten_sentiments_pd = pd.Series(austen_sentiments)\n\nausten_sentiments_smooth = austen_sentiments_pd.rolling(window=200).mean()\n\nprint(austen_sentiments_smooth[190:220])", "190 NaN\n191 NaN\n192 NaN\n193 NaN\n194 NaN\n195 NaN\n196 NaN\n197 NaN\n198 NaN\n199 0.081394\n200 0.084519\n201 0.088019\n202 0.090519\n203 0.090519\n204 0.091019\n205 0.089811\n206 0.091477\n207 0.092477\n208 0.097477\n209 0.096852\n210 0.095805\n211 0.095767\n212 0.095767\n213 0.096417\n214 0.098784\n215 0.099784\n216 0.100284\n217 0.101934\n218 0.101934\n219 0.101934\ndtype: float64\n" ], [ "## Plotting smoothed sentiment polarity values for each sentence in 'Pride and Prejudice'\n\nplt.figure(figsize=(18,8))\n\nplt.plot(austen_sentiments_smooth)\n\nplt.show()", "_____no_output_____" ], [ "## Comparing 'Moby Dick' sentiment values\n\nmelville_sentiments = [item.sentiment.polarity for item in melville_blob.sentences]\n\nmelville_sentiments_pd = pd.Series(melville_sentiments)\n\nmelville_sentiments_smooth = melville_sentiments_pd.rolling(window=200).mean()\n\nplt.figure(figsize=(18,8))\n\nplt.plot(melville_sentiments_smooth)\n\nplt.show()", "_____no_output_____" ], [ "## Finding and printing the most 'negative' sentence in a list of smoothed sentiment values\n\nmin_sentiment = min(melville_sentiments_smooth[199:])\n\nprint(min_sentiment) # min sentiment polarity value\nprint()\n\nmin_sentiment_index = list(melville_sentiments_smooth).index(min_sentiment) # index position of the 'min_sentiment' value\n\nprint(melville_blob.sentences[min_sentiment_index])", "-0.0235120767543\n\nBut, away with child’s play; no more gaffs and pikes to-day.\n" ], [ "## Finding and printing the most 'positive' sentence in a list of smoothed sentiment values\n\nmax_sentiment = max(melville_sentiments_smooth[199:])\n\nprint(max_sentiment) # max sentiment polarity value\nprint()\n\nmax_sentiment_index = list(melville_sentiments_smooth).index(max_sentiment) # index position of the 'min_sentiment' value\n\nprint(melville_blob.sentences[max_sentiment_index])", "0.154155002255\n\nFor as this appalling ocean surrounds the verdant land, so in the soul of man there lies one insular Tahiti, full of peace and joy, but encompassed by all the horrors of the half known life.\n" ], [ "## Finding and printing the most 'positive' sentence in a list of smoothed sentiment values\n\nmax_sentiment = max(austen_sentiments_smooth[199:])\n\nprint(max_sentiment) # max sentiment polarity value\nprint()\n\nmax_sentiment_index = list(austen_sentiments_smooth).index(max_sentiment) # index position of the 'max_sentiment' value\n\nprint(austen_blob.sentences[max_sentiment_index])", "0.177298302219\n\n\"If I were as rich as Mr. 
Darcy,\" cried a young Lucas who came with his sisters, \"I should not care how proud I was.\n" ], [ "## Finding and printing the most 'negative' sentence in a list of smoothed sentiment values\n\nmin_sentiment = min(austen_sentiments_smooth[199:])\n\nprint(min_sentiment) # min sentiment polarity value\nprint()\n\nmin_sent_index=list(austen_sentiments_smooth).index(min_sentiment) # index position of the 'min_sentiment' value\n\nprint(austen_blob.sentences[min_sent_index])", "0.0231849860209\n\nAnd when I returned home, the ----shire was to leave Meryton in a week or fortnight's time.\n" ], [ "## Creating functions to expedite the steps we put together above process\n# This function accepts an optional second argument for smoothing window size. The default is 200 windows.\n\ndef plot_polarity(text_path, window=200):\n text_in = open(text_path).read().replace('\\n', ' ')\n blob = TextBlob(text_in)\n sentiments = [sentence.sentiment.polarity for sentence in blob.sentences]\n sentiments_pd = pd.Series(sentiments)\n sentiments_smooth = sentiments_pd.rolling(window).mean()\n plt.figure(figsize = (18,8))\n plt.plot(sentiments_smooth)\n plt.show()", "_____no_output_____" ], [ "!find ./", "./\n./.DS_Store\n./Australian_Broadcasting_Commission_2006\n./Australian_Broadcasting_Commission_2006/rural.txt\n./Australian_Broadcasting_Commission_2006/science.txt\n./George_Eliot\n./George_Eliot/Adam_Bede.txt\n./George_Eliot/Daniel_Deronda.txt\n./George_Eliot/Felix_Holt_the_Radical.txt\n./George_Eliot/Middlemarch.txt\n./George_Eliot/Romola.txt\n./George_Eliot/Silas_Marner.txt\n./George_Eliot/The_Mill_on_the_Floss.txt\n./Herman_Melville\n./Herman_Melville/Bartleby_The_Scrivener.txt\n./Herman_Melville/Battle-Pieces_and_Aspects_of_the_War.txt\n./Herman_Melville/Billy_Budd.txt\n./Herman_Melville/Israel_Potter.txt\n./Herman_Melville/Mardi_vol_1.txt\n./Herman_Melville/Mardi_vol_2.txt\n./Herman_Melville/Moby_Dick.txt\n./Herman_Melville/Omoo_Adventures_in_the_South_Seas.txt\n./Herman_Melville/Pierre_or_the_Ambiguities.txt\n./Herman_Melville/Redburn_His_First_Voyage.txt\n./Herman_Melville/The_Confidence-Man.txt\n./Herman_Melville/Typee.txt\n./Herman_Melville/White_Jacket.txt\n./Inaugural_Speeches\n./Inaugural_Speeches/01_washington_1789.txt\n./Inaugural_Speeches/02_washington_1793.txt\n./Inaugural_Speeches/03_adams_john_1797.txt\n./Inaugural_Speeches/04_jefferson_1801.txt\n./Inaugural_Speeches/05_jefferson_1805.txt\n./Inaugural_Speeches/06_madison_1809.txt\n./Inaugural_Speeches/07_madison_1813.txt\n./Inaugural_Speeches/08_monroe_1817.txt\n./Inaugural_Speeches/09_monroe_1821.txt\n./Inaugural_Speeches/10_adams_john_quincy_1825.txt\n./Inaugural_Speeches/11_jackson_1829.txt\n./Inaugural_Speeches/12_jackson_1833.txt\n./Inaugural_Speeches/13_van_buren_1837.txt\n./Inaugural_Speeches/14_harrison_1841.txt\n./Inaugural_Speeches/15_polk_1845.txt\n./Inaugural_Speeches/16_taylor_1849.txt\n./Inaugural_Speeches/17_pierce_1853.txt\n./Inaugural_Speeches/18_buchanan_1857.txt\n./Inaugural_Speeches/19_lincoln_1861.txt\n./Inaugural_Speeches/20_lincoln_1865.txt\n./Inaugural_Speeches/21_grant_1869.txt\n./Inaugural_Speeches/22_grant_1873.txt\n./Inaugural_Speeches/23_hayes_1877.txt\n./Inaugural_Speeches/24_garfield_1881.txt\n./Inaugural_Speeches/25_cleveland_1885.txt\n./Inaugural_Speeches/26_harrison_1889.txt\n./Inaugural_Speeches/27_cleveland_1893.txt\n./Inaugural_Speeches/28_mckinley_1897.txt\n./Inaugural_Speeches/29_mckinley_1901.txt\n./Inaugural_Speeches/30_roosevelt_theodore_1905.txt\n./Inaugural_Speeches/31_taft_1909.txt\n./Inaugural_Spe
eches/32_wilson_1913.txt\n./Inaugural_Speeches/33_wilson_1917.txt\n./Inaugural_Speeches/34_harding_1921.txt\n./Inaugural_Speeches/35_coolidge_1925.txt\n./Inaugural_Speeches/36_hoover_1929.txt\n./Inaugural_Speeches/37_roosevelt_franklin_1933.txt\n./Inaugural_Speeches/38_roosevelt_franklin_1937.txt\n./Inaugural_Speeches/39_roosevelt_franklin_1941.txt\n./Inaugural_Speeches/40_roosevelt_franklin_1945.txt\n./Inaugural_Speeches/41_truman_1949.txt\n./Inaugural_Speeches/42_eisenhower_1953.txt\n./Inaugural_Speeches/43_eisenhower_1957.txt\n./Inaugural_Speeches/44_kennedy_1961.txt\n./Inaugural_Speeches/45_johnson_1965.txt\n./Inaugural_Speeches/46_nixon_1969.txt\n./Inaugural_Speeches/47_nixon_1973.txt\n./Inaugural_Speeches/48_carter_1977.txt\n./Inaugural_Speeches/49_reagan_1981.txt\n./Inaugural_Speeches/50_reagan_1985.txt\n./Inaugural_Speeches/51_bush_george_h_w_1989.txt\n./Inaugural_Speeches/52_clinton_1993.txt\n./Inaugural_Speeches/53_clinton_1997.txt\n./Inaugural_Speeches/54_bush_george_w_2001.txt\n./Inaugural_Speeches/55_bush_george_w_2005.txt\n./Inaugural_Speeches/56_obama_2009.txt\n./Inaugural_Speeches/57_obama_2013.txt\n./Inaugural_Speeches/58_trump_2017.txt\n./Jane_Austen\n./Jane_Austen/Emma.txt\n./Jane_Austen/Mansfield_Park.txt\n./Jane_Austen/Northanger_Abbey.txt\n./Jane_Austen/Persuasion.txt\n./Jane_Austen/Pride_and_Prejudice.txt\n./Jane_Austen/Sense_and_Sensibility.txt\n./Joseph_Conrad\n./Joseph_Conrad/Almayer's_Folly.txt\n./Joseph_Conrad/Chance.txt\n./Joseph_Conrad/Heart_of_Darkness.txt\n./Joseph_Conrad/Lord_Jim.txt\n./Joseph_Conrad/Nostromo.txt\n./Joseph_Conrad/Romance.txt\n./Joseph_Conrad/The_Arrow_of_Gold.txt\n./Joseph_Conrad/The_Inheritors.txt\n./Joseph_Conrad/The_Nature_of_a_Crime.txt\n./Joseph_Conrad/The_Nigger_of_the_Narcissus.txt\n./Joseph_Conrad/The_Outcast_of_the_Islands.txt\n./Joseph_Conrad/The_Rescue.txt\n./Joseph_Conrad/The_Secret_Agent.txt\n./Joseph_Conrad/The_Shadow-Line.txt\n./Joseph_Conrad/Typhoon.txt\n./Joseph_Conrad/Under_Western_Eyes.txt\n./Joseph_Conrad/Victory.txt\n" ], [ "plot_polarity('George_Eliot/Silas_Marner.txt')", "_____no_output_____" ], [ "plot_polarity('Joseph_Conrad/Heart_of_Darkness.txt')", "_____no_output_____" ] ], [ [ "### ▷ Plotting smoothed random data (for comparison)", "_____no_output_____" ] ], [ [ "## Plotting completely random data\n\nrandom_vals = np.random.rand(4000)\n\nvals_pd = pd.Series(random_vals)\nvals_smooth = vals_pd.rolling(window=200).mean()\n\nplt.figure(figsize=(18,8))\nplt.plot(vals_smooth)", "_____no_output_____" ] ], [ [ "### ▷ Working with multiple files", "_____no_output_____" ] ], [ [ "!ls *", "Australian_Broadcasting_Commission_2006:\r\nrural.txt science.txt\r\n\r\nGeorge_Eliot:\r\nAdam_Bede.txt\t\t Middlemarch.txt The_Mill_on_the_Floss.txt\r\nDaniel_Deronda.txt\t Romola.txt\r\nFelix_Holt_the_Radical.txt Silas_Marner.txt\r\n\r\nHerman_Melville:\r\nBartleby_The_Scrivener.txt\t\t Omoo_Adventures_in_the_South_Seas.txt\r\nBattle-Pieces_and_Aspects_of_the_War.txt Pierre_or_the_Ambiguities.txt\r\nBilly_Budd.txt\t\t\t\t Redburn_His_First_Voyage.txt\r\nIsrael_Potter.txt\t\t\t The_Confidence-Man.txt\r\nMardi_vol_1.txt\t\t\t\t Typee.txt\r\nMardi_vol_2.txt\t\t\t\t White_Jacket.txt\r\nMoby_Dick.txt\r\n\r\nInaugural_Speeches:\r\n01_washington_1789.txt\t 30_roosevelt_theodore_1905.txt\r\n02_washington_1793.txt\t 31_taft_1909.txt\r\n03_adams_john_1797.txt\t 32_wilson_1913.txt\r\n04_jefferson_1801.txt\t 33_wilson_1917.txt\r\n05_jefferson_1805.txt\t 34_harding_1921.txt\r\n06_madison_1809.txt\t 35_coolidge_1925.txt\r\n07_madison_1813.txt\t 
36_hoover_1929.txt\r\n08_monroe_1817.txt\t 37_roosevelt_franklin_1933.txt\r\n09_monroe_1821.txt\t 38_roosevelt_franklin_1937.txt\r\n10_adams_john_quincy_1825.txt 39_roosevelt_franklin_1941.txt\r\n11_jackson_1829.txt\t 40_roosevelt_franklin_1945.txt\r\n12_jackson_1833.txt\t 41_truman_1949.txt\r\n13_van_buren_1837.txt\t 42_eisenhower_1953.txt\r\n14_harrison_1841.txt\t 43_eisenhower_1957.txt\r\n15_polk_1845.txt\t 44_kennedy_1961.txt\r\n16_taylor_1849.txt\t 45_johnson_1965.txt\r\n17_pierce_1853.txt\t 46_nixon_1969.txt\r\n18_buchanan_1857.txt\t 47_nixon_1973.txt\r\n19_lincoln_1861.txt\t 48_carter_1977.txt\r\n20_lincoln_1865.txt\t 49_reagan_1981.txt\r\n21_grant_1869.txt\t 50_reagan_1985.txt\r\n22_grant_1873.txt\t 51_bush_george_h_w_1989.txt\r\n23_hayes_1877.txt\t 52_clinton_1993.txt\r\n24_garfield_1881.txt\t 53_clinton_1997.txt\r\n25_cleveland_1885.txt\t 54_bush_george_w_2001.txt\r\n26_harrison_1889.txt\t 55_bush_george_w_2005.txt\r\n27_cleveland_1893.txt\t 56_obama_2009.txt\r\n28_mckinley_1897.txt\t 57_obama_2013.txt\r\n29_mckinley_1901.txt\t 58_trump_2017.txt\r\n\r\nJane_Austen:\r\nEmma.txt\t Northanger_Abbey.txt Pride_and_Prejudice.txt\r\nMansfield_Park.txt Persuasion.txt\t Sense_and_Sensibility.txt\r\n\r\nJoseph_Conrad:\r\nAlmayer's_Folly.txt The_Arrow_of_Gold.txt\t\tThe_Secret_Agent.txt\r\nChance.txt\t The_Inheritors.txt\t\tThe_Shadow-Line.txt\r\nHeart_of_Darkness.txt The_Nature_of_a_Crime.txt\tTyphoon.txt\r\nLord_Jim.txt\t The_Nigger_of_the_Narcissus.txt\tUnder_Western_Eyes.txt\r\nNostromo.txt\t The_Outcast_of_the_Islands.txt\tVictory.txt\r\nRomance.txt\t The_Rescue.txt\r\n" ], [ "os.chdir('/sharedfolder/Sample_corpora/Inaugural_Speeches/')\nsorted(os.listdir('./'))", "_____no_output_____" ], [ "inaugural_filenames = sorted(os.listdir('./'))\n\ninaugural_sentiment_values = []\n\nfor filename in inaugural_filenames:\n inaugural_text = open(filename).read()\n sentiment_polarity_value = TextBlob(inaugural_text).sentiment.polarity\n inaugural_sentiment_values.append(sentiment_polarity_value)\n\nprint(inaugural_sentiment_values)", "[0.20409989875715678, 0.012777777777777782, 0.17090914396061452, 0.2354787414965986, 0.12462612618862617, 0.20941158900836318, 0.10505986625667475, 0.24822346940271453, 0.1737455427951296, 0.156065848965849, 0.1161323366555925, 0.1862532299741602, 0.13125812221966077, 0.15028613415191122, 0.1570634720773225, 0.16774426155240107, 0.1477585377585378, 0.18102136525605914, 0.13351624810068521, 0.0783103008103008, 0.2158637873754153, 0.20653868784936744, 0.16554095375523947, 0.1633560806477473, 0.20128613053613048, 0.17756533693709864, 0.13216615560365558, 0.1878832824734463, 0.16857971246860134, 0.14200352504638222, 0.13484039627537392, 0.16225635475635483, 0.20935103586889306, 0.15240879828326176, 0.17769726195042654, 0.14026330711541984, 0.14062265115836545, 0.13257654181567227, 0.14153127917833802, 0.053854723707664885, 0.18481650924832735, 0.1657912518953127, 0.1695810756572346, 0.15433497536945817, 0.20468173801507142, 0.17713651732882504, 0.23694143085911382, 0.20993388079265954, 0.18572275860104812, 0.15645454489675598, 0.20008490134727752, 0.15652230323282956, 0.1528953788235967, 0.18813316745559738, 0.17758723250744524, 0.08578566388911217, 0.1301566804407713, 0.16502488189132028]\n" ], [ "## Creating nicely formatted labels for the sentiment values above\n\ninaugural_labels = [item.replace('.txt','').replace('_', ' ').title() for item in inaugural_filenames]\n\ninaugural_labels", "_____no_output_____" ], [ "## Plotting presidential inaugural address sentiment 
values over time\n\nplt.figure(figsize = (20,8))\n\nplt.xticks(range(len(inaugural_sentiment_values)), inaugural_labels) # two arguments: tick positions, tick display list\n\nplt.xticks(rotation=-85)\n\nplt.ylabel('Sentiment Polarity Value')\n\nplt.plot(inaugural_sentiment_values)\n\nplt.show()", "_____no_output_____" ] ], [ [ "## ▷ Assignment\n\n For each author in our set of corpora, which is their most 'positive' novel? Their most 'negative'?", "_____no_output_____" ], [ "## ▷ Sentiment Histograms", "_____no_output_____" ] ], [ [ "os.chdir('/sharedfolder/Sample_corpora/')", "_____no_output_____" ], [ "text_in = open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\\n', ' ')\n\nblob = TextBlob(text_in)\nsentiments = [sentence.sentiment.polarity for sentence in blob.sentences]\nplt.figure(figsize=(20,10))\nplt.hist(sentiments, bins=25)\nplt.show()", "_____no_output_____" ], [ "text_in = open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\\n', ' ')\n\nblob = TextBlob(text_in)\nsentiments = [sentence.sentiment.subjectivity for sentence in blob.sentences]\nplt.figure(figsize=(20,10))\nplt.hist(sentiments, bins=25)\nplt.show()", "_____no_output_____" ] ], [ [ "## ▷ Cleaning sentiment values", "_____no_output_____" ] ], [ [ "text_in = open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\\n', ' ')\n\nblob = TextBlob(text_in)\nsentiments = [sentence.sentiment.polarity for sentence in blob.sentences]\nsentiments_cleaned = [value for value in sentiments if value!=0]\nplt.figure(figsize=(20,10))\nplt.hist(sentiments_cleaned, bins=25)\nplt.show()", "_____no_output_____" ], [ "def polarity_histogram_cleaned(text_path):\n text_in = open(text_path).read().replace('\\n', ' ')\n blob = TextBlob(text_in)\n sentiments = [sentence.sentiment.polarity for sentence in blob.sentences]\n sentiments_cleaned = [value for value in sentiments if value!=0]\n plt.figure(figsize=(20,10))\n plt.hist(sentiments_cleaned, bins=25)\n plt.show()", "_____no_output_____" ], [ "!find ./", 
"./\r\n./.DS_Store\r\n./Australian_Broadcasting_Commission_2006\r\n./Australian_Broadcasting_Commission_2006/rural.txt\r\n./Australian_Broadcasting_Commission_2006/science.txt\r\n./George_Eliot\r\n./George_Eliot/Adam_Bede.txt\r\n./George_Eliot/Daniel_Deronda.txt\r\n./George_Eliot/Felix_Holt_the_Radical.txt\r\n./George_Eliot/Middlemarch.txt\r\n./George_Eliot/Romola.txt\r\n./George_Eliot/Silas_Marner.txt\r\n./George_Eliot/The_Mill_on_the_Floss.txt\r\n./Herman_Melville\r\n./Herman_Melville/Bartleby_The_Scrivener.txt\r\n./Herman_Melville/Battle-Pieces_and_Aspects_of_the_War.txt\r\n./Herman_Melville/Billy_Budd.txt\r\n./Herman_Melville/Israel_Potter.txt\r\n./Herman_Melville/Mardi_vol_1.txt\r\n./Herman_Melville/Mardi_vol_2.txt\r\n./Herman_Melville/Moby_Dick.txt\r\n./Herman_Melville/Omoo_Adventures_in_the_South_Seas.txt\r\n./Herman_Melville/Pierre_or_the_Ambiguities.txt\r\n./Herman_Melville/Redburn_His_First_Voyage.txt\r\n./Herman_Melville/The_Confidence-Man.txt\r\n./Herman_Melville/Typee.txt\r\n./Herman_Melville/White_Jacket.txt\r\n./Inaugural_Speeches\r\n./Inaugural_Speeches/01_washington_1789.txt\r\n./Inaugural_Speeches/02_washington_1793.txt\r\n./Inaugural_Speeches/03_adams_john_1797.txt\r\n./Inaugural_Speeches/04_jefferson_1801.txt\r\n./Inaugural_Speeches/05_jefferson_1805.txt\r\n./Inaugural_Speeches/06_madison_1809.txt\r\n./Inaugural_Speeches/07_madison_1813.txt\r\n./Inaugural_Speeches/08_monroe_1817.txt\r\n./Inaugural_Speeches/09_monroe_1821.txt\r\n./Inaugural_Speeches/10_adams_john_quincy_1825.txt\r\n./Inaugural_Speeches/11_jackson_1829.txt\r\n./Inaugural_Speeches/12_jackson_1833.txt\r\n./Inaugural_Speeches/13_van_buren_1837.txt\r\n./Inaugural_Speeches/14_harrison_1841.txt\r\n./Inaugural_Speeches/15_polk_1845.txt\r\n./Inaugural_Speeches/16_taylor_1849.txt\r\n./Inaugural_Speeches/17_pierce_1853.txt\r\n./Inaugural_Speeches/18_buchanan_1857.txt\r\n./Inaugural_Speeches/19_lincoln_1861.txt\r\n./Inaugural_Speeches/20_lincoln_1865.txt\r\n./Inaugural_Speeches/21_grant_1869.txt\r\n./Inaugural_Speeches/22_grant_1873.txt\r\n./Inaugural_Speeches/23_hayes_1877.txt\r\n./Inaugural_Speeches/24_garfield_1881.txt\r\n./Inaugural_Speeches/25_cleveland_1885.txt\r\n./Inaugural_Speeches/26_harrison_1889.txt\r\n./Inaugural_Speeches/27_cleveland_1893.txt\r\n./Inaugural_Speeches/28_mckinley_1897.txt\r\n./Inaugural_Speeches/29_mckinley_1901.txt\r\n./Inaugural_Speeches/30_roosevelt_theodore_1905.txt\r\n./Inaugural_Speeches/31_taft_1909.txt\r\n./Inaugural_Speeches/32_wilson_1913.txt\r\n./Inaugural_Speeches/33_wilson_1917.txt\r\n./Inaugural_Speeches/34_harding_1921.txt\r\n./Inaugural_Speeches/35_coolidge_1925.txt\r\n./Inaugural_Speeches/36_hoover_1929.txt\r\n./Inaugural_Speeches/37_roosevelt_franklin_1933.txt\r\n./Inaugural_Speeches/38_roosevelt_franklin_1937.txt\r\n./Inaugural_Speeches/39_roosevelt_franklin_1941.txt\r\n./Inaugural_Speeches/40_roosevelt_franklin_1945.txt\r\n./Inaugural_Speeches/41_truman_1949.txt\r\n./Inaugural_Speeches/42_eisenhower_1953.txt\r\n./Inaugural_Speeches/43_eisenhower_1957.txt\r\n./Inaugural_Speeches/44_kennedy_1961.txt\r\n./Inaugural_Speeches/45_johnson_1965.txt\r\n./Inaugural_Speeches/46_nixon_1969.txt\r\n./Inaugural_Speeches/47_nixon_1973.txt\r\n./Inaugural_Speeches/48_carter_1977.txt\r\n./Inaugural_Speeches/49_reagan_1981.txt\r\n./Inaugural_Speeches/50_reagan_1985.txt\r\n./Inaugural_Speeches/51_bush_george_h_w_1989.txt\r\n./Inaugural_Speeches/52_clinton_1993.txt\r\n./Inaugural_Speeches/53_clinton_1997.txt\r\n./Inaugural_Speeches/54_bush_george_w_2001.txt\r\n./Inaugural_Speeches/55_bush_g
eorge_w_2005.txt\r\n./Inaugural_Speeches/56_obama_2009.txt\r\n./Inaugural_Speeches/57_obama_2013.txt\r\n./Inaugural_Speeches/58_trump_2017.txt\r\n./Jane_Austen\r\n./Jane_Austen/Emma.txt\r\n./Jane_Austen/Mansfield_Park.txt\r\n./Jane_Austen/Northanger_Abbey.txt\r\n./Jane_Austen/Persuasion.txt\r\n./Jane_Austen/Pride_and_Prejudice.txt\r\n./Jane_Austen/Sense_and_Sensibility.txt\r\n./Joseph_Conrad\r\n./Joseph_Conrad/Almayer's_Folly.txt\r\n./Joseph_Conrad/Chance.txt\r\n./Joseph_Conrad/Heart_of_Darkness.txt\r\n./Joseph_Conrad/Lord_Jim.txt\r\n./Joseph_Conrad/Nostromo.txt\r\n./Joseph_Conrad/Romance.txt\r\n./Joseph_Conrad/The_Arrow_of_Gold.txt\r\n./Joseph_Conrad/The_Inheritors.txt\r\n./Joseph_Conrad/The_Nature_of_a_Crime.txt\r\n./Joseph_Conrad/The_Nigger_of_the_Narcissus.txt\r\n./Joseph_Conrad/The_Outcast_of_the_Islands.txt\r\n./Joseph_Conrad/The_Rescue.txt\r\n./Joseph_Conrad/The_Secret_Agent.txt\r\n./Joseph_Conrad/The_Shadow-Line.txt\r\n./Joseph_Conrad/Typhoon.txt\r\n./Joseph_Conrad/Under_Western_Eyes.txt\r\n./Joseph_Conrad/Victory.txt\r\n" ], [ "polarity_histogram_cleaned('./Joseph_Conrad/The_Secret_Agent.txt')", "_____no_output_____" ] ], [ [ "## ▷ Comparing Sentiment Distributions", "_____no_output_____" ] ], [ [ "melville_blob = TextBlob(open('Herman_Melville/Moby_Dick.txt').read().replace('\\n', ' '))\nausten_blob = TextBlob(open('Jane_Austen/Pride_and_Prejudice.txt').read().replace('\\n', ' '))\n\nmelville_sentiments = [sentence.sentiment.polarity for sentence in melville_blob.sentences]\nmelville_sentiments_cleaned = [value for value in melville_sentiments if value!=0.0]\n\nausten_sentiments = [sentence.sentiment.polarity for sentence in austen_blob.sentences]\nausten_sentiments_cleaned = [value for value in austen_sentiments if value!=0.0]\n\nplt.figure(figsize=(15,8))\n\nplt.hist(melville_sentiments_cleaned, bins=25, alpha=0.5, label='Moby Dick')\nplt.hist(austen_sentiments_cleaned, bins=25, alpha=0.5, label='Pride and Prejudice')\n\nplt.legend(loc='upper right')\n\nplt.show()", "_____no_output_____" ], [ "print(np.mean(melville_sentiments_cleaned))\nprint(np.mean(austen_sentiments_cleaned))", "0.0888490586899\n0.145630394804\n" ] ], [ [ "## ▷ Statistical Tests", "_____no_output_____" ] ], [ [ "## t-test of independent values\n# (used to determine whether two *normally distributed* sets of values are significantly different)\n\nfrom scipy import stats\n\nstats.ttest_ind(melville_sentiments_cleaned, austen_sentiments_cleaned)", "_____no_output_____" ], [ "## Mann-Whitney U test\n# (used to test two sets of *non-normally distributed* values are significantly different)\n\nstats.mannwhitneyu(melville_sentiments, austen_sentiments)", "_____no_output_____" ] ], [ [ "## ▷ Assignment\n\n Is George Eliot significantly more subjective than Jane Austen?\n Is Herman Melville significantly more 'positive' than Joseph Conrad?", "_____no_output_____" ], [ "## ▷ Assignment\n\n Write a function that takes two texts' paths as arguments and \n (a) plots a histogram comparing their sentences' sentiment distributions\n (b) tests whether their sentiment values are significantly different", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
4a3edfd5e4027813cd1732c48fde7d7191a21f2d
5,847
ipynb
Jupyter Notebook
demo/fibonnaci_example/view.ipynb
kafonek/notebook_restified
161b39de4ac1c62d3a9974b468132d66d420e432
[ "BSD-3-Clause" ]
6
2019-06-27T02:18:50.000Z
2021-01-05T08:57:55.000Z
demo/fibonnaci_example/view.ipynb
kafonek/notebook_restified
161b39de4ac1c62d3a9974b468132d66d420e432
[ "BSD-3-Clause" ]
2
2019-06-06T16:26:12.000Z
2020-02-26T23:06:13.000Z
demo/fibonnaci_example/view.ipynb
kafonek/notebook_restified
161b39de4ac1c62d3a9974b468132d66d420e432
[ "BSD-3-Clause" ]
null
null
null
27.7109
436
0.555156
[ [ [ "### Introduction\n\nThis is a `View` Notebook to show an `IntSlider` widget either in an interactive Notebook or in a `Voila` Dashboard mode that will then print the [Fibonnaci sequence](https://en.wikipedia.org/wiki/Fibonacci_number) answer for that number. It will also show how long it takes each handler to calculate the number, which should demonstrate what kind of overhead is involved with `refactored code`, `PythonModel`, and `KernelModel`. ", "_____no_output_____" ] ], [ [ "import ipywidgets as widgets\ngrid = widgets.GridspecLayout(4, 3)\n\n# top row\ninput_label = widgets.Label(\"User Input\")\nuser_input = widgets.IntText(value=1, description='Fibonnaci n:')\ngrid[0, 0] = input_label\ngrid[0, 1:] = user_input\n\n# refactored code row\nlabel1 = widgets.Label('Refactored code')\noutput1 = widgets.Text(disabled=True, description='Result:')\ndebug1 = widgets.Text(disabled=True, description='Debug:')\ngrid[1, 0] = label1\ngrid[1, 1] = output1\ngrid[1, 2] = debug1\n\n# PythonModel row\nlabel2 = widgets.Label('PythonModel')\noutput2 = widgets.Text(disabled=True, description='Result:')\ndebug2 = widgets.Text(disabled=True, description='Debug:')\ngrid[2, 0] = label2\ngrid[2, 1] = output2\ngrid[2, 2] = debug2\n\n# KernelModel row\nlabel3 = widgets.Label('KernelModel')\noutput3 = widgets.Text(disabled=True, description='Result:')\ndebug3 = widgets.Text(disabled=True, description='Debug:')\ngrid[3, 0] = label3\ngrid[3, 1] = output3\ngrid[3, 2] = debug3\n\ngrid", "_____no_output_____" ], [ "import time", "_____no_output_____" ], [ "### Refactored code handler\ndef fibonacci_generator():\n \"A generator that yields the last number in the sequence plus the number before that\"\n a, b = 0, 1\n while True:\n yield a\n tmp_value = b\n b = a + b\n a = tmp_value\n\ndef handler1(ev):\n start = time.time()\n gen = fibonacci_generator()\n n = user_input.value\n for i in range(n+1):\n answer = next(gen)\n output1.value = str(answer)\n debug1.value = 'took %.4f seconds' % (time.time() - start)\n \nuser_input.observe(handler1, names='value')", "_____no_output_____" ], [ "### Create PythonModel and KernelModel objects\nimport notebook_restified\n\npm = notebook_restified.PythonModel('model.ipynb')\nkm = notebook_restified.KernelModel('model.ipynb')", "_____no_output_____" ], [ "### PythonModel handler\ndef handler2(ev):\n start = time.time()\n params = {'n' : user_input.value}\n result = pm.execute(params)\n output2.value = str(result)\n debug2.value = 'took %.4f seconds' % (time.time() - start)\n \nuser_input.observe(handler2, names='value') ", "_____no_output_____" ], [ "### KernelModel handler\ndef handler3(ev):\n start = time.time()\n params = {'n' : user_input.value}\n result = km.execute(params)\n output3.value = str(result)\n debug3.value = 'took %.4f seconds' % (time.time() - start)\n \nuser_input.observe(handler3, names='value') ", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a3eeee04614f0a7203f4aba2b74e3941cca7a6b
5,968
ipynb
Jupyter Notebook
docs/source/examples/Widget Alignment.ipynb
akhand1111/ipywidgets
a6228df8a24079bd4f8b6c1645b31e1c00218535
[ "BSD-3-Clause" ]
1
2019-09-08T18:11:03.000Z
2019-09-08T18:11:03.000Z
docs/source/examples/Widget Alignment.ipynb
akhand1111/ipywidgets
a6228df8a24079bd4f8b6c1645b31e1c00218535
[ "BSD-3-Clause" ]
3
2020-01-18T12:26:26.000Z
2020-01-20T13:17:32.000Z
docs/source/examples/Widget Alignment.ipynb
akhand1111/ipywidgets
a6228df8a24079bd4f8b6c1645b31e1c00218535
[ "BSD-3-Clause" ]
1
2021-01-28T05:58:42.000Z
2021-01-28T05:58:42.000Z
22.268657
112
0.516253
[ [ [ "from ipywidgets import *", "_____no_output_____" ] ], [ [ "**1.** `VBox(HBox)`", "_____no_output_____" ] ], [ [ "VBox([HBox([VBox([Dropdown(description='Choice', options=['foo', 'bar']), \n ColorPicker(description='Color'), \n HBox([Button(), Button()])]), \n Textarea(value=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit,\"\n\"sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. \"\n\"Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris \"\n\"nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in \"\n\"reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \"\n\"pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa \"\n\"qui officia deserunt mollit anim id est laborum.\")]),\n HBox([Text(), Checkbox(description='Check box')]), \n IntSlider(), \n Controller()], background_color='#EEE')", "_____no_output_____" ] ], [ [ "**2.** `HBox(VBox)`", "_____no_output_____" ] ], [ [ "HBox([VBox([Button(description='Press'), Dropdown(options=['a', 'b']), Button(description='Button')]), \n VBox([Button(), Checkbox(), IntText()])], background_color='#EEE')", "_____no_output_____" ] ], [ [ "**3.** `VBox(HBox)` width sliders, range sliders and progress bars", "_____no_output_____" ] ], [ [ "VBox([HBox([Button(), FloatRangeSlider(), Text(), Button()]), \n HBox([Button(), FloatText(),\n FloatProgress(value=40), Checkbox(description='Check')]), \n HBox([ToggleButton(), IntSlider(description='Foobar'),\n Dropdown(options=['foo', 'bar']), Valid()]),\n ])", "_____no_output_____" ] ], [ [ "**4.** Dropdown resize", "_____no_output_____" ] ], [ [ "dd = Dropdown(description='Foobar', options=['foo', 'bar'])\ndd", "_____no_output_____" ], [ "dd.layout.width = '148px'", "_____no_output_____" ], [ "cp = ColorPicker(description='foobar')", "_____no_output_____" ] ], [ [ "**5.** Colorpicker alignment, concise and long version", "_____no_output_____" ] ], [ [ "VBox([HBox([Dropdown(width='148px', options=['foo', 'bar']),\n Button(description='Button')]), cp, HBox([Button(), Button()])])", "_____no_output_____" ], [ "cp.concise = True", "_____no_output_____" ], [ "cp.concise = False", "_____no_output_____" ], [ "cp2 = ColorPicker()", "_____no_output_____" ], [ "VBox([HBox([Button(), Button()]), cp2])", "_____no_output_____" ], [ "cp2.concise = True", "_____no_output_____" ], [ "cp2.concise = False", "_____no_output_____" ] ], [ [ "**6.** Vertical slider and progress bar alignment and resize", "_____no_output_____" ] ], [ [ "HBox([IntSlider(description='Slider', orientation='vertical', height='200px'),\n FloatProgress(description='Progress', value=50, orientation='vertical', height='200px')])", "_____no_output_____" ], [ "HBox([IntSlider(description='Slider', orientation='vertical'),\n FloatProgress(description='Progress', value=50, orientation='vertical')])", "_____no_output_____" ] ], [ [ "**7.** Tabs\n\n", "_____no_output_____" ] ], [ [ "t = Tab(children=[FloatText(), IntSlider()], _titles={0: 'Text', 1: 'Slider'})\nt", "_____no_output_____" ], [ "t.selected_index = 1", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a3ef20b99c528cc6ee24dbcff76e15a0b5e3f72
105,674
ipynb
Jupyter Notebook
datacamp-project/Dr. Semmelweis and the Discovery of Handwashing/notebook.ipynb
sanikamal/awesome-ml-examples
41758a86b4d7762fef644510cae9752e20e42fa8
[ "MIT" ]
1
2020-07-07T23:36:46.000Z
2020-07-07T23:36:46.000Z
datacamp-project/Dr. Semmelweis and the Discovery of Handwashing/notebook.ipynb
sanikamal/machine-learning-atoz
7c53c91ca9fc15b02b51a24282e6e9cf8e6b63c7
[ "MIT" ]
null
null
null
datacamp-project/Dr. Semmelweis and the Discovery of Handwashing/notebook.ipynb
sanikamal/machine-learning-atoz
7c53c91ca9fc15b02b51a24282e6e9cf8e6b63c7
[ "MIT" ]
null
null
null
167.470681
33,192
0.894714
[ [ [ "## 1. Meet Dr. Ignaz Semmelweis\n<p><img style=\"float: left;margin:5px 20px 5px 1px\" src=\"https://s3.amazonaws.com/assets.datacamp.com/production/project_20/img/ignaz_semmelweis_1860.jpeg\"></p>\n<!--\n<img style=\"float: left;margin:5px 20px 5px 1px\" src=\"https://s3.amazonaws.com/assets.datacamp.com/production/project_20/datasets/ignaz_semmelweis_1860.jpeg\">\n-->\n<p>This is Dr. Ignaz Semmelweis, a Hungarian physician born in 1818 and active at the Vienna General Hospital. If Dr. Semmelweis looks troubled it's probably because he's thinking about <em>childbed fever</em>: A deadly disease affecting women that just have given birth. He is thinking about it because in the early 1840s at the Vienna General Hospital as many as 10% of the women giving birth die from it. He is thinking about it because he knows the cause of childbed fever: It's the contaminated hands of the doctors delivering the babies. And they won't listen to him and <em>wash their hands</em>!</p>\n<p>In this notebook, we're going to reanalyze the data that made Semmelweis discover the importance of <em>handwashing</em>. Let's start by looking at the data that made Semmelweis realize that something was wrong with the procedures at Vienna General Hospital.</p>", "_____no_output_____" ] ], [ [ "# importing modules\nimport pandas as pd\n\n# Read datasets/yearly_deaths_by_clinic.csv into yearly\nyearly = pd.read_csv('datasets/yearly_deaths_by_clinic.csv')\n\n# Print out yearly\nprint(yearly)", " year births deaths clinic\n0 1841 3036 237 clinic 1\n1 1842 3287 518 clinic 1\n2 1843 3060 274 clinic 1\n3 1844 3157 260 clinic 1\n4 1845 3492 241 clinic 1\n5 1846 4010 459 clinic 1\n6 1841 2442 86 clinic 2\n7 1842 2659 202 clinic 2\n8 1843 2739 164 clinic 2\n9 1844 2956 68 clinic 2\n10 1845 3241 66 clinic 2\n11 1846 3754 105 clinic 2\n" ] ], [ [ "## 2. The alarming number of deaths\n<p>The table above shows the number of women giving birth at the two clinics at the Vienna General Hospital for the years 1841 to 1846. You'll notice that giving birth was very dangerous; an <em>alarming</em> number of women died as the result of childbirth, most of them from childbed fever.</p>\n<p>We see this more clearly if we look at the <em>proportion of deaths</em> out of the number of women giving birth. Let's zoom in on the proportion of deaths at Clinic 1.</p>", "_____no_output_____" ] ], [ [ "# Calculate proportion of deaths per no. births\nyearly[\"proportion_deaths\"]=yearly['deaths']/yearly['births']\n# Extract clinic 1 data into yearly1 and clinic 2 data into yearly2\nyearly1 = yearly[yearly[\"clinic\"] == \"clinic 1\"]\nyearly2 = yearly[yearly[\"clinic\"] == \"clinic 2\"]\n\n# Print out yearly1\nprint(yearly1)", " year births deaths clinic proportion_deaths\n0 1841 3036 237 clinic 1 0.078063\n1 1842 3287 518 clinic 1 0.157591\n2 1843 3060 274 clinic 1 0.089542\n3 1844 3157 260 clinic 1 0.082357\n4 1845 3492 241 clinic 1 0.069015\n5 1846 4010 459 clinic 1 0.114464\n" ] ], [ [ "## 3. Death at the clinics\n<p>If we now plot the proportion of deaths at both clinic 1 and clinic 2 we'll see a curious pattern...</p>", "_____no_output_____" ] ], [ [ "# This makes plots appear in the notebook\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n# Plot yearly proportion of deaths at the two clinics\nax = yearly1.plot(x=\"year\", \n y=\"proportion_deaths\", \n label=\"Clinic 1\")\nyearly2.plot(x=\"year\", y=\"proportion_deaths\",\n label=\"Clinic 2\", ax=ax)\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 4. 
The handwashing begins\n<p>Why is the proportion of deaths constantly so much higher in Clinic 1? Semmelweis saw the same pattern and was puzzled and distressed. The only difference between the clinics was that many medical students served at Clinic 1, while mostly midwife students served at Clinic 2. While the midwives only tended to the women giving birth, the medical students also spent time in the autopsy rooms examining corpses. </p>\n<p>Semmelweis started to suspect that something on the corpses, spread from the hands of the medical students, caused childbed fever. So in a desperate attempt to stop the high mortality rates, he decreed: <em>Wash your hands!</em> This was an unorthodox and controversial request, nobody in Vienna knew about bacteria at this point in time. </p>\n<p>Let's load in monthly data from Clinic 1 to see if the handwashing had any effect.</p>", "_____no_output_____" ] ], [ [ "# Read datasets/monthly_deaths.csv into monthly\nmonthly = pd.read_csv(\"datasets/monthly_deaths.csv\", parse_dates=[\"date\"])\n\n\n# Calculate proportion of deaths per no. births\nmonthly[\"proportion_deaths\"] = monthly['deaths']/ monthly['births']\n\n# Print out the first rows in monthly\nmonthly.head()", "_____no_output_____" ] ], [ [ "## 5. The effect of handwashing\n<p>With the data loaded we can now look at the proportion of deaths over time. In the plot below we haven't marked where obligatory handwashing started, but it reduced the proportion of deaths to such a degree that you should be able to spot it!</p>", "_____no_output_____" ] ], [ [ "# Plot monthly proportion of deaths\nax = monthly.plot(x=\"date\", y=\"proportion_deaths\")\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 6. The effect of handwashing highlighted\n<p>Starting from the summer of 1847 the proportion of deaths is drastically reduced and, yes, this was when Semmelweis made handwashing obligatory. </p>\n<p>The effect of handwashing is made even more clear if we highlight this in the graph.</p>", "_____no_output_____" ] ], [ [ "# Date when handwashing was made mandatory\nimport pandas as pd\nhandwashing_start = pd.to_datetime('1847-06-01')\n\n# Split monthly into before and after handwashing_start\nbefore_washing = monthly[monthly[\"date\"] < handwashing_start]\nafter_washing = monthly[monthly[\"date\"] >= handwashing_start]\n\n# Plot monthly proportion of deaths before and after handwashing\nax = before_washing.plot(x=\"date\", y=\"proportion_deaths\",\n label=\"Before handwashing\")\nafter_washing.plot(x=\"date\", y=\"proportion_deaths\",\n label=\"After handwashing\", ax=ax)\nax.set_ylabel(\"Proportion deaths\")", "_____no_output_____" ] ], [ [ "## 7. More handwashing, fewer deaths?\n<p>Again, the graph shows that handwashing had a huge effect. How much did it reduce the monthly proportion of deaths on average?</p>", "_____no_output_____" ] ], [ [ "# Difference in mean monthly proportion of deaths due to handwashing\nbefore_proportion = before_washing[\"proportion_deaths\"]\nafter_proportion = after_washing[\"proportion_deaths\"]\nmean_diff = after_proportion.mean() - before_proportion.mean()\nmean_diff", "_____no_output_____" ] ], [ [ "## 8. A Bootstrap analysis of Semmelweis handwashing data\n<p>It reduced the proportion of deaths by around 8 percentage points! From 10% on average to just 2% (which is still a high number by modern standards). 
</p>\n<p>To get a feeling for the uncertainty around how much handwashing reduces mortalities we could look at a confidence interval (here calculated using the bootstrap method).</p>", "_____no_output_____" ] ], [ [ "# A bootstrap analysis of the reduction of deaths due to handwashing\nboot_mean_diff = []\nfor i in range(3000):\n boot_before = before_proportion.sample(frac=1, replace=True)\n boot_after = after_proportion.sample(frac=1, replace=True)\n boot_mean_diff.append( boot_after.mean() - boot_before.mean() )\n\n# Calculating a 95% confidence interval from boot_mean_diff \nconfidence_interval = pd.Series(boot_mean_diff).quantile([0.025, 0.975])\nconfidence_interval", "_____no_output_____" ] ], [ [ "## 9. The fate of Dr. Semmelweis\n<p>So handwashing reduced the proportion of deaths by between 6.7 and 10 percentage points, according to a 95% confidence interval. All in all, it would seem that Semmelweis had solid evidence that handwashing was a simple but highly effective procedure that could save many lives.</p>\n<p>The tragedy is that, despite the evidence, Semmelweis' theory — that childbed fever was caused by some \"substance\" (what we today know as <em>bacteria</em>) from autopsy room corpses — was ridiculed by contemporary scientists. The medical community largely rejected his discovery and in 1849 he was forced to leave the Vienna General Hospital for good.</p>\n<p>One reason for this was that statistics and statistical arguments were uncommon in medical science in the 1800s. Semmelweis only published his data as long tables of raw data, but he didn't show any graphs nor confidence intervals. If he would have had access to the analysis we've just put together he might have been more successful in getting the Viennese doctors to wash their hands.</p>", "_____no_output_____" ] ], [ [ "# The data Semmelweis collected points to that:\ndoctors_should_wash_their_hands = True", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a3ef7c52d181361caedb2e27e0471b7d06c298b
570,438
ipynb
Jupyter Notebook
Data Visualization with Python/Week2/Excercise2/DV0101EN-2-3-1-Pie-Charts-Box-Plots-Scatter-Plots-and-Bubble-Plots-py-v2.0.ipynb
pietromosca1994/IBM_Data_Science_Professional_Certificate
4643bac74ffa58cfdfbd119ff68aef80ca87d8d3
[ "MIT" ]
null
null
null
Data Visualization with Python/Week2/Excercise2/DV0101EN-2-3-1-Pie-Charts-Box-Plots-Scatter-Plots-and-Bubble-Plots-py-v2.0.ipynb
pietromosca1994/IBM_Data_Science_Professional_Certificate
4643bac74ffa58cfdfbd119ff68aef80ca87d8d3
[ "MIT" ]
null
null
null
Data Visualization with Python/Week2/Excercise2/DV0101EN-2-3-1-Pie-Charts-Box-Plots-Scatter-Plots-and-Bubble-Plots-py-v2.0.ipynb
pietromosca1994/IBM_Data_Science_Professional_Certificate
4643bac74ffa58cfdfbd119ff68aef80ca87d8d3
[ "MIT" ]
null
null
null
139.165162
77,568
0.85129
[ [ [ "<center>\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Pie Charts, Box Plots, Scatter Plots, and Bubble Plots\n\nEstimated time needed: **30** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n- Explore Matplotlib library further\n- Create pie charts, box plots, scatter plots and bubble charts\n", "_____no_output_____" ], [ "## Table of Contents\n\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n\n1. [Exploring Datasets with _p_andas](#0)<br>\n2. [Downloading and Prepping Data](#2)<br>\n3. [Visualizing Data using Matplotlib](#4) <br>\n4. [Pie Charts](#6) <br>\n5. [Box Plots](#8) <br>\n6. [Scatter Plots](#10) <br>\n7. [Bubble Plots](#12) <br> \n </div>\n <hr>\n", "_____no_output_____" ], [ "# Exploring Datasets with _pandas_ and Matplotlib<a id=\"0\"></a>\n\nToolkits: The course heavily relies on [_pandas_](http://pandas.pydata.org?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) and [**Numpy**](http://www.numpy.org?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) for data wrangling, analysis, and visualization. The primary plotting library we will explore in the course is [Matplotlib](http://matplotlib.org?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).\n\nDataset: Immigration to Canada from 1980 to 2013 - [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) from United Nation's website.\n\nThe dataset contains annual data on the flows of international migrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. 
In this lab, we will focus on the Canadian Immigration data.\n", "_____no_output_____" ], [ "# Downloading and Prepping Data <a id=\"2\"></a>\n", "_____no_output_____" ], [ "Import primary modules.\n", "_____no_output_____" ] ], [ [ "import numpy as np # useful for many scientific computing in Python\nimport pandas as pd # primary data structure library", "_____no_output_____" ] ], [ [ "Let's download and import our primary Canadian Immigration dataset using _pandas_ `read_excel()` method. Normally, before we can do that, we would need to download a module which _pandas_ requires to read in excel files. This module is **xlrd**. For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **xlrd** module:\n\n```\n!conda install -c anaconda xlrd --yes\n```\n", "_____no_output_____" ], [ "Download the dataset and read it into a _pandas_ dataframe.\n", "_____no_output_____" ] ], [ [ "df_can = pd.read_excel('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx',\n sheet_name='Canada by Citizenship',\n skiprows=range(20),\n skipfooter=2\n )\n\nprint('Data downloaded and read into a dataframe!')", "Data downloaded and read into a dataframe!\n" ] ], [ [ "Let's take a look at the first five items in our dataset.\n", "_____no_output_____" ] ], [ [ "df_can.head()", "_____no_output_____" ] ], [ [ "Let's find out how many entries there are in our dataset.\n", "_____no_output_____" ] ], [ [ "# print the dimensions of the dataframe\nprint(df_can.shape)", "(195, 43)\n" ] ], [ [ "Clean up data. We will make some modifications to the original dataset to make it easier to create our visualizations. Refer to _Introduction to Matplotlib and Line Plots_ and _Area Plots, Histograms, and Bar Plots_ for a detailed description of this preprocessing.\n", "_____no_output_____" ] ], [ [ "# clean up the dataset to remove unnecessary columns (eg. REG) \ndf_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)\n\n# let's rename the columns so that they make sense\ndf_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True)\n\n# for sake of consistency, let's also make all column labels of type string\ndf_can.columns = list(map(str, df_can.columns))\n\n# set the country name as index - useful for quickly looking up countries using .loc method\ndf_can.set_index('Country', inplace=True)\n\n# add total column\ndf_can['Total'] = df_can.sum(axis=1)\n\n# years that we will be using in this lesson - useful for plotting later on\nyears = list(map(str, range(1980, 2014)))\nprint('data dimensions:', df_can.shape)", "data dimensions: (195, 38)\n" ] ], [ [ "# Visualizing Data using Matplotlib<a id=\"4\"></a>\n", "_____no_output_____" ], [ "Import `Matplotlib`.\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nmpl.style.use('ggplot') # optional: for ggplot-like style\n\n# check for latest version of Matplotlib\nprint('Matplotlib version: ', mpl.__version__) # >= 2.0.0", "Matplotlib version: 3.3.3\n" ] ], [ [ "# Pie Charts <a id=\"6\"></a>\n\nA `pie chart` is a circualr graphic that displays numeric proportions by dividing a circle (or pie) into proportional slices. You are most likely already familiar with pie charts as it is widely used in business and media. 
We can create pie charts in Matplotlib by passing in the `kind=pie` keyword.\n\nLet's use a pie chart to explore the proportion (percentage) of new immigrants grouped by continents for the entire time period from 1980 to 2013. \n", "_____no_output_____" ], [ "Step 1: Gather data. \n\nWe will use _pandas_ `groupby` method to summarize the immigration data by `Continent`. The general process of `groupby` involves the following steps:\n\n1. **Split:** Splitting the data into groups based on some criteria.\n2. **Apply:** Applying a function to each group independently:\n .sum()\n .count()\n .mean() \n .std() \n .aggregate()\n .apply()\n .etc..\n3. **Combine:** Combining the results into a data structure.\n", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/Mod3Fig4SplitApplyCombine.png\" height=400 align=\"center\">\n", "_____no_output_____" ] ], [ [ "# group countries by continents and apply sum() function \ndf_continents = df_can.groupby('Continent', axis=0).sum()\n\n# note: the output of the groupby method is a `groupby' object. \n# we can not use it further until we apply a function (eg .sum())\nprint(type(df_can.groupby('Continent', axis=0)))\n\ndf_continents.head()", "pandas.core.groupby.generic.DataFrameGroupBy\n" ] ], [ [ "Step 2: Plot the data. We will pass in `kind = 'pie'` keyword, along with the following additional parameters:\n\n- `autopct` - is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be `fmt%pct`.\n- `startangle` - rotates the start of the pie chart by angle degrees counterclockwise from the x-axis.\n- `shadow` - Draws a shadow beneath the pie (to give a 3D feel).\n", "_____no_output_____" ] ], [ [ "# autopct create %, start angle represent starting point\ndf_continents['Total'].plot(kind='pie',\n figsize=(5, 6),\n autopct='%1.1f%%', # add in percentages\n startangle=90, # start angle 90° (Africa)\n shadow=True, # add shadow \n )\n\nplt.title('Immigration to Canada by Continent [1980 - 2013]')\nplt.axis('equal') # Sets the pie chart to look like a circle.\n\nplt.show()", "_____no_output_____" ] ], [ [ "The above visual is not very clear, the numbers and text overlap in some instances. 
Let's make a few modifications to improve the visuals:\n\n- Remove the text labels on the pie chart by passing in `legend` and add it as a seperate legend using `plt.legend()`.\n- Push out the percentages to sit just outside the pie chart by passing in `pctdistance` parameter.\n- Pass in a custom set of colors for continents by passing in `colors` parameter.\n- **Explode** the pie chart to emphasize the lowest three continents (Africa, North America, and Latin America and Carribbean) by pasing in `explode` parameter.\n", "_____no_output_____" ] ], [ [ "colors_list = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'lightgreen', 'pink']\nexplode_list = [0.1, 0, 0, 0, 0.1, 0.1] # ratio for each continent with which to offset each wedge.\n\ndf_continents['Total'].plot(kind='pie',\n figsize=(15, 6),\n autopct='%1.1f%%', \n startangle=90, \n shadow=True, \n labels=None, # turn off labels on pie chart\n pctdistance=1.12, # the ratio between the center of each pie slice and the start of the text generated by autopct \n colors=colors_list, # add custom colors\n explode=explode_list # 'explode' lowest 3 continents\n )\n\n# scale the title up by 12% to match pctdistance\nplt.title('Immigration to Canada by Continent [1980 - 2013]', y=1.12) \n\nplt.axis('equal') \n\n# add legend\nplt.legend(labels=df_continents.index, loc='upper left') \n\nplt.show()", "_____no_output_____" ] ], [ [ "**Question:** Using a pie chart, explore the proportion (percentage) of new immigrants grouped by continents in the year 2013.\n\n**Note**: You might need to play with the explore values in order to fix any overlapping slice values.\n", "_____no_output_____" ] ], [ [ "### type your answer here\n\nexplode_list = [0.0, 0, 0, 0.1, 0.1, 0.2] # ratio for each continent with which to offset each wedge.\ndf_continents['2013'].plot(kind='pie',\n figsize=(15, 6),\n autopct='%1.1f%%', \n startangle=90, \n shadow=True, \n labels=None, # turn off labels on pie chart\n pctdistance=1.12, # the ratio between the pie center and start of text label\n explode=explode_list # 'explode' lowest 3 continents\n )\n\n# scale the title up by 12% to match pctdistance\nplt.title('Immigration to Canada by Continent in 2013', y=1.12) \nplt.axis('equal') \n\n# add legend\nplt.legend(labels=df_continents.index, loc='upper left') \n\n# show plot\nplt.show()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is:\n explode_list = [0.0, 0, 0, 0.1, 0.1, 0.2] # ratio for each continent with which to offset each wedge.\n\n df_continents['2013'].plot(kind='pie',\n figsize=(15, 6),\n autopct='%1.1f%%', \n startangle=90, \n shadow=True, \n labels=None, # turn off labels on pie chart\n pctdistance=1.12, # the ratio between the pie center and start of text label\n explode=explode_list # 'explode' lowest 3 continents\n )\n\n # scale the title up by 12% to match pctdistance\n plt.title('Immigration to Canada by Continent in 2013', y=1.12) \n plt.axis('equal') \n\n # add legend\n plt.legend(labels=df_continents.index, loc='upper left') \n\n # show plot\n plt.show()\n\n```\n\n</details>\n", "_____no_output_____" ], [ "# Box Plots <a id=\"8\"></a>\n\nA `box plot` is a way of statistically representing the _distribution_ of the data through five main dimensions: \n\n- **Minimun:** Smallest number in the dataset excluding the outliers.\n- **First quartile:** Middle number between the `minimum` and the `median`.\n- **Second quartile (Median):** Middle number of the (sorted) 
dataset.\n- **Third quartile:** Middle number between `median` and `maximum`.\n- **Maximum:** Highest number in the dataset excluding the outliers.\n", "_____no_output_____" ], [ "<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/boxplot_complete.png\" width=440, align=\"center\">\n", "_____no_output_____" ], [ "To make a `box plot`, we can use `kind=box` in `plot` method invoked on a _pandas_ series or dataframe.\n\nLet's plot the box plot for the Japanese immigrants between 1980 - 2013.\n", "_____no_output_____" ], [ "Step 1: Get the dataset. Even though we are extracting the data for just one country, we will obtain it as a dataframe. This will help us with calling the `dataframe.describe()` method to view the percentiles.\n", "_____no_output_____" ] ], [ [ "# to get a dataframe, place extra square brackets around 'Japan'.\ndf_japan = df_can.loc[['Japan'], years].transpose()\ndf_japan.head()", "_____no_output_____" ] ], [ [ "Step 2: Plot by passing in `kind='box'`.\n", "_____no_output_____" ] ], [ [ "df_japan.plot(kind='box', figsize=(8, 6))\n\nplt.title('Box plot of Japanese Immigrants from 1980 - 2013')\nplt.ylabel('Number of Immigrants')\n\nplt.show()", "_____no_output_____" ] ], [ [ "We can immediately make a few key observations from the plot above:\n\n1. The minimum number of immigrants is around 200 (min), maximum number is around 1300 (max), and median number of immigrants is around 900 (median).\n2. 25% of the years for period 1980 - 2013 had an annual immigrant count of ~500 or fewer (First quartile).\n3. 75% of the years for period 1980 - 2013 had an annual immigrant count of ~1100 or fewer (Third quartile).\n\nWe can view the actual numbers by calling the `describe()` method on the dataframe.\n", "_____no_output_____" ] ], [ [ "df_japan.describe()", "_____no_output_____" ] ], [ [ "One of the key benefits of box plots is comparing the distribution of multiple datasets. In one of the previous labs, we observed that China and India had very similar immigration trends. 
Let's analyize these two countries further using box plots.\n\n**Question:** Compare the distribution of the number of new immigrants from India and China for the period 1980 - 2013.\n", "_____no_output_____" ], [ "Step 1: Get the dataset for China and India and call the dataframe **df_CI**.\n", "_____no_output_____" ] ], [ [ "### type your answer here\ndf_CI= df_can.loc[['China', 'India'], years].transpose()\ndf_CI.head()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is:\n df_CI= df_can.loc[['China', 'India'], years].transpose()\n df_CI.head()\n```\n\n</details>\n", "_____no_output_____" ], [ "Let's view the percentages associated with both countries using the `describe()` method.\n", "_____no_output_____" ] ], [ [ "### type your answer here\ndf_CI.describe()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is:\n df_CI.describe()\n```\n\n</details>\n", "_____no_output_____" ], [ "Step 2: Plot data.\n", "_____no_output_____" ] ], [ [ "### type your answer here\ndf_CI.plot(kind='box', figsize=(10, 7))\n\nplt.title('Box plot of China and India Immigrants from 1980 - 2013')\nplt.ylabel('Number of Immigrants')\n\nplt.show()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is:\n df_CI.plot(kind='box', figsize=(10, 7))\n\n plt.title('Box plots of Immigrants from China and India (1980 - 2013)')\n plt.ylabel('Number of Immigrants')\n\n plt.show()\n\n```\n\n</details>\n", "_____no_output_____" ], [ "We can observe that, while both countries have around the same median immigrant population (~20,000), China's immigrant population range is more spread out than India's. The maximum population from India for any year (36,210) is around 15% lower than the maximum population from China (42,584).\n", "_____no_output_____" ], [ "If you prefer to create horizontal box plots, you can pass the `vert` parameter in the **plot** function and assign it to _False_. You can also specify a different color in case you are not a big fan of the default red color.\n", "_____no_output_____" ] ], [ [ "# horizontal box plots\ndf_CI.plot(kind='box', figsize=(10, 7), color='blue', vert=False)\n\nplt.title('Box plots of Immigrants from China and India (1980 - 2013)')\nplt.xlabel('Number of Immigrants')\n\nplt.show()", "_____no_output_____" ] ], [ [ "**Subplots**\n\nOften times we might want to plot multiple plots within the same figure. For example, we might want to perform a side by side comparison of the box plot with the line plot of China and India's immigration.\n\nTo visualize multiple plots together, we can create a **`figure`** (overall canvas) and divide it into **`subplots`**, each containing a plot. With **subplots**, we usually work with the **artist layer** instead of the **scripting layer**. \n\nTypical syntax is : <br>\n\n```python\n fig = plt.figure() # create figure\n ax = fig.add_subplot(nrows, ncols, plot_number) # create subplots\n```\n\nWhere\n\n- `nrows` and `ncols` are used to notionally split the figure into (`nrows` * `ncols`) sub-axes, \n- `plot_number` is used to identify the particular subplot that this function is to create within the notional grid. 
`plot_number` starts at 1, increments across rows first and has a maximum of `nrows` * `ncols` as shown below.\n\n<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/Mod3Fig5Subplots_V2.png\" width=500 align=\"center\">\n", "_____no_output_____" ], [ "We can then specify which subplot to place each plot by passing in the `ax` paramemter in `plot()` method as follows:\n", "_____no_output_____" ] ], [ [ "fig = plt.figure() # create figure\n\nax0 = fig.add_subplot(1, 2, 1) # add subplot 1 (1 row, 2 columns, first plot)\nax1 = fig.add_subplot(1, 2, 2) # add subplot 2 (1 row, 2 columns, second plot). See tip below**\n\n# Subplot 1: Box plot\ndf_CI.plot(kind='box', color='blue', vert=False, figsize=(20, 6), ax=ax0) # add to subplot 1\nax0.set_title('Box Plots of Immigrants from China and India (1980 - 2013)')\nax0.set_xlabel('Number of Immigrants')\nax0.set_ylabel('Countries')\n\n# Subplot 2: Line plot\ndf_CI.plot(kind='line', figsize=(20, 6), ax=ax1) # add to subplot 2\nax1.set_title ('Line Plots of Immigrants from China and India (1980 - 2013)')\nax1.set_ylabel('Number of Immigrants')\nax1.set_xlabel('Years')\n\nplt.show()", "_____no_output_____" ] ], [ [ "** * Tip regarding subplot convention **\n\nIn the case when `nrows`, `ncols`, and `plot_number` are all less than 10, a convenience exists such that the a 3 digit number can be given instead, where the hundreds represent `nrows`, the tens represent `ncols` and the units represent `plot_number`. For instance,\n\n```python\n subplot(211) == subplot(2, 1, 1) \n```\n\nproduces a subaxes in a figure which represents the top plot (i.e. the first) in a 2 rows by 1 column notional grid (no grid actually exists, but conceptually this is how the returned subplot has been positioned).\n", "_____no_output_____" ], [ "Let's try something a little more advanced. \n\nPreviously we identified the top 15 countries based on total immigration from 1980 - 2013.\n\n**Question:** Create a box plot to visualize the distribution of the top 15 countries (based on total immigration) grouped by the _decades_ `1980s`, `1990s`, and `2000s`.\n", "_____no_output_____" ], [ "Step 1: Get the dataset. Get the top 15 countries based on Total immigrant population. Name the dataframe **df_top15**.\n", "_____no_output_____" ] ], [ [ "### type your answer here\ndf_top15=df_can.sort_values(by='Total', ascending=False, axis=0).head(15)\ndf_top15", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is:\n df_top15 = df_can.sort_values(['Total'], ascending=False, axis=0).head(15)\n df_top15\n\n```\n\n</details>\n", "_____no_output_____" ], [ "Step 2: Create a new dataframe which contains the aggregate for each decade. One way to do that:\n\n1. Create a list of all years in decades 80's, 90's, and 00's.\n2. Slice the original dataframe df_can to create a series for each decade and sum across all years for each country.\n3. Merge the three series into a new data frame. 
Call your dataframe **new_df**.\n", "_____no_output_____" ] ], [ [ "### type your answer here\n# create a list of all years in decades 80's, 90's, and 00's\nyears_80s = list(map(str, range(1980, 1990))) \nyears_90s = list(map(str, range(1990, 2000))) \nyears_00s = list(map(str, range(2000, 2010))) \n\n# slice the original dataframe df_can to create a series for each decade\ndf_80s = df_top15.loc[:, years_80s].sum(axis=1) \ndf_90s = df_top15.loc[:, years_90s].sum(axis=1) \ndf_00s = df_top15.loc[:, years_00s].sum(axis=1)\n\n# merge the three series into a new data frame\nnew_df = pd.DataFrame({'1980s': df_80s, '1990s': df_90s, '2000s':df_00s}) \n\n# display dataframe\nnew_df.head()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is:\n \n # create a list of all years in decades 80's, 90's, and 00's\n years_80s = list(map(str, range(1980, 1990))) \n years_90s = list(map(str, range(1990, 2000))) \n years_00s = list(map(str, range(2000, 2010))) \n\n # slice the original dataframe df_can to create a series for each decade\n df_80s = df_top15.loc[:, years_80s].sum(axis=1) \n df_90s = df_top15.loc[:, years_90s].sum(axis=1) \n df_00s = df_top15.loc[:, years_00s].sum(axis=1)\n\n # merge the three series into a new data frame\n new_df = pd.DataFrame({'1980s': df_80s, '1990s': df_90s, '2000s':df_00s}) \n\n # display dataframe\n new_df.head()\n\n\n```\n\n</details>\n", "_____no_output_____" ], [ "Let's learn more about the statistics associated with the dataframe using the `describe()` method.\n", "_____no_output_____" ] ], [ [ "### type your answer here\nnew_df.describe()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n new_df.describe()\n```\n\n</details>\n", "_____no_output_____" ], [ "Step 3: Plot the box plots.\n", "_____no_output_____" ] ], [ [ "### type your answer here\nnew_df.plot(kind='box', figsize=(10, 6), color='blue', vert=False)\n\nplt.title('Immigration from top 15 countries for decades 80s, 90s and 2000s')\nplt.xlabel('Number of Immigrants')\n\nplt.show()\n\n", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n new_df.plot(kind='box', figsize=(10, 6))\n\n plt.title('Immigration from top 15 countries for decades 80s, 90s and 2000s')\n\n plt.show()\n\n```\n\n</details>\n", "_____no_output_____" ], [ "Note how the box plot differs from the summary table created. The box plot scans the data and identifies the outliers. 
In order to be an outlier, the data value must be:<br>\n\n- larger than Q3 by at least 1.5 times the interquartile range (IQR), or,\n- smaller than Q1 by at least 1.5 times the IQR.\n\nLet's look at decade 2000s as an example: <br>\n\n- Q1 (25%) = 36,101.5 <br>\n- Q3 (75%) = 105,505.5 <br>\n- IQR = Q3 - Q1 = 69,404 <br>\n\nUsing the definition of outlier, any value that is greater than Q3 by 1.5 times IQR will be flagged as outlier.\n\nOutlier > 105,505.5 + (1.5 * 69,404) <br>\nOutlier > 209,611.5\n", "_____no_output_____" ] ], [ [ "# let's check how many entries fall above the outlier threshold \nnew_df=new_df.reset_index()\nnew_df[new_df['2000s']> 209611.5]", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n new_df=new_df.reset_index()\n new_df[new_df['2000s']> 209611.5]\n\n```\n\n</details>\n", "_____no_output_____" ], [ "<!-- The correct answer is:\nnew_df[new_df['2000s']> 209611.5]\n-->\n", "_____no_output_____" ], [ "China and India are both considered as outliers since their population for the decade exceeds 209,611.5. \n\nThe box plot is an advanced visualizaiton tool, and there are many options and customizations that exceed the scope of this lab. Please refer to [Matplotlib documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) on box plots for more information.\n", "_____no_output_____" ], [ "# Scatter Plots <a id=\"10\"></a>\n\nA `scatter plot` (2D) is a useful method of comparing variables against each other. `Scatter` plots look similar to `line plots` in that they both map independent and dependent variables on a 2D graph. While the datapoints are connected together by a line in a line plot, they are not connected in a scatter plot. The data in a scatter plot is considered to express a trend. With further analysis using tools like regression, we can mathematically calculate this relationship and use it to predict trends outside the dataset.\n\nLet's start by exploring the following:\n\nUsing a `scatter plot`, let's visualize the trend of total immigrantion to Canada (all countries combined) for the years 1980 - 2013.\n", "_____no_output_____" ], [ "Step 1: Get the dataset. Since we are expecting to use the relationship betewen `years` and `total population`, we will convert `years` to `int` type.\n", "_____no_output_____" ] ], [ [ "# we can use the sum() method to get the total population per year\ndf_tot = pd.DataFrame(df_can[years].sum(axis=0))\n\n# change the years to type int (useful for regression later on)\ndf_tot.index = map(int, df_tot.index)\n\n# reset the index to put in back in as a column in the df_tot dataframe\ndf_tot.reset_index(inplace = True)\n\n# rename columns\ndf_tot.columns = ['year', 'total']\n\n# view the final dataframe\ndf_tot.head()", "_____no_output_____" ] ], [ [ "Step 2: Plot the data. In `Matplotlib`, we can create a `scatter` plot set by passing in `kind='scatter'` as plot argument. 
We will also need to pass in `x` and `y` keywords to specify the columns that go on the x- and the y-axis.\n", "_____no_output_____" ] ], [ [ "df_tot.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')\n\nplt.title('Total Immigration to Canada from 1980 - 2013')\nplt.xlabel('Year')\nplt.ylabel('Number of Immigrants')\n\nplt.show()", "_____no_output_____" ] ], [ [ "Notice how the scatter plot does not connect the datapoints together. We can clearly observe an upward trend in the data: as the years go by, the total number of immigrants increases. We can mathematically analyze this upward trend using a regression line (line of best fit). \n", "_____no_output_____" ], [ "So let's try to plot a linear line of best fit, and use it to predict the number of immigrants in 2015.\n\nStep 1: Get the equation of line of best fit. We will use **Numpy**'s `polyfit()` method by passing in the following:\n\n- `x`: x-coordinates of the data. \n- `y`: y-coordinates of the data. \n- `deg`: Degree of fitting polynomial. 1 = linear, 2 = quadratic, and so on.\n", "_____no_output_____" ] ], [ [ "x = df_tot['year'] # year on x-axis\ny = df_tot['total'] # total on y-axis\nfit = np.polyfit(x, y, deg=1)\n\nfit", "_____no_output_____" ] ], [ [ "The output is an array with the polynomial coefficients, highest powers first. Since we are plotting a linear regression `y= a*x + b`, our output has 2 elements `[5.56709228e+03, -1.09261952e+07]` with the the slope in position 0 and intercept in position 1. \n\nStep 2: Plot the regression line on the `scatter plot`.\n", "_____no_output_____" ] ], [ [ "df_tot.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')\n\nplt.title('Total Immigration to Canada from 1980 - 2013')\nplt.xlabel('Year')\nplt.ylabel('Number of Immigrants')\n\n# plot line of best fit\nplt.plot(x, fit[0] * x + fit[1], color='red') # recall that x is the Years\nplt.annotate('y={0:.0f} x + {1:.0f}'.format(fit[0], fit[1]), xy=(2000, 150000))\n\nplt.show()\n\n# print out the line of best fit\n'No. Immigrants = {0:.0f} * Year + {1:.0f}'.format(fit[0], fit[1]) ", "_____no_output_____" ] ], [ [ "Using the equation of line of best fit, we can estimate the number of immigrants in 2015:\n\n```python\nNo. Immigrants = 5567 * Year - 10926195\nNo. Immigrants = 5567 * 2015 - 10926195\nNo. Immigrants = 291,310\n```\n\nWhen compared to the actuals from Citizenship and Immigration Canada's (CIC) [2016 Annual Report](http://www.cic.gc.ca/english/resources/publications/annual-report-2016/index.asp?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ), we see that Canada accepted 271,845 immigrants in 2015. Our estimated value of 291,310 is within 7% of the actual number, which is pretty good considering our original data came from United Nations (and might differ slightly from CIC data).\n\nAs a side note, we can observe that immigration took a dip around 1993 - 1997. Further analysis into the topic revealed that in 1993 Canada introcuded Bill C-86 which introduced revisions to the refugee determination system, mostly restrictive. 
Further amendments to the Immigration Regulations cancelled the sponsorship required for \"assisted relatives\" and reduced the points awarded to them, making it more difficult for family members (other than nuclear family) to immigrate to Canada. These restrictive measures had a direct impact on the immigration numbers for the next several years.\n", "_____no_output_____" ], [ "**Question**: Create a scatter plot of the total immigration from Denmark, Norway, and Sweden to Canada from 1980 to 2013?\n", "_____no_output_____" ], [ "Step 1: Get the data:\n\n1. Create a dataframe the consists of the numbers associated with Denmark, Norway, and Sweden only. Name it **df_countries**.\n2. Sum the immigration numbers across all three countries for each year and turn the result into a dataframe. Name this new dataframe **df_total**.\n3. Reset the index in place.\n4. Rename the columns to **year** and **total**.\n5. Display the resulting dataframe.\n", "_____no_output_____" ] ], [ [ "### type your answer here\n\n# create df_countries dataframe\ndf_countries = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose()\n\n# create df_total by summing across three countries for each year\ndf_total = pd.DataFrame(df_countries.sum(axis=1))\n\n# reset index in place\ndf_total.reset_index(inplace=True)\n\n# rename columns\ndf_total.columns = ['year', 'total']\n\n# change column year from string to int to create scatter plot\ndf_total['year'] = df_total['year'].astype(int)\n\n# show resulting dataframe\ndf_total.head()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n \n # create df_countries dataframe\n df_countries = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose()\n\n # create df_total by summing across three countries for each year\n df_total = pd.DataFrame(df_countries.sum(axis=1))\n\n # reset index in place\n df_total.reset_index(inplace=True)\n\n # rename columns\n df_total.columns = ['year', 'total']\n\n # change column year from string to int to create scatter plot\n df_total['year'] = df_total['year'].astype(int)\n\n # show resulting dataframe\n df_total.head()\n\n\n```\n\n</details>\n", "_____no_output_____" ], [ "Step 2: Generate the scatter plot by plotting the total versus year in **df_total**.\n", "_____no_output_____" ] ], [ [ "### type your answer here\n\n# generate scatter plot\ndf_total.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')\n\n# add title and label to axes\nplt.title('Immigration from Denmark, Norway, and Sweden to Canada from 1980 - 2013')\nplt.xlabel('Year')\nplt.ylabel('Number of Immigrants')\n\n# show plot\nplt.show()", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n \n # generate scatter plot\n df_total.plot(kind='scatter', x='year', y='total', figsize=(10, 6), color='darkblue')\n\n # add title and label to axes\n plt.title('Immigration from Denmark, Norway, and Sweden to Canada from 1980 - 2013')\n plt.xlabel('Year')\n plt.ylabel('Number of Immigrants')\n\n # show plot\n plt.show()\n\n\n```\n\n</details>\n", "_____no_output_____" ], [ "# Bubble Plots <a id=\"12\"></a>\n\nA `bubble plot` is a variation of the `scatter plot` that displays three dimensions of data (x, y, z). The datapoints are replaced with bubbles, and the size of the bubble is determined by the third variable 'z', also known as the weight. 
In `maplotlib`, we can pass in an array or scalar to the keyword `s` to `plot()`, that contains the weight of each point.\n\n**Let's start by analyzing the effect of Argentina's great depression**.\n\nArgentina suffered a great depression from 1998 - 2002, which caused widespread unemployment, riots, the fall of the government, and a default on the country's foreign debt. In terms of income, over 50% of Argentines were poor, and seven out of ten Argentine children were poor at the depth of the crisis in 2002. \n\nLet's analyze the effect of this crisis, and compare Argentina's immigration to that of it's neighbour Brazil. Let's do that using a `bubble plot` of immigration from Brazil and Argentina for the years 1980 - 2013. We will set the weights for the bubble as the _normalized_ value of the population for each year.\n", "_____no_output_____" ], [ "Step 1: Get the data for Brazil and Argentina. Like in the previous example, we will convert the `Years` to type int and bring it in the dataframe.\n", "_____no_output_____" ] ], [ [ "df_can_t = df_can[years].transpose() # transposed dataframe\n\n# cast the Years (the index) to type int\ndf_can_t.index = map(int, df_can_t.index)\n\n# let's label the index. This will automatically be the column name when we reset the index\ndf_can_t.index.name = 'Year'\n\n# reset index to bring the Year in as a column\ndf_can_t.reset_index(inplace=True)\n\n# view the changes\ndf_can_t.head()", "_____no_output_____" ] ], [ [ "Step 2: Create the normalized weights. \n\nThere are several methods of normalizations in statistics, each with its own use. In this case, we will use [feature scaling](https://en.wikipedia.org/wiki/Feature_scaling?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) to bring all values into the range [0,1]. The general formula is:\n\n<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DV0101EN/labs/Images/Mod3Fig3FeatureScaling.png\" align=\"center\">\n\nwhere _`X`_ is an original value, _`X'`_ is the normalized value. The formula sets the max value in the dataset to 1, and sets the min value to 0. The rest of the datapoints are scaled to a value between 0-1 accordingly.\n", "_____no_output_____" ] ], [ [ "# normalize Brazil data\nnorm_brazil = (df_can_t['Brazil'] - df_can_t['Brazil'].min()) / (df_can_t['Brazil'].max() - df_can_t['Brazil'].min())\n\n# normalize Argentina data\nnorm_argentina = (df_can_t['Argentina'] - df_can_t['Argentina'].min()) / (df_can_t['Argentina'].max() - df_can_t['Argentina'].min())", "_____no_output_____" ] ], [ [ "Step 3: Plot the data. \n\n- To plot two different scatter plots in one plot, we can include the axes one plot into the other by passing it via the `ax` parameter. \n- We will also pass in the weights using the `s` parameter. Given that the normalized weights are between 0-1, they won't be visible on the plot. 
Therefore we will:\n - multiply weights by 2000 to scale it up on the graph, and,\n - add 10 to compensate for the min value (which has a 0 weight and therefore scale with x2000).\n", "_____no_output_____" ] ], [ [ "# Brazil\nax0 = df_can_t.plot(kind='scatter',\n x='Year',\n y='Brazil',\n figsize=(14, 8),\n alpha=0.5, # transparency\n color='green',\n s=norm_brazil * 2000 + 10, # pass in weights \n xlim=(1975, 2015)\n )\n\n# Argentina\nax1 = df_can_t.plot(kind='scatter',\n x='Year',\n y='Argentina',\n alpha=0.5,\n color=\"blue\",\n s=norm_argentina * 2000 + 10,\n ax = ax0\n )\n\nax0.set_ylabel('Number of Immigrants')\nax0.set_title('Immigration from Brazil and Argentina from 1980 - 2013')\nax0.legend(['Brazil', 'Argentina'], loc='upper left', fontsize='x-large')", "_____no_output_____" ] ], [ [ "The size of the bubble corresponds to the magnitude of immigrating population for that year, compared to the 1980 - 2013 data. The larger the bubble, the more immigrants in that year.\n\nFrom the plot above, we can see a corresponding increase in immigration from Argentina during the 1998 - 2002 great depression. We can also observe a similar spike around 1985 to 1993. In fact, Argentina had suffered a great depression from 1974 - 1990, just before the onset of 1998 - 2002 great depression. \n\nOn a similar note, Brazil suffered the _Samba Effect_ where the Brazilian real (currency) dropped nearly 35% in 1999. There was a fear of a South American financial crisis as many South American countries were heavily dependent on industrial exports from Brazil. The Brazilian government subsequently adopted an austerity program, and the economy slowly recovered over the years, culminating in a surge in 2010. The immigration data reflect these events.\n", "_____no_output_____" ], [ "**Question**: Previously in this lab, we created box plots to compare immigration from China and India to Canada. Create bubble plots of immigration from China and India to visualize any differences with time from 1980 to 2013. 
You can use **df_can_t** that we defined and used in the previous example.\n", "_____no_output_____" ], [ "Step 1: Normalize the data pertaining to China and India.\n", "_____no_output_____" ] ], [ [ "### type your answer here\n\n\n\n", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n \n # normalize China data\n norm_china = (df_can_t['China'] - df_can_t['China'].min()) / (df_can_t['China'].max() - df_can_t['China'].min())\n # normalize India data\n norm_india = (df_can_t['India'] - df_can_t['India'].min()) / (df_can_t['India'].max() - df_can_t['India'].min())\n\n\n```\n\n</details>\n", "_____no_output_____" ], [ "Step 2: Generate the bubble plots.\n", "_____no_output_____" ] ], [ [ "### type your answer here\n\n\n\n", "_____no_output_____" ] ], [ [ "<details><summary>Click here for a sample python solution</summary>\n\n```python\n #The correct answer is: \n \n # China\n ax0 = df_can_t.plot(kind='scatter',\n x='Year',\n y='China',\n figsize=(14, 8),\n alpha=0.5, # transparency\n color='green',\n s=norm_china * 2000 + 10, # pass in weights \n xlim=(1975, 2015)\n )\n\n # India\n ax1 = df_can_t.plot(kind='scatter',\n x='Year',\n y='India',\n alpha=0.5,\n color=\"blue\",\n s=norm_india * 2000 + 10,\n ax = ax0\n )\n\n ax0.set_ylabel('Number of Immigrants')\n ax0.set_title('Immigration from China and India from 1980 - 2013')\n ax0.legend(['China', 'India'], loc='upper left', fontsize='x-large')\n\n\n```\n\n</details>\n", "_____no_output_____" ], [ "### Thank you for completing this lab!\n\n## Author\n\n<a href=\"https://www.linkedin.com/in/aklson/\" target=\"_blank\">Alex Aklson</a>\n\n### Other Contributors\n\n[Jay Rajasekharan](https://www.linkedin.com/in/jayrajasekharan?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)\n[Ehsan M. 
Kermani](https://www.linkedin.com/in/ehsanmkermani?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)\n[Slobodan Markovic](https://www.linkedin.com/in/slobodan-markovic?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork-20297740&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ------------ | ---------------------------------- |\n| 2021-01-05 | 2.4 | LakshmiHolla | Changed markdown for outliers |\n| 2020-11-12 | 2.3 | LakshmiHolla | Added example code for outliers |\n| 2020-11-03 | 2.2 | LakshmiHolla | Changed URL of excel file |\n| 2020-09-29 | 2.1 | LakshmiHolla | Made fix to a boxplot label |\n| 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
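The box-plot discussion in the notebook record above walks through the 1.5 x IQR outlier rule (Q1, Q3, and the resulting thresholds). Below is a minimal pandas sketch of that rule on made-up numbers; the values are not the notebook's immigration totals.

```python
# Illustrative sketch (not part of the notebook above): the 1.5 * IQR
# outlier rule, applied to a small made-up sample.
import pandas as pd

values = pd.Series([36101, 42000, 58000, 77000, 105505, 210000, 655000])

q1 = values.quantile(0.25)      # 25th percentile (Q1)
q3 = values.quantile(0.75)      # 75th percentile (Q3)
iqr = q3 - q1                   # interquartile range
upper = q3 + 1.5 * iqr          # upper outlier threshold
lower = q1 - 1.5 * iqr          # lower outlier threshold

outliers = values[(values > upper) | (values < lower)]
print(f"Q1={q1:.1f}, Q3={q3:.1f}, IQR={iqr:.1f}")
print(f"outlier thresholds: below {lower:.1f} or above {upper:.1f}")
print(outliers)
```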
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a3efb4a3c76b38967687d5c31e553004485c983
187,071
ipynb
Jupyter Notebook
docs/examples/Time-energy-fit.ipynb
cescalara/threeML
4ac90d91159f5469d3ea90456901f46ba4a2d533
[ "BSD-3-Clause" ]
1
2021-01-26T14:21:26.000Z
2021-01-26T14:21:26.000Z
docs/examples/Time-energy-fit.ipynb
kabartay/threeML
83e33b7cc05fb0d2588d802439c44e3ceda0cb2d
[ "BSD-3-Clause" ]
null
null
null
docs/examples/Time-energy-fit.ipynb
kabartay/threeML
83e33b7cc05fb0d2588d802439c44e3ceda0cb2d
[ "BSD-3-Clause" ]
null
null
null
195.476489
54,940
0.891929
[ [ [ "# Time-energy fit\n\n3ML allows the possibility to model a time-varying source by explicitly fitting the time-dependent part of the model. Let's see this with an example.\n\nFirst we import what we need:", "_____no_output_____" ] ], [ [ "from threeML import *\n\nimport matplotlib.pyplot as plt\n\nfrom jupyterthemes import jtplot\n\n%matplotlib inline\njtplot.style(context=\"talk\", fscale=1, ticks=True, grid=False)\nplt.style.use(\"mike\")\n\n", "_____no_output_____" ] ], [ [ "## Generating the datasets\n\nThen we generate a simulated dataset for a source with a cutoff powerlaw spectrum with a constant photon index and cutoff but with a normalization that changes with time following a powerlaw:", "_____no_output_____" ] ], [ [ "def generate_one(K, ax):\n\n # Let's generate some data with y = Powerlaw(x)\n\n gen_function = Cutoff_powerlaw()\n gen_function.K = K\n\n # Generate a dataset using the power law, and a\n # constant 30% error\n\n x = np.logspace(0, 2, 50)\n\n xyl_generator = XYLike.from_function(\n \"sim_data\", function=gen_function, x=x, yerr=0.3 * gen_function(x)\n )\n\n y = xyl_generator.y\n y_err = xyl_generator.yerr\n\n ax.loglog(x, gen_function(x))\n\n return x, y, y_err", "_____no_output_____" ] ], [ [ "These are the times at which the simulated spectra have been observed", "_____no_output_____" ] ], [ [ "time_tags = np.array([1.0, 2.0, 5.0, 10.0])", "_____no_output_____" ] ], [ [ "This describes the time-varying normalization. If everything works as it should, we should recover from the fit a normalization of 0.23 and a index of -1.2 for the time law.", "_____no_output_____" ] ], [ [ "normalizations = 0.23 * time_tags ** (-3.5)", "_____no_output_____" ] ], [ [ "Now that we have a simple function to create the datasets, let's build them.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\n\ndatasets = [generate_one(k, ax) for k in normalizations]\n\nax.set_xlabel(\"Energy\")\nax.set_ylabel(\"Flux\")", "Using Gaussian statistic (equivalent to chi^2) with the provided errors.\nUsing Gaussian statistic (equivalent to chi^2) with the provided errors.\nUsing Gaussian statistic (equivalent to chi^2) with the provided errors.\nUsing Gaussian statistic (equivalent to chi^2) with the provided errors.\n" ] ], [ [ "## Setup the model\n\nNow set up the fit and fit it. First we need to tell 3ML that we are going to fit using an independent variable (time in this case). We init it to 1.0 and set the unit to seconds.", "_____no_output_____" ] ], [ [ "time = IndependentVariable(\"time\", 1.0, u.s)", "_____no_output_____" ] ], [ [ "Then we load the data that we have generated, tagging them with their time of observation.", "_____no_output_____" ] ], [ [ "\nplugins = []\n\nfor i, dataset in enumerate(datasets):\n \n x, y, y_err = dataset\n \n xyl = XYLike(\"data%i\" % i, x, y, y_err)\n \n # This is the important part: we need to tag the instance of the\n # plugin so that 3ML will know that this instance corresponds to the\n # given tag (a time coordinate in this case). If instead of giving\n # one time coordinate we give two time coordinates, then 3ML will\n # take the average of the model between the two time coordinates\n # (computed as the integral of the model between t1 and t2 divided \n # by t2-t1)\n \n xyl.tag = (time, time_tags[i])\n \n # To access the tag we have just set we can use:\n \n independent_variable, start, end = xyl.tag\n \n # NOTE: xyl.tag will return 3 things: the independent variable, the start and the\n # end. 
If like in this case you do not specify an end when assigning the tag, end\n # will be None\n \n plugins.append(xyl)", "Using Gaussian statistic (equivalent to chi^2) with the provided errors.\nUsing Gaussian statistic (equivalent to chi^2) with the provided errors.\nUsing Gaussian statistic (equivalent to chi^2) with the provided errors.\nUsing Gaussian statistic (equivalent to chi^2) with the provided errors.\n" ] ], [ [ "Generate the datalist as usual\n\n", "_____no_output_____" ] ], [ [ "data = DataList(*plugins)", "_____no_output_____" ] ], [ [ "Now let's generate the spectral model, in this case a point source with a cutoff powerlaw spectrum.", "_____no_output_____" ] ], [ [ "spectrum = Cutoff_powerlaw()\n\nsrc = PointSource(\"test\", ra=0.0, dec=0.0, spectral_shape=spectrum)\n\nmodel = Model(src)", "_____no_output_____" ] ], [ [ "Now we need to tell 3ML that we are going to use the time coordinate to specify a time dependence for some of the parameters of the model.\n\n", "_____no_output_____" ] ], [ [ "model.add_independent_variable(time)", "_____no_output_____" ] ], [ [ "Now let's specify the time-dependence (a powerlaw) for the normalization of the powerlaw spectrum.", "_____no_output_____" ] ], [ [ "time_po = Powerlaw()\ntime_po.K.bounds = (0.01, 1000)", "\nWARNING UserWarning: We have set the min_value of Powerlaw.K to 1e-99 because there was a postive transform\n\n" ] ], [ [ "Link the normalization of the cutoff powerlaw spectrum with time through the time law we have just generated.", "_____no_output_____" ] ], [ [ "model.link(spectrum.K, time, time_po)\nmodel", "_____no_output_____" ] ], [ [ "## Performing the fit", "_____no_output_____" ] ], [ [ "jl = JointLikelihood(model, data)\n\nbest_fit_parameters, likelihood_values = jl.fit()", "Best fit values:\n\n" ], [ "for p in plugins:\n\n p.plot(x_scale='log', y_scale='log');", "_____no_output_____" ] ] ]
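The notebook above relies on 3ML's `IndependentVariable` and parameter linking to fit a normalization that varies with time. As a rough, framework-free illustration of the same joint time-energy fit idea, the sketch below simulates a few epochs with numpy and fits a shared cutoff power law whose normalization follows K(t) = A * t**alpha using `scipy.optimize.curve_fit`. The simulated values and starting guesses are arbitrary, and nothing here reproduces the 3ML API.

```python
# Framework-free sketch of a joint time-energy fit (no 3ML/astromodels assumed).
import numpy as np
from scipy.optimize import curve_fit

rng = np.random.default_rng(1)
energies = np.logspace(0, 2, 50)          # energy grid used at every epoch
times = np.array([1.0, 2.0, 5.0, 10.0])   # observation times

def cutoff_powerlaw(E, K, index, xc):
    return K * E**index * np.exp(-E / xc)

# simulate: constant spectral shape, normalization K(t) = A * t**alpha
true_A, true_alpha, true_index, true_xc = 0.23, -3.5, -1.5, 30.0
y_obs, y_err = [], []
for t in times:
    mu = cutoff_powerlaw(energies, true_A * t**true_alpha, true_index, true_xc)
    err = 0.3 * mu
    y_obs.append(mu + rng.normal(0.0, err))
    y_err.append(err)
y_obs, y_err = np.concatenate(y_obs), np.concatenate(y_err)

# joint model: a single (A, alpha, index, xc) must describe all epochs at once
def joint_model(x, A, alpha, index, xc):
    return np.concatenate([
        cutoff_powerlaw(energies, A * t**alpha, index, xc) for t in times
    ])

x_dummy = np.arange(y_obs.size)  # curve_fit expects an x array; the model ignores it
popt, _ = curve_fit(joint_model, x_dummy, y_obs, p0=[0.2, -3.0, -1.2, 25.0],
                    sigma=y_err, absolute_sigma=True, maxfev=10000)
print("fitted A, alpha, index, xc:", popt)
```

In the notebook itself, 3ML handles this linking internally and, when a tag carries both a start and an end time, averages the model over that interval; the sketch only covers the single-tag case.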
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a3efce84d3c49ff6e866fb247c5b3ed395419cc
4,209
ipynb
Jupyter Notebook
EDA/FeatureEngineering_YearSold.ipynb
chazzy1/nycdsaML
7d39d0753351bfcd54f1fac018b7d27707ed380f
[ "MIT" ]
null
null
null
EDA/FeatureEngineering_YearSold.ipynb
chazzy1/nycdsaML
7d39d0753351bfcd54f1fac018b7d27707ed380f
[ "MIT" ]
null
null
null
EDA/FeatureEngineering_YearSold.ipynb
chazzy1/nycdsaML
7d39d0753351bfcd54f1fac018b7d27707ed380f
[ "MIT" ]
1
2018-11-07T02:35:48.000Z
2018-11-07T02:35:48.000Z
26.808917
91
0.511285
[ [ [ "# target value \nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('seaborn')\nfrom scipy.stats import norm, skew\nfrom scipy import stats\nimport numpy as np\nimport seaborn as sns\n\nsns.set()\n\n#Data loading\ntrain_set = pd.read_csv('../data/train.csv')\ntest_set = pd.read_csv('../data/test.csv')\n", "_____no_output_____" ], [ "pd.set_option('max_columns', None)", "_____no_output_____" ], [ "train_set.columns", "_____no_output_____" ], [ "train_set.YrSold.describe()", "_____no_output_____" ], [ "gy = train_set.groupby('YrSold')\ngy['SalePrice'].mean()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4a3f037664551e4756e43a956a8c26c381725554
41,330
ipynb
Jupyter Notebook
notebooks/Selecting_Columns_in_DataFrame.ipynb
ilysainath/Building-a-Repeatable-Data-Analysis-Process-with-Jupyter-Notebooks
87a825a6a4b0aa25ef644498a781b222aa11369d
[ "BSD-3-Clause" ]
1,846
2015-05-18T02:04:30.000Z
2022-03-31T09:49:16.000Z
notebooks/Selecting_Columns_in_DataFrame.ipynb
ilysainath/Building-a-Repeatable-Data-Analysis-Process-with-Jupyter-Notebooks
87a825a6a4b0aa25ef644498a781b222aa11369d
[ "BSD-3-Clause" ]
28
2015-12-07T01:57:08.000Z
2021-08-24T01:21:02.000Z
notebooks/Selecting_Columns_in_DataFrame.ipynb
ilysainath/Building-a-Repeatable-Data-Analysis-Process-with-Jupyter-Notebooks
87a825a6a4b0aa25ef644498a781b222aa11369d
[ "BSD-3-Clause" ]
1,054
2015-05-18T06:19:11.000Z
2022-03-16T06:13:37.000Z
29.105634
127
0.361408
[ [ [ "## Tips for Selecting Columns in a DataFrame\n\nNotebook to accompany this [post](https://pbpython.com/selecting-columns.html).\n\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "df = pd.read_csv(\n 'https://data.cityofnewyork.us/api/views/vfnx-vebw/rows.csv?accessType=DOWNLOAD&bom=true&format=true'\n)", "_____no_output_____" ] ], [ [ "Build a mapping list so we can see the index of all the columns", "_____no_output_____" ] ], [ [ "col_mapping = [f\"{c[0]}:{c[1]}\" for c in enumerate(df.columns)]", "_____no_output_____" ], [ "col_mapping", "_____no_output_____" ] ], [ [ "We can also build a dictionary", "_____no_output_____" ] ], [ [ "col_mapping_dict = {c[0]:c[1] for c in enumerate(df.columns)}", "_____no_output_____" ], [ "col_mapping_dict", "_____no_output_____" ] ], [ [ "Use iloc to select just the second column (Unique Squirrel ID)", "_____no_output_____" ] ], [ [ "df.iloc[:, 2]", "_____no_output_____" ] ], [ [ "Pass a list of integers to select multiple columns by index", "_____no_output_____" ] ], [ [ "df.iloc[:, [0,1,2]]", "_____no_output_____" ] ], [ [ "We can also pass a slice object to select a range of columns", "_____no_output_____" ] ], [ [ "df.iloc[:, 0:3]", "_____no_output_____" ] ], [ [ "If we want to combine the list and slice notation, we need to use nump.r_ to process the data into an appropriate format.", "_____no_output_____" ] ], [ [ "np.r_[0:3,15:19,24,25]", "_____no_output_____" ] ], [ [ "We can pass the output of np.r_ to .iloc to use multiple selection approaches", "_____no_output_____" ] ], [ [ "df.iloc[:, np.r_[0:3,15:19,24,25]]", "_____no_output_____" ] ], [ [ "We can use the same notation when reading in a csv as well", "_____no_output_____" ] ], [ [ "df_2 = pd.read_csv(\n 'https://data.cityofnewyork.us/api/views/vfnx-vebw/rows.csv?accessType=DOWNLOAD&bom=true&format=true',\n usecols=np.r_[1,2,5:8,15:25],\n)", "_____no_output_____" ], [ "df_2.head()", "_____no_output_____" ] ], [ [ "We can also select columns using a boolean array", "_____no_output_____" ] ], [ [ "run_cols = df.columns.str.contains('run', case=False)\nrun_cols", "_____no_output_____" ], [ "df.iloc[:, run_cols].head()", "_____no_output_____" ] ], [ [ "A lambda function can be useful for combining into 1 line.", "_____no_output_____" ] ], [ [ "df.iloc[:, lambda df:df.columns.str.contains('run', case=False)].head()", "_____no_output_____" ] ], [ [ "A more complex example", "_____no_output_____" ] ], [ [ "df.iloc[:, lambda df: df.columns.str.contains('district|precinct|boundaries',\n case=False)].head()", "_____no_output_____" ] ], [ [ "Combining index and boolean arrays", "_____no_output_____" ] ], [ [ "location_cols = df.columns.str.contains('district|precinct|boundaries',\n case=False)\nlocation_cols", "_____no_output_____" ], [ "location_indices = [i for i, col in enumerate(location_cols) if col]\nlocation_indices", "_____no_output_____" ], [ "df.iloc[:, np.r_[0:3,location_indices]].head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a3f0650a3703ee002854f4d961b41e5370459ad
46,705
ipynb
Jupyter Notebook
Starter_Code/credit_risk_resampling.ipynb
Satheeshbm/Classification
49f4d751d99ffec15db52381c5a057d87e915116
[ "ADSL" ]
null
null
null
Starter_Code/credit_risk_resampling.ipynb
Satheeshbm/Classification
49f4d751d99ffec15db52381c5a057d87e915116
[ "ADSL" ]
null
null
null
Starter_Code/credit_risk_resampling.ipynb
Satheeshbm/Classification
49f4d751d99ffec15db52381c5a057d87e915116
[ "ADSL" ]
null
null
null
29.48548
294
0.422182
[ [ [ "# Credit Risk Resampling Techniques", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom collections import Counter\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom imblearn.metrics import classification_report_imbalanced\nfrom imblearn.over_sampling import RandomOverSampler\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import ClusterCentroids\nfrom imblearn.combine import SMOTEENN", "_____no_output_____" ] ], [ [ "# Read the CSV into DataFrame", "_____no_output_____" ] ], [ [ "# Load the data\nfile_path = Path('Resources/lending_data.csv')\ndf = pd.read_csv(file_path)\ndf.head()", "_____no_output_____" ], [ "# Binary encoding 'homeowner' on in text\ndf = pd.get_dummies(df, columns=[\"homeowner\"])\ndf.head()", "_____no_output_____" ] ], [ [ "# Split the Data into Training and Testing", "_____no_output_____" ] ], [ [ "# Create our features\nX = df.drop(columns='loan_status')\n\n# Create our target\ny = df['loan_status']", "_____no_output_____" ], [ "X.describe()", "_____no_output_____" ], [ "# Check the balance of our target values\ny.value_counts()", "_____no_output_____" ], [ "# Create X_train, X_test, y_train, y_test\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\nX_train.shape", "_____no_output_____" ], [ "Counter(y_train)", "_____no_output_____" ] ], [ [ "## Data Pre-Processing\n\nScale the training and testing data using the `StandardScaler` from `sklearn`. Remember that when scaling the data, you only scale the features data (`X_train` and `X_testing`).", "_____no_output_____" ] ], [ [ "# Create the StandardScaler instance\nscaler = StandardScaler()", "_____no_output_____" ], [ "# Fit the Standard Scaler with the training data\n# When fitting scaling functions, only train on the training dataset\nX_scaler = scaler.fit(X_train)", "_____no_output_____" ], [ "# Scale the training and testing data\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)", "_____no_output_____" ] ], [ [ "# Simple Logistic Regression", "_____no_output_____" ] ], [ [ "model = LogisticRegression(solver='lbfgs', random_state=1)\nmodel.fit(X_train, y_train)\nCounter(y_train)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred = model.predict(X_test_scaled)\nbalanced_accuracy_score(y_test, y_pred)", "_____no_output_____" ], [ "# Display the confusion matrix\nconfusion_matrix(y_test, y_pred)", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred))", " pre rec spe f1 geo iba sup\n\n high_risk 0.85 0.91 0.99 0.88 0.95 0.90 619\n low_risk 1.00 0.99 0.91 1.00 0.95 0.91 18765\n\navg / total 0.99 0.99 0.91 0.99 0.95 0.91 19384\n\n" ] ], [ [ "# Oversampling\n\nIn this section, you will compare two oversampling algorithms to determine which algorithm results in the best performance. You will oversample the data using the naive random oversampling algorithm and the SMOTE algorithm. For each algorithm, be sure to complete the folliowing steps:\n\n1. View the count of the target classes using `Counter` from the collections library. \n3. 
Use the resampled data to train a logistic regression model.\n3. Calculate the balanced accuracy score from sklearn.metrics.\n4. Print the confusion matrix from sklearn.metrics.\n5. Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n\nNote: Use a random state of 1 for each sampling algorithm to ensure consistency between tests", "_____no_output_____" ], [ "### Naive Random Oversampling", "_____no_output_____" ] ], [ [ "# Resample the training data with the RandomOversampler\nros = RandomOverSampler(random_state=1)\nX_resampled, y_resampled = ros.fit_resample(X_train, y_train)\n# View the count of target classes with Counter\nCounter(y_resampled)", "_____no_output_____" ], [ "X_resampled.head()", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel_Random = LogisticRegression(solver='lbfgs', random_state=1)\nmodel_Random.fit(X_resampled, y_resampled)", "_____no_output_____" ], [ "X_resampled.shape", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred_nro = model_Random.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred_nro)", "_____no_output_____" ], [ "# Display the confusion matrix\ncm_nro = confusion_matrix(y_test, y_pred_nro)\ncm_nro", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred_nro))", " pre rec spe f1 geo iba sup\n\n high_risk 0.84 0.99 0.99 0.91 0.99 0.99 619\n low_risk 1.00 0.99 0.99 1.00 0.99 0.99 18765\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "### SMOTE Oversampling", "_____no_output_____" ] ], [ [ "# Resample the training data with SMOTE\nX_resampled_smote, y_resampled_smote = SMOTE(random_state=1, sampling_strategy=1.0).fit_resample(\n X_train, y_train)\n# View the count of target classes with Counter\nCounter(y_resampled_smote)", "_____no_output_____" ], [ "X_resampled_smote.head()", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel_smote = LogisticRegression(solver='lbfgs', random_state=1)\nmodel_smote.fit(X_resampled_smote, y_resampled_smote)", "_____no_output_____" ], [ "# Calculated the balanced accuracy score\ny_pred_smote = model_smote.predict(X_test)\nbalanced_accuracy_score(y_test, y_pred_smote)", "_____no_output_____" ], [ "# Display the confusion matrix\ncm_smote = confusion_matrix(y_test, y_pred_smote)\ncm_smote", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred_smote))", " pre rec spe f1 geo iba sup\n\n high_risk 0.84 0.99 0.99 0.91 0.99 0.99 619\n low_risk 1.00 0.99 0.99 1.00 0.99 0.99 18765\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "# Undersampling\n\nIn this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the folliowing steps:\n\n1. View the count of the target classes using `Counter` from the collections library. \n3. Use the resampled data to train a logistic regression model.\n3. Calculate the balanced accuracy score from sklearn.metrics.\n4. Display the confusion matrix from sklearn.metrics.\n5. 
Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n\nNote: Use a random state of 1 for each sampling algorithm to ensure consistency between tests", "_____no_output_____" ] ], [ [ "# Resample the data using the ClusterCentroids resampler\ncc = ClusterCentroids(random_state=1)\nX_resampled_cc, y_resampled_cc = cc.fit_resample(X_train, y_train)\n\n# View the count of target classes with Counter\nCounter(y_resampled_cc)", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel_cc = LogisticRegression(solver='lbfgs', random_state=1)\nmodel_cc.fit(X_resampled_cc, y_resampled_cc)", "_____no_output_____" ], [ "# Calculate the balanced accuracy score\ny_pred_cc = model_cc.predict(X_test)\ncm_cc = confusion_matrix(y_test, y_pred_cc)", "_____no_output_____" ], [ "# Display the confusion matrix\nbalanced_accuracy_score(y_test, y_pred_cc) ", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred_cc))", " pre rec spe f1 geo iba sup\n\n high_risk 0.84 0.98 0.99 0.91 0.99 0.98 619\n low_risk 1.00 0.99 0.98 1.00 0.99 0.98 18765\n\navg / total 0.99 0.99 0.98 0.99 0.99 0.98 19384\n\n" ] ], [ [ "# Combination (Over and Under) Sampling\n\nIn this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the folliowing steps:\n\n1. View the count of the target classes using `Counter` from the collections library. \n3. Use the resampled data to train a logistic regression model.\n3. Calculate the balanced accuracy score from sklearn.metrics.\n4. Display the confusion matrix from sklearn.metrics.\n5. Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn.\n\nNote: Use a random state of 1 for each sampling algorithm to ensure consistency between tests", "_____no_output_____" ] ], [ [ "# Resample the training data with SMOTEENN\nsm = SMOTEENN(random_state=1)\nX_resampled_sm, y_resampled_sm = sm.fit_resample(X_train, y_train)\n# View the count of target classes with Counter\nCounter(y_resampled_sm)", "_____no_output_____" ], [ "# Train the Logistic Regression model using the resampled data\nmodel_sm = LogisticRegression(solver='lbfgs', random_state=1)\nmodel_sm.fit(X_resampled_sm, y_resampled_sm)", "_____no_output_____" ], [ "# Calculate the balanced accuracy score\ny_pred_sm = model_sm.predict(X_test)\ncm_sm = confusion_matrix(y_test, y_pred_sm)", "_____no_output_____" ], [ "# Display the confusion matrix\nbalanced_accuracy_score(y_test, y_pred_sm) ", "_____no_output_____" ], [ "# Print the imbalanced classification report\nprint(classification_report_imbalanced(y_test, y_pred_sm))", " pre rec spe f1 geo iba sup\n\n high_risk 0.83 0.99 0.99 0.91 0.99 0.99 619\n low_risk 1.00 0.99 0.99 1.00 0.99 0.99 18765\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "# Final Questions\n\n1. Which model had the best balanced accuracy score?\n\nSMOTE oversampling and Random Naive Oversampling with same balanced accuracy score has the best score and it looks like the best model.\n\n2. Which model had the best recall score?\n\nAlmost all the models have similar recall scores.\n\n3. Which model had the best geometric mean score?\n\nGeometric mean is also very similar to all the models. 
\n\n\nP.S.: Had referred to the instructor on this similarity.", "_____no_output_____" ] ] ]
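The classification reports in the resampling notebook above print precision, recall, specificity, and geometric-mean columns. The sketch below shows how those headline numbers, plus balanced accuracy, follow from a 2x2 confusion matrix; the matrix entries are made up for illustration and are not the notebook's actual results.

```python
# Sketch of how the report columns above relate to a 2x2 confusion matrix.
# The matrix here is invented; it is not the notebook's output.
import numpy as np

# rows = actual (high_risk, low_risk), columns = predicted (high_risk, low_risk)
cm = np.array([[563, 56],
               [102, 18663]])

tp, fn = cm[0, 0], cm[0, 1]      # high_risk correctly / incorrectly classified
fp, tn = cm[1, 0], cm[1, 1]      # low_risk misclassified / correctly classified

recall = tp / (tp + fn)          # sensitivity for the minority class
specificity = tn / (tn + fp)     # recall of the majority class
precision = tp / (tp + fp)
balanced_accuracy = (recall + specificity) / 2
geometric_mean = np.sqrt(recall * specificity)

print(f"recall={recall:.3f} specificity={specificity:.3f} precision={precision:.3f}")
print(f"balanced accuracy={balanced_accuracy:.3f} geometric mean={geometric_mean:.3f}")
```

With heavy class imbalance, plain accuracy is dominated by the majority class, which is why the notebook leans on balanced accuracy and the geometric mean instead.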
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a3f1fd15f144d81ff2a7ccdf5aa35ad817bb12a
45,941
ipynb
Jupyter Notebook
Predicting-Survival-Assignment/Predicting-Survival-Titanic-Assignement.ipynb
cammyoung/udemy-ml-deployment
05ee08913d237a53db0aea1334762a066853dc48
[ "MIT" ]
null
null
null
Predicting-Survival-Assignment/Predicting-Survival-Titanic-Assignement.ipynb
cammyoung/udemy-ml-deployment
05ee08913d237a53db0aea1334762a066853dc48
[ "MIT" ]
null
null
null
Predicting-Survival-Assignment/Predicting-Survival-Titanic-Assignement.ipynb
cammyoung/udemy-ml-deployment
05ee08913d237a53db0aea1334762a066853dc48
[ "MIT" ]
null
null
null
47.607254
10,128
0.674648
[ [ [ "## Predicting Survival on the Titanic\n\n### History\nPerhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.\n\n### Assignment:\n\nBuild a Machine Learning Pipeline, to engineer the features in the data set and predict who is more likely to Survive the catastrophe.\n\nFollow the Jupyter notebook below, and complete the missing bits of code, to achieve each one of the pipeline steps.", "_____no_output_____" ] ], [ [ "import re\n\n# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# for visualization\nimport matplotlib.pyplot as plt\n\n# to divide train and test set\nfrom sklearn.model_selection import train_test_split\n\n# feature scaling\nfrom sklearn.preprocessing import StandardScaler\n\n# to build the models\nfrom sklearn.linear_model import LogisticRegression\n\n# to evaluate the models\nfrom sklearn.metrics import accuracy_score, roc_auc_score\n\n# to persist the model and the scaler\nimport joblib\n\n# to visualise al the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)", "_____no_output_____" ] ], [ [ "## Prepare the data set", "_____no_output_____" ] ], [ [ "# load the data - it is available open source and online\n\ndata = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl')\n\n# display data\ndata.head()", "_____no_output_____" ], [ "# replace interrogation marks by NaN values\n\ndata = data.replace('?', np.nan)", "_____no_output_____" ], [ "# retain only the first cabin if more than\n# 1 are available per passenger\n\ndef get_first_cabin(row):\n try:\n return row.split()[0]\n except:\n return np.nan\n \ndata['cabin'] = data['cabin'].apply(get_first_cabin)", "_____no_output_____" ], [ "# extracts the title (Mr, Ms, etc) from the name variable\n\ndef get_title(passenger):\n line = passenger\n if re.search('Mrs', line):\n return 'Mrs'\n elif re.search('Mr', line):\n return 'Mr'\n elif re.search('Miss', line):\n return 'Miss'\n elif re.search('Master', line):\n return 'Master'\n else:\n return 'Other'\n \ndata['title'] = data['name'].apply(get_title)", "_____no_output_____" ], [ "# cast numerical variables as floats\n\ndata['fare'] = data['fare'].astype('float')\ndata['age'] = data['age'].astype('float')", "_____no_output_____" ], [ "# drop unnecessary variables\n\ndata.drop(labels=['name','ticket', 'boat', 'body','home.dest'], axis=1, inplace=True)\n\n# display data\ndata.head()", "_____no_output_____" ], [ "# save the data set\n\ndata.to_csv('titanic.csv', index=False)", "_____no_output_____" ] ], [ [ "## Data Exploration\n\n### Find numerical and categorical variables", "_____no_output_____" ] ], [ [ "data = pd.read_csv('titanic.csv')", "_____no_output_____" ], [ "target = 'survived'", "_____no_output_____" ], [ "vars_num = [var for var in data.columns if data[var].dtypes != 'O' and data[var].nunique() > 20]\n\nvars_cat = [var for var in data.columns if var not in vars_num and var != 'survived']\n\nprint('Number of numerical variables: {}'.format(len(vars_num)))\nprint('Number of categorical variables: {}'.format(len(vars_cat)))", "Number of numerical 
variables: 2\nNumber of categorical variables: 7\n" ] ], [ [ "### Find missing values in variables", "_____no_output_____" ] ], [ [ "# first in numerical variables\nvars_num_with_na = [var for var in vars_num if data[var].isnull().sum() > 0]\nprint(vars_num_with_na)", "['age', 'fare']\n" ], [ "# now in categorical variables\nvars_cat_with_na = [var for var in vars_cat if data[var].isnull().sum() > 0]\nprint(vars_cat_with_na)", "['cabin', 'embarked']\n" ] ], [ [ "### Determine cardinality of categorical variables", "_____no_output_____" ] ], [ [ "data[vars_cat].nunique()", "_____no_output_____" ] ], [ [ "### Determine the distribution of numerical variables", "_____no_output_____" ] ], [ [ "for var in vars_num:\n data[var].hist(bins=30)\n plt.ylabel('Number of passengers')\n plt.xlabel(var)\n plt.title(var)\n plt.show()", "_____no_output_____" ] ], [ [ "## Separate data into train and test\n\nUse the code below for reproducibility. Don't change it.", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(\n data.drop('survived', axis=1), # predictors\n data['survived'], # target\n test_size=0.2, # percentage of obs in test set\n random_state=0) # seed to ensure reproducibility\n\nX_train.shape, X_test.shape", "_____no_output_____" ] ], [ [ "## Feature Engineering\n\n### Extract only the letter (and drop the number) from the variable Cabin", "_____no_output_____" ] ], [ [ "X_train['cabin'] = X_train['cabin'].str.replace('\\\\d', '')\nX_test['cabin'] = X_test['cabin'].str.replace('\\\\d', '')", "_____no_output_____" ] ], [ [ "### Fill in Missing data in numerical variables:\n\n- Add a binary missing indicator\n- Fill NA in original variable with the median", "_____no_output_____" ] ], [ [ "for var in vars_num_with_na:\n var_med = X_train[var].median()\n\n X_train[var + '_na'] = np.where(X_train[var].isnull(), 1, 0)\n X_test[var + '_na'] = np.where(X_test[var].isnull(), 1, 0)\n\n X_train[var] = X_train[var].fillna(var_med)\n X_test[var] = X_test[var].fillna(var_med)", "_____no_output_____" ] ], [ [ "### Replace Missing data in categorical variables with the string **Missing**", "_____no_output_____" ] ], [ [ "for var in vars_cat_with_na:\n X_train[var] = X_train[var].fillna('Missing')\n X_test[var] = X_test[var].fillna('Missing')", "_____no_output_____" ] ], [ [ "### Remove rare labels in categorical variables\n\n- remove labels present in less than 5 % of the passengers", "_____no_output_____" ] ], [ [ "def find_frequent_labels(df, var, rare_perc):\n \n # function finds the labels that are shared by more than\n # a certain % of the houses in the dataset\n\n df = df.copy()\n\n tmp = df[var].value_counts() / len(df)\n\n return tmp[tmp > rare_perc].index\n\n\nfor var in vars_cat:\n \n # find the frequent categories\n frequent_ls = find_frequent_labels(X_train, var, 0.05)\n print(var)\n print(frequent_ls)\n print()\n \n # replace rare categories by the string \"Rare\"\n X_train[var] = np.where(X_train[var].isin(\n frequent_ls), X_train[var], 'Rare')\n \n X_test[var] = np.where(X_test[var].isin(\n frequent_ls), X_test[var], 'Rare')", "pclass\nInt64Index([3, 1, 2], dtype='int64')\n\nsex\nIndex(['male', 'female'], dtype='object')\n\nsibsp\nInt64Index([0, 1], dtype='int64')\n\nparch\nInt64Index([0, 1, 2], dtype='int64')\n\ncabin\nIndex(['Missing', 'C'], dtype='object')\n\nembarked\nIndex(['S', 'C', 'Q'], dtype='object')\n\ntitle\nIndex(['Mr', 'Miss', 'Mrs'], dtype='object')\n\n" ] ], [ [ "### Perform one hot encoding of categorical variables into k-1 binary variables\n\n- 
k-1, means that if the variable contains 9 different categories, we create 8 different binary variables\n- Remember to drop the original categorical variable (the one with the strings) after the encoding", "_____no_output_____" ] ], [ [ "X_train = pd.concat([X_train, pd.get_dummies(X_train[vars_cat], drop_first=True)], axis=1).drop(vars_cat, axis=1)", "_____no_output_____" ], [ "X_test = pd.concat([X_test, pd.get_dummies(X_test[vars_cat], drop_first=True)], axis=1).drop(vars_cat, axis=1)", "_____no_output_____" ], [ "for var in X_train.columns:\n if var not in X_test.columns:\n X_test[var] = 0", "_____no_output_____" ] ], [ [ "### Scale the variables\n\n- Use the standard scaler from Scikit-learn", "_____no_output_____" ] ], [ [ "scl = StandardScaler()\nscl.fit(X_train)\nX_train = scl.transform(X_train)\nX_test = scl.transform(X_test)", "_____no_output_____" ] ], [ [ "## Train the Logistic Regression model\n\n- Set the regularization parameter to 0.0005\n- Set the seed to 0", "_____no_output_____" ] ], [ [ "log_model = LogisticRegression(C=0.0005, random_state=0)\nlog_model.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "## Make predictions and evaluate model performance\n\nDetermine:\n- roc-auc\n- accuracy\n\n**Important, remember that to determine the accuracy, you need the outcome 0, 1, referring to survived or not. But to determine the roc-auc you need the probability of survival.**", "_____no_output_____" ] ], [ [ "test_pred = log_model.predict(X_test)\ntest_prob = log_model.predict_proba(X_test)\nprint(roc_auc_score(y_test, test_prob[:, 1]))\nprint(accuracy_score(y_test, test_pred))", "0.7473148148148148\n0.7061068702290076\n" ] ], [ [ "That's it! Well done\n\n**Keep this code safe, as we will use this notebook later on, to build production code, in our next assignement!!**", "_____no_output_____" ] ] ]
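The Titanic assignment above lumps infrequent categories into a 'Rare' label and then one-hot encodes into k-1 dummy columns. Below is a toy sketch of both steps on an invented series that mimics the `embarked` column; the 15% cutoff is an arbitrary choice for this tiny example, not the assignment's 5% threshold applied to real data.

```python
# Toy sketch of the two categorical steps above: rare-label grouping and
# k-1 dummy encoding. The data are made up, not the Titanic columns.
import pandas as pd

s = pd.Series(['S', 'S', 'S', 'C', 'C', 'Q', 'Q', 'X'])  # 'X' is the rare label

# keep labels present in more than 15% of rows, lump the rest into 'Rare'
freq = s.value_counts(normalize=True)
frequent = freq[freq > 0.15].index
s_grouped = s.where(s.isin(frequent), 'Rare')

# k categories -> k-1 binary columns (the dropped first level is the baseline)
dummies = pd.get_dummies(s_grouped, prefix='embarked', drop_first=True)
print(s_grouped.value_counts())
print(dummies.head())
```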
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a3f21dfffb5cfb30a89f3d1bc6bd466d0013b20
8,521
ipynb
Jupyter Notebook
notebooks/Skew_T/Hodograph.ipynb
helsing97/Unidata-Workshop
61d0fd9e970bb41128333997396448d2f5bedded
[ "MIT" ]
2
2019-11-15T04:12:02.000Z
2020-06-24T23:32:53.000Z
notebooks/Skew_T/Hodograph.ipynb
helsing97/Unidata-Workshop
61d0fd9e970bb41128333997396448d2f5bedded
[ "MIT" ]
null
null
null
notebooks/Skew_T/Hodograph.ipynb
helsing97/Unidata-Workshop
61d0fd9e970bb41128333997396448d2f5bedded
[ "MIT" ]
null
null
null
29.586806
271
0.572116
[ [ [ "<a name=\"top\"></a>\n<div style=\"width:1000 px\">\n\n<div style=\"float:right; width:98 px; height:98px;\">\n<img src=\"https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png\" alt=\"Unidata Logo\" style=\"height: 98px;\">\n</div>\n\n<h1>Hodographs</h1>\n<h3>Unidata Python Workshop</h3>\n\n<div style=\"clear:both\"></div>\n</div>\n\n<hr style=\"height:2px;\">\n\n<div style=\"float:right; width:250 px\"><img src=\"https://unidata.github.io/MetPy/latest/_images/sphx_glr_Advanced_Sounding_001.png\" alt=\"Example Skew-T\" style=\"height: 500px;\"></div>\n\n### Questions\n1. What is a hodograph?\n1. How can MetPy plot hodographs?\n1. How can the style of the hodographs be modified to encode other information?\n\n### Objectives\n1. <a href=\"#upperairdata\">Obtain upper air data</a>\n1. <a href=\"#simpleplot\">Make a simple hodograph</a>\n1. <a href=\"#annotate\">Annotate the hodograph with wind vectors</a>\n1. <a href=\"#continuous\">Color the plot (continuous)</a>\n1. <a href=\"#segmented\">Color the plot (segmented)</a>", "_____no_output_____" ], [ "<a name=\"upperairdata\"></a>\n## Obtain upper air data\n\nJust as we learned in the siphon basics and upper air and skew-T notebook, we need to obtain upperair data to plot. We are going to stick with September 10, 2017 at 00Z for Key West, Fl. If you need a review on obtaining upper air data, please review those lessons.", "_____no_output_____" ] ], [ [ "from datetime import datetime\n\nfrom metpy.units import pandas_dataframe_to_unit_arrays\nfrom siphon.simplewebservice.wyoming import WyomingUpperAir\n\ndf = WyomingUpperAir.request_data(datetime(1998, 10, 4, 0), 'OUN')\ndata = pandas_dataframe_to_unit_arrays(df)", "_____no_output_____" ] ], [ [ "<a href=\"#top\">Top</a>\n<hr style=\"height:2px;\">", "_____no_output_____" ], [ "<a name=\"simpleplot\"></a>\n## Make a Simple Hodograph\n\nThe hodograph is a plot of the wind shear in the sounding. It is constructed by drawing the winds as vectors from the origin and connecting the heads of those vectors. MetPy makes this simple!", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom metpy.plots import Hodograph\n%matplotlib inline", "_____no_output_____" ], [ "fig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(1, 1, 1)\n\nh = Hodograph(ax, component_range=60.)\nh.add_grid(increment=20)\nh.plot(data['u_wind'], data['v_wind'], color='tab:red')", "_____no_output_____" ] ], [ [ "It's relatively common to not want or need to display the entire sounding on a hodograph. Let's limit these data to the lowest 10km and plot it again.", "_____no_output_____" ] ], [ [ "import metpy.calc as mpcalc\nfrom metpy.units import units\n_, u_trimmed, v_trimmed, speed_trimmed, height_trimmed = mpcalc.get_layer(data['pressure'], data['u_wind'],\n data['v_wind'], data['speed'], data['height'],\n heights=data['height'], depth=10 * units.km)", "_____no_output_____" ], [ "fig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(1, 1, 1)\n\nh = Hodograph(ax, component_range=30.)\nh.add_grid(increment=10)\nh.plot(u_trimmed, v_trimmed, color='tab:red')", "_____no_output_____" ] ], [ [ "<a name=\"annotate\"></a>\n## Annotate the hodograph with wind vectors\n\nIt may be useful when introducing hodographs to actually show the wind vectors on the plot. The `wind_vectors` method does exactly this. 
It is often necessary to decimate the wind vectors for the plot to be intelligible.", "_____no_output_____" ] ], [ [ "h.wind_vectors(u_trimmed[::3], v_trimmed[::3])\nfig", "_____no_output_____" ] ], [ [ "We can also set the limits to be asymmetric to beter utilize the plot space.", "_____no_output_____" ] ], [ [ "ax.set_xlim(-10, 30)\nax.set_ylim(-10, 20)\nfig", "_____no_output_____" ] ], [ [ "<a name=\"continuous\"></a>\n## Color the plot (continuous)\n\nWe can color the line on the hodograph by another variable as well. In the simplest case it will be \"continuously\" colored, changing with the value of the variable such as windspeed.", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(1, 1, 1)\n\nh = Hodograph(ax, component_range=30.)\nh.add_grid(increment=10)\nh.plot_colormapped(u_trimmed, v_trimmed, speed_trimmed)", "_____no_output_____" ], [ "from metpy.plots import colortables\nimport numpy as np\n\nfig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(1, 1, 1)\n\nnorm, cmap = colortables.get_with_range('Carbone42', np.min(speed_trimmed), np.max(speed_trimmed))\n\nh = Hodograph(ax, component_range=30.)\nh.add_grid(increment=10)\nh.plot_colormapped(u_trimmed, v_trimmed, speed_trimmed, cmap=cmap, norm=norm)", "_____no_output_____" ] ], [ [ "<a name=\"segmented\"></a>\n## Color the plot (segmented)\n\nIt may be useful when introducing hodographs to actually show the wind vectors on the plot. The `wind_vectors` method does exactly this. It is often necessary to decimate the wind vectors for the plot to be intelligible.", "_____no_output_____" ], [ "We can also color the hodograph based on another variable - either continuously or in a segmented way. Here we'll color the hodograph by height above ground level.", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(6, 6))\nax = fig.add_subplot(1, 1, 1)\n\nboundaries = np.array([0, 1, 3, 5, 8]) * units.km\ncolors = ['tab:red', 'tab:green', 'tab:blue', 'tab:olive']\n\n# Since we want to do things in terms of AGL, we need to make AGL heights\nagl = height_trimmed - height_trimmed[0]\n\nh = Hodograph(ax, component_range=30.)\nh.add_grid(increment=10)\nh.plot_colormapped(u_trimmed, v_trimmed, agl, bounds=boundaries, colors=colors)", "_____no_output_____" ] ], [ [ "<a href=\"#top\">Top</a>\n<hr style=\"height:2px;\">", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a3f45a10df589513d0e88c2fe99a98f08c36774
608,771
ipynb
Jupyter Notebook
Week3-NetworkModeling/CommunityDetection.ipynb
ycleong/brain-networks-course
bf7fab820c8e7772df4df93723e0e753daa6fa3b
[ "MIT" ]
null
null
null
Week3-NetworkModeling/CommunityDetection.ipynb
ycleong/brain-networks-course
bf7fab820c8e7772df4df93723e0e753daa6fa3b
[ "MIT" ]
null
null
null
Week3-NetworkModeling/CommunityDetection.ipynb
ycleong/brain-networks-course
bf7fab820c8e7772df4df93723e0e753daa6fa3b
[ "MIT" ]
1
2021-03-29T20:56:26.000Z
2021-03-29T20:56:26.000Z
480.861769
163,388
0.93032
[ [ [ "## Community Detection\n\nIn this notebook we will walk through a number of methods for community detection using a simple example dataset.\n\n\n", "_____no_output_____" ] ], [ [ "import numpy,pandas\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport sys\nimport operator \nimport itertools\n\nsys.path.append('../utils')\nfrom utils import algorithm_u\nfrom utils import module_degree_zscore,participation_coefficient\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Example graph\n\nFirst let's create a simple graph that has two communities, each of which is fully connected, with one node from each community connected to a node in the other community.", "_____no_output_____" ] ], [ [ "G = nx.Graph()\n# nodes 1-4 are members of one community, and 5-8 are members of another, with 1 and 5 connected as well\nedges=[(1,2),(1,3),(1,4),(1,5),(5,6),(5,7),(5,8),(6,7),(6,8),(7,8),(2,3),(2,4),(3,4)]\nG.add_edges_from(edges)\n\nnx.draw_spring(G,with_labels=True,node_color='yellow')", "_____no_output_____" ] ], [ [ "### Girvan-Newman method\n\nThe Girvan-Newman method is a *divisive* clustering method, meaning that it starts with the full graph and tries to find the best way to divide it into some number of clusters by removing particular edges.\n\nThe algorithm was defined by [Girvan & Newman (2002)](http://www.pnas.org/content/99/12/7821) as follows:\n\n1. Calculate the betweenness for all edges in the network.\n2. Remove the edge with the highest betweenness.\n3. Recalculate betweennesses for all edges affected by the\nremoval.\n4. Repeat from step 2 until no edges remain.\n\nLet's implement this for the example dataset, finding two clusters.", "_____no_output_____" ] ], [ [ "n_clusters=len([i for i in nx.connected_components(G)])\nG_tmp=G.copy() # make a copy of the graph to work with\nwhile n_clusters==1:\n # step 1: compute edge betweenness\n eb=nx.edge_betweenness(G_tmp)\n \n # step 2: remove the edge with highest betweeness\n # find the edge with the largest value of edge betweenness\n ebmax = max(eb.items(), key=operator.itemgetter(1))[0]\n # remove it from the graph\n G_tmp.remove_edges_from([ebmax])\n print('removing edge:',ebmax)\n \n # compute the number of connected components to see if we have\n # induced new clusters, and continue looping if not\n n_clusters=len([i for i in nx.connected_components(G_tmp)])\n \nprint('found two clusters:')\nprint([list(i.nodes) for i in nx.connected_component_subgraphs(G_tmp)])", "removing edge: (1, 5)\nfound two clusters:\n[[1, 2, 3, 4], [5, 6, 7, 8]]\n" ] ], [ [ "### Modularity\n\nOne of the most commonly used set of methods for community detection rely upon the concept of *modularity*. 
Here we will walk through the computation of modularity for a simple graph.\n\nHere is one expression for modularity, from [Fortunato, 2010](https://arxiv.org/pdf/0906.0612.pdf):\n\n$$\nQ = \\frac{1}{2m}\\sum_{ij}(A_{ij} - P_{ij})\\delta(C_i,C_j)\n$$\n\nwhere $m$ is the total number of edges in the graph, $A$ is the adjacency matrix, and $P_{ij}$ is the expected number of edges between i and j according to the apporpriate null model, and $\\delta$ is a matrix that denotes whether vertices i and j are within the same community:\n\n$$\n\\delta(C_i,C_j) = \\left\\{\n \\begin{array}{ll}\n 1\\ if\\ C_i=C_j\\\\\n 0\\ if\\ C_i \\neq C_j\\\\\n \\end{array}\\right.\n$$", "_____no_output_____" ], [ "To compute modularity for our example graph (which looks to be fairly modular), we first need the adjacency matrix.", "_____no_output_____" ] ], [ [ "A = nx.to_numpy_array(G)\nprint(A)", "[[0. 1. 1. 1. 1. 0. 0. 0.]\n [1. 0. 1. 1. 0. 0. 0. 0.]\n [1. 1. 0. 1. 0. 0. 0. 0.]\n [1. 1. 1. 0. 0. 0. 0. 0.]\n [1. 0. 0. 0. 0. 1. 1. 1.]\n [0. 0. 0. 0. 1. 0. 1. 1.]\n [0. 0. 0. 0. 1. 1. 0. 1.]\n [0. 0. 0. 0. 1. 1. 1. 0.]]\n" ] ], [ [ "Next we need to generate the $\\delta$ matrix denoting whether each pair of vertices is a member of the same community.", "_____no_output_____" ] ], [ [ "partition = [1,1,1,1,2,2,2,2]\ndelta = numpy.zeros((len(partition),len(partition)))\nfor i in range(len(partition)):\n for j in range(len(partition)):\n delta[i,j]=int(partition[i]==partition[j])\n \nprint(delta)", "[[1. 1. 1. 1. 0. 0. 0. 0.]\n [1. 1. 1. 1. 0. 0. 0. 0.]\n [1. 1. 1. 1. 0. 0. 0. 0.]\n [1. 1. 1. 1. 0. 0. 0. 0.]\n [0. 0. 0. 0. 1. 1. 1. 1.]\n [0. 0. 0. 0. 1. 1. 1. 1.]\n [0. 0. 0. 0. 1. 1. 1. 1.]\n [0. 0. 0. 0. 1. 1. 1. 1.]]\n" ] ], [ [ "The final thing we need is the expected edge frequency from the null model. In general we want the null model to match the actual graph as closely as possible, except for the clustering. It is common to use a null model in which the degree sequence (i.e. the values of degrees for all nodes) is identical to the real graph; this is a more stringent null model than simply equating the degree distribution. This null model can be written as (Fortunato, 2010):\n\n$$\nQ = \\frac{1}{2m}\\sum_{ij}\\bigg(A_{ij} - \\frac{k_i k_j}{2m}\\bigg)\\delta(C_i,C_j)\n$$\n\nwhere $k_i$ is the degree of vertex $i$. Note that this null model will not necessarily give an identical degree sequence on any particular realization, but should be the same on average.", "_____no_output_____" ] ], [ [ "m = len(G.edges)\nk = [G.degree[i] for i in G.nodes] # degree values\nQ=0\nfor i in range(len(k)):\n for j in range(len(k)):\n Q += (A[i,j] - (k[i]*k[j])/(2*m))*delta[i,j]\nQ = Q/(2*m)\n\nprint(Q)", "0.42307692307692296\n" ] ], [ [ "We can compare our answer to the one given by the built-in modularity function in NetworkX:", "_____no_output_____" ] ], [ [ "assert Q == nx.algorithms.community.quality.modularity(G,[{1,2,3,4},{5,6,7,8}])", "_____no_output_____" ] ], [ [ "Now let's examine how modularity varies with the partition. In this case, we can fairly easily compute all 128 possible partitions of the 8 nodes and compute modularity for each. 
In principle we should see that the modularity value is highest for the true partition.", "_____no_output_____" ] ], [ [ "Qvals=numpy.zeros(128)\npartitions=[]\n\n# loop through all possible partitions of edges into two communities:\nfor i,p in enumerate(algorithm_u([1,2,3,4,5,6,7,8],2)):\n Qvals[i] = nx.algorithms.community.quality.modularity(G,p)\n partitions.append(p)\n", "_____no_output_____" ], [ "plt.plot(numpy.sort(Qvals))\nprint('maximum Q:',numpy.max(Qvals))\nprint('best partition:',partitions[numpy.argsort(Qvals)[-1:][0]])", "maximum Q: 0.42307692307692296\nbest partition: [[1, 2, 3, 4], [5, 6, 7, 8]]\n" ] ], [ [ "### Modularity optimization\n\nIn general, it's not possible to perform exhaustive computation of modularity for all possible partitions (since the number of possible partitions grows exponentially with the size of the graph), so a number of researchers have developed approximate methods that perform well in finding the partition with the maximum modularity value.\n\n#### Greedy optimization\n\nOne approach (proposed initially by [Newman, 2004](https://pdfs.semanticscholar.org/29d4/dfae2807a67a2c66c720b4985cb599c4e245.pdf)) is to perform an *agglomerative* clustering using a [\"greedy\" algorithm ](https://en.wikipedia.org/wiki/Greedy_algorithm)- that is, an algorithm that makes the best possible choice at each point in the process, akin to climbing a hill by going the steepest upward direction at every point.\n\nIn Newman's greedy method, we start with each vertex in its own partition. We then find which combination of partitions would increase modularity the most, and combine those into one.\n\nThe implementation here would be far too inefficient to use with real data, but should help make clear how the algorithm works.", "_____no_output_____" ] ], [ [ "# create a function to compute modularity more easily\ndef modularity(G,partition):\n A = nx.to_numpy_array(G)\n m = len(G.edges)\n delta = numpy.zeros((len(partition),len(partition)))\n for i in range(len(partition)):\n for j in range(len(partition)):\n delta[i,j]=int(partition[i]==partition[j])\n \n k = [G.degree[i] for i in G.nodes] # degree values\n Q=0\n for i in range(len(k)):\n for j in range(len(k)):\n Q += (A[i,j] - (k[i]*k[j])/(2*m))*delta[i,j]\n Q = Q/(2*m)\n return(Q)\n\nQvals=[-numpy.inf]\nnotbest=True\npartition=numpy.array([1,2,3,4,5,6,7,8]) # initially assign all to the same\n\nwhile notbest:\n unique_partitions=numpy.unique(partition)\n print('unique partitions:',unique_partitions)\n # loop through all combinations of unique partitions\n modvals={}\n for i in range(len(unique_partitions)):\n for j in range(i+1,len(unique_partitions)):\n if i==j:\n continue\n tmp_part=numpy.array(partition)\n tmp_part[tmp_part==unique_partitions[i]]=unique_partitions[j]\n \n modvals[(unique_partitions[i],unique_partitions[j])]=modularity(G,tmp_part)\n modmax = max(modvals.items(), key=operator.itemgetter(1))[0]\n \n # this method assumes that Q increases monotonically to its maximum\n if modvals[modmax]<numpy.max(numpy.array(Qvals)):\n print('breaking: found best Q value!')\n print(partition)\n notbest=False\n else:\n print('collapsing:',modmax,modvals[modmax])\n partition[partition==modmax[0]]=modmax[1]\n Qvals.append(modvals[modmax])\n", "unique partitions: [1 2 3 4 5 6 7 8]\ncollapsing: (2, 3) -0.07692307692307693\nunique partitions: [1 3 4 5 6 7 8]\ncollapsing: (3, 4) 0.02366863905325443\nunique partitions: [1 4 5 6 7 8]\ncollapsing: (1, 4) 0.14792899408284027\nunique partitions: [4 5 6 7 8]\ncollapsing: (6, 7) 
0.19822485207100599\nunique partitions: [4 5 7 8]\ncollapsing: (7, 8) 0.2988165680473374\nunique partitions: [4 5 8]\ncollapsing: (5, 8) 0.42307692307692296\nunique partitions: [4 8]\nbreaking: found best Q value!\n[4 4 4 4 8 8 8 8]\n" ] ], [ [ "### Spectral clustering\n\nAnother common method for community detection is spectral clustering, which uses the eigenvectors of matrices that describe the graph. While we could work with the adjacency matrix, it is more common to use the [Laplacian matrix](https://samidavies.wordpress.com/2016/09/20/whats-up-with-the-graph-laplacian/), which you can think of as describing the flow of some quantity away from any particular node in the graph.\n\nThe Laplacian L is defined as:\n\n$$\nL = D - A\n$$ \n\nwhere $A$ is the adjacency matrix, and $D$ is a diagonal matrix where each entry is the degree of that particular node. Here it is for our example graph:", "_____no_output_____" ] ], [ [ "D = numpy.zeros(A.shape)\nD[numpy.diag_indices_from(D)]=k\nL = D - A\nprint(L)", "[[ 4. -1. -1. -1. -1. 0. 0. 0.]\n [-1. 3. -1. -1. 0. 0. 0. 0.]\n [-1. -1. 3. -1. 0. 0. 0. 0.]\n [-1. -1. -1. 3. 0. 0. 0. 0.]\n [-1. 0. 0. 0. 4. -1. -1. -1.]\n [ 0. 0. 0. 0. -1. 3. -1. -1.]\n [ 0. 0. 0. 0. -1. -1. 3. -1.]\n [ 0. 0. 0. 0. -1. -1. -1. 3.]]\n" ] ], [ [ "In general, it's more useful to work with the normalized Laplacian, which normalizes by degree. We can compute this easily using linear algebra:\n\n$$\nLn = D^{-\\frac{1}{2}}LD^{-\\frac{1}{2}}\n$$", "_____no_output_____" ] ], [ [ "Ln = numpy.linalg.inv(numpy.sqrt(D)).dot(L).dot(numpy.linalg.inv(numpy.sqrt(D)))\nprint(Ln)", "[[ 1. -0.28867513 -0.28867513 -0.28867513 -0.25 0.\n 0. 0. ]\n [-0.28867513 1. -0.33333333 -0.33333333 0. 0.\n 0. 0. ]\n [-0.28867513 -0.33333333 1. -0.33333333 0. 0.\n 0. 0. ]\n [-0.28867513 -0.33333333 -0.33333333 1. 0. 0.\n 0. 0. ]\n [-0.25 0. 0. 0. 1. -0.28867513\n -0.28867513 -0.28867513]\n [ 0. 0. 0. 0. -0.28867513 1.\n -0.33333333 -0.33333333]\n [ 0. 0. 0. 0. -0.28867513 -0.33333333\n 1. -0.33333333]\n [ 0. 0. 0. 0. -0.28867513 -0.33333333\n -0.33333333 1. ]]\n" ], [ "eig = numpy.linalg.eig(Ln)\ndef plot_eig(eig):\n plt.figure(figsize=(8,8))\n plt.imshow(eig[1]) # Get locations and labels\n _=plt.xticks([i for i in range(len(eig[0]))], ['%0.2f'%i for i in eig[0]])\n plt.xlabel('eigenvalue')\n plt.ylabel(\"vertices\")\n plt.title('eigenvectors')\n plt.tight_layout()\n \nplot_eig(eig)", "_____no_output_____" ] ], [ [ "Note that there is a single zero eigenvalue, which represents the fact that there is a single graph component. Let's see what would happen if we were to add another component:", "_____no_output_____" ] ], [ [ "G_bigger = G.copy()\nG_bigger.add_edges_from([(9,10),(10,11),(10,12)])\nnx.draw_spring(G_bigger)", "_____no_output_____" ] ], [ [ "Now let's get the Laplacian matrix (this time using the built-in NetworkX function) and compute its eigenvalues.", "_____no_output_____" ] ], [ [ "Ln_bigger = nx.normalized_laplacian_matrix(G_bigger).todense()\neig_bigger=numpy.linalg.eig(Ln_bigger)\neig_bigger[0]", "_____no_output_____" ] ], [ [ "Here you can see that there are two zero-valued eigenvalues, reflecting the fact that there are two components.\n\nNow look back at the second column in the eigenvector matrix above, corresponding to the second largest eigenvalue. 
Let's draw the graph and color the nodes according to the values of this second smallest eigenvalue:", "_____no_output_____" ] ], [ [ "nx.draw_spring(G,node_color=eig[1][:,1],cmap='viridis')", "_____no_output_____" ] ], [ [ "What this shows is that the eigenvector corresponding to the smallest nonzero eigenvalue of the Laplacian matrix divides the graph by its major communities.\n\nSpectral clustering methods take this approach further by treating each vertex in a metric space defined by the eigenvectors, and then using these to perform various clustering operations (e.g., k-means clustering).", "_____no_output_____" ], [ "### Infomap\n\nThe [infomap algorithm](http://www.mapequation.org/code.html) uses a flow-based model along with concepts from information theory to identify communities in the data. It is based on the idea of a random walk across the network; the fundamental concept is that one can describe a random walk in terms of traversal across communities rather than individual nodes, and an accurate community partition should lead to a compact description of the network.\n\nLet's first simulate a random walk across our example network.", "_____no_output_____" ] ], [ [ "import random\n# length of random walk\nwalkLength = 1000\nn=1 # start node\n\nedges_visited = []\n\nfor k in range(walkLength):\n e=[i[1] for i in list(G.edges(n))]\n random.shuffle(e)\n edges_visited.append((n,e[0]))\n n=e[0] \n\n", "_____no_output_____" ] ], [ [ "Infomap uses a measure of the relative proportion of within-community versus between-community walks (though it does this using an information theoretic framework). We can get a simple idea of how this works by simply looking at how often the random walker switches between communities; the best partition should be the one that results in the smallest number of steps between communities. Let's create a function that can take a random walk and a community partition and tell us the proportion of within-community steps. 
We can then apply this to all possible partitions of our example graph, in order to see if the true partition indeed results in the greatest proportion of within-community steps.", "_____no_output_____" ] ], [ [ "def mean_walktype(edges,partition_list):\n \"\"\"\n compute the proportion of within-community steps in a random walk\n edges is a list of tuples referring to edges in a random walk\n partition_list is a list of lists, as returned by algorithm_u\n \"\"\"\n # turn partition_list into a partition index\n partition=numpy.zeros(len(list(itertools.chain.from_iterable(partition_list))))\n for i in partition_list[1]:\n partition[i-1]=1\n # create the delta function for the partition\n delta = numpy.zeros((len(partition),len(partition)))\n for i in range(len(partition)):\n for j in range(len(partition)):\n delta[i,j]=int(partition[i]==partition[j])\n \n # create the list of walk types using the delta array\n walktype=[] # 1 for within, 0 for between\n for i in edges:\n walktype.append(delta[i[0]-1,i[1]-1])\n\n return(numpy.mean(walktype))\n \nmeanvals=numpy.zeros(128)\npartitions=[]\n# loop through all possible partitions into two communities:\nfor i,p in enumerate(algorithm_u([1,2,3,4,5,6,7,8],2)):\n\n meanvals[i] = mean_walktype(edges_visited,p)\n partitions.append(p)\n", "_____no_output_____" ] ], [ [ "Confirm that the true partition has the maximum probability of within-community steps", "_____no_output_____" ] ], [ [ "partitions[numpy.argmax(meanvals)]", "_____no_output_____" ], [ "mv=numpy.sort(meanvals)\n\nplt.plot(mv)\n# put a line for the value of the true partition\ntrue_p=mean_walktype(edges_visited,[{1,2,3,4},{5,6,7,8}])\nplt.plot([0,len(meanvals)],[true_p,true_p],'r',alpha=0.5,linestyle='dotted')", "_____no_output_____" ] ], [ [ "### Network hubs\n\nWe know that in any group there are some individuals who are better connected than others, and this was evident from the long-tailed degree distrbution in the Facebook data. In network science, such high-degree nodes are often referred to as \"hubs\". Given our characterization of community structure, we can further differentiate between different types of hubs. Some hubs are both highly connected within their own module and to nodes in other modules, which we refer to as *connector hubs*. Other hubs are highly connected, but primarily to other nodes within their own module, which we refer to as *provincial hubs*. These different types of hubs play different roles in network communication, and we will encounter them repeatedly as we discuss network neuroscience research. \n\nA general approach to categorizing different types of hubs was presented by [Guimera and Amaral (2005)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2175124/), known as \"cartographic analysis\". This involves computing two node-level measures:\n\n- within-module degree Z-score: The number of edges between a node and other nodes within its module, Z-scored across all members of that module. \n\n- participation coefficient: A measure of the squared proportion of edges that are with nodes outside one's own module.\n\n$$P_i = 1 - \\sum_{s=1}^{N_M} \\left ( \\frac{k_{is}}{k_i} \\right )^2$$\n\n$k_{is}$ is the number of edges that fall within one's module (i.e. module degree), and $k_i$ is the total number of edges (i.e. 
degree).\n\nLet's compute these for the example dataset, using functions defined in utils/utils.py\n", "_____no_output_____" ] ], [ [ "mdzs=module_degree_zscore(G, partition)\npc=participation_coefficient(G, partition)", "_____no_output_____" ] ], [ [ "Note that in our example network, all nodes are fully connected within their own module, which means that their module degree is all equal and thus the module degree Z-score is zero for nodes.", "_____no_output_____" ] ], [ [ "mdzs", "_____no_output_____" ] ], [ [ "However, if we look at the participation coefficient we will see that most nodes have a value of zero (since they are only connected to others from their own module), whereas the two nodes that connect the modules have a higher participation coefficient.", "_____no_output_____" ] ], [ [ "pc\nnx.draw_spring(G,node_color=pc,cmap='viridis')\n", "_____no_output_____" ] ], [ [ "Cartographic analysis generally only works on large networks, since the Z-scoring of module degree requires a relatively large number of values to be computed effectively. Let's generate a large modular network to see this. \n\nTo generate such a network, we first create each module using the Barabasi-Albert model, and then we connect the modules by preferentially connecting high-degree nodes across modules.\n\n", "_____no_output_____" ] ], [ [ "nmods=3\nmodsize=100\nnedges = 20 # for B-A generator\nA_full = numpy.zeros((modsize*nmods,modsize*nmods))\npartition=numpy.zeros(A_full.shape[0])\nfor m in range(nmods):\n Gtmp=nx.barabasi_albert_graph(modsize,nedges)\n partition[m*modsize:(m+1)*modsize]=m\n A_full[m*modsize:(m+1)*modsize,m*modsize:(m+1)*modsize]=nx.adjacency_matrix(Gtmp).todense()\n", "_____no_output_____" ] ], [ [ "If we look at the adjacency matrix so far, we will see that there are no connections between modules, and plotting using spring-embedding shows three disconnected components:", "_____no_output_____" ] ], [ [ "plt.imshow(A_full)\nG_mod=nx.from_numpy_array(A_full)\nmodularity(G_mod,partition)\nplt.figure()\nnx.draw_spring(G_mod,node_size=5,alpha=.5)", "_____no_output_____" ] ], [ [ "Now let's connect the modules using a second level of preferential attachment - that is, nodes that have higher degree within their own module are also more likely to be connected to another module. 
First let's confirm that the degree distribution is indeed long-tailed as we would expect:", "_____no_output_____" ] ], [ [ "degree = numpy.array([G_mod.degree[i] for i in G_mod.nodes])\n_=plt.hist(degree,40)", "_____no_output_____" ] ], [ [ "Now we add edges connecting the high-degree nodes:", "_____no_output_____" ] ], [ [ "edgeidx=numpy.argsort(degree)[::-1]\nn_bw_edges=40\np_edge=0.6\n\n# randomly choose some of the high-degree nodes to be connector hubs\nconnectorhubs=edgeidx[:n_bw_edges][numpy.random.rand(n_bw_edges)<p_edge]\n\nfor i in itertools.combinations(connectorhubs,2):\n if not i in G_mod.edges:\n G_mod.add_edge(i[0],i[1])", "_____no_output_____" ], [ "mdzs=module_degree_zscore(G_mod, partition)\npc=participation_coefficient(G_mod, partition)\nprint(modularity(G_mod,partition))\n\nnx.draw_spring(G_mod,alpha=.7,node_color=mdzs,cmap='viridis',node_size=pc*800)\n", "0.6202972407346107\n" ] ], [ [ "Now let's create the cartographic profile for the network.", "_____no_output_____" ] ], [ [ "# put everything into a data frame\ncartography=pandas.DataFrame(numpy.hstack((pc[:,numpy.newaxis],mdzs[:,numpy.newaxis])),\n columns=['PC','MDZS'],\n index=G_mod.nodes)\ncartography\nplt.figure(figsize=(12,8))\nplt.scatter(cartography.PC,cartography.MDZS)\nax=plt.gca()\n\n# print names for certain hubs\nfor i, txt in enumerate(cartography.index):\n if cartography.MDZS[i]>4 or (cartography.MDZS[i]>2.5 and cartography.PC[i]>0.75):\n ax.annotate(txt, (cartography.PC[i], cartography.MDZS[i]))\n \n# add lines and anntation for Guimera/Amaral regions\nplt.axvline(0.75, color='r',linestyle='dotted')\nplt.axvline(0.3, color='r',linestyle='dotted')\nplt.axhline(2.5, color='r',linestyle='dotted')\nplt.xlabel('Participation coefficient')\nplt.ylabel('Module degree z-score')\nymax=9\nax.fill_between([0,0.3], [2.5,2.5],[ymax,ymax], facecolor='green', alpha=0.25)\nax.fill_between([0.3,0.75], [2.5,2.5],[ymax,ymax], facecolor='blue', alpha=0.25)\nax.fill_between([0.75,1.], [2.5,2.5],[ymax,ymax], facecolor='red', alpha=0.25)\nplt.annotate('Provincial hubs',[0.1,ymax+0.1])\nplt.annotate('Connector hubs',[0.45,ymax+0.1])\nplt.annotate('Kinless hubs',[0.825,ymax+0.1])\n\ncartography['hubtype']=0\ncartography['hubtype'][(cartography.MDZS>2)&(cartography.PC<=0.3)]=1\ncartography['hubtype'][(cartography.MDZS>2)&(cartography.PC>0.3)]=2\n", "/Users/poldrack/anaconda3/envs/py3/lib/python3.6/site-packages/ipykernel_launcher.py:30: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n/Users/poldrack/anaconda3/envs/py3/lib/python3.6/site-packages/ipykernel_launcher.py:31: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n" ], [ "plt.figure(figsize=(8,8))\npl=nx.spring_layout(G_mod)\nnodelists={}\nnx.draw_networkx_edges(G_mod,pos=pl,alpha=0.1)\ncolors=['black','blue','yellow']\nalphas=[0.3,0.9,0.9]\nfor t in range(3):\n nodelists[t]=[i for i in range(cartography.shape[0]) if cartography['hubtype'][i]==t]\n\n nx.draw_networkx_nodes(G_mod,pos=pl,node_size=[(G_mod.degree(i)/6)**2 for i in nodelists[t]],\n nodelist=nodelists[t],\n node_color=colors[t],alpha=alphas[t])\n\nplt.axis('off')\nplt.tight_layout()\n", "_____no_output_____" ] ], [ [ "### Rich clubs\n\nIn many real-world networks (including 
brains) there is a subset of high-degree nodes that are preferentially connected to one another as well, which is referred to as a *rich club* ([van den Heuvel & Sporns, 2011](http://www.jneurosci.org/content/31/44/15775)). The presence of a rich club can be quantified using the rich club coefficent $\\phi$, which is computed as follows:\n\n$$\n\\phi(k) = \\frac{2E_{>k}}{N_{>k}(N_{>k} - 1)}\n$$\n\nThis is the ratio of edges between nodes with degree greater than k to the number of possible edges between those nodes. In general we want normalize this by comparing the observed value to what one expects on the basis of a matched random network (null model):\n\n$$\n\\phi_{norm}(k) = \\frac{\\phi(k)}{\\phi_{random}(k)}\n$$\n\nWe compute this for each level of k and then examine the distribution to see whether it exceeds one. In order to assess the variability across multiple instantiations of the null model, we run it repeatedly to see the distribution of rcc values (this will take a few minutes):", "_____no_output_____" ] ], [ [ "def get_rcc(G,maxtries=10):\n\n good_rcc=False\n tries=0\n while not good_rcc:\n try:\n rcc=nx.rich_club_coefficient(G_mod,normalized=True,Q=10)\n good_rcc=True\n except ZeroDivisionError:\n tries+=1\n if tries>=maxtries:\n Exception('Too many tries!')\n # return a vector rather than a dict\n idx=numpy.sort(list(rcc.keys()))\n \n return(numpy.array([rcc[i] for i in idx]))\n\nnsims=100\nrccdata=numpy.zeros((max(degree),nsims))*numpy.nan\nfor s in range(nsims):\n tmp=get_rcc(G_mod)\n rccdata[:max(degree),s]=tmp[:max(degree)]\n\n", "_____no_output_____" ], [ "minrcc=numpy.min(rccdata,1)\np=plt.plot(rccdata)\nplt.xlabel('degree')\nplt.ylabel('rich club coefficient')\nrcc_thresh=1.25\nplt.plot([0,max(degree)],[rcc_thresh,rcc_thresh])\nmindegree=numpy.where(minrcc>rcc_thresh)[0][0]\nprint('Minimum degree with all RCC> %s:'%rcc_thresh,mindegree)\nprint('Density:',numpy.mean(degree>mindegree))", "Minimum degree with all RCC> 1.25: 50\nDensity: 0.1\n" ] ], [ [ "Plotting the RCC values across simulations shows that the RCC starts to be consistently above 1 around a degree of 40, and exceeds our arbitrary threshold of 1.25 for degrees greater than 59. Let's visualize the network highlighting those vertices:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,8))\nnx.draw_networkx_edges(G_mod,pos=pl,alpha=0.1)\n\nnx.draw_networkx_nodes(G_mod,pos=pl,node_size=[(G_mod.degree(i)/6)**2 for i in G_mod.nodes],\n node_color=(degree>mindegree),alpha=0.9)\n\nplt.axis('off')\nplt.tight_layout()\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a3f4ed09275a7d8bb634fab064aac36b856ceb6
129,444
ipynb
Jupyter Notebook
Regression/Simple Regression/Models/Simple_Regression_M1.ipynb
srijan-singh/machine-learning
2f1e0d10e38f4b513650d0a39a7b66e5ad48933a
[ "MIT" ]
null
null
null
Regression/Simple Regression/Models/Simple_Regression_M1.ipynb
srijan-singh/machine-learning
2f1e0d10e38f4b513650d0a39a7b66e5ad48933a
[ "MIT" ]
null
null
null
Regression/Simple Regression/Models/Simple_Regression_M1.ipynb
srijan-singh/machine-learning
2f1e0d10e38f4b513650d0a39a7b66e5ad48933a
[ "MIT" ]
null
null
null
137.122881
19,606
0.82482
[ [ [ "<a href=\"https://colab.research.google.com/github/srijan-singh/machine-learning/blob/main/Regression/Simple%20Regression/Models/Simple_Regression_M1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!pip install -U scikit-learn", "Requirement already up-to-date: scikit-learn in /usr/local/lib/python3.6/dist-packages (0.24.0)\nRequirement already satisfied, skipping upgrade: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (1.0.0)\nRequirement already satisfied, skipping upgrade: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (1.19.4)\nRequirement already satisfied, skipping upgrade: scipy>=0.19.1 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (1.5.4)\nRequirement already satisfied, skipping upgrade: threadpoolctl>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn) (2.1.0)\n" ], [ "import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport pylab as pl\r\nimport numpy as np\r\n%matplotlib inline", "_____no_output_____" ], [ "!wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv", "--2021-01-10 08:31:57-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv\nResolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.196\nConnecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.196|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 72629 (71K) [text/csv]\nSaving to: ‘FuelConsumption.csv’\n\nFuelConsumption.csv 100%[===================>] 70.93K --.-KB/s in 0.05s \n\n2021-01-10 08:31:58 (1.37 MB/s) - ‘FuelConsumption.csv’ saved [72629/72629]\n\n" ], [ "df = pd.read_csv(\"FuelConsumption.csv\")", "_____no_output_____" ], [ "# To print the first five row of dataset\r\ndf.head()", "_____no_output_____" ], [ "# To print the stats of the data(number of entries,mean, standrad deviation, min, 25% of data, 50% of data, 75% of data, max)\r\ndf.describe()", "_____no_output_____" ], [ "# selecting features and exploring the data\r\ncdf = df[['ENGINESIZE', 'CYLINDERS', 'FUELCONSUMPTION_COMB', 'CO2EMISSIONS']]\r\n# displaying first 5 rows\r\ncdf.head()", "_____no_output_____" ], [ "# Plotting histogram\r\ncdf.hist()\r\n# Printing the figures\r\nplt.show()", "_____no_output_____" ], [ "# Plotting the scatter graph\r\nplt.scatter(cdf.FUELCONSUMPTION_COMB, cdf.CO2EMISSIONS, color='blue')\r\n# Labeling x-axis\r\nplt.xlabel(\"Fuel Consumption Combination\")\r\n# Labeling y-axis\r\nplt.ylabel(\"Emission\")\r\nplt.show()", "_____no_output_____" ], [ "plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color=\"blue\")\r\nplt.xlabel(\"Engine Size\")\r\nplt.ylabel(\"Co2 Emission\")\r\nplt.show()", "_____no_output_____" ], [ "plt.scatter(cdf.CYLINDERS, cdf.CO2EMISSIONS, color = \"blue\")\r\nplt.xlabel(\"Cylinders\")\r\nplt.ylabel(\"CO2 Emission\")\r\nplt.show()", "_____no_output_____" ], [ "plt.scatter(cdf.ENGINESIZE, cdf.CYLINDERS, color=\"blue\")\r\nplt.xlabel(\"Engine Size\")\r\nplt.ylabel(\"Cylinder\")\r\nplt.show()", "_____no_output_____" ], [ "# Distributing the Data\r\nmsk = np.random.rand(len(df)) < 0.8\r\ntrain = cdf[msk]\r\ntest = cdf[~msk] ", "_____no_output_____" ] ], [ [ "#Creating Simple Regression Model\r\n\r\nWhere 
feature is Engine Size and label is Co2 Emission\r\nOn a linear equation, we can state that:<br>\r\n*y = mx + c* <br>\r\n*Co2 Emission = (Intercept * Engine Size) + Biased Coefficeint*", "_____no_output_____" ] ], [ [ "plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')\r\nplt.xlabel(\"Engine Size\")\r\nplt.ylabel(\"Co2 Emission\")\r\nplt.show()", "_____no_output_____" ] ], [ [ "### Modeling", "_____no_output_____" ] ], [ [ "from sklearn import linear_model\r\nregr = linear_model.LinearRegression()\r\ntrain_x = np.asanyarray(train[['ENGINESIZE']])\r\ntrain_y = np.asanyarray(train[['CO2EMISSIONS']])\r\nregr.fit(train_x, train_y)\r\n\r\nprint(\"Intercept: \", regr.intercept_,\" (m)\")\r\nprint('Biased Coefficient: ',regr.coef_,\" (c)\")", "Intercept: [125.23085025] (m)\nBiased Coefficient: [[39.36049308]] (c)\n" ] ], [ [ "As mentioned, Coefficient and Intercept in the simple linear regression, are the parameters of the fit line. Given that it is a simple linear regression, with only 2 parameters, and knowing that the parameters are the intercept and slope of the line, sklearn can estimate them directly from our data. Notice that all of the data must be available to traverse and calculate the parameters", "_____no_output_____" ] ], [ [ "# Plotting the Graph\r\nplt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color=\"blue\")\r\nplt.plot(train_x, regr.coef_[0][0]*train_x + regr.intercept_[0], '-r')\r\nplt.xlabel(\"Engine size\")\r\nplt.ylabel(\"Emission\")", "_____no_output_____" ] ], [ [ "###Evaluation\r\nWe compare the actual values and predicted values to calculate the accuracy of a regression model. Evaluation metrics provide a key role in the development of a model, as it provides insight to areas that require improvement.\r\n\r\nThere are different model evaluation metrics, lets use MSE here to calculate the accuracy of our model based on the test set:\r\n\r\n- Mean absolute error: It is the mean of the absolute value of the errors. This is the easiest of the metrics to understand since it’s just average error.\r\n- Mean Squared Error (MSE): Mean Squared Error (MSE) is the mean of the squared error. It’s more popular than Mean absolute error because the focus is geared more towards large errors. This is due to the squared term exponentially increasing larger errors in comparison to smaller ones.\r\n- Root Mean Squared Error (RMSE).\r\n- R-squared is not error, but is a popular metric for accuracy of your model. It represents how close the data are to the fitted regression line. The higher the R-squared, the better the model fits your data. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).", "_____no_output_____" ] ], [ [ "from sklearn.metrics import r2_score\r\n\r\ntest_x = np.asanyarray(test[['ENGINESIZE']])\r\ntest_y = np.asanyarray(test[[\"CO2EMISSIONS\"]])\r\ntest_y_ = regr.predict(test_x)\r\n\r\nprint(\"Mean absolute error: %.2f\"% np.mean(np.absolute(test_y_ - test_y)))\r\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((test_y_ - test_y)**2))\r\nprint(\"R2-score: %.2f\" % r2_score(test_y_, test_y))", "Mean absolute error: 26.52\nResidual sum of squares (MSE): 1159.83\nR2-score: 0.64\n" ] ], [ [ "#User Interface\r\n\r\n", "_____no_output_____" ] ], [ [ "users_engn_siz = np.asanyarray([[float(input(\"Engine Size: \"))]])\r\nprediction = regr.predict(users_engn_siz)\r\nprint(\"Co2 Emission:\",prediction[0][0])", "Engine Size: 4.5\nCo2 Emission: 302.35306913272916\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a3f65bde6235b41b9d5be82aea176452dab873b
31,922
ipynb
Jupyter Notebook
Tasks/Task1/Part2/.ipynb_checkpoints/Spain-checkpoint.ipynb
JJorgeDSIC/VDC
51afc21588b629be2f844a6fb72524e390365c77
[ "Apache-2.0" ]
null
null
null
Tasks/Task1/Part2/.ipynb_checkpoints/Spain-checkpoint.ipynb
JJorgeDSIC/VDC
51afc21588b629be2f844a6fb72524e390365c77
[ "Apache-2.0" ]
null
null
null
Tasks/Task1/Part2/.ipynb_checkpoints/Spain-checkpoint.ipynb
JJorgeDSIC/VDC
51afc21588b629be2f844a6fb72524e390365c77
[ "Apache-2.0" ]
null
null
null
57.105546
5,460
0.530449
[ [ [ "# Preprocessing data", "_____no_output_____" ] ], [ [ "import json\nimport numpy as np\nimport csv \nimport sys\n\ndictCountries={\n \"Alemania\":\"Germany\", \n \"Austria\":\"Austria\",\n \"Bélgica\":\"Belgium\",\n \"Bulgaria\":\"Bulgaria\",\n \"Chipre\":\"Cyprus\",\n \"Croacia\":\"Croatia\",\n \"Dinamarca\":\"Denmark\",\n \"Eslovenia\":\"Slovenia\",\n \"Estonia\":\"Estonia\",\n \"Finlandia\":\"Finland\",\n \"Francia\":\"France\",\n \"Grecia\":\"Greece\",\n \"Holanda\":\"Holland\",\n \"Hungría\":\"Hungary\",\n \"Irlanda\":\"Ireland\",\n \"Italia\":\"Italy\",\n \"Letonia\":\"Latvia\",\n \"Lituania\":\"Lithuania\",\n \"Luxemburgo\":\"Luxembourg\",\n \"Malta\":\"Malta\",\n \"Polonia\":\"Poland\",\n \"Portugal\":\"Portugal\",\n \"Reino Unido\":\"United Kingdom\",\n \"República Checa\":\"Czech Rep.\",\n \"República Eslovaca\":\"Slovakia\",\n \"Rumanía\":\"Romania\",\n \"Suecia\":\"Sweden\",\n \"Federación de Rusia\":\"Russia\",\n \"Noruega\":\"Norway\",\n \"Serbia\":\"Serbia\",\n \"Suiza\":\"Switzerland\",\n \"Ucrania\":\"Ukraine\"}\n\n\n\ninvdictCountries = {v: k for k, v in dictCountries.items()}\n\n#Data from: Instuto Nacional de Estadística www.ine.es\nf = open(\"./sources/info.txt\", \"r\")\nreader = csv.reader(f)\n\ndic = {}\nfor row in reader:\n name=row[0]\n row[1]=row[1].replace(\".\",\"\")\n row[2]=row[2].replace(\".\",\"\")\n if row[1].isnumeric():\n ret2013=int(row[1])\n else:\n ret2013=0\n if row[2].isnumeric():\n ret2016=int(row[2])\n else:\n ret2016=0 \n if name in dictCountries:\n entry = {}\n entry['r2k13']= ret2013\n entry['r2k16']= ret2016\n dic[dictCountries[name]]=entry\n \nf.close()\n\nf = open(\"./sources/info.json\", \"w\")\nf.write(json.dumps(dic))\nf.close()", "_____no_output_____" ] ], [ [ "# Creating the map", "_____no_output_____" ] ], [ [ "import geopandas as gpd\nimport json\nfrom collections import OrderedDict\n\nfrom shapely.geometry import Polygon, mapping\nfrom bokeh.models import GeoJSONDataSource, LinearColorMapper, LogColorMapper,ColorBar,LogTicker, AdaptiveTicker\nfrom bokeh.io import show\nfrom bokeh.plotting import figure, output_file\nimport bokeh.io\nbokeh.io.output_notebook()\n\nfrom bokeh.models import (\n ColumnDataSource,\n HoverTool,\n LogColorMapper\n)\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,\n Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,\n PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,\n RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,\n YlGnBu9, YlOrBr9, YlOrRd9)\nfrom bokeh.plotting import figure\nstandard_palettes = OrderedDict([(\"Blues9\", Blues9), (\"BrBG9\", BrBG9),\n (\"BuGn9\", BuGn9), (\"BuPu9\", BuPu9),\n (\"GnBu9\", GnBu9), (\"Greens9\", Greens9),\n (\"Greys9\", Greys9), (\"OrRd9\", OrRd9),\n (\"Oranges9\", Oranges9), (\"PRGn9\", PRGn9),\n (\"PiYG9\", PiYG9), (\"PuBu9\", PuBu9),\n (\"PuBuGn9\", PuBuGn9), (\"PuOr9\", PuOr9),\n (\"PuRd9\", PuRd9), (\"Purples9\", Purples9),\n (\"RdBu9\", RdBu9), (\"RdGy9\", RdGy9),\n (\"RdPu9\", RdPu9), (\"RdYlBu9\", RdYlBu9),\n (\"RdYlGn9\", RdYlGn9), (\"Reds9\", Reds9),\n (\"Spectral9\", Spectral9), (\"YlGn9\", YlGn9),\n (\"YlGnBu9\", YlGnBu9), (\"YlOrBr9\", YlOrBr9),\n (\"YlOrRd9\", YlOrRd9)])\n\n\n\n#obtain countries shapes\nworld = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n#print(world)\neurope = (world.loc[world['name'] == 'Spain'])\n\ncities = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n\nprint(cities)\n\ncities.plot()\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a3f696c0ab1eda085020f621a27a16cd3f0677c
3,497
ipynb
Jupyter Notebook
hausaufgabe07.ipynb
LustiGMANN/Python.0118
20f2f270b3ff2b8791b39893211eecb70746b7cd
[ "MIT" ]
1
2021-03-02T06:40:49.000Z
2021-03-02T06:40:49.000Z
hausaufgabe07.ipynb
LustiGMANN/Python.0118
20f2f270b3ff2b8791b39893211eecb70746b7cd
[ "MIT" ]
null
null
null
hausaufgabe07.ipynb
LustiGMANN/Python.0118
20f2f270b3ff2b8791b39893211eecb70746b7cd
[ "MIT" ]
null
null
null
17.39801
49
0.432085
[ [ [ "import numpy as np", "_____no_output_____" ], [ "A = np.array([[10, 8], [3, 5]])\nA", "_____no_output_____" ], [ "np.linalg.inv(A)", "_____no_output_____" ], [ "np.linalg.inv(A)@A", "_____no_output_____" ], [ "c , d = np.linalg.eig(A)", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "d", "_____no_output_____" ], [ "np.linalg.eigvals(A)", "_____no_output_____" ], [ "z = np.linalg.svd(A)\nz", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a3f729e7e159ae5250692e0a5b66fb51acf4a2d
25,901
ipynb
Jupyter Notebook
lectures/unit2/lecture4.ipynb
phuijse/UACH-INFO183
0e1b6bef0bd80cda2753bd11e62016268f2de638
[ "MIT" ]
11
2018-08-27T23:53:15.000Z
2021-08-16T23:31:05.000Z
lectures/unit2/lecture4.ipynb
phuijse/UACH-INFO183
0e1b6bef0bd80cda2753bd11e62016268f2de638
[ "MIT" ]
null
null
null
lectures/unit2/lecture4.ipynb
phuijse/UACH-INFO183
0e1b6bef0bd80cda2753bd11e62016268f2de638
[ "MIT" ]
10
2019-01-04T17:43:55.000Z
2021-09-07T16:07:18.000Z
30.293567
303
0.514459
[ [ [ "import holoviews as hv\nhv.extension('bokeh')\nhv.opts.defaults(hv.opts.Curve(width=500), \n hv.opts.Image(width=500, colorbar=True, cmap='Viridis'))", "_____no_output_____" ], [ "import numpy as np\nimport scipy.signal\nimport scipy.fft\nfrom IPython.display import Audio", "_____no_output_____" ] ], [ [ "# Diseño de sistemas y filtros IIR", "_____no_output_____" ], [ "Un filtro FIR de buena calidad puede requerir una gran cantidad de coeficientes\n\nEs posible implementar filtros más eficientes usando **recursividad**. Esta es la base de los filtros de respuesta al impulso infinita o IIR que veremos en esta lección\n\n", "_____no_output_____" ], [ "## Definición de un sistema IIR \n\nGeneralizando el sistema FIR para incluir versiones pasadas de la salida y asumiendo $a[0] = 1$ llegamos a \n\n$$\n\\begin{align}\ny[n] &= b[0] x[n] + b[1] x[n-1] + b[2] x[n-2] + \\ldots + b[L] x[n-L] \\nonumber \\\\\n& - a[1] y[n-1] - a[2] y[n-2] - \\ldots - a[M] y[n-M] \\nonumber \\\\\n&= \\sum_{l=0}^{L} b[l] x[n-l] - \\sum_{m=1}^{M} a[m] y[n-m] \\nonumber \\\\\n\\sum_{m=0}^{M} a[m] y[n-m] &= \\sum_{l=0}^{L} b[l] x[n-l] \\nonumber \\\\\n(a * y)[n] &= (b * x)[n], \\nonumber\n\\end{align}\n$$\n\nes decir dos convoluciones discretas que definen una **ecuación de diferencias**\n\nEste tipo de sistema se conoce como \n- sistema *infinite impulse response* (IIR)\n- sistema *auto-regresive moving average* (ARMA)\n - autoregresivo de orden M: incluye valores pasados de la salida\n - media movil de orden L+1: pondera el valor presente y pasados de la entrada\n\nPodemos ver el sistema IIR como una generalización del sistema FIR. El caso particular del sistema FIR se recupera si\n\n$a[m] = 0$ para $m=[1, \\ldots, M]$", "_____no_output_____" ], [ "### Respuesta en frecuencia del sistema IIR\n\nAplicando la transformada de Fourier convertimos las convoluciones en multiplicaciones y encontramos la respuesta en frecuencia como\n\n$$\n\\begin{align}\n\\text{DFT}_N[(a * y)[n]] &= \\text{DFT}_N[(b * x)[n]] \\nonumber \\\\\nA[k] Y[k] &= B[k] X[k] \\nonumber \\\\\nH[k] = \\frac{Y[k]}{X[k]} &= \\frac{B[k]}{A[k]} = \\frac{ \\sum_{l=0}^L b[l]e^{-j \\frac{2\\pi}{N} nl} }{ \\sum_{m=0}^M a[m]e^{-j \\frac{2\\pi}{N} mk}} \\nonumber\n\\end{align}\n$$\n\nque existe siempre que $A[k] \\neq 0$. \n\nLa respuesta en frecuencia también suele expresarse como\n\n$$\nH[k] = K \\frac{ \\prod_{l=1}^L (e^{j \\frac{2\\pi}{N} k}- \\beta[l]) }{ \\prod_{m=1}^M (e^{j \\frac{2\\pi}{N} k}- \\alpha[m])} \n$$\n\ndonde \n\n- $K$ se llama **ganancia**\n- las raices del polinomio del numerador $\\alpha$ se llaman conjuntamente **ceros** \n- las raices del polinomio del denominador $\\beta$ se llaman conjuntamente **polos**", "_____no_output_____" ], [ "### Ejemplo de respuesta al impulso de un sistema IIR\n\nConsideremos el siguiente sistema IIR \n\n$$\n\\begin{align}\ny[n] &= (1-\\gamma) x[n] + \\gamma y[n-1] \\nonumber \\\\\ny[n] - \\gamma y[n-1] &= (1-\\gamma) x[n] \\nonumber\n\\end{align}\n$$\n\nLos coeficientes del sistema son\n\n$a[0] = 1$, $a[1] = -\\gamma$ y $b[0] = (1-\\gamma)$\n\nEs decir que es AR de orden 1 y MA de orden 1\n\n¿Cúal es su respuesta al impulso? Asumiendo $y[n]=0, n<0$, tenemos que\n\n$$\n\\begin{matrix}\nn & \\delta[n] & y[n] \\\\\n-2 & 0 & 0 \\\\\n-1 & 0 & 0 \\\\\n0 & 1 & (1-\\gamma) \\\\\n1 & 0 & \\gamma(1-\\gamma) \\\\\n2 & 0 & \\gamma^2(1-\\gamma) \\\\\n3 & 0 & \\gamma^3(1-\\gamma) \\\\\n4 & 0 & \\gamma^4(1-\\gamma) \\\\\n\\end{matrix}\n$$\n\n¿Cómo cambia la respuesta al impulso con distintos valores de $\\gamma$? 
¿Qué pasa si $\\gamma \\geq 1$?\n\nRespondamos estas preguntas visualizando la respuesta al impulso de este sistema con la función `scipy.signal.dimpulse`", "_____no_output_____" ] ], [ [ "# Valores de gamma que probaremos:\ngamma = [-1.5, -1, -0.5, 0.5, 1., 1.5]\n\np = []\nfor g in gamma:\n t, y = scipy.signal.dimpulse(([1-g, 0], [1,-g], 1), x0=0, n=30)\n p.append(hv.Curve((t, y[0][:, 0]), label=f\"gamma={g}\"))\n \nhv.Layout(p).cols(3).opts(hv.opts.Curve(width=250, height=200, axiswise=True))", "_____no_output_____" ] ], [ [ "De las figuras podemos ver que:\n\n- Para $\\gamma < 0$ (primera fila) los coeficientes del sistema son alternantes en signo\n- Para $|\\gamma| < 1$ los coeficientes del sistema tienden a cero\n- Para $|\\gamma| > 1$ los coeficientes del sistema divergen y tienen a infinito\n\n:::{warning}\n\nA diferencia de un sistema FIR, el sistema IIR puede tener configuraciones inestables en que los coeficientes crecen o decrecen infinitamente\n\n:::\n\nPor otro lado consideremos el sistema anterior y asumamos que $|\\gamma|<1$, desenrollando tenemos que \n\n$$\n\\begin{align}\ny[0] &= (1-\\gamma) x[0] \\nonumber \\\\\ny[1] &= (1-\\gamma) (x[1] + \\gamma x[0]) \\nonumber \\\\\ny[2] &= (1-\\gamma) (x[2] + \\gamma x[1] + \\gamma^2 x[0]) \\nonumber \\\\\ny[3] &= (1-\\gamma) (x[3] + \\gamma x[2] + \\gamma^2 x[1] + \\gamma^3 x[0]) \\nonumber \\\\\ny[4] &= (1-\\gamma) (x[4] + \\gamma x[3] + \\gamma^2 x[2] + \\gamma^3 x[1] + \\gamma^4 x[0]) \\nonumber \\\\\ny[5] &= \\ldots \\nonumber \n\\end{align}\n$$\n\n:::{note}\n\nCon un sistema IIR de pocos coeficientes podemos representar un sistema FIR considerablemente más grande\n\n:::\n\nEn el ejemplo anterior, si escogemos $\\gamma$ tal que $\\gamma^{20 }\\approx 0$ entonces aproximamos un sistema FIR de orden 20 con tan sólo 3 coeficientes", "_____no_output_____" ], [ "### Ejemplo de respuesta en frecuencia de un sistema IIR\n\nPara el sistema del ejemplo anterior su respuesta en frecuencia es\n\n$$\n\\begin{align}\nY[k] &= (1-\\gamma) X[k] + \\gamma Y[k] e^{-j \\frac{2\\pi}{N} k} \\nonumber \\\\\nH[k] = \\frac{Y[k]}{X[k]} &= \\frac{1-\\gamma}{1 - \\gamma e^{-j \\frac{2\\pi}{N} k} } \\nonumber \n\\end{align}\n$$\n\nque en notación de polos y ceros se escribe como\n\n$$\nH[k] = (1-\\gamma)\\frac{e^{j \\frac{2\\pi}{N} k} - 0}{e^{j \\frac{2\\pi}{N} k} - \\gamma }\n$$\n\nes decir que tiene un cero en $0$, un polo en $\\gamma$ y una ganancia de $(1-\\gamma)$\n\nPara entender mejor este sistema estudiemos la magnitud de $|H[k]|$ para $\\gamma < 1$\n\n$$\n\\begin{align}\n| H[k]| &= \\frac{|1-\\gamma|}{|1 - \\gamma e^{-j \\frac{2\\pi}{N} k}|} \\nonumber \\\\\n&= \\frac{1-\\gamma}{\\sqrt{1 - 2\\gamma \\cos(\\frac{2\\pi}{N} k) + \\gamma^2}} \\nonumber\n\\end{align}\n$$\n\n¿Cómo se ve $|H[k]|$? 
¿Qué función cumple este sistema?", "_____no_output_____" ] ], [ [ "k = np.arange(-24, 25)/50\nHk = lambda gamma, k : (1-gamma)/np.sqrt(1 - 2*gamma*np.cos(2.0*np.pi*k) + gamma**2)", "_____no_output_____" ], [ "p = []\nfor gamma in [0.25, 0.5, 0.75]:\n p.append(hv.Curve((k, Hk(gamma, k)), 'Frecuencia', 'Respuesta', label=f'gamma={gamma}'))\n \nhv.Overlay(p)", "_____no_output_____" ] ], [ [ ":::{note}\n\nEste sistema atenua las frecuencias altas, es decir que actua como un filtro pasa bajos\n\n:::", "_____no_output_____" ], [ "## Diseño de filtros IIR simples\n\nLos filtros IIR más simples son los de un polo y un cero, es decir filtros de primer orden\n\n$$\nH[k] = \\frac{b[0] + b[1] e^{-j \\frac{2\\pi}{N} k}}{1 + a[1] e^{-j \\frac{2\\pi}{N} k}} = K\\frac{e^{j \\frac{2\\pi}{N} k} - \\beta}{e^{j \\frac{2\\pi}{N} k} - \\alpha } \n$$\n\ndonde podemos reconocer \n\n- $b[0]=K$\n- $\\beta = - b[1] \\cdot K$\n- $\\alpha=-a[1]$\n\nDefinimos la frecuencia de corte $f_c$ como aquella frecuencia en la que el filtro alcanza una atenuación de 0.7 (-3 dB). Haciendo la equivalencia con el ejemplo anterior tenemos que $\\gamma = e^{-2\\pi f_c}$", "_____no_output_____" ], [ "### Receta para un filtro pasa bajo IIR con frecuencia de corte $f_c$\n\nAsignamos\n\n- $b[0] = 1 - e^{-2\\pi f_c}$\n- $b[1] = 0$\n- $a[1] = -e^{-2\\pi f_c}$\n\nLo que resulta en la siguiente respuesta en frecuencia\n\n$$\nH[k] = \\frac{1-e^{-2\\pi f_c}}{1 - e^{-2\\pi f_c} e^{-j \\frac{2\\pi}{N} k}} = (1-e^{-2\\pi f_c}) \\frac{(e^{j \\frac{2\\pi}{N} k}- 0)}{(e^{j \\frac{2\\pi}{N} k} - e^{-2\\pi f_c} )}\n$$\n\nEs decir un cero en $0$, un polo en $e^{-2\\pi f_c}$ y ganancia $1-e^{-2\\pi f_c}$\n\n### Receta para un filtro pasa alto IIR con frecuencia de corte $f_c$\n\nAsignamos\n\n- $b[0] = (1 + e^{-2\\pi f_c})/2$\n- $b[1] = -(1 + e^{-2\\pi f_c})/2$\n- $a[1] = -e^{-2\\pi f_c}$\n\nLo que resulta en la siguiente respuesta en frecuencia\n\n$$\nH[k] = \\frac{1+e^{-2\\pi f_c}}{2} \\frac{(e^{j \\frac{2\\pi}{N} k} - 1)}{(e^{j \\frac{2\\pi}{N} k} - e^{-2\\pi f_c})}\n$$\n\nEs decir un cero en $1$, un polo en $e^{-2\\pi f_c}$ y ganancia $\\frac{1+e^{-2\\pi f_c}}{2}$\n\n", "_____no_output_____" ], [ "### Aplicar un filtro a una señal con scipy\n\nPara filtrar una señal unidimensional con un filtro IIR (sin variar la fase de la señal) podemos utilizar la función\n\n\n```python\n scipy.signal.filtfilt(b, # Coeficientes del numerador\n a, # Coeficientes del denominador\n x, # Señal a filtrar\n ...\n )\n```\n\nLos siguientes ejemplos muestran un señal de tipo pulso rectangular filtrada con sistemas IIR de primer orden pasa bajo y pasa-alto diseñados con las recetas mostradas anteriormente", "_____no_output_____" ] ], [ [ "n = np.arange(0, 500)\nx = 0.5 + 0.5*scipy.signal.square((n)/(2.*np.pi*5), duty=0.3)", "_____no_output_____" ], [ "def iir_low_pass(signal, fc):\n gamma = np.exp(-2*np.pi*(fc))\n b, a = [(1-gamma), 0], [1, -gamma] \n return scipy.signal.filtfilt(b, a, signal)\n\ny = {}\nfor fc in [0.05, 0.02, 0.01]:\n y[fc] = iir_low_pass(x, fc)", "_____no_output_____" ], [ "px = hv.Curve((n, x))\npy = []\nfor fc, y_ in y.items():\n py.append(hv.Curve((n, y_), label=f'fc={fc}'))\n\nhv.Layout([px, hv.Overlay(py)]).cols(1).opts(hv.opts.Curve(height=200))", "_____no_output_____" ], [ "def iir_high_pass(signal, fc):\n gamma = np.exp(-2*np.pi*(fc))\n b, a = [(1+gamma)/2, -(1+gamma)/2], [1, -gamma]\n return scipy.signal.filtfilt(b, a, signal)\n\ny = {}\nfor fc in [0.01, 0.02, 0.05]:\n y[fc] = iir_high_pass(x, fc)", "_____no_output_____" ], [ "px = hv.Curve((n, 
x))\npy = []\nfor fc, y_ in y.items():\n py.append(hv.Curve((n, y_), label=f'fc={fc}'))\n\nhv.Layout([px, hv.Overlay(py)]).cols(1).opts(hv.opts.Curve(height=200))", "_____no_output_____" ] ], [ [ ":::{note} \n\nEl filtro pasa-bajos suaviza los cambios de los pulsos rectangulares. El filtro pasa-altos elimina las zonas constantes y resalta los cambios de la señal.\n\n:::", "_____no_output_____" ], [ "## Diseño de filtros IIR de segundo orden\n\nLos filtros IIR de segundo orden o **biquad** tienen dos polos y dos ceros.\n\nSu respuesta en frecuencia es\n\n$$\nH[k] = \\frac{b[0] + b[1] W_N^k + b[2] W_N^{2k}}{1 + a[1] W_N^k + a[2] W_N^{2k}} = K \\frac{(W_N^{-k} - \\beta_1) (W_N^{-k} - \\beta_2)}{(W_N^{-k} - \\alpha_1)(W_N^{-k} - \\alpha_2)},\n$$\n\ndonde $W_N = e^{-j \\frac{2 \\pi}{N}}$ y la relación entreo coeficientes y polos/ceros es: \n\n$$\nb[0] = K, \\quad b[1] = -K (\\beta_1 + \\beta_2), \\quad b[2]= K \\beta_1\\beta_2\n$$\n\n$$\na[1] = - (\\alpha_1 + \\alpha_2), \\quad a[2]=\\alpha_1 \\alpha_2\n$$\n\n\nCon arquitecturas de segundo orden se pueden crear filtros pasabanda y rechaza banda\n", "_____no_output_____" ], [ "## Diseño de filtros IIR de orden mayor\n\nPara crear los coeficientes de filtro IIR de orden mayor podemos usar la función\n\n```python\nscipy.signal.iirfilter(N, # Orden del filtro\n Wn, # Frecuencias de corte (normalizadas en [0,1])\n fs, # Frecuencia de muestreo\n btype='bandpass', # Tipo de filtro: 'bandpass', 'lowpass', 'highpass', 'bandstop'\n ftype='butter', # Familia del filtro: 'butter', 'ellip', 'cheby1', 'cheby2', 'bessel'\n output='ba', # Retornar coeficientes\n ...\n )\n```\n\nEl filtro Butterworth es óptimo en el sentido de tener la banda de paso lo más plana posible. \n\nOtros filtros se diseñaron con otras consideraciones. 
\n\nLos filtros IIR digitales están basados en los filtros IIR analógicos.\n\nObserve como al aumentar el orden el filtro pasabajo IIR comienza a cortar de forma más abrupta", "_____no_output_____" ] ], [ [ "Hk = {}\nfor order in [1, 2, 5, 20]:\n b, a = scipy.signal.iirfilter(N=order, Wn=0.2, fs=1,\n ftype='butter', btype='lowpass', output='ba')\n freq, response = scipy.signal.freqz(b, a, fs=1)\n Hk[order] = np.abs(response)", "_____no_output_____" ], [ "p = []\nfor order, response in Hk.items():\n p.append(hv.Curve((freq, response), 'Frecuencia', 'Respuesta', label=f'orden={order}'))\nhv.Overlay(p)", "_____no_output_____" ] ], [ [ "## Comparación de la respuesta en frecuencia de filtros FIR e IIR del orden equivalente\n\nComparemos la respuesta en frecuencia de un filtro IIR y otro FIR ambos pasa-bajo con 20 coeficientes\n", "_____no_output_____" ] ], [ [ "Fs = 1\nfc = 0.25\nh = scipy.signal.firwin(numtaps=20, cutoff=fc, pass_zero=True, window='hann', fs=Fs)\nb, a = scipy.signal.iirfilter(N=9, Wn=fc, fs=Fs, ftype='butter', btype='lowpass')\ndisplay(len(h), len(b)+len(a))\n\nfreq_fir, response_fir = scipy.signal.freqz(h, 1, fs=Fs)\nfreq_iir, response_iir = scipy.signal.freqz(b, a, fs=Fs)", "_____no_output_____" ], [ "p1 = hv.Curve((freq_fir, np.abs(response_fir)), 'Frecuencia', 'Respuesta', label='FIR')\np2 = hv.Curve((freq_iir, np.abs(response_iir)), 'Frecuencia', 'Respuesta', label='IIR')\nhv.Overlay([p1, p2])*hv.VLine(fc).opts(color='k', alpha=0.5)", "_____no_output_____" ] ], [ [ "La linea negra marca la ubicación de la frecuencia de corte\n\n:::{note}\n \nEl filtro IIR es mucho más abrupto, es decir filtra mejor, que el filtro FIR equivalente\n\n:::\n\nUna desventaja del filtro IIR es que por definición introduce una desfase no constante en la señal de salida", "_____no_output_____" ] ], [ [ "freq_fir, delay_fir = scipy.signal.group_delay(system=(h, 1), fs=Fs)\nfreq_iir, delay_iir = scipy.signal.group_delay(system=(b, a), fs=Fs)", "_____no_output_____" ], [ "p1 = hv.Curve((freq_fir, delay_fir), 'Frecuencia', 'Desfase', label='FIR')\np2 = hv.Curve((freq_iir, delay_iir), 'Frecuencia', 'Desfase', label='IIR')\nhv.Overlay([p1, p2])*hv.VLine(fc).opts(color='k', alpha=0.5)", "_____no_output_____" ] ], [ [ "¿Cómo se ve una señal filtrada donde se preserva la fase versus una donde no se preserva la fase?\n\nConsideremos la señal rectangular anterior y apliquemos un filtro pasa-bajo IIR de orden 1\n\nEsta vez compararemos el filtro con la función `scipy.signal.lfilter` y la función `scipy.signal.filtfilt`. La primera no preserva la fase mientras que la segunda si lo hace", "_____no_output_____" ] ], [ [ "Fs = 1\nfc = 0.01\nn = np.arange(0, 500)\nx = 0.5 + 0.5*scipy.signal.square((n)/(2.*np.pi*5), duty=0.3)\n\nb, a = scipy.signal.iirfilter(N=1, Wn=fc, fs=Fs, ftype='butter', btype='lowpass')\n# No se preserva la fase\ny_lfilter = scipy.signal.lfilter(b, a, x)\n# Se preserva la fase\ny_filtfilt = scipy.signal.filtfilt(b, a, x)", "_____no_output_____" ], [ "px = hv.Curve((n, x), 'Tiempo', 'Entrada')\npy = []\npy.append(hv.Curve((n, y_filtfilt), 'Tiempo', 'Salida', label=f'Fase constante'))\npy.append(hv.Curve((n, y_lfilter), 'Tiempo', 'Salida', label=f'Fase no constante'))\n\nhv.Layout([px, hv.Overlay(py)]).cols(1).opts(hv.opts.Curve(height=200))", "_____no_output_____" ] ], [ [ ":::{note}\n \nEn el caso donde no se preserva la fase podemos notar que la señal de salida está desplazada con respecto a la original. 
Además los cambios tienen una transición asimétrica \n\n:::\n\nLa función `scipy.signal.filtfilt` \"arregla\" el problema del desfase filtrando la señal dos veces. La primera vez se filtra hacia adelante en el tiempo y la segunda vez hacia atrás. Por ende no se puede aplicar en un escenario de tipo *streaming* donde los datos van llegando de forma causal.\n\nEn una aplicación causal donde se necesite preservar la fase debemos usar un filtro FIR.", "_____no_output_____" ], [ "## Apéndice: Efectos de audio con filtros IIR\n\n\nEl siguiente ejemplo muestra como implementar el conocido filtro <a href=\"https://en.wikipedia.org/wiki/Wah-wah_(music)\">Wah-wah</a> usando un sistema IIR\n\nEste es un filtro pasabanda modulado con ancho de pasada fijo $f_b$ [Hz] y una frecuencia central variable $f_c$ [Hz], donde La frecuencia central se modula con una onda lenta\n\n\nSe modela como el siguiente sistema **IIR**\n\n$$\nH[k] = \\frac{(1+c)W_N^{2k} -(1+c) }{W_N^{2k} + d(1-c)W_N^k -c}\n$$\n\ndonde \n\n$$\nd=-\\cos(2\\pi f_c/f_s)\n$$ \n\ny \n\n$$\nc = \\frac{\\tan(\\pi f_b/f_s) -1}{\\tan(2\\pi f_b /f_s)+1}\n$$\n\nVeamos como modifica este filtro una señal de audio", "_____no_output_____" ] ], [ [ "import librosa\ndata, fs = librosa.load(\"../../data/DPSAU.ogg\")\nAudio(data, rate=fs)", "_____no_output_____" ], [ "data_wah = []\nzi = np.zeros(shape=(2,))\n# Parámetros fijos del filtro\nfb, Nw = 200, 5 \nc = (np.tan(np.pi*fb/fs) - 1.)/(np.tan(2*np.pi*fb/fs) +1)\n# Filtramos una ventana de la señal moviendo lentamente fc\nfor k in range(len(data)//Nw):\n # Cálculo de la frecuencia central\n fc = 500 + 2000*(np.cos(2.0*np.pi*k*30./fs) +1)/2\n d = -np.cos(2*np.pi*fc/fs)\n # Coeficientes del filtro\n b, a = [(1+c), 0, -(1+c)], [1, d*(1-c), -c]\n # Filtramos, usando el filtrado anterior como borde (zi)\n data2, zi = scipy.signal.lfilter(b, a, data[k*Nw:(k+1)*Nw], zi=zi)\n # Guardamos\n data_wah.append(data2)", "_____no_output_____" ], [ "Audio(np.hstack(data_wah), rate=int(fs))", "_____no_output_____" ] ], [ [ "Si quieres profundizar en el tema de los filtros IIR aplicados a efectos de audio recomiendo: https://www.ee.columbia.edu/~ronw/adst-spring2010/lectures/lecture2.pdf\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
4a3f7e2ab99a608036fe9d7f48ff8eeccece4ac4
26,301
ipynb
Jupyter Notebook
assignment02.ipynb
JAyalaU/Assignment03
c1e13e06b011a853fab54b9a31131c3afd12a3be
[ "MIT" ]
null
null
null
assignment02.ipynb
JAyalaU/Assignment03
c1e13e06b011a853fab54b9a31131c3afd12a3be
[ "MIT" ]
null
null
null
assignment02.ipynb
JAyalaU/Assignment03
c1e13e06b011a853fab54b9a31131c3afd12a3be
[ "MIT" ]
null
null
null
31.841404
140
0.372647
[ [ [ "# Assignment 2. Programming Intelligent Agents\nMTY - A01152534 - Jorge Antonio Ayala Urbina \nMTY - Datos Ale \nMTY - A01037093 - Miguel Angel Cruz Gomez ", "_____no_output_____" ] ], [ [ "from agents import *\nimport random\n\n# Create things\n\n# Treasure1 thing\nclass T(Thing):\n pass\n\n# Treasure2 thing\nclass t(Thing):\n pass\n\n#Reusable tool thing\nclass H(Thing):\n pass\n\n#Disposable tool thing\nclass h(Thing):\n pass\n\n#Wall thing\nclass w(Thing):\n pass\n\n#In this enviroment, the agent can see it all\nclass IslandDay(Environment):\n \n #Flag to be activated when there are no more treasures or no treasures that can be gathered\n #agent_no_goals = False\n\n #As the environment if fully observable, the percept should contain everything found in the environment\n def percept(self, agent):\n in_existence = self.things\n in_posession = agent.holding\n at_position = self.list_things_at(agent.location)\n perception = [in_existence, in_posession, at_position]\n \n \n def rowgenerator(self, rownumber):\n rnum = rownumber\n chars = [rnum]\n \n for cell in range(0, 6):\n if len(self.list_things_at((rownumber, cell), tclass = Thing)) != 0:\n for thing in self.list_things_at((rownumber, cell), tclass = Thing):\n if (isinstance(thing,Agent) and len(self.list_things_at((rownumber, cell), tclass = Thing)) == 1):\n chars.append('-')\n break\n elif not isinstance(thing, Agent):\n if isinstance(thing, T):\n chars.append('T')\n break\n elif isinstance(thing, t):\n chars.append('t')\n break\n elif isinstance(thing, H):\n chars.append('H')\n break\n elif isinstance(thing, h):\n chars.append('h')\n break\n elif isinstance(thing, w):\n chars.append('X')\n break\n else:\n chars.append('-')\n \n chars.append(rnum)\n print('{} {} {} {} {} {} {} {}'.format(chars[0], chars[1], chars[2], chars[3], chars[4], chars[5], chars[6], chars[7]))\n\n\n \n print(\"\\ 0 1 2 3 4 5 /\")\n rg1 = rowgenerator(self, 0)\n rg2 = rowgenerator(self, 1)\n rg3 = rowgenerator(self, 2)\n rg4 = rowgenerator(self, 3)\n rg5 = rowgenerator(self, 4)\n rg6 = rowgenerator(self, 5)\n print(\"/ 0 1 2 3 4 5 \\\\\")\n return perception\n \n \n \n def execute_action(self, agent, action):\n\n if action == \"Move\":\n location = agent.location\n possMoves = [location]\n \n moveUp = (location[0],location[1]+1)\n moveDown = (location[0],location[1]-1)\n moveRight = (location[0]+1,location[1])\n moveLeft = (location[0]-1,location[1])\n \n if (moveUp[1] < 6 and len(self.list_things_at(moveUp, tclass=w)) == 0):\n possMoves.append(moveUp)\n if (moveDown[1] >= 0 and len(self.list_things_at(moveDown, tclass=w)) == 0):\n possMoves.append(moveDown)\n if (moveRight[0] < 6 and len(self.list_things_at(moveRight, tclass=w)) == 0):\n possMoves.append(moveRight)\n if (moveLeft[0] >= 0 and len(self.list_things_at(moveLeft, tclass=w)) == 0):\n possMoves.append(moveLeft)\n \n direction = possMoves.index(random.choice(possMoves))\n agent.move(possMoves[direction]) \n \n # If the action is to pick up reusable tool, run Gresure and remove the thing from the environment\n # Appending the object to the agent is done at the agent class. 
Same for all the following methods\n \n elif action == \"Greuse\":\n items = self.list_things_at(agent.location, tclass = H)\n holding = agent.get_held_things(tclass = H)\n if len(items) != 0:\n agent.Greuse(items[0])\n self.delete_thing(items[0])\n \n elif action == \"Gdispos\":\n items = self.list_things_at(agent.location, tclass = h)\n holding = agent.get_held_things(tclass = h)\n if len(items) != 0:\n agent.Gdispos(items[0])\n self.delete_thing(items[0])\n \n elif action == \"GTreas1\":\n items = self.list_things_at(agent.location, tclass = T)\n holding = agent.get_held_things(tclass = H)\n if (len(items) != 0 and len(holding) != 0):\n agent.GTreas1(items[0])\n self.delete_thing(items[0])\n elif action == \"GTreas2\":\n items = self.list_things_at(agent.location, tclass = t)\n if (len(items) != 0 and len(holding) != 0):\n agent.GTreas2(items[0])\n self.delete_thing(items[0])\n else:\n print('Not moving')\n \n def is_done(self):\n dead_agents = not any(agent.is_alive() for agent in self.agents)\n return dead_agents \n #agent_no_goals\n\n def step(self):\n if not self.is_done():\n# print(self.agents[0].holding)\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()\n\n def run(self, steps=1000):\n \"Run the Environment for given number of time steps.\"\n for step in range(steps):\n if self.is_done():\n return\n self.step()\n \n \n ", "_____no_output_____" ], [ "\nclass Hunter(Agent):\n \n Agent.performance = 50\n \n def move(self, direc):\n self.performance -= 1\n prevLoc = self.location\n self.location = (direc[0],direc[1])\n print('Hunter: Moved from {} to {}'.format(prevLoc,self.location))\n \n def get_held_things(self, tclass=Thing):\n #Returns the thing held by the agent\n return [thing for thing in self.holding\n if isinstance(thing, tclass)]\n \n #If Greuse is called, the thing is appended to self.holding\n def Greuse(self, thing):\n if isinstance(thing, H):\n print(\"Hunter: Grabbed reusable tool at {}.\".format(self.location))\n self.holding.append(thing)\n return True\n return False\n \n def Gdispos(self, thing):\n# print(isinstance(thing,h))\n if isinstance(thing, h):\n print(\"Hunter: Grabbed disposable tool at {}.\".format(self.location))\n self.holding.append(thing)\n return True\n return False\n \n def GTreas1(self, thing):\n \n if isinstance(thing, T):\n print(\"Hunter: Grabbed a Treasure1 at {}.\".format(self.location))\n self.holding.append(thing)\n self.performance +=20\n print(\"Hey! my performance is: {}\".format(self.performance))\n return True\n return False\n \n #Removes disposable tool after use\n def GTreas2(self, thing):\n if isinstance(thing, t):\n print(\"Hunter: Grabbed a Treasure2 at {}.\".format(self.location))\n self.holding.append(thing)\n self.performance += 40\n print(\"Hey! 
my performance is: {}\".format(self.performance))\n for tool in self.holding:\n if isinstance(tool, t):\n self.holding.remove(tool)\n print(\"Disposable tool has been lost\")\n break\n return True\n return False", "_____no_output_____" ], [ "def interpret_input(percept):\n \n in_environment = percept[0]\n in_agent = percept[1]\n in_location = percept[2]\n \n agent_has_H = False\n agent_has_h = False\n \n if len(in_agent) != 0:\n for thing in in_agent:\n if isinstance(thing,h):\n agent_has_h = True\n elif isinstance(thing,H):\n agent_has_H = True\n \n \n collectables_exist = False\n \n for thing in in_environment:\n if (isinstance(thing, T) or isinstance(thing, t) or isinstance(thing, H) or isinstance(thing, t)):\n collectables_exist = True\n break\n \n #Check existences\n T_exists = False\n t_exists = False\n H_exists = False\n h_exists = False\n \n for thing in in_environment:\n if isinstance(thing, T):\n T_exists = True\n break\n \n for thing in in_environment:\n if isinstance(thing, t):\n t_exists = True\n break\n \n for thing in in_environment:\n if isinstance(thing, H):\n H_exists = True\n break\n \n for thing in in_environment:\n if isinstance(thing, h):\n h_exists = True\n break\n \n for thing in in_agent:\n if isinstance(thing, H):\n H_exists = True\n break\n \n for thing in in_agent:\n if isinstance(thing, h):\n h_exists = True\n break\n \n if len(in_location) != 1:\n \n \n if isinstance(in_location[1], H):\n print(\"Hey! I found a Greuse\")\n return 'Greuse'\n elif isinstance(in_location[1], h):\n print(\"Hey! I found a Gdispos at: {}\".format(in_location[0].location))\n return 'Gdispos'\n elif (isinstance(in_location[1], T) and agent_has_H):\n print(\"Hey! I found a Treasure\")\n return 'GTreas1'\n elif (isinstance(in_location[1], t) and agent_has_h):\n print(\"Hey! 
I found a treasure\")\n return 'GTreas2'\n elif (isinstance(in_location[1], T) and not(agent_has_H)):\n print(\"Can't pick up T since I have no H, so I'm moving \")\n return 'Move'\n elif (isinstance(in_location[1], t) and not (agent_has_h)):\n print(\"Can't pick up t since I have no h, so I'm moving \")\n return 'Move'\n else:\n print(\"Did'nt move\")\n return 'NoOp'\n if len(in_location) == 1:\n if((T_exists and H_exists) or (t_exists and h_exists)):\n return 'Move'\n else:\n# print(\"Did'nt move\")\n return 'NoOp'\n\n \nclass Rules():\n def __init__(self, action = ''):\n self.action = action\n \n def matches(self, a_state):\n return self.action == a_state\n \n \nExplore = Rules(\"Move\")\nReusableTool = Rules(\"Greuse\")\nDisposableTool = Rules(\"Gdispos\")\nTreasure1 = Rules(\"GTreas1\")\nTreasure2 = Rules(\"GTreas2\")\nNothingToDo = Rules(\"NoOp\")\n\nrules = [Explore, ReusableTool, DisposableTool, Treasure1, Treasure2, NothingToDo]\n\n# print(\"Explore:\")\n# print(Explore)\n# print(\"Greuse:\")\n", "_____no_output_____" ], [ "island1 = IslandDay()\nhunter_bob = Hunter(SimpleReflexAgentProgram(rules, interpret_input))\n\nH1 = H()\nT1 = T()\nT2 = T()\nh1 = h()\nh2 = h()\nt1 = t()\nt2 = t()\n\n\nisland1.add_thing(hunter_bob, (0,0))\nisland1.add_thing(H1, (1,1))\nisland1.add_thing(T1, (1,3))\nisland1.add_thing(T2, (1,4))\nisland1.add_thing(h1, (1,2))\nisland1.add_thing(h2, (0,2))\nisland1.add_thing(t1, (4,1))\nisland1.add_thing(t2, (4,2))\n\nisland1.things\n", "_____no_output_____" ], [ "island1.run(30)", "[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (0, 0) to (0, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (0, 0) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (0, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (0, 0) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (0, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (0, 0) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (1, 1)\n[<Hunter>, <H>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - H h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHey! 
I found a Greuse\nHunter: Grabbed reusable tool at (1, 1).\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 1) to (1, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 1) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (2, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 0) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (1, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 1) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (2, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 0) to (2, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 1) to (3, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (3, 1) to (2, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 1) to (1, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 1) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (0, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (0, 0) to (1, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 0) to (2, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 0) to (2, 0)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 0) to (2, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (2, 1) to (1, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 
5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (1, 1) to (0, 1)\n[<Hunter>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHunter: Moved from (0, 1) to (0, 2)\n[<Hunter>, <h>]\n\\ 0 1 2 3 4 5 /\n0 - - h - - - 0\n1 - - h T T - 1\n2 - - - - - - 2\n3 - - - - - - 3\n4 - t t - - - 4\n5 - - - - - - 5\n/ 0 1 2 3 4 5 \\\nHey! I found a Gdispos at: (0, 2)\nHunter: Grabbed disposable tool at (0, 2).\n" ], [ "island1.things", "_____no_output_____" ], [ "hunter_bob.holding", "_____no_output_____" ], [ "myList = ['',2,3,'']", "_____no_output_____" ], [ "isinstance(myList,int)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a3f7e7e21cc723384b646214e1900479ab5223a
3,142
ipynb
Jupyter Notebook
projects/LearningTransformers/main.ipynb
SamiFawcett/play_with_proteins
15a33847f8c41d403ba6845198853d6f784981a5
[ "MIT" ]
1
2022-03-13T20:56:31.000Z
2022-03-13T20:56:31.000Z
projects/LearningTransformers/main.ipynb
SamiFawcett/play_with_proteins
15a33847f8c41d403ba6845198853d6f784981a5
[ "MIT" ]
null
null
null
projects/LearningTransformers/main.ipynb
SamiFawcett/play_with_proteins
15a33847f8c41d403ba6845198853d6f784981a5
[ "MIT" ]
null
null
null
30.803922
255
0.621897
[ [ [ "\n\"Attention is all you need\"\n\nWhat is a Transformer?\n\nModel sequence information that relys on the use of attention mechanisms to compute representations for the input and output.\nunlike RNN's, transformers allow for parallelization\n\n\n\nEnconder & Decoder stacks\n\n\nEncoder layer (Nx, where N = 6):\n - d_model = 512\n - multihead attention sublayer\n - ffn sublayer\n \n output of each sublayer looks like: LayerNorm(x + SubLayer(x)), where SubLayer is the function that is implemented byt the sublayer it self.\n\n\nDecoder Layer (N = 6)\n - In addition to the sublayers of the encoder, the decoder has a third layer. with a modification to the self attention mechanism to prevent positions from attending to subsequent positions.\n - The output embeddings are also offset by one position, ensures that the predictions for position i can depend only on the known outputs at positions less than i.", "_____no_output_____" ], [ "Attention\n - can be described as a mapping of query and a set of key-value pairs to outputs\n\nScaled Dot-Product Attention\nMultihead Attention\n\n\n\nmost common types of attention:\nadditive attention\nmultiplicative attention\n", "_____no_output_____" ], [ "Position wise feed forward networks\n\nFFN(x) = max(0, xW1 + b1)W2 + b2", "_____no_output_____" ], [ "Since the model doesn't inherently have knowledge about positional information of the sequence, we have to inject positional encodings to the input embeddings at the bottoms of the encoder and decoders stacks\n\n\ndifferent types of positional encodings:\nPE(pos, 2i) = sin(pos/10000^(2i/d_model))\nPE(pos, 2i+1) = cos(pos/10000^(2i/dmodel))\n\nCan also use learned embeddings instead, and found that the two versions produced nearly identical results. The chose to use the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than ones found in training.", "_____no_output_____" ], [ "Three desiderata:\n\nOne is the total computational complexity per layer. ANother is the amount of computational that can be parallelized,as mesured by the minimum number of sequential operatiosn required.\n\n\nThe third is the path length between long range dependencies in the network.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a3f896f1c96ed954047b3b594d28b1e83302ef5
282,643
ipynb
Jupyter Notebook
other_stuff/Schroedinger.ipynb
Project-Ellie/tutorials
9090cc7669d3e59889b15139724e662ce11be1ee
[ "Apache-2.0" ]
1
2019-02-17T12:39:58.000Z
2019-02-17T12:39:58.000Z
other_stuff/Schroedinger.ipynb
Project-Ellie/tutorials
9090cc7669d3e59889b15139724e662ce11be1ee
[ "Apache-2.0" ]
5
2020-01-28T22:33:04.000Z
2021-11-10T19:45:24.000Z
other_stuff/Schroedinger.ipynb
Project-Ellie/tutorials
9090cc7669d3e59889b15139724e662ce11be1ee
[ "Apache-2.0" ]
null
null
null
188.052562
80,424
0.89543
[ [ [ "# The Schrödinger equation\n#### Let's have some serious fun!\nWe'll look at the solutions of the Schrödinger equation for a harmonic potential. ", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function", "_____no_output_____" ], [ "import numpy as np\nimport math\nfrom math import pi as Pi\nimport matplotlib.pyplot as plt\nfrom scipy import (inf, integrate)\nimport seaborn as sns\nsns.set()", "_____no_output_____" ] ], [ [ "### Prelude: Hermite's Polynomials\n\nHermite's Polynomials are a subset of polynomials that will help us construct solutions of the Schrödinger equation. \n\n#### Modelling polynomials\nSome object-oriented Python programming with polynomials. We represent an arbitrary polynomial\n\n$$\nP(x) = \\sum_{n=0}^{N} p_n \\cdot x^n\n$$\n\nunambiguously by its coefficients $p_n$, i.e. an array of real numbers of length $N+1$. Apart from the algebraic operators we also define the multiplication with x as ```mulx()``` and the differentiation as ```d_dx()```. ", "_____no_output_____" ] ], [ [ "class Polynomial():\n \"\"\"\n A class representing a polynomial by its coefficients\n \"\"\"\n \n def __init__(self, array=[0]):\n self.p = np.array(array)\n \n def mulx(self):\n return Polynomial(np.insert(self.p, 0, 0))\n\n def d_dx(self):\n return Polynomial([i*self.p[i] for i in range(1, len(self.p))]) \n\n def __eq__(self, other):\n return np.equal(self.p, other.p).all()\n \n def __rmul__(self, number):\n return Polynomial(number * self.p)\n \n def __sub__(self, other):\n l=max(len(self.p), len(other.p))\n return Polynomial(Polynomial.pad(self.p,l) - Polynomial.pad(other.p,l))\n\n def __add__(self, other):\n l=max(len(self.p), len(other.p))\n return Polynomial(Polynomial.pad(self.p,l) + Polynomial.pad(other.p,l))\n\n def __call__(self, x):\n return np.sum([self.p[i] * x**i for i in range(len(self.p))], axis=0)\n \n @staticmethod\n def pad(array, l):\n if len(array) == l:\n return array\n if len(array) > l:\n raise ValueError(\"can't pad to lower dimension\")\n return np.append(array, np.zeros(l-len(array)))\n\n @staticmethod\n def mono_repr(c, i):\n if c==0:\n return ''\n if i==0:\n return str(int(c))\n elif i==1:\n return \"{}x\".format(int(c))\n else:\n if c==1:\n return \"x^{}\".format(i)\n else:\n return \"{}x^{}\".format(int(c),i) \n\n def __repr__(self):\n return \" + \".join( \n np.flipud([Polynomial.mono_repr(self.p[i],i) \n for i in range(len(self.p)) if self.p[i] != 0] ))", "_____no_output_____" ] ], [ [ "#### The Hermite Polynomial generator\nNow, Hermite's polynomials are a special subset of all polynomials, defined e.g. by a recursion relation:\n\nFrom [Wikipedia](https://en.wikipedia.org/wiki/Hermite_polynomials) (if not good memories), we know that\n$$\nH_n(x) = (2x-\\frac{d}{dx})^n \\cdot 1\n$$\n\ngenerates the *physicist's* Hermite polynomials. 
We define our python generator in a recursive fashion returning Polynomial instances\n\n$$\nH_n(x) = (2x-\\frac{d}{dx}) \\cdot H_{n-1}\n$$", "_____no_output_____" ] ], [ [ "def H(n):\n if n<0:\n raise ValueError(\"Not defined for negativ n\")\n if n==0:\n return Polynomial([1])\n p = H(n-1)\n return 2 * p.mulx() - p.d_dx()", "_____no_output_____" ] ], [ [ "Note that we can evaluate the polynomial at any (even complex) x.", "_____no_output_____" ] ], [ [ "H_3 = H(3)\nH_3, H_3(1), H_3(1+2j)", "_____no_output_____" ] ], [ [ "The Hermite polynomials have the special properties:\n\n$$\n x \\cdot H_\\nu(x) = \\frac{1}{2} H_{\\nu+1}(x) + \\nu \\cdot H_{\\nu-1}(x)\n$$\n\n$$\n \\frac{d}{dx}H_\\nu(x) = 2 \\nu \\cdot H_{\\nu-1}(x)\n$$\n\nwhich we can verify using our implementation for the first 10 polynomials ($\\nu = {1..9}$):", "_____no_output_____" ] ], [ [ "[H(nu).mulx() == .5 * H(nu+1) + nu*H(nu-1) for nu in range(1,10)]", "_____no_output_____" ], [ "[H(nu).d_dx() == 2 * nu * H(nu - 1) for nu in range(1,10)]", "_____no_output_____" ] ], [ [ "---\n### The time-dependent Schrödinger equation\n\n$$\ni\\hbar \\frac{\\partial \\Psi(x,t)}{\\partial t} = \n\\mathcal{H}\\Psi(x,t) =\nE\\Psi(x,t)\n$$\n\nThis is the Schrödinger equation. Now, with the time-independent Hamilton operator $\\mathcal{H}$ for a particle with mass m and the harmonic potential given by $ V(x)=\\frac{1}{2}m\\omega^2 x^2$ looks like\n \n$$\n\\mathcal{H} = -\\frac{\\hbar^2}{2m}\\frac{\\partial^2}{\\partial x^2} + \\frac{1}{2}m\\omega^2 x^2 \n$$\n\nwe can separate the variables $x$ and $t$ like so:\n\n$$\n\\Psi(x, t) = \\psi(x) \\cdot \\varphi(t)\n$$\n", "_____no_output_____" ], [ "and solve both \n$$\ni\\hbar \\frac{\\partial \\varphi(t)}{\\partial t} = E \\cdot \\varphi(t)\n$$\n\nand\n\n$$\n[-\\frac{\\hbar^2}{2m}\\frac{\\partial^2}{\\partial x^2} + \\frac{1}{2}m\\omega^2 x^2] \\cdot \\psi(x) = E \\psi(x)\n$$\n\nseparately.", "_____no_output_____" ], [ "A neat trick to get rid of the physical constants is rescaling:\n\n$$\\xi = \\frac{m \\omega}{\\hbar} \\cdot x$$\n\nwith which you can easily check by yourself that the Schrödinger equation becomes:\n\n$$ \n[ -\\frac{\\partial^2}{\\partial \\xi^2} + \\xi^2 - \\frac{2E}{\\hbar \\omega}] \\cdot \\psi(\\xi) = 0\n$$\n\nwhere we postulate the boundary conditions for a constrained particle as \n\n$$\n\\psi(-\\infty) = \\psi(\\infty) = 0\n$$", "_____no_output_____" ], [ "The so-called stationary solutions of the equation in $x$ form an ortho-normal eigenbasis of the Hilbert space of bounded functions $\\psi_{\\nu}(\\xi)$ with eigenvalues $E_{\\nu}=\\hbar \\omega (\\nu + \\frac{1}{2})$. And although we're not interested in the boring (yawn!) stationary solutions, we'll use this eigenbasis to construct an analytical function that obeys the time-dependent Schrödinger equation.\n\n\nWith the above eigenvalues we finally arrive at the following concise representation of the time-independent Schrödinger equation.\n\n$$ \n[ -\\frac{\\partial^2}{\\partial \\xi^2} + \\xi^2 - (2\\nu+1)] \\cdot \\psi(\\xi) = 0\n$$", "_____no_output_____" ], [ "### Functions as eigenvectors\n\nThe solutions of this equation span a vector space, a so-called Hilbert space. That means we can define addition, multiplication by a number and even an inner product on these functions. When we look at functions as vectors in a Hilbert space, then the Schrödinger equation can as well be considered an eigenvalue problem. 
We'll provide the solutions without proof.\n\nThe eigenfunctions are composed of the Hermite polynomials and a gaussian:\n\n$$\n\\psi_\\nu(\\xi) = \\frac{1}{\\sqrt{2^\\nu \\cdot \\nu! \\cdot \\sqrt{\\pi}}} \\cdot H_\\nu(\\xi) \\cdot\ne^{-\\frac{\\xi^2}{2}}\n$$\n\n$$\n\\varphi_\\nu(t) = e^{-i (\\nu+\\frac{1}{2}) t}\n$$\nThus arriving at the full solution of the time-dependent Schrödinger equation as\n\n$$\n\\psi_\\nu(\\xi, t) = \\frac{1}{\\sqrt{2^\\nu \\cdot \\nu! \\cdot \\sqrt{\\pi}}} \\cdot H_\\nu(\\xi) \\cdot\ne^{-\\frac{\\xi^2}{2}-i(\\nu+\\frac{1}{2}) t}\n$$\n\nThese solutions are called stationary because they rotate in the complex plane keeping their shape. That means that for every x the value of $\\psi_\\nu(x)$ rotates in the complex plane with exactly the same *frequency* as any other. Please note that we have clandestinely scaled the time t such that it *swallowed* the physical constants. For our purpose, namely visualizing the non-stationary solutions of the Schrödinger equation, this does not make a difference.\n\n---\nDefining the normalization factor $A_\\nu$ as \n\n$$\nA_\\nu = \\frac{1}{\\sqrt{2^\\nu \\cdot \\nu! \\cdot \\sqrt{\\pi}}}\n$$\n\nwe visualize these stationary solutions such that we get an idea what they look like: ", "_____no_output_____" ] ], [ [ "def A(nu):\n return 1/math.sqrt(2**nu * math.factorial(nu) * math.sqrt(math.pi))\n\ndef psi(nu):\n def _psi(x):\n return A(nu) * H(nu)(x) * np.exp(-x*x/2) \n return _psi", "_____no_output_____" ], [ "N_points=200", "_____no_output_____" ], [ "x_ = np.linspace(-6, 6, N_points)", "_____no_output_____" ], [ "plt.plot(x_, psi(0)(x_))\nplt.plot(x_, psi(1)(x_))\nplt.plot(x_, psi(2)(x_))\nplt.plot(x_, psi(3)(x_));", "_____no_output_____" ] ], [ [ "---\n#### Ortho-normal basis\nLet's verify that our $\\psi_\\nu(\\xi)$ form an ortho-normal basis with the inner product $\\langle \\psi_\\mu | \\psi_\\nu \\rangle$, $\\mathbb{H} \\times \\mathbb{H} \\rightarrow \\mathbb{R}$ defined by\n\n$$\n\\int_{-\\infty}^{\\infty} \\bar{\\psi}_\\nu(\\xi) \\cdot \\psi_\\mu(\\xi) d\\xi= \\delta^{\\mu\\nu}\n$$\n\n$\\bar{\\psi}_\\nu(\\xi)$ being the complex conjugate of $\\psi_\\nu(\\xi)$", "_____no_output_____" ] ], [ [ "[[round(integrate.quad(lambda x: psi(mu)(x)*psi(nu)(x), -inf, +inf)[0], 6) for mu in range(5)] for nu in range(5)]", "_____no_output_____" ] ], [ [ "You can see that all inner products of two basis functions are zero, apart from the product with itself, which is what the *Kronecker* delta $\\delta^{\\mu \\nu}$ demands.", "_____no_output_____" ], [ "---\n### The fun part: coherent solutions\n\nNow, let's have some fun. As we have just verified, the eigenstates of the Schrödinger equation form an ortho-normal basis of the Hilbert space of functions in one dimension. We expect that one can approximate any other bounded function as a linear combination of the first $N$ eigenfunctions. We'll do that for the following shifted gaussian. 
Note that it is centered around $x=-3$, so it's not equal to the first basis function.", "_____no_output_____" ] ], [ [ "x0=-3\nfun=lambda x: psi(0)(x-x0)\n#sns.set_style(\"ticks\", {\"xtick.major.size\": 2, \"ytick.major.size\": .1})\nsns.set()\nplt.plot(x_, fun(x_));", "_____no_output_____" ] ], [ [ "We compute its coordinates in the Schrödinger eigenbasis simply by projecting it onto the first $N$ eigenfunctions like this", "_____no_output_____" ] ], [ [ "N = 15\ncoords = [integrate.quad(lambda x: psi(mu)(x)*fun(x), -inf, +inf)[0] for mu in range(N)]\ncoords", "_____no_output_____" ] ], [ [ "Calling those coordinates $c_\\nu$, we compute \n\n$$\n\\psi_0(x-x_0) \\approx \\big[\\sum_{\\nu=0}^{N-1} c_\\nu \\cdot A_\\nu H_\\nu(x)\\big] \\cdot e^{-\\frac{x^2}{2}}\n$$", "_____no_output_____" ] ], [ [ "pol = Polynomial([0])\nfor nu in range(N):\n    pol = pol + coords[nu] * A(nu) * H(nu)\n\nprojection = lambda x: pol(x) * np.exp(-x*x/2)", "_____no_output_____" ], [ "plt.plot(x_, projection(x_));", "_____no_output_____" ] ], [ [ "What you see is that the 15-dimensional projection of our shifted function into the Schrödinger eigenbasis is a formidable approximation. \n\nIt's actually much more than an approximation. You can interpret this function as the wave function of a particle resting (the momentum is zero) at $x=x_0$. Remember there's still the harmonic potential. Thus, in the limit of classical mechanics, we would expect that our particle will slowly accelerate to the right until it *feels* the potential there. Then it would reflect and move all the way back. Lacking friction, we indeed expect that this oscillation continues until eternity.\n\n---\n#### Let the clock tick...\nBecause now we have this function as a linear combination of Schrödinger solutions, we can switch on time and see for ourselves. Under the influence of the time-dependent Schrödinger equation, the fifteen eigenvectors each rotate at their own frequency determined by the eigenvalue $2\\nu+1$ ", "_____no_output_____" ], [ "The time-dependent solutions\n\n$$\n\\psi_\\nu(\\xi, t) = \\frac{1}{\\sqrt{2^\\nu \\cdot \\nu! 
\\cdot \\sqrt{\\pi}}} \\cdot H_\\nu(\\xi) \\cdot\ne^{-\\frac{\\xi^2}{2}-i(\\nu+\\frac{1}{2}) t}\n$$\n\nNote that now this function is complex-valued!", "_____no_output_____" ] ], [ [ "def psit(nu):\n def _psi(x, t):\n return A(nu) * H(nu)(x) * np.exp(-x*x/2) * np.exp(-1j*(nu+.5)*t)\n return _psi\n\npsit(3)(1, .3)", "_____no_output_____" ] ], [ [ "---\n#### 3-D data \nTo appreciate the dynamics of a wave function in time we display both the real part and the imaginary part of the complex value of $\\psi$.\n\n- The figure's y-axis is our space coordinate $x$\n- its z-axis spans the real part of the wave function\n- and its x-axis spans the wave function's imaginary part", "_____no_output_____" ] ], [ [ "import mpl_toolkits.mplot3d.axes3d as p3", "_____no_output_____" ] ], [ [ "We display $\\psi_2(x, t) $ at $t=0.5$", "_____no_output_____" ] ], [ [ "x_ = np.linspace(-6,6, N_points)\nf = psit(2)(x_, 0.5)\nr_f = [c.real for c in f]\ni_f = [c.imag for c in f]", "_____no_output_____" ], [ "fig=plt.figure(figsize=(12,8))\nax = fig.gca(projection='3d')\nax.view_init(30, -15)\nax.set_xlim(-1, 1)\nax.set_zlim(-1, 1)\nax.set_xlabel('Imag')\nax.set_ylabel('X')\nax.set_zlabel('Real')\nax.plot(i_f, x_, r_f)\nplt.show()", "_____no_output_____" ] ], [ [ "As you can see, the function is tilted in the complex plan due to the complex phase $e^{-\\frac{5}{2}it}$\n\n---\n#### Time-dependent wave functions\nHere, we'll create an analytical time-dependent wave function from our set of coordinates in Hilbert space that represent the resting particle at $x_0=-3$", "_____no_output_____" ] ], [ [ "def WF(sc):\n return lambda x,t: sum([sc[nu] * np.exp(-1j*(nu+.5)*t) * A(nu) * H(nu)(x) * np.exp(-x*x/2) \n# ============================== ==================================\n# ^ ^\n# time dependent coefficient Basis function \n for nu in range(len(sc))]) \n\nparticle = WF(coords)\n\nparticle(-3, 0) # a particle resting at x=-3 at time t=0 ", "_____no_output_____" ] ], [ [ "### Animating a Schrödinger particle!", "_____no_output_____" ] ], [ [ "%autosave 3600", "_____no_output_____" ], [ "N_frames=100\nN_Points=200\nXL, XR = -6, 6", "_____no_output_____" ], [ "def snapshot(N, f, t):\n x = np.linspace(XL,XR, N)\n f=f(x, t)\n r_f = np.array([c.real for c in f])\n i_f = np.array([c.imag for c in f])\n return np.array([i_f, x, r_f])", "_____no_output_____" ], [ "def update(num, n_points, n_frames, wave_function, line):\n data= snapshot(n_points, wave_function, num*4.0/n_frames*math.pi)\n line.set_data(data[0], data[1])\n line.set_3d_properties(data[2])\n return line", "_____no_output_____" ] ], [ [ "Recording the animation will take a couple of seconds. Be patient. It's worth waiting for!", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom IPython.display import HTML\n\nfig=plt.figure(figsize=(12,8))\nax = p3.Axes3D(fig)\n\ninitial_data = snapshot(N_points, particle, 0.0)\n\nline = ax.plot(initial_data[0], initial_data[1], initial_data[2])[0]\n\nax.set_xlim(-1, 1)\nax.set_zlim(-1, 1)\nax.set_ylim(XL, XR)\nax.set_xlabel('Imag')\nax.set_ylabel('X')\nax.set_zlabel('Real')\n\nax.set_title('Schroedinger particle in action!')\nax.view_init(10, -10)\n\nline_ani = animation.FuncAnimation(\n fig, update, N_frames, \n fargs=(N_Points, N_frames, particle, line), \n interval=200, blit=False)\n\njshtml = line_ani.to_jshtml()", "_____no_output_____" ], [ "#Uncomment and run this cell the see the movie. 
The cell will be so large that the notebook refuses to save. Thus I always comment it out before saving.\n#HTML(data=jshtml)", "_____no_output_____" ], [ "# Uncomment to save your file and serve it elsewhere\n#with open(\"Schroedinger.html\", \"w\") as file:\n# file.write(jshtml)", "_____no_output_____" ] ], [ [ "---\n### Measuring location and momentum\n\nMeasurements in the real world are represented by computing expectation values of the operator associated with the given observable.\n\n#### Angle notation\n\nIn the following, we denote eigenfunctions of the Schrödinger equation in angle notation \n\n$$\n|\\nu \\rangle \\equiv \\psi_\\nu(x,t)\n$$\n\nIn our unit-free notation, and introducing a more concise notation for the partial derivative, the momentum operator $\\hat{p}$ is defined by\n\n$$\n\\hat{p} = -i \\partial_x\n$$\n\nOperators in our Hilbert space will be written in *hat* notation. You have seen $\\hat{p}$ already. The Hamilton operator becomes:\n\n$$\n\\hat{H} = \\hat{p}^2 + \\hat{x}^2\n$$\n\nNote that we're back to using $x$, but what we really mean is the unit-less $\\xi$.\n\nThe Schrödinger equation in its eigenbasis looks like \n\n$$\n\\hat{H} |\\nu\\rangle = 2(\\nu+1)|\\nu\\rangle \n$$\n\nThe inner product of any two wave functions (not necessarily basisvectors) as defined by the integral over the product of both functions has a neat short notation:\n\n$$\n\\langle \\psi_1 | \\psi_2 \\rangle\n\\equiv\n\\int_{-\\infty}^{\\infty} \\bar{\\psi_1}(\\xi) \\cdot \\psi_2(\\xi) d\\xi\n$$\n\nThe expectation value of an observable represented by an Operator like e.g. $\\hat{p}$, given a particular wave function $\\psi$ is defined by\n\n$$\n\\langle \\psi | \\hat{p} | \\psi \\rangle\n\\equiv\n\\int_{-\\infty}^{\\infty} \\bar{\\psi}(\\xi) \\cdot (-i\\partial_x) \\psi(\\xi) d\\xi\n$$\n", "_____no_output_____" ], [ "---\n#### Dirac's ladder operators\nLet us introduce the two *ladder* operators $a$ and $a^\\dagger$ as", "_____no_output_____" ], [ "$$\na \\equiv \\frac{1}{\\sqrt 2} (\\hat{x} + i\\hat{p})\n$$\n\n$$\na^\\dagger \\equiv \\frac{1}{\\sqrt 2} (\\hat{x} - i\\hat{p})\n$$\n\nusing which we can express $\\hat{p}$ and $\\hat{x}$ like so: \n\n$$\n\\hat{p} = \\frac{i}{\\sqrt 2}(a^\\dagger - a)\n$$\n\n$$\n\\hat{x} = \\frac{1}{\\sqrt 2}(a^\\dagger + a)\n$$", "_____no_output_____" ], [ "Then you can convince yourself easily using the properties of the Hermite polynomials:\n\n$$\n x \\cdot H_\\nu(x) = \\frac{1}{2} H_{\\nu+1}(x) + \\nu \\cdot H_{\\nu-1}(x)\n$$\n\n$$\n \\frac{d}{dx}H_\\nu(x) = 2 \\nu \\cdot H_{\\nu-1}(x)\n$$\n\nand our solutions of the Schrödinger equations\n\n$$\n\\psi_\\nu(x) = A_\\nu \\cdot H_\\nu(x) \\cdot\ne^{-\\frac{x^2}{2}}\n$$\n\nthat\n$$ a|\\nu\\rangle = \\sqrt{\\nu} |\\nu-1 \\rangle $$ \nand\n$$ a^\\dagger|\\nu\\rangle = \\sqrt{\\nu+1} |\\nu+1 \\rangle $$ \n\nIt should be obvious by now why these operators are called *ladder* operators. They map each basis vector on the next resp. previous basis vector. And this neat property leads to a surprisingly simple method of applying $\\hat{p}$ or $\\hat{x}$ to arbitrary wave functions.", "_____no_output_____" ], [ "---\n#### Matrix representation\n\nWe can compute a matrix representation easily by projecting the the result of every \n$a|\\nu\\rangle$ resp. 
$a^\\dagger|\\nu\\rangle$ onto every eigenvector.\n\n$$\n\\langle \\mu|a|\\nu\\rangle = \\sqrt{\\nu}\\cdot\\langle \\mu | \\nu-1\\rangle = \\sqrt{\\nu} \\cdot \\delta^{\\mu,\\nu-1} \n$$\n\nand\n\n$$\n\\langle \\mu|a^\\dagger|\\nu\\rangle = \\sqrt{\\nu+1}\\cdot\\langle \\mu | \\nu+1\\rangle = \\sqrt{\\nu+1} \\cdot \\delta^{\\mu,\\nu+1} \n$$\n\n", "_____no_output_____" ], [ "In this matrix representation, the ladder operators populate the positions right above or below the diagonal, respectively.\n\n$$\na = \\left[\n \\begin{array}{c c c c c c}\n 0 & 1 & 0 & 0 & 0 & 0 & \\dots \\\\\n 0 & 0 & \\sqrt{2} & 0 & 0 & 0 & \\dots\\\\\n 0 & 0 & 0 & \\sqrt{3} & 0 & 0 & \\dots\\\\\n 0 & 0 & 0 & 0 & \\sqrt{4} & 0 & \\dots\\\\\n 0 & 0 & 0 & 0 & 0 & \\sqrt{5} & \\dots\\\\\n 0 & 0 & 0 & 0 & 0 & 0 & \\dots \\\\\n \\dots\n \\end{array}\n \\right]\n$$\n\n$$\na^\\dagger = \n \\left[\n \\begin{array}{c c c c c c}\n 0 & 0 & 0 & 0 & 0 & 0 & \\dots\\\\\n 1 & 0 & 0 & 0 & 0 & 0 & \\dots\\\\\n 0 & \\sqrt{2} & 0 & 0 & 0 & 0 & \\dots\\\\\n 0 & 0 & \\sqrt{3} & 0 & 0 & 0 & \\dots\\\\\n 0 & 0 & 0 & \\sqrt{4} & 0 & 0 & \\dots\\\\\n 0 & 0 & 0 & 0 & \\sqrt{5} & 0 & \\dots\\\\\n \\dots\n \\end{array}\n \\right]\n$$ \n \nwhich leads to\n \n$$\n\\hat{p} = \\frac{1}{\\sqrt{2}} \\cdot \\left[\n \\begin{array}{c c c c c c}\n 0 & 1 & 0 & 0 & 0 & 0 & \\dots\\\\\n i & 0 & \\sqrt{2} & 0 & 0 & 0 & \\dots\\\\\n 0 & i\\sqrt{2} & 0 & \\sqrt{3} & 0 & 0 & \\dots\\\\\n 0 & 0 & i\\sqrt{3} & 0 & \\sqrt{4} & 0 & \\dots\\\\\n 0 & 0 & 0 & i\\sqrt{4} & 0 & \\sqrt{5} & \\dots\\\\\n 0 & 0 & 0 & 0 & i\\sqrt{5} & 0 & \\dots\\\\\n \\dots\n \\end{array}\n \\right]\n$$\n\n$$\n\\hat{x} = \\frac{1}{\\sqrt{2}} \\cdot \\left[\n \\begin{array}{c c c c c c}\n 0 & i & 0 & 0 & 0 & 0 & \\dots\\\\\n 1 & 0 & i\\sqrt{2} & 0 & 0 & 0 & \\dots\\\\\n 0 & \\sqrt{2} & 0 & i\\sqrt{3} & 0 & 0 & \\dots\\\\\n 0 & 0 & \\sqrt{3} & 0 & i\\sqrt{4} & 0 & \\dots\\\\\n 0 & 0 & 0 & \\sqrt{4} & 0 & i\\sqrt{5} & \\dots\\\\\n 0 & 0 & 0 & 0 & \\sqrt{5} & 0 & \\dots\\\\\n \\dots\n \\end{array}\n \\right]\n$$\n", "_____no_output_____" ], [ "---\n\nWith these matrices we can do all our calculations just like highschool algebra! Let's verify that \n\n$$ a|2\\rangle = \\sqrt{2} \\cdot |1\\rangle $$\n\nand\n\n$$ a^\\dagger |2\\rangle = \\sqrt{3} \\cdot |3\\rangle $$\n", "_____no_output_____" ] ], [ [ "N=4 # just so that displaying the matrices doesn't clutter the notebook", "_____no_output_____" ] ], [ [ "The ladder operators as numpy arrays:", "_____no_output_____" ] ], [ [ "a=np.array([[math.sqrt(nu) if mu==nu-1 else 0.0 for nu in range(N)] for mu in range(N)])\na", "_____no_output_____" ], [ "a_d=np.array([[math.sqrt(nu+1) if mu==nu+1 else 0.0 for nu in range(N)] for mu in range(N)])\na_d", "_____no_output_____" ], [ "nu2 = np.array([0, 0, 1, 0])\nnp.matmul(a, nu2), np.matmul(a_d, nu2)", "_____no_output_____" ] ], [ [ "Convinced?\n\n---\n#### Expectation values\n\nWe can do even more exciting stuff with these matrices. Remember our initial wave function from the movie? It was a gaussian located a x=-3, and I claimed that it was at rest. 
It's about time to prove both.\nThe expectation value of the location $x$ is defined by\n\n$$\n\\langle \\psi | \\hat{x} | \\psi \\rangle\n\\equiv\n\\int_{-\\infty}^{\\infty} \\bar{\\psi}(x) \\cdot x \\cdot \\psi(x) dx\n$$\n", "_____no_output_____" ] ], [ [ "# Using the 15-dimensional coordinates of our initial wave function in the Hilbert space spun by the \n# solutions of the Schrödinger equation with harmonic potential\n\nc = coords\nN = len(coords)\na=np.array([[math.sqrt(nu) if mu==nu-1 else 0.0 for nu in range(N)] for mu in range(N)])\na_d=np.array([[math.sqrt(nu+1) if mu==nu+1 else 0.0 for nu in range(N)] for mu in range(N)])", "_____no_output_____" ] ], [ [ "Below we calculate\n\n$$\n \\langle \\psi | \\hat{x} | \\psi \\rangle = \n \\frac{1}{\\sqrt{2}} \\cdot (\\langle \\psi | \\hat{a} \\psi \\rangle + \\langle \\psi | \\hat{a}^\\dagger \\psi \\rangle) \n = \\frac{1}{\\sqrt{2}} \\cdot (\\psi^T \\cdot \\mathbb{M} \\cdot \\psi + \\psi^T \\cdot \\mathbb{M}^\\dagger \\cdot \\psi)\n$$\n\nwhere $\\psi^T$ is the transposed vector and $\\mathbb{M}, \\mathbb{M}^\\dagger$ are the matrix representations of the ladder operators $a, a^\\dagger$.", "_____no_output_____" ] ], [ [ "psi=np.array(coords)\n1/math.sqrt(2) * (np.matmul(np.matmul(psi.T, a), psi) + np.matmul(np.matmul(psi.T, a_d), psi))\n\n# Transposing is just for visual clarity. \n# Actually, Python would understand the matmul operation correctly, anyway.", "_____no_output_____" ] ], [ [ "Convinced? That's almost exactly what we expected.\n\nBtw. we could have been smarter by computing the $\\hat{x}$ operator first and then compute the expectation value of it: Let's do that also for $\\hat{p}$ \n\n$\\hat{p} = \\frac{i}{\\sqrt 2}(a^\\dagger - a)$ ;\n$\\hat{x} = \\frac{1}{\\sqrt 2}(a^\\dagger + a)$:", "_____no_output_____" ] ], [ [ "p_hat = 1j/math.sqrt(2) * ( a_d - a )\nx_hat = 1/math.sqrt(2) * ( a_d + a )", "_____no_output_____" ] ], [ [ "$\\langle \\psi | \\hat{p} | \\psi \\rangle$:", "_____no_output_____" ] ], [ [ "np.matmul(np.matmul(psi.T, p_hat), psi)", "_____no_output_____" ] ], [ [ "That's almost zero. C'mon, now you are convinced, right?", "_____no_output_____" ], [ "---\n\n#### Observing location and momentum over time", "_____no_output_____" ] ], [ [ "def psi_t(sc, t):\n return np.array([sc[nu] * np.exp(-1j*(nu+.5)*t) for nu in range(N)])", "_____no_output_____" ], [ "psi_07 = psi_t(psi, 0.7)\npsi_07", "_____no_output_____" ] ], [ [ "Please note that for complex coefficients we must compute $\\langle \\psi | $ as the complex conjugate of $| \\psi \\rangle$", "_____no_output_____" ] ], [ [ "np.matmul(np.matmul(np.conj(psi_07).T, p_hat), psi_07)", "_____no_output_____" ], [ "def p_exp (sc, t):\n psit = psi_t(sc, t)\n return np.matmul(np.matmul(np.conj(psit).T, p_hat), psit).real\np_exp(psi, .7)\n", "_____no_output_____" ], [ "def x_exp (sc, t):\n psit = psi_t(sc, t)\n return np.matmul(np.matmul(np.conj(psit).T, x_hat), psit).real\nx_exp(psi, np.array(0.7))", "_____no_output_____" ], [ "t_ = np.linspace(0, 2*math.pi, 100)\nxt_ = [x_exp(psi, t) for t in t_]\npt_ = [p_exp(psi, t) for t in t_]", "_____no_output_____" ], [ "plt.plot(xt_, pt_);", "_____no_output_____" ] ], [ [ "Just like in classical mechanics, the expectation values of location and momentum form an elipse (in our case even a perfect circle) in the phase space spun by values of $p$ and $x$.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a3f925691f5fa429b3cf3a236cd4f8ade3650d2
42,133
ipynb
Jupyter Notebook
examples/differential-privacy/pydp/PyDP_Syft_Data_Scientist.ipynb
H4LL/PySyft
baaeec792e90919f0b27f583cbecc96d61b33fd6
[ "Apache-2.0" ]
null
null
null
examples/differential-privacy/pydp/PyDP_Syft_Data_Scientist.ipynb
H4LL/PySyft
baaeec792e90919f0b27f583cbecc96d61b33fd6
[ "Apache-2.0" ]
null
null
null
examples/differential-privacy/pydp/PyDP_Syft_Data_Scientist.ipynb
H4LL/PySyft
baaeec792e90919f0b27f583cbecc96d61b33fd6
[ "Apache-2.0" ]
null
null
null
123.195906
34,072
0.882135
[ [ [ "%%capture\n!pip install python-dp", "_____no_output_____" ], [ "import syft as sy\nduet = sy.join_duet(loopback=True)", "_____no_output_____" ], [ "# https://github.com/OpenMined/PyDP/blob/dev/examples/Tutorial_1-carrots_demo/carrots_demo.ipynb", "_____no_output_____" ], [ "# we will not explicitly call pydp.xxx, instead we will call duet.pydp.xxx, which is calling pydp.xxx on the DO side, so it's not neccessary import pydb\n# import pydp", "_____no_output_____" ], [ "duet.store.pandas", "_____no_output_____" ], [ "# this will allow us to use pydp like duet.pydp.xxx\nsy.load_lib(\"pydp\")", "_____no_output_____" ], [ "BoundedMean = duet.pydp.algorithms.laplacian.BoundedMean", "_____no_output_____" ], [ "carrots_eaten_ptr = duet.store[\"carrots_eaten\"]\n# calculates mean applying differential privacy\ndef private_mean(privacy_budget: float) -> float:\n x_ptr = BoundedMean(privacy_budget, 1, 100)\n return x_ptr.quick_result(carrots_eaten_ptr).get(\n request_block=True,\n name=\"private_mean\",\n reason=\"To get the private_mean\",\n timeout_secs=10,\n )", "_____no_output_____" ], [ "print(\"Private Mean: \", private_mean(0.8))", "Private Mean: 53.7203254699707\n" ], [ "Count = duet.pydp.algorithms.laplacian.Count", "_____no_output_____" ], [ "carrots_eaten_limit_ptr = duet.store[\"carrots_eaten_limit\"]\n\n# Calculates number of animals who ate more than \"limit\" carrots applying differential privacy.\ndef private_count_above(privacy_budget: float) -> int:\n x = Count(privacy_budget, dtype=\"int\")\n return x.quick_result(carrots_eaten_limit_ptr).get(\n request_block=True,\n name=\"private_count_above\",\n reason=\"To get the private_count_above\",\n timeout_secs=10,\n )", "_____no_output_____" ], [ "print(\"private count above:\\t\" + str(private_count_above(1)))", "private count above:\t68\n" ], [ "Max = duet.pydp.algorithms.laplacian.Max", "_____no_output_____" ], [ "# Function to return the maximum of the number of carrots eaten by any one animal appyling differential privacy.\ndef private_max(privacy_budget: float) -> int:\n # 0 and 100 are the upper and lower limits for the search bound.\n x = Max(privacy_budget, 0, 100, dtype=\"int\")\n return x.quick_result(carrots_eaten_ptr).get(\n request_block=True,\n name=\"private_max\",\n reason=\"To get the private_max\",\n timeout_secs=10,\n )", "_____no_output_____" ], [ "print(\"private max:\\t\" + str(private_max(1)))", "private max:\t85\n" ], [ "BoundedSum = duet.pydp.algorithms.laplacian.BoundedSum", "_____no_output_____" ], [ "# Function to calculate sum of carrots eaten applying differential privacy.\ndef private_sum(privacy_budget: float) -> int:\n x = BoundedSum(privacy_budget,1,100, dtype=\"float\")\n return x.quick_result(carrots_eaten_ptr).get(\n request_block=True,\n name=\"private_count_above\",\n reason=\"To get the private_count_above\",\n timeout_secs=10,\n )", "_____no_output_____" ], [ "print(\"Private Sum:\\t\" + str(private_sum(1)))", "Private Sum:\t9566.6787109375\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a3f9f25620d431ed632df01efe236522fd6db72
146,726
ipynb
Jupyter Notebook
Day 6 (Filter join & merging)/Day 6 Filtering & merging.ipynb
Hani1-2/Explorartory_Data_Analysis
fe97ccb7d33816ce680d9500f45c648b02cb5d4d
[ "MIT" ]
3
2021-09-26T17:06:28.000Z
2021-10-31T20:19:06.000Z
Day 6 (Filter join & merging)/Day 6 Filtering & merging.ipynb
Hani1-2/Explorartory_Data_Analysis
fe97ccb7d33816ce680d9500f45c648b02cb5d4d
[ "MIT" ]
5
2021-10-01T06:42:03.000Z
2021-10-05T17:36:26.000Z
Day 6 (Filter join & merging)/Day 6 Filtering & merging.ipynb
Hani1-2/Explorartory_Data_Analysis
fe97ccb7d33816ce680d9500f45c648b02cb5d4d
[ "MIT" ]
12
2021-09-26T17:01:04.000Z
2021-10-04T15:57:05.000Z
30.265264
216
0.364237
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "# movies dataset\nmovies = pd.read_pickle('./dataset/movies/movies.p')\nprint(movies.shape)\nmovies.head()", "(4803, 4)\n" ], [ "#taglines dataset\ntaglines = pd.read_pickle('./dataset/movies/taglines.p')\nprint(taglines.shape)\ntaglines.head()", "(3955, 2)\n" ] ], [ [ "## Filter joins\n- semi join\n- anti join", "_____no_output_____" ], [ "Mutation join vs filter join\n- mutation is commbining data from two tables based on matching obsevation in both tables\n- filtering observation from table is based on weather or not they match an observation in another table", "_____no_output_____" ], [ "### 1. semi joins\n- return the intersection, similar to an inner join\n- return only column from left table and **not** the rigth\n- No duplicated", "_____no_output_____" ], [ "<img src='./media/semi_join.png' width=700 height=800>", "_____no_output_____" ], [ "- step 1 --> simple inner join for semi join\n- step 2 --> making a filter of semi join\n- step 3 --> filtering data ", "_____no_output_____" ] ], [ [ "#step1 -->simple inner join for semi join\nmovies_tag = movies.merge(taglines, on='id')\nmovies_tag.head()", "_____no_output_____" ], [ "#step 2 --> making a filter of semi join\nmovies['id'].isin(movies_tag['id'])", "_____no_output_____" ], [ "# step 3 --> filtering data \ntagged_movies = movies[movies['id'].isin(movies_tag['id'])]\ntagged_movies.head()", "_____no_output_____" ], [ "#semi join in one\nmovies_tag = movies.merge(taglines, on='id')\ntagged_movies = movies[movies['id'].isin(movies_tag['id'])]\ntagged_movies.head()", "_____no_output_____" ] ], [ [ "### 2. anti join\n- opposite to semi join\n- return the left table, **excluding the intersaction**\n- return only column from the left **not** from the right", "_____no_output_____" ], [ "<img src='./media/anti join.png' width= 700 height=800>", "_____no_output_____" ], [ "- step 1 --> simple left join for anti join\n- step 2 --> making a filter of anti join", "_____no_output_____" ] ], [ [ "# step 1 --> simple left join for anti join\nmovies_tag = movies.merge(taglines, on='id', how='left', indicator=True)\nprint(movies_tag.shape)\nmovies_tag.head()", "(4803, 6)\n" ], [ "# step 2 --> making a filter for anti join\nid_list = movies_tag.loc[movies_tag['_merge']=='left_only', 'id']\npd.DataFrame(id_list).head()", "_____no_output_____" ], [ "# step 3 --> applying filter\nmovies_tag = movies.merge(taglines, on='id', how='left', indicator=True)\nid_list = movies_tag.loc[movies_tag['_merge']=='left_only', 'id']\nnon_tagged_movies = movies_tag[movies_tag['id'].isin(id_list)]\nnon_tagged_movies.head()", "_____no_output_____" ] ], [ [ "## Concatenate DataFrames together vertically\n- pandas **.concat()** can concatenate both vertically and horizentally\n- **axis=0** for vertical", "_____no_output_____" ], [ "<img src='./media/verticaal_concatenation.png' width= 400 height= 500>", "_____no_output_____" ] ], [ [ "jan_movies = movies.iloc[1:5]\njan_movies", "_____no_output_____" ], [ "feb_movies = movies.iloc[11:15]\nfeb_movies", "_____no_output_____" ], [ "march_movies = movies.iloc[21:25]\nmarch_movies", "_____no_output_____" ], [ "#basic concatenation\npd.concat([jan_movies,feb_movies,march_movies])", "_____no_output_____" ], [ "# Ignoring the index\npd.concat([jan_movies,feb_movies,march_movies], ignore_index=True)", "_____no_output_____" ], [ "# Setting labels to original tables\npd.concat([jan_movies,feb_movies,march_movies], ignore_index=False, keys=['jan', 'feb', 'mar'])", 
"_____no_output_____" ], [ "jan_tags = taglines.iloc[1:5]\njan_tags", "_____no_output_____" ], [ "# Concatenate tables with different column names\npd.concat([jan_movies,jan_tags], sort=True) #<-- sorting column name", "_____no_output_____" ], [ "pd.concat([jan_movies,jan_tags], sort=False) #<-- without sorting column names bydefault False", "_____no_output_____" ], [ "# Concatenate tables with different column names\npd.concat([jan_movies, jan_tags],join='inner')#<-- applying inner join on columns by default outer", "_____no_output_____" ] ], [ [ "### Using append method\n**.append()**\n- Simplified version of **.concat()**\n- suppor : **sort_index** and **sort**\n- Not support : **keys** and **join** i:e. always **join == outer**\n ", "_____no_output_____" ] ], [ [ "jan_movies.append([feb_movies,march_movies], ignore_index=True, sort=True)", "_____no_output_____" ] ], [ [ "## Verifying integrity", "_____no_output_____" ], [ "<img src= './media/verfying_integrity.png'>", "_____no_output_____" ], [ "## Validating merges\n**.merge(validate=None)**\n- check if merge is not specified type\n - 'one to one'\n - 'one to many'\n - 'many to one'\n - 'many to many'", "_____no_output_____" ] ], [ [ "# lets check it on movies and taglines\nprint(movies.merge(taglines , on='id', validate='one_to_one').shape)\nmovies.merge(taglines , on='id', validate='one_to_one').head()", "(3955, 5)\n" ] ], [ [ "if one possible we'll get below error", "_____no_output_____" ], [ "**Traceback (most recent call last):<br>\nMergeError: Merge keys are not unique in right dataset; not a one-to-one merge**", "_____no_output_____" ], [ "## Verifying concatenations\n**.concat(verify_integrity=False)** :\n- Check whether the new concatenated index contains duplicates\n- Default value is **False**", "_____no_output_____" ] ], [ [ "pd.concat([jan_movies,feb_movies], verify_integrity=False)", "_____no_output_____" ], [ "duplicate_jan_movies = movies.iloc[1:5]\nduplicate_feb_movies = movies.iloc[4:5]", "_____no_output_____" ], [ "pd.concat([duplicate_jan_movies,duplicate_feb_movies], verify_integrity=False)", "_____no_output_____" ], [ "#<-- Give Error because integrity is true to chk duplicated\npd.concat([duplicate_jan_movies,duplicate_feb_movies], verify_integrity=True) ", "_____no_output_____" ] ], [ [ "# Practice", "_____no_output_____" ], [ "### Task1", "_____no_output_____" ], [ "#### Required datasets", "_____no_output_____" ] ], [ [ "employees = pd.read_csv('./employees.csv')\nemployees.head()", "_____no_output_____" ], [ "top_cust = pd.read_csv('./top_cust.csv')\ntop_cust.head()", "_____no_output_____" ] ], [ [ "#### requirements\n- Merge employees and top_cust with a left join, setting indicator argument to True. Save the result to empl_cust.\n- Select the srid column of empl_cust and the rows where _merge is 'left_only'. 
Save the result to srid_list.\n- Subset the employees table and select those rows where the srid is in the variable srid_list and print the results.", "_____no_output_____" ] ], [ [ "# Merge employees and top_cust\nempl_cust = employees.merge(top_cust, on='srid', \n how='left', indicator=True)\n\n# Select the srid column where _merge is left_only\nsrid_list = empl_cust.loc[empl_cust['_merge'] == 'left_only', 'srid']\n\n# Get employees not working with top customers\nemployees[employees['srid'].isin(srid_list)]", "_____no_output_____" ] ], [ [ "### Task2", "_____no_output_____" ], [ "#### The required datasets", "_____no_output_____" ] ], [ [ "non_mus_tcks = pd.read_csv('./non_musk_tcks.csv')\nnon_mus_tcks.head()", "_____no_output_____" ], [ "top_invoices = pd.read_csv('./top_invoices.csv')\ntop_invoices.head()", "_____no_output_____" ], [ "genres = pd.read_csv('./genres.csv')\ngenres.head()", "_____no_output_____" ] ], [ [ "#### The required tasks\n- Merge non_mus_tcks and top_invoices on tid using an inner join. Save the result as tracks_invoices.\n- Use .isin() to subset the rows of non_mus_tck where tid is in the tid column of tracks_invoices. Save the result as top_tracks.\n- Group top_tracks by gid and count the tid rows. Save the result to cnt_by_gid.\n- Merge cnt_by_gid with the genres table on gid and print the result.", "_____no_output_____" ] ], [ [ "non_mus_tcks.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 60 entries, 0 to 59\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 tid 60 non-null object \n 1 name 60 non-null object \n 2 aid 60 non-null int64 \n 3 mtid 60 non-null int64 \n 4 gid 60 non-null float64\n 5 u_price 59 non-null float64\ndtypes: float64(2), int64(2), object(2)\nmemory usage: 2.9+ KB\n" ], [ "top_invoices.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16 entries, 0 to 15\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ilid 16 non-null int64 \n 1 iid 16 non-null int64 \n 2 tid 16 non-null int64 \n 3 uprice 16 non-null float64\n 4 quantity 16 non-null int64 \ndtypes: float64(1), int64(4)\nmemory usage: 768.0 bytes\n" ], [ "def numbers(x):\n try:\n x = str(x)\n return \"\".join([i for i in x if str.isnumeric(i)])\n except:\n return 0\nnon_mus_tcks.tid.apply(numbers).head()", "_____no_output_____" ], [ "import numpy as np\nnon_mus_tcks['tid'] = non_mus_tcks['tid'].apply(numbers)\nnon_mus_tcks['tid'] = non_mus_tcks['tid'].apply(np.int64)", "_____no_output_____" ], [ "# Merge the non_mus_tck and top_invoices tables on tid\ntracks_invoices = non_mus_tcks.merge(top_invoices, on='tid')\n\n# Use .isin() to subset non_mus_tcsk to rows with tid in tracks_invoices\ntop_tracks = non_mus_tcks[non_mus_tcks['tid'].isin(tracks_invoices['tid'])]\n\n# Group the top_tracks by gid and count the tid rows\ncnt_by_gid = top_tracks.groupby(['gid'], as_index=False).agg({'tid':'count'})\n\n# Merge the genres table to cnt_by_gid on gid and print\ncnt_by_gid.merge(genres, on='gid')", "C:\\Users\\qasim\\anaconda3\\lib\\site-packages\\pandas\\core\\reshape\\merge.py:1113: UserWarning: You are merging on int and float columns where the float values are not equal to their int representation\n warnings.warn(\n" ] ], [ [ "### Task3", "_____no_output_____" ], [ "#### required datasets", "_____no_output_____" ] ], [ [ "tracks_master = pd.read_csv('./tracks_master.csv')\ntracks_master.head()", "_____no_output_____" ], [ "tracks_ride = 
pd.read_csv('./tracks_ride.csv')\ntracks_ride.head()", "_____no_output_____" ], [ "tracks_st = pd.read_csv('./tracks_st.csv')\ntracks_st.head()", "_____no_output_____" ] ], [ [ "#### required tasks\n- Concatenate tracks_master, tracks_ride, and tracks_st, in that order, setting sort to True.\n- Concatenate tracks_master, tracks_ride, and tracks_st, where the index goes from 0 to n-1.\n- Concatenate tracks_master, tracks_ride, and tracks_st, showing only columns that are in all tables.", "_____no_output_____" ] ], [ [ "# Concatenate the tracks\ntracks_from_albums = pd.concat([tracks_master,tracks_ride,tracks_st],\n sort=True)\ntracks_from_albums.head()", "_____no_output_____" ], [ "# Concatenate the tracks so the index goes from 0 to n-1\ntracks_from_albums = pd.concat([tracks_master, tracks_ride, tracks_st],\n ignore_index = True,\n sort=True)\ntracks_from_albums.head()", "_____no_output_____" ], [ "# Concatenate the tracks, show only columns names that are in all tables\ntracks_from_albums = pd.concat([tracks_master, tracks_ride, tracks_st],join= 'inner', sort=True)\ntracks_from_albums.head()", "_____no_output_____" ] ], [ [ "### Task4", "_____no_output_____" ], [ "#### required datasets", "_____no_output_____" ] ], [ [ "inv_jul = pd.read_csv('./inv_jul.csv')\ninv_jul.head()", "_____no_output_____" ], [ "inv_aug = pd.read_csv('./inv_aug.csv')\ninv_aug.head()", "_____no_output_____" ], [ "inv_sep = pd.read_csv('./inv_sep.csv')\ninv_sep.head()", "_____no_output_____" ] ], [ [ "- Concatenate the three tables together vertically in order with the oldest month first, adding '7Jul', '8Aug', and '9Sep' as keys for their respective months, and save to variable avg_inv_by_month.\n- Use the .agg() method to find the average of the total column from the grouped invoices.\n- Create a bar chart of avg_inv_by_month.", "_____no_output_____" ] ], [ [ "# Concatenate the tables and add keys\ninv_jul_thr_sep = pd.concat([inv_jul, inv_aug, inv_sep], \n keys=['7Jul', '8Aug', '9Sep'])\ninv_jul_thr_sep", "_____no_output_____" ], [ "# inv_jul_thr_sep['total']=inv_jul_thr_sep['total'].astype(float)", "_____no_output_____" ], [ "inv_jul_thr_sep['total'] = inv_jul_thr_sep['total'].apply(numbers)\ninv_jul_thr_sep['total'] = inv_jul_thr_sep['total'].apply(np.int64)", "_____no_output_____" ], [ "# Group the invoices by the index keys and find avg of the total column\navg_inv_by_month = inv_jul_thr_sep.groupby(level=0).agg({'total':'mean'})\n\n# Bar plot of avg_inv_by_month\navg_inv_by_month.plot(kind='bar')\nplt.show()", "_____no_output_____" ] ], [ [ "### Task5", "_____no_output_____" ], [ "#### Required tables", "_____no_output_____" ] ], [ [ "artists = pd.read_csv('./artist.csv')\nartists.head()", "_____no_output_____" ], [ "albums = pd.read_csv('./album.csv')\nalbums.head()", "_____no_output_____" ] ], [ [ "- You have been given 2 tables, artists, and albums. Use the console to merge them using artists.merge(albums, on='artid').head(). 
Adjust the validate argument to answer which statement is False.\n\n1- You can use 'many_to_many' without an error, since there is a duplicate key in one of the tables.\n\n2- You can use 'one_to_many' without error, since there is a duplicate key in the right table.\n\n3- You can use 'many_to_one' without an error, since there is a duplicate key in the left table.", "_____no_output_____" ] ], [ [ "# artists.merge(albums, on='artid').head()", "_____no_output_____" ], [ "# artists.merge(albums, on='artid', validate = 'one_to_many').head()", "_____no_output_____" ] ], [ [ "### Task6", "_____no_output_____" ], [ "#### required file", "_____no_output_____" ] ], [ [ "classic_18 = pd.read_csv('./classic_18.csv')\nclassic_18.head()", "_____no_output_____" ], [ "classic_19 = pd.read_csv('./classic_19.csv')\nclassic_19.head()", "_____no_output_____" ], [ "pop_18 = pd.read_csv('./pop_18.csv')\npop_18.head()", "_____no_output_____" ], [ "pop_19 = pd.read_csv('./pop_19.csv')\npop_19.head()", "_____no_output_____" ] ], [ [ "- Concatenate the classic_18 and classic_19 tables vertically where the index goes from 0 to n-1, and save to classic_18_19.\n- Concatenate the pop_18 and pop_19 tables vertically where the index goes from 0 to n-1, and save to pop_18_19.\n- With classic_18_19 on the left, merge it with pop_18_19 on tid using an inner join.\n- Use .isin() to filter classic_18_19 where tid is in classic_pop.", "_____no_output_____" ] ], [ [ "# Concatenate the classic tables vertically\nclassic_18_19 = pd.concat([classic_18, classic_19], ignore_index=True)\n\n# Concatenate the pop tables vertically\npop_18_19 = pd.concat([pop_18, pop_19], ignore_index=True)\n\n# Merge classic_18_19 with pop_18_19\nclassic_pop = classic_18_19.merge(pop_18_19, on='tid')\n\n# Using .isin(), filter classic_18_19 rows where tid is in classic_pop\npopular_classic = classic_18_19[classic_18_19['tid'].isin(classic_pop['tid'])]\n\n# Print popular chart\nprint(popular_classic)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a3fa83e988e55b1640dd58468ac3c24d74a0ddf
12,844
ipynb
Jupyter Notebook
chapters/04_machine-learning/01_PCA-demo.ipynb
lperezmo/pythonic-science
9fa448d0c981c92cae2be2bc50cf1dbe92e063df
[ "Unlicense" ]
5
2017-04-03T20:30:54.000Z
2019-04-01T16:46:27.000Z
chapters/04_machine-learning/01_PCA-demo.ipynb
lperezmo/pythonic-science
9fa448d0c981c92cae2be2bc50cf1dbe92e063df
[ "Unlicense" ]
null
null
null
chapters/04_machine-learning/01_PCA-demo.ipynb
lperezmo/pythonic-science
9fa448d0c981c92cae2be2bc50cf1dbe92e063df
[ "Unlicense" ]
11
2016-12-15T21:30:59.000Z
2020-04-02T01:07:47.000Z
21.300166
151
0.506774
[ [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA", "_____no_output_____" ] ], [ [ "### Generate a dataset", "_____no_output_____" ] ], [ [ "xy = np.random.multivariate_normal([0,0], [[10,7],[7,10]],1000)\nplt.plot(xy[:,0],xy[:,1],\"o\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Create a Principle Component Analysis (PCA) object\n\nWhat is `n_components`?", "_____no_output_____" ] ], [ [ "pca = PCA(n_components=2)", "_____no_output_____" ] ], [ [ "`num_components` is the number of axes on which you spread the data out. You can only have as many components as you have axes (2 in this case).", "_____no_output_____" ], [ "### Fit the axes\n\nWhat does the following code do?", "_____no_output_____" ] ], [ [ "xy_pca = pca.fit(xy)", "_____no_output_____" ] ], [ [ "Does the PCA, finding the primary axes of variation. ", "_____no_output_____" ] ], [ [ "plt.plot(xy[:,0],xy[:,1],\"o\")\n\nscalar = xy_pca.explained_variance_[0]\nplt.plot([0,xy_pca.components_[0,0]*scalar/2],[0,xy_pca.components_[0,1]*scalar/2],color=\"red\")\nplt.plot([0,-xy_pca.components_[0,0]*scalar/2],[0,-xy_pca.components_[0,1]*scalar/2],color=\"red\")\n\nscalar = xy_pca.explained_variance_[1]\nplt.plot([0,xy_pca.components_[1,0]*scalar/2],[0,xy_pca.components_[1,1]*scalar/2],color=\"yellow\")\nplt.plot([0,-xy_pca.components_[1,0]*scalar/2],[0,-xy_pca.components_[1,1]*scalar/2],color=\"yellow\")", "_____no_output_____" ] ], [ [ "### What does the following do?", "_____no_output_____" ] ], [ [ "xy_trans = xy_pca.transform(xy)", "_____no_output_____" ] ], [ [ "Transforms `x` and `y` onto the PCA axes.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1,2,figsize=(10,5))\nax[0].plot(xy[:,0],xy[:,1],\"o\")\nax[0].set_xlabel(\"x\")\nax[0].set_ylabel(\"y\")\nax[0].set_xlim((-15,15)); ax[0].set_ylim((-15,15))\nax[1].plot(xy_trans[:,0],xy_trans[:,1],\"o\")\nax[1].set_xlabel(\"PCA1\")\nax[1].set_ylabel(\"PCA2\")\nax[1].set_xlim((-15,15)); ax[1].set_ylim((-15,15))\nplt.show()", "_____no_output_____" ] ], [ [ "### What does the following do?", "_____no_output_____" ] ], [ [ "print(\"Variation explained:\")\nprint(\"First component: {:.3f}\".format(xy_pca.explained_variance_ratio_[0]))\nprint(\"Second component: {:.3f}\".format(xy_pca.explained_variance_ratio_[1]))", "_____no_output_____" ] ], [ [ "Describes how much variation each PCA axis captures. \n\nInformally: if you only included the first component in a predictive model, the $R^{2}$ between you prediction and reality would be 0.85.", "_____no_output_____" ], [ "### Some helper code, which takes an xy_pair and does all of the steps above. 
", "_____no_output_____" ] ], [ [ "def pca_wrapper(xy_pairs):\n \"\"\"\n Take an array of x/y data and perform a principle component analysis.\n \"\"\"\n \n fig, ax = plt.subplots(1,2,figsize=(10,5))\n \n ax[0].plot(xy_pairs[:,0],xy_pairs[:,1],\"o\")\n ax[0].set_xlim((-18,18))\n ax[0].set_ylim((-18,18))\n ax[0].set_title(\"raw x,y data\")\n ax[0].set_xlabel(\"x\")\n ax[0].set_ylabel(\"y\")\n\n # Perform the PCA fit\n pca = PCA(n_components=2)\n z = pca.fit(xy_pairs)\n \n # Transforom the data onto the new PCA axes\n new_xy_pairs = z.transform(xy_pairs)\n \n # Plot the PCA data\n ax[1].plot(new_xy_pairs[:,0],new_xy_pairs[:,1],\"o\")\n ax[1].set_title(\"PCA transformed data\")\n ax[1].set_xlim((-18,18))\n ax[1].set_ylim((-18,18))\n ax[1].set_xlabel(\"PCA1\")\n ax[1].set_ylabel(\"PCA2\")\n\n print(\"Variation explained:\")\n print(\"First component: {:.3f}\".format(pca.explained_variance_ratio_[0]))\n print(\"Second component: {:.3f}\".format(pca.explained_variance_ratio_[1]))", "_____no_output_____" ] ], [ [ "### How does fraction variation relate to skew in the data?", "_____no_output_____" ] ], [ [ "d1 = np.random.multivariate_normal([0,0], [[10,1],[1,10]],1000) \npca_wrapper(d1)", "_____no_output_____" ], [ "d2 = np.random.multivariate_normal([0,0], [[10,5],[5,10]],1000)\npca_wrapper(d2)", "_____no_output_____" ], [ "d3 = np.random.multivariate_normal([0,0], [[10,9],[9,10]],1000)\npca_wrapper(d3)", "_____no_output_____" ] ], [ [ "The stronger the covariation between parameters, the more readily the PCA can reduce dimensionality.", "_____no_output_____" ], [ "### Using PCA to try to classify things", "_____no_output_____" ], [ "### The \"Iris\" dataset\n<img style=\"margin:auto\" align=\"center\" src=\"https://www.math.umd.edu/~petersd/666/html/iris_with_labels.jpg\" />\n\n+ Three species of iris\n+ Four properties measured for many representatives from each species\n+ Properties are: sepal length, sepal width, petal length, petal width", "_____no_output_____" ], [ "### Load in the data", "_____no_output_____" ] ], [ [ "iris = datasets.load_iris()\nobs = iris.data\nspecies = iris.target\n\nmean = obs.mean(axis=0)\nstd = obs.std(axis=0)\nobs = (obs - mean)/std", "_____no_output_____" ] ], [ [ "The mean, standard deviation business normalizes the data so the values are all on the same scale. ", "_____no_output_____" ] ], [ [ "def plot_slice(obs_r,axis_i,axis_j):\n \"\"\"\n Define a helper function.\n \"\"\"\n \n plt.plot(obs_r[species == 0,axis_i],obs_r[species == 0,axis_j],\"o\",color='navy')\n plt.plot(obs_r[species == 1,axis_i],obs_r[species == 1,axis_j],\"o\",color='turquoise')\n plt.plot(obs_r[species == 2,axis_i],obs_r[species == 2,axis_j],\"o\",color='darkorange')\n plt.xlabel(axis_i)\n plt.ylabel(axis_j)\n\n plt.show()", "_____no_output_____" ] ], [ [ "### Species separate on some axes, but not all axes", "_____no_output_____" ] ], [ [ "plot_slice(obs,axis_i=0,axis_j=1)", "_____no_output_____" ] ], [ [ "### Do PCA", "_____no_output_____" ] ], [ [ "pca = PCA(n_components=4)\nobs_pca = pca.fit(obs)\nobs_trans = obs_pca.transform(obs)", "_____no_output_____" ] ], [ [ "### What is different about PCA axes?", "_____no_output_____" ] ], [ [ "plot_slice(obs_trans,axis_i=0,axis_j=1)", "_____no_output_____" ] ], [ [ "All of that separating power is jammed into the first axis. 
", "_____no_output_____" ], [ "### Quantify this with explained varience ratio:", "_____no_output_____" ] ], [ [ "for r in obs_pca.explained_variance_ratio_:\n print(\"{:.3f}\".format(r))\n\n", "_____no_output_____" ] ], [ [ "### Summary\n+ PCA is a way to spread data out on \"natural\" axes\n+ Clusters in PCA space can be used to classify things\n+ Axes may be hard to interpret directly", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a3fcc16df4d77d062fa1ab4c3855c9fc989a59c
539,093
ipynb
Jupyter Notebook
notebooks/bias.ipynb
vafaei-ar/ccgpack
73cf1d644f747fd2251ed9256cae740e6b052da7
[ "BSD-3-Clause" ]
3
2019-04-15T08:40:32.000Z
2019-04-18T22:06:29.000Z
notebooks/bias.ipynb
vafaei-ar/ccgpack
73cf1d644f747fd2251ed9256cae740e6b052da7
[ "BSD-3-Clause" ]
null
null
null
notebooks/bias.ipynb
vafaei-ar/ccgpack
73cf1d644f747fd2251ed9256cae740e6b052da7
[ "BSD-3-Clause" ]
2
2019-04-15T08:41:35.000Z
2021-10-02T08:24:22.000Z
907.563973
293,992
0.954483
[ [ [ "%matplotlib inline\n\nimport numpy as np\nimport pylab as plt\nimport ccgpack as ccg\nfrom itertools import product\nfrom matplotlib.colors import LogNorm", "_____no_output_____" ], [ "cl = np.load('../data/cl_planck_lensed.npy')\nsfs = ccg.StochasticFieldSimulator(cl)\nnside = 1024\nsize = 30\nms = []", "_____no_output_____" ], [ "for i in range(4):\n ms.append(sfs.simulate(nside,size))", "_____no_output_____" ], [ "fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(ncols=2\n ,nrows=2,figsize=(6 ,6))\nax1.imshow(ms[0])\nax2.imshow(ms[1])\nax3.imshow(ms[2])\nax4.imshow(ms[3])", "_____no_output_____" ], [ "# ll0 = cl[:600,0]\n# dl0 = cl[:600,1]*(ll0[:600]*(ll0[:600]+1)/(2*np.pi))\n# ll,p1 = ccg.power_spectrum(ms[0],size=15)\n# plt.plot(ll0,dl0,'k--')\n# plt.plot(ll[:600],p1[:600],'b')\n# plt.xscale('log')\n# plt.yscale('log')\n# plt.xlim(2,600)\n# # plt.ylim(5e-8,5e4)\n", "_____no_output_____" ], [ "cor,ecor = ccg.correlarion_fucntion(ms[0],n_p=1e6)\nplt.plot(cor)", "_____no_output_____" ], [ "ksi = ccg.ppcf(ms[0],2,1e6,700)\nplt.plot(ksi)", "_____no_output_____" ], [ "def N1(d,num=100,gt=True):\n nu = np.linspace(d.min(),d.max(),num)\n n1 = []\n for i in nu:\n if gt:\n n1.append(np.mean(d>i))\n else:\n n1.append(np.mean(d<i))\n n1 = np.array(n1)\n return nu,n1\n\ndef exterma(arr,peak=True):\n \n dim = len(arr.shape) # number of dimensions\n offsets = [0, -1, 1] # offsets, 0 first so the original entry is first \n filt = np.ones(arr.shape,dtype=np.int8)\n for shift in product(offsets, repeat=dim):\n if np.all(np.array(shift)==0):\n continue\n # print(shift)\n # print(np.roll(b, shift, np.arange(dim)))\n rolled = np.roll(arr, shift, np.arange(dim))\n \n if peak:\n filt = filt*(arr>rolled)\n else:\n filt = filt*(arr<rolled)\n \n return filt ", "_____no_output_____" ], [ "ms[0] = ms[0]-ms[0].mean()\nms[0] = ms[0]/ms[0].std()\nnu,n1_gt = N1(ms[0],num=100,gt=True)\nplt.plot(nu,n1_gt)\n\nms[0] = ms[0]-ms[0].mean()\nms[0] = ms[0]/ms[0].std()\nnu,n1_lt = N1(ms[0],num=100,gt=False)\nplt.plot(nu,n1_lt)", "_____no_output_____" ], [ "plt.plot(nu[:-1],np.diff(n1_gt))\nplt.plot(nu[:-1],np.diff(n1_lt))", "_____no_output_____" ], [ "th = 0\n\nmcopy = ms[0]+0\npeaks = exterma(mcopy ,peak=True)\nmcopy[np.logical_not(peaks.astype(bool))] = 0\n\nmcopy[mcopy<th] = 0\n\nnf1 = np.argwhere(mcopy).T\nnnn = 5*nf1.shape[1]\nrlist = np.random.randint(0,1024,(2,nnn))\n\nksi1 = ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=700)\n# plt.plot(ksi1)\n\nfig,(ax1,ax2) = plt.subplots(1,2,figsize=(16,8))\nax1.imshow(mcopy,cmap='gray')\nrimg = np.zeros(mcopy.shape)\nrows, cols = zip(*rlist.T)\nrimg[rows, cols] = 1\nax2.imshow(rimg,cmap='gray')\n\nmask = np.zeros(ms[0].shape)+1\nmask[700:1000,100:400] = 0\nmask[100:300,700:900] = 0\nmask[100:300,200:400] = 0\nmask[700:800,700:890] = 0\n\nmcopy = ms[0]*mask+0\npeaks = exterma(mcopy ,peak=True)\nmcopy[np.logical_not(peaks.astype(bool))] = 0\n\nmcopy[mcopy<th] = 0\n\nnf1 = np.argwhere(mcopy).T\n\nnnn = 5*nf1.shape[1]\n\nrlist = np.random.randint(0,1024,(nnn,2))\nrimg = np.zeros(mcopy.shape)\nrows, cols = zip(*rlist)\nrimg[rows, cols] = 1\nrimg = rimg*mask\nrlist = np.argwhere(rimg).T\n\nksi2 = ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=700)\n\nfig,(ax1,ax2) = plt.subplots(1,2,figsize=(16,8))\nax1.imshow(mcopy,cmap='gray')\nax2.imshow(rimg,cmap='gray')\n\nrlist = np.random.randint(0,1024,(2,nnn))\nksi3 = ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=700)\n\n\n\nksi4 = eval_ksi(ms[0],mask,thresholds=[0],peak=True)[0] \n \n\n\n# plt.plot(ksi)", 
"_____no_output_____" ], [ "plt.plot(ksi1,'r',label='normal')\nplt.plot(ksi2,'b',label='both_masked')\n\nplt.plot(ksi4,'k',label='func')\n\nplt.plot(ksi3,'g',label='peak_masked')\n\n\n\nplt.legend()\nplt.savefig('tpcf.jpg',dpi=150)", "_____no_output_____" ], [ "# thresholds = [-2,-1,0,1,2]\n# ksis = eval_ksi(ms[0],mask,thresholds,peak=True,rmax=700,crand=5)\n\nfor i in range(len(ksis)):\n plt.plot(ksis[i],label=str(thresholds[i]))\n \nplt.xlim(-1,100)\nplt.legend()", "_____no_output_____" ], [ "thresholds = [-2,-1,0,1,2]\nksis = eval_ksi(ms[0],mask,thresholds,peak=False,rmax=700,crand=5)\n\nfor i in range(len(ksis)):\n plt.plot(ksis[i],label=str(thresholds[i]))\n \nplt.xlim(-1,100)\nplt.legend()", "_____no_output_____" ], [ "\ndef eval_ksi(m,mask,thresholds,peak=True,rmax=700,crand=5):\n\n ksis = []\n \n mc1 = m*mask\n nside = mc1.shape[0]\n peaks = exterma(mc1 ,peak=peak)\n mc1[np.logical_not(peaks.astype(bool))] = 0\n\n for th in thresholds:\n mc2 = mc1+0\n if peak:\n mc2[mc2<th] = 0\n else:\n mc2[mc2>th] = 0\n\n nf1 = np.argwhere(mc2).T\n nnn = crand*nf1.shape[1]\n rlist = np.random.randint(0,nside,(nnn,2))\n rimg = np.zeros(mc2.shape)\n rows, cols = zip(*rlist)\n rimg[rows, cols] = 1\n rimg = rimg*mask\n rlist = np.argwhere(rimg).T\n\n ksis.append(ccg.ffcf_no_random(fl1=nf1, fl2=nf1, rlist=rlist, rmax=rmax))\n \n return ksis\n \n \n", "_____no_output_____" ], [ "# def bias(m,ths,kmin,kmax):\n \n# if not isinstance(ths, list):\n# ths = [ths]\n \n# bs = []\n# for th in ths:\n# ksi = ccg.ppcf(m,th,1e6,700)\n# biask = np.sqrt(np.absolute(ksi[:700]/cor[:700]))\n\n# bs.append(np.mean(biask[kmin:kmax]))\n \n# return bs", "_____no_output_____" ], [ "# ths = [0.5,1.0,1.5,2.0,2.5]\n\n# kmin = 10\n# kmax = 50\n\n# bs = bias(ms[0],ths,kmin,kmax)\n\n# bsth = np.array(ths)\n# plt.plot(ths,bsth,'r--')\n\n# plt.plot(ths,bs,'bo')\n\n# plt.xlabel(r'$\\nu$',fontsize=15)\n# plt.ylabel(r'$b(\\nu)$',fontsize=15)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a3fe471f838da9a29bd39ed32b453d5514b149b
292,120
ipynb
Jupyter Notebook
examples/pennylane/3_Quantum_chemistry_with_VQE.ipynb
waihoh/amazon-braket-examples
233af4378b34e786603cae2c8e1ee6f33b41e9a3
[ "Apache-2.0" ]
null
null
null
examples/pennylane/3_Quantum_chemistry_with_VQE.ipynb
waihoh/amazon-braket-examples
233af4378b34e786603cae2c8e1ee6f33b41e9a3
[ "Apache-2.0" ]
null
null
null
examples/pennylane/3_Quantum_chemistry_with_VQE.ipynb
waihoh/amazon-braket-examples
233af4378b34e786603cae2c8e1ee6f33b41e9a3
[ "Apache-2.0" ]
null
null
null
530.163339
253,584
0.944903
[ [ [ "# Quantum chemistry with VQE", "_____no_output_____" ], [ "This tutorial will show you how to solve an important problem for quantum chemistry using PennyLane on Amazon Braket: finding the ground-state energy of a molecule. The problem can be tackled using near-term quantum hardware by implementing the variational quantum eigensolver (VQE) algorithm. You can find further details on quantum chemistry and VQE in both the [Braket VQE](../hybrid_quantum_algorithms/VQE_Chemistry/VQE_chemistry_braket.ipynb) notebook and PennyLane [tutorials](https://pennylane.ai/qml/demos_basics.html).", "_____no_output_____" ], [ "## From quantum chemistry to quantum circuits", "_____no_output_____" ], [ "Our first step is to convert our chemistry problem into something that can be tackled with a quantum computer. To do this, we will use PennyLane's ``qchem`` package. If running on a local machine, the ``qchem`` package must be installed separately by following [these](https://pennylane.readthedocs.io/en/stable/introduction/chemistry.html) instructions.", "_____no_output_____" ] ], [ [ "import pennylane as qml\nfrom pennylane import qchem\nfrom pennylane import numpy as np", "_____no_output_____" ] ], [ [ "The input chemistry data is often provided in the form of a geometry file containing details about the molecule. Here, we consider the hydrogen molecule $\\mathrm{H}_2$ whose atomic structure is stored in the [h2.xyz](./qchem/h2.xyz) file. The qubit Hamiltonian for $\\mathrm{H}_2$ is built using the ``qchem`` package.", "_____no_output_____" ] ], [ [ "h, qubits = qchem.molecular_hamiltonian(name=\"h2\", geo_file=\"qchem/h2.xyz\")\nprint(h)", "(-0.042078976477823424) [I0]\n+ (0.17771287465139962) [Z0]\n+ (0.17771287465139957) [Z1]\n+ (-0.24274280513140412) [Z2]\n+ (-0.24274280513140417) [Z3]\n+ (0.17059738328801044) [Z0 Z1]\n+ (0.04475014401535161) [Y0 X1 X2 Y3]\n+ (-0.04475014401535161) [Y0 Y1 X2 X3]\n+ (-0.04475014401535161) [X0 X1 Y2 Y3]\n+ (0.04475014401535161) [X0 Y1 Y2 X3]\n+ (0.1229330505618379) [Z0 Z2]\n+ (0.1676831945771895) [Z0 Z3]\n+ (0.1676831945771895) [Z1 Z2]\n+ (0.1229330505618379) [Z1 Z3]\n+ (0.1762764080431958) [Z2 Z3]\n" ] ], [ [ "In the VQE algorithm, we compute the energy of the $\\mathrm{H}_2$ molecule by measuring the expectation value of the above Hamiltonian on a variational quantum circuit. Our objective is to train the parameters of the circuit so that the expectation value of the Hamiltonian is minimized, thereby finding the ground state energy of the molecule.\n\nIn this tutorial, we also want to compute the total spin. To that aim, we use the ``qchem`` package to build the total-spin operator $S^2$:", "_____no_output_____" ] ], [ [ "electrons = 2 # Molecular hydrogen has two electrons\n\nS2 = qchem.spin2(electrons, qubits)\nprint(S2)", "(0.75) [I0]\n+ (0.375) [Z1]\n+ (-0.375) [Z0 Z1]\n+ (0.125) [Z0 Z2]\n+ (0.375) [Z0]\n+ (-0.125) [Z0 Z3]\n+ (-0.125) [Z1 Z2]\n+ (0.125) [Z1 Z3]\n+ (0.375) [Z2]\n+ (0.375) [Z3]\n+ (-0.375) [Z2 Z3]\n+ (0.125) [Y0 X1 Y2 X3]\n+ (0.125) [Y0 Y1 X2 X3]\n+ (0.125) [Y0 Y1 Y2 Y3]\n+ (-0.125) [Y0 X1 X2 Y3]\n+ (-0.125) [X0 Y1 Y2 X3]\n+ (0.125) [X0 X1 X2 X3]\n+ (0.125) [X0 X1 Y2 Y3]\n+ (0.125) [X0 Y1 X2 Y3]\n" ] ], [ [ "## Grouping observables to reduce circuit executions", "_____no_output_____" ], [ "Suppose we want to measure the expectation value of the electronic Hamiltonian ``h``. 
This Hamiltonian is composed of 15 individual observables that are tensor products of Pauli operators:", "_____no_output_____" ] ], [ [ "print(\"Number of Pauli terms in h:\", len(h.ops))", "Number of Pauli terms in h: 15\n" ] ], [ [ "A straightforward approach to measuring the expectation value would be to implement the circuit 15 times, and each time measuring one of the Pauli terms that form part of the Hamiltonian ``h``. However, we could be more efficient. The Pauli terms can be separated into groups (see PennyLane's [grouping](https://pennylane.readthedocs.io/en/stable/code/qml_grouping.html) module) that can be measured concurrently on a single circuit. Elements of each group are known as qubit-wise commuting observables. The Hamiltonian ``h`` can be split into 5 groups:", "_____no_output_____" ] ], [ [ "groups, coeffs = qml.grouping.group_observables(h.ops, h.coeffs)\nprint(\"Number of qubit-wise commuting groups:\", len(groups))", "Number of qubit-wise commuting groups: 5\n" ] ], [ [ "Practically, this means that instead of executing 15 separate circuits, we just need to execute 5. This saving can become even more pronounced as the number of Pauli terms in the Hamiltonian increases. For example, switching to a larger molecule or a different chemical basis set can increase both the number of qubits and the number of terms.", "_____no_output_____" ], [ "Fortunately, the PennyLane/Braket pipeline has builtin support for pre-grouping the observables in a Hamiltonian to minimize the number of device executions, saving both runtime and simulation fees when using remote devices. Optimized observable grouping will be used in the rest of this tutorial.", "_____no_output_____" ], [ "![grouping.png](attachment:grouping.png)", "_____no_output_____" ], [ "## Defining an ansatz circuit", "_____no_output_____" ], [ "We now set up the ansatz circuit that will be trained to prepare the ground state of the Hamiltonian. Our first step is to load the local Braket device:", "_____no_output_____" ] ], [ [ "dev = qml.device(\"braket.local.qubit\", wires=qubits)", "_____no_output_____" ] ], [ [ "This tutorial uses a chemically-inspired circuit called the Unitary Coupled-Cluster Singles and Doubles ([UCCSD](https://pennylane.readthedocs.io/en/stable/code/api/pennylane.templates.subroutines.UCCSD.html)) ansatz. To use this, we must define some additional inputs from quantum chemistry.", "_____no_output_____" ] ], [ [ "ref_state = qchem.hf_state(electrons, qubits) # Hartree-Fock state\nexcitations = qchem.excitations(electrons, qubits) # generate single- and double-excitations\ns_wires, d_wires = qchem.excitations_to_wires(*excitations) # map excitations to the wires that the UCCSD circuit will act on", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n<b>Note</b> A variety of alternative ansätze and templates are <a href=\"https://pennylane.readthedocs.io/en/latest/code/qml_templates.html#module-pennylane.templates.layers\">available</a> and different choices will result in varying circuit depth and number of trainable parameters.\n</div>", "_____no_output_____" ], [ "Our ansatz circuit is then simple to define:", "_____no_output_____" ] ], [ [ "def circuit(params, wires):\n qml.templates.UCCSD(params, init_state=ref_state, s_wires=s_wires, d_wires=d_wires, wires=wires)", "_____no_output_____" ] ], [ [ "Note that an output measurement has not yet been defined! 
This is the next step.", "_____no_output_____" ], [ "## Measuring the energy and total spin", "_____no_output_____" ], [ "We discussed earlier that we want to minimize the expectation value of the qubit Hamiltonian, corresponding to the energy of $\\mathrm{H}_2$. The expectation values of this Hamiltonian and the total spin $\\hat{S}^2$ operator can be defined using:", "_____no_output_____" ] ], [ [ "energy_expval = qml.ExpvalCost(circuit, h, dev, optimize=True)\nS2_expval = qml.ExpvalCost(circuit, S2, dev, optimize=True)", "_____no_output_____" ] ], [ [ "Notice the ``optimize=True`` option. This instructs PennyLane and Braket to break up each Hamiltonian into qubit-wise commuting groups for increased device-execution efficiency.\n\nLet's now initialize some random values and evaluate the energy and spin. The total spin $S$ of the prepared state can be obtained from the expectation value $\\langle \\hat{S}^2 \\rangle$ using $S=-\\frac{1}{2} + \\sqrt{\\frac{1}{4} + \\langle \\hat{S}^2 \\rangle}$. We can define a function to compute $S$:", "_____no_output_____" ] ], [ [ "def spin(params):\n return -0.5 + np.sqrt(1 / 4 + S2_expval(params))", "_____no_output_____" ], [ "np.random.seed(1967)\nparams = np.random.normal(0, np.pi, len(s_wires) + len(d_wires))", "_____no_output_____" ] ], [ [ "The energy and total spin are then", "_____no_output_____" ] ], [ [ "print(\"Energy:\", energy_expval(params))\nprint(\"Spin: \", spin(params))", "Energy: -0.5728383913503483\nSpin: 0.8358744751644029\n" ] ], [ [ "Since we have picked random parameters, the measured energy does not correspond to the ground state energy and the prepared state is not an eigenstate of the total-spin operator. We must now train the parameters to find the minimum energy.", "_____no_output_____" ], [ "## Minimizing the energy", "_____no_output_____" ], [ "The energy can be minimized by choosing an optimizer and running the standard optimization loop:", "_____no_output_____" ] ], [ [ "opt = qml.GradientDescentOptimizer(stepsize=0.4)", "_____no_output_____" ], [ "iterations = 40\n\nenergies = []\nspins = []\n\nfor i in range(iterations):\n params = opt.step(energy_expval, params)\n \n e = energy_expval(params)\n s = spin(params)\n \n energies.append(e)\n spins.append(s)\n \n if (i + 1) % 5 == 0:\n print(f\"Completed iteration {i + 1}\")\n print(\"Energy:\", e)\n print(\"Total spin:\", s)\n print(\"----------------\")\n \nprint(f\"Optimized energy: {e} Ha\")\nprint(f\"Corresponding total spin: {s}\")", "Completed iteration 5\nEnergy: -0.8431944276242789\nTotal spin: 0.5516429655083164\n----------------\nCompleted iteration 10\nEnergy: -1.0363737583927664\nTotal spin: 0.2384937727695714\n----------------\nCompleted iteration 15\nEnergy: -1.1051703984929993\nTotal spin: 0.08609968513511335\n----------------\nCompleted iteration 20\nEnergy: -1.1260757917995217\nTotal spin: 0.02977443469137786\n----------------\nCompleted iteration 25\nEnergy: -1.1328388288060651\nTotal spin: 0.0100760244293715\n----------------\nCompleted iteration 30\nEnergy: -1.135075420261958\nTotal spin: 0.00337487557590066\n----------------\nCompleted iteration 35\nEnergy: -1.1358187273449236\nTotal spin: 0.0011258798543014592\n----------------\nCompleted iteration 40\nEnergy: -1.1360660538819118\nTotal spin: 0.00037507164606986887\n----------------\nOptimized energy: -1.1360660538819118 Ha\nCorresponding total spin: 0.00037507164606986887\n" ] ], [ [ "The exact value for the ground state energy of molecular hydrogen has been theoretically calculated as 
``-1.136189454088`` Hartrees (Ha). Notice that the optimized energy is off by less than a thousandth of a Hartree. Furthermore, the optimized state is an eigenstate of the total-spin operator with eigenvalue $S=0$ as expected for the ground state of the $\\mathrm{H}_2$ molecule. Hence, our above results look very promising! We would get even closer to the theory values if we increase the number of iterations.\n\nLet's visualize how the two quantities evolved during optimization:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\ntheory_energy = -1.136189454088\ntheory_spin = 0\n\nplt.hlines(theory_energy, 0, 39, linestyles=\"dashed\", colors=\"black\")\nplt.plot(energies)\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Energy\")\n\naxs = plt.gca()\n\ninset = inset_axes(axs, width=\"50%\", height=\"50%\", borderpad=1)\ninset.hlines(theory_spin, 0, 39, linestyles=\"dashed\", colors=\"black\")\ninset.plot(spins, \"r\")\ninset.set_xlabel(\"Steps\")\ninset.set_ylabel(\"Total spin\");", "_____no_output_____" ] ], [ [ "We have learned how to efficiently find the ground state energy of a molecule using the PennyLane/Braket pipeline!", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n<b>What's next?</b> The <code>qchem</code> folder contains additional molecular structure files for different atomic separations of molecular hydrogen. Pick one of the separations and find the ground state energy. How does the ground state energy change with atomic separation? \n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a3fe719b0a20d28f4b13343deaa7de639a37c5c
2,899
ipynb
Jupyter Notebook
copernicusInit_001.ipynb
dmbernaal/copernicus
4e4b9692418bf6575f69a5b6de7228765266d14e
[ "Apache-2.0" ]
null
null
null
copernicusInit_001.ipynb
dmbernaal/copernicus
4e4b9692418bf6575f69a5b6de7228765266d14e
[ "Apache-2.0" ]
null
null
null
copernicusInit_001.ipynb
dmbernaal/copernicus
4e4b9692418bf6575f69a5b6de7228765266d14e
[ "Apache-2.0" ]
null
null
null
22.472868
105
0.535357
[ [ [ "# Copernicus Init", "_____no_output_____" ] ], [ [ "#export\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch", "_____no_output_____" ], [ "#export\n# selu init\n# custom initialization method when using SeLU Activations\ndef selu_normal_(tensor, mode1='fan_in', mode2='fan_out'):\n fan_in = nn.init._calculate_correct_fan(tensor, mode1)\n fan_out = nn.init._calculate_correct_fan(tensor, mode2)\n with torch.no_grad():\n return torch.randn(fan_in, fan_out) / math.sqrt(1./fan_in)\n \nnn.init.selu_normal_ = selu_normal_ # adding to nn package", "_____no_output_____" ], [ "#export\n# init method\ndef init_nn(m, init_method):\n \"\"\"\n Main function to initialize the nn\n m: model\n init_method: <nn.init method>\n example: nn.init.selu_normal_\n \"\"\"\n if init_method is None: init_method = nn.init.selu_normal_\n if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0) # for batchnorm layers\n if isinstance(m, (nn.Linear)): init_method(m.weight) # init weights with init_method\n for l in m.children(): init_nn(l) # recursion", "_____no_output_____" ], [ "!python notebook2script.py copernicusInit_001.ipynb", "Converted copernicusInit_001.ipynb to exp\\nb_copernicusInit.py\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a400774fa29979db06d0f262c3cb7d2880d6bc1
12,040
ipynb
Jupyter Notebook
ipynb/crawling_brand.ipynb
p829911/room_distance_analysis
fdf44c45bcfa3b1294c16e91626108d3c6bde2c5
[ "MIT" ]
5
2019-01-28T12:11:12.000Z
2019-08-16T05:29:25.000Z
ipynb/crawling_brand.ipynb
p829911/Room_Distance_Analysis
fdf44c45bcfa3b1294c16e91626108d3c6bde2c5
[ "MIT" ]
null
null
null
ipynb/crawling_brand.ipynb
p829911/Room_Distance_Analysis
fdf44c45bcfa3b1294c16e91626108d3c6bde2c5
[ "MIT" ]
null
null
null
22.504673
230
0.428821
[ [ [ "# 스타벅스 위치 크롤링", "_____no_output_____" ] ], [ [ "from selenium import webdriver", "_____no_output_____" ], [ "url = \"https://www.istarbucks.co.kr/store/store_map.do\"\ndriver = webdriver.Chrome()\ndriver.get(url)", "_____no_output_____" ], [ "# 데이터 가져오기\ndriver.find_element_by_css_selector(\".loca_search\").click()", "_____no_output_____" ], [ "driver.find_element_by_css_selector(\"#container > div > form > fieldset > div > section > article.find_store_cont > article > article:nth-child(4) > div.loca_step1 > div.loca_step1_cont > ul > li:nth-child(1) > a\").click()", "_____no_output_____" ], [ "driver.find_element_by_css_selector(\"#mCSB_2_container > ul > li:nth-child(1) > a\").click()", "_____no_output_____" ], [ "results = driver.find_elements_by_css_selector(\"#mCSB_3_container > ul > li\")", "_____no_output_____" ], [ "len(results)", "_____no_output_____" ], [ "starbucks_data = pd.DataFrame(columns=[\"name\", \"lat\", \"long\"])", "_____no_output_____" ], [ "for result in results:\n name = result.get_attribute(\"data-name\")\n lat = result.get_attribute(\"data-lat\")\n long = result.get_attribute(\"data-long\")\n starbucks_data.loc[len(starbucks_data)] = [name, lat, long]", "_____no_output_____" ], [ "starbucks_data.head()", "_____no_output_____" ], [ "starbucks_data.tail()", "_____no_output_____" ], [ "starbucks_data.to_csv(\"starbucks_data.csv\", index=False)", "_____no_output_____" ], [ "driver.close()", "_____no_output_____" ], [ "driver.quit()", "_____no_output_____" ] ], [ [ "# 맥도날드 위치 크롤링", "_____no_output_____" ] ], [ [ "url = \"http://m.mcdonalds.co.kr/me/kor/findus/district.do\"\ndriver = webdriver.Chrome()\ndriver.get(url)", "_____no_output_____" ], [ "driver.find_element_by_css_selector(\"#skeyword\").send_keys(\"서울특별시\")", "_____no_output_____" ], [ "driver.find_element_by_css_selector(\"#shForm > div > div > input\").click()", "_____no_output_____" ], [ "while True:\n try:\n driver.find_element_by_css_selector(\"#addPage\").click()\n except:\n break", "_____no_output_____" ], [ "results = driver.find_elements_by_css_selector(\"#listAjax > li\")", "_____no_output_____" ], [ "len(results)", "_____no_output_____" ], [ "mcdonald_data = pd.DataFrame(columns=[\"name\", \"address\"])", "_____no_output_____" ], [ "for result in results:\n name = result.text.split(\"\\n\")[0]\n address = result.text.split(\"\\n\")[1]\n mcdonald_data.loc[len(mcdonald_data)] = [name, address]", "_____no_output_____" ], [ "len(mcdonald_data)", "_____no_output_____" ], [ "mcdonald_data.drop_duplicates(inplace=True)", "_____no_output_____" ], [ "len(mcdonald_data)", "_____no_output_____" ], [ "mcdonald_data.to_csv(\"mcdonald_data.csv\", index=False)", "_____no_output_____" ], [ "driver.quit()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a40190666270882398672415e36103c99b07d50
235,243
ipynb
Jupyter Notebook
examples/aesthetics.ipynb
yarikoptic/seaborn
ed4baa32267cc4a44abb40dc243ae75a1d180e85
[ "MIT", "BSD-3-Clause" ]
null
null
null
examples/aesthetics.ipynb
yarikoptic/seaborn
ed4baa32267cc4a44abb40dc243ae75a1d180e85
[ "MIT", "BSD-3-Clause" ]
null
null
null
examples/aesthetics.ipynb
yarikoptic/seaborn
ed4baa32267cc4a44abb40dc243ae75a1d180e85
[ "MIT", "BSD-3-Clause" ]
null
null
null
569.595642
90,075
0.936291
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a401f6c4dceb3dd6d514b1cdf5f8c954dd324d9
23,729
ipynb
Jupyter Notebook
doc/source/examples/cfproto_cat_adult_ord.ipynb
zjzh/alibi
e696906681e836fe6b801ab606b0987599d028d9
[ "Apache-2.0" ]
1,570
2019-05-03T06:43:03.000Z
2022-03-31T02:49:34.000Z
doc/source/examples/cfproto_cat_adult_ord.ipynb
zjzh/alibi
e696906681e836fe6b801ab606b0987599d028d9
[ "Apache-2.0" ]
511
2019-05-02T16:36:15.000Z
2022-03-31T08:09:43.000Z
doc/source/examples/cfproto_cat_adult_ord.ipynb
zjzh/alibi
e696906681e836fe6b801ab606b0987599d028d9
[ "Apache-2.0" ]
190
2019-05-02T13:41:38.000Z
2022-03-14T21:18:56.000Z
37.427445
488
0.530659
[ [ [ "# Counterfactual explanations with ordinally encoded categorical variables", "_____no_output_____" ], [ "This example notebook illustrates how to obtain [counterfactual explanations](https://docs.seldon.io/projects/alibi/en/latest/methods/CFProto.html) for instances with a mixture of ordinally encoded categorical and numerical variables. A more elaborate notebook highlighting additional functionality can be found [here](./cfproto_cat_adult_ohe.ipynb). We generate counterfactuals for instances in the *adult* dataset where we predict whether a person's income is above or below $50k.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\ntf.get_logger().setLevel(40) # suppress deprecation messages\ntf.compat.v1.disable_v2_behavior() # disable TF2 behaviour as alibi code still relies on TF1 constructs \nfrom tensorflow.keras.layers import Dense, Input, Embedding, Concatenate, Reshape, Dropout, Lambda\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.utils import to_categorical\n\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import OneHotEncoder\nfrom time import time\nfrom alibi.datasets import fetch_adult\nfrom alibi.explainers import CounterfactualProto\n\nprint('TF version: ', tf.__version__)\nprint('Eager execution enabled: ', tf.executing_eagerly()) # False", "TF version: 2.2.0\nEager execution enabled: False\n" ] ], [ [ "## Load adult dataset", "_____no_output_____" ], [ "The `fetch_adult` function returns a `Bunch` object containing the features, the targets, the feature names and a mapping of the categories in each categorical variable.", "_____no_output_____" ] ], [ [ "adult = fetch_adult()\ndata = adult.data\ntarget = adult.target\nfeature_names = adult.feature_names\ncategory_map_tmp = adult.category_map\ntarget_names = adult.target_names", "_____no_output_____" ] ], [ [ "Define shuffled training and test set:", "_____no_output_____" ] ], [ [ "def set_seed(s=0):\n np.random.seed(s)\n tf.random.set_seed(s)", "_____no_output_____" ], [ "set_seed()\ndata_perm = np.random.permutation(np.c_[data, target])\nX = data_perm[:,:-1]\ny = data_perm[:,-1]", "_____no_output_____" ], [ "idx = 30000\ny_train, y_test = y[:idx], y[idx+1:]", "_____no_output_____" ] ], [ [ "Reorganize data so categorical features come first:", "_____no_output_____" ] ], [ [ "X = np.c_[X[:, 1:8], X[:, 11], X[:, 0], X[:, 8:11]]", "_____no_output_____" ] ], [ [ "Adjust `feature_names` and `category_map` as well:", "_____no_output_____" ] ], [ [ "feature_names = feature_names[1:8] + feature_names[11:12] + feature_names[0:1] + feature_names[8:11]\nprint(feature_names)", "['Workclass', 'Education', 'Marital Status', 'Occupation', 'Relationship', 'Race', 'Sex', 'Country', 'Age', 'Capital Gain', 'Capital Loss', 'Hours per week']\n" ], [ "category_map = {}\nfor i, (_, v) in enumerate(category_map_tmp.items()):\n category_map[i] = v", "_____no_output_____" ] ], [ [ "Create a dictionary with as keys the categorical columns and values the number of categories for each variable in the dataset. 
This dictionary will later be used in the counterfactual explanation.", "_____no_output_____" ] ], [ [ "cat_vars_ord = {}\nn_categories = len(list(category_map.keys()))\nfor i in range(n_categories):\n cat_vars_ord[i] = len(np.unique(X[:, i]))\nprint(cat_vars_ord)", "{0: 9, 1: 7, 2: 4, 3: 9, 4: 6, 5: 5, 6: 2, 7: 11}\n" ] ], [ [ "## Preprocess data", "_____no_output_____" ], [ "Scale numerical features between -1 and 1:", "_____no_output_____" ] ], [ [ "X_num = X[:, -4:].astype(np.float32, copy=False)\nxmin, xmax = X_num.min(axis=0), X_num.max(axis=0)\nrng = (-1., 1.)\nX_num_scaled = (X_num - xmin) / (xmax - xmin) * (rng[1] - rng[0]) + rng[0]\nX_num_scaled_train = X_num_scaled[:idx, :]\nX_num_scaled_test = X_num_scaled[idx+1:, :]", "_____no_output_____" ] ], [ [ "Combine numerical and categorical data:", "_____no_output_____" ] ], [ [ "X = np.c_[X[:, :-4], X_num_scaled].astype(np.float32, copy=False)\nX_train, X_test = X[:idx, :], X[idx+1:, :]\nprint(X_train.shape, X_test.shape)", "(30000, 12) (2560, 12)\n" ] ], [ [ "## Train a neural net", "_____no_output_____" ], [ "The neural net will use entity embeddings for the categorical variables.", "_____no_output_____" ] ], [ [ "def nn_ord():\n \n x_in = Input(shape=(12,))\n layers_in = []\n \n # embedding layers\n for i, (_, v) in enumerate(cat_vars_ord.items()):\n emb_in = Lambda(lambda x: x[:, i:i+1])(x_in)\n emb_dim = int(max(min(np.ceil(.5 * v), 50), 2))\n emb_layer = Embedding(input_dim=v+1, output_dim=emb_dim, input_length=1)(emb_in)\n emb_layer = Reshape(target_shape=(emb_dim,))(emb_layer)\n layers_in.append(emb_layer)\n \n # numerical layers\n num_in = Lambda(lambda x: x[:, -4:])(x_in)\n num_layer = Dense(16)(num_in)\n layers_in.append(num_layer)\n \n # combine\n x = Concatenate()(layers_in)\n x = Dense(60, activation='relu')(x)\n x = Dropout(.2)(x)\n x = Dense(60, activation='relu')(x)\n x = Dropout(.2)(x)\n x = Dense(60, activation='relu')(x)\n x = Dropout(.2)(x)\n x_out = Dense(2, activation='softmax')(x)\n \n nn = Model(inputs=x_in, outputs=x_out)\n nn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n return nn", "_____no_output_____" ], [ "set_seed()\nnn = nn_ord()\nnn.summary()\nnn.fit(X_train, to_categorical(y_train), batch_size=128, epochs=30, verbose=0)", "Model: \"model\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 12)] 0 \n__________________________________________________________________________________________________\nlambda (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nlambda_1 (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nlambda_2 (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nlambda_3 (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nlambda_4 (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nlambda_5 (Lambda) (None, 1) 0 input_1[0][0] 
\n__________________________________________________________________________________________________\nlambda_6 (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nlambda_7 (Lambda) (None, 1) 0 input_1[0][0] \n__________________________________________________________________________________________________\nembedding (Embedding) (None, 1, 5) 50 lambda[0][0] \n__________________________________________________________________________________________________\nembedding_1 (Embedding) (None, 1, 4) 32 lambda_1[0][0] \n__________________________________________________________________________________________________\nembedding_2 (Embedding) (None, 1, 2) 10 lambda_2[0][0] \n__________________________________________________________________________________________________\nembedding_3 (Embedding) (None, 1, 5) 50 lambda_3[0][0] \n__________________________________________________________________________________________________\nembedding_4 (Embedding) (None, 1, 3) 21 lambda_4[0][0] \n__________________________________________________________________________________________________\nembedding_5 (Embedding) (None, 1, 3) 18 lambda_5[0][0] \n__________________________________________________________________________________________________\nembedding_6 (Embedding) (None, 1, 2) 6 lambda_6[0][0] \n__________________________________________________________________________________________________\nembedding_7 (Embedding) (None, 1, 6) 72 lambda_7[0][0] \n__________________________________________________________________________________________________\nlambda_8 (Lambda) (None, 4) 0 input_1[0][0] \n__________________________________________________________________________________________________\nreshape (Reshape) (None, 5) 0 embedding[0][0] \n__________________________________________________________________________________________________\nreshape_1 (Reshape) (None, 4) 0 embedding_1[0][0] \n__________________________________________________________________________________________________\nreshape_2 (Reshape) (None, 2) 0 embedding_2[0][0] \n__________________________________________________________________________________________________\nreshape_3 (Reshape) (None, 5) 0 embedding_3[0][0] \n__________________________________________________________________________________________________\nreshape_4 (Reshape) (None, 3) 0 embedding_4[0][0] \n__________________________________________________________________________________________________\nreshape_5 (Reshape) (None, 3) 0 embedding_5[0][0] \n__________________________________________________________________________________________________\nreshape_6 (Reshape) (None, 2) 0 embedding_6[0][0] \n__________________________________________________________________________________________________\nreshape_7 (Reshape) (None, 6) 0 embedding_7[0][0] \n__________________________________________________________________________________________________\ndense (Dense) (None, 16) 80 lambda_8[0][0] \n__________________________________________________________________________________________________\nconcatenate (Concatenate) (None, 46) 0 reshape[0][0] \n reshape_1[0][0] \n reshape_2[0][0] \n reshape_3[0][0] \n reshape_4[0][0] \n reshape_5[0][0] \n reshape_6[0][0] \n reshape_7[0][0] \n dense[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 60) 2820 concatenate[0][0] 
\n__________________________________________________________________________________________________\ndropout (Dropout) (None, 60) 0 dense_1[0][0] \n__________________________________________________________________________________________________\ndense_2 (Dense) (None, 60) 3660 dropout[0][0] \n__________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 60) 0 dense_2[0][0] \n__________________________________________________________________________________________________\ndense_3 (Dense) (None, 60) 3660 dropout_1[0][0] \n__________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 60) 0 dense_3[0][0] \n__________________________________________________________________________________________________\ndense_4 (Dense) (None, 2) 122 dropout_2[0][0] \n==================================================================================================\nTotal params: 10,601\nTrainable params: 10,601\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ] ], [ [ "## Generate counterfactual", "_____no_output_____" ], [ "Original instance:", "_____no_output_____" ] ], [ [ "X = X_test[0].reshape((1,) + X_test[0].shape)", "_____no_output_____" ] ], [ [ "Initialize counterfactual parameters:", "_____no_output_____" ] ], [ [ "shape = X.shape\nbeta = .01\nc_init = 1.\nc_steps = 5\nmax_iterations = 500\nrng = (-1., 1.) # scale features between -1 and 1\nrng_shape = (1,) + data.shape[1:]\nfeature_range = ((np.ones(rng_shape) * rng[0]).astype(np.float32), \n (np.ones(rng_shape) * rng[1]).astype(np.float32))", "_____no_output_____" ] ], [ [ "Initialize explainer. Since the `Embedding` layers in `tf.keras` do not let gradients propagate through, we will only make use of the model's predict function, treat it as a black box and perform numerical gradient calculations. ", "_____no_output_____" ] ], [ [ "set_seed()\n\n# define predict function\npredict_fn = lambda x: nn.predict(x)\n\ncf = CounterfactualProto(predict_fn,\n shape,\n beta=beta,\n cat_vars=cat_vars_ord,\n max_iterations=max_iterations,\n feature_range=feature_range,\n c_init=c_init,\n c_steps=c_steps,\n eps=(.01, .01) # perturbation size for numerical gradients\n )", "_____no_output_____" ] ], [ [ "Fit explainer. 
Please check the [documentation](https://docs.seldon.io/projects/alibi/en/latest/methods/CFProto.html) for more info about the optional arguments.", "_____no_output_____" ] ], [ [ "cf.fit(X_train, d_type='abdm', disc_perc=[25, 50, 75]);", "_____no_output_____" ] ], [ [ "Explain instance:", "_____no_output_____" ] ], [ [ "set_seed()\nexplanation = cf.explain(X)", "_____no_output_____" ] ], [ [ "Helper function to more clearly describe explanations:", "_____no_output_____" ] ], [ [ "def describe_instance(X, explanation, eps=1e-2):\n print('Original instance: {} -- proba: {}'.format(target_names[explanation.orig_class],\n explanation.orig_proba[0]))\n print('Counterfactual instance: {} -- proba: {}'.format(target_names[explanation.cf['class']],\n explanation.cf['proba'][0]))\n print('\\nCounterfactual perturbations...')\n print('\\nCategorical:')\n X_orig_ord = X\n X_cf_ord = explanation.cf['X']\n delta_cat = {}\n for i, (_, v) in enumerate(category_map.items()):\n cat_orig = v[int(X_orig_ord[0, i])]\n cat_cf = v[int(X_cf_ord[0, i])]\n if cat_orig != cat_cf:\n delta_cat[feature_names[i]] = [cat_orig, cat_cf]\n if delta_cat:\n for k, v in delta_cat.items():\n print('{}: {} --> {}'.format(k, v[0], v[1]))\n print('\\nNumerical:')\n delta_num = X_cf_ord[0, -4:] - X_orig_ord[0, -4:]\n n_keys = len(list(cat_vars_ord.keys()))\n for i in range(delta_num.shape[0]):\n if np.abs(delta_num[i]) > eps:\n print('{}: {:.2f} --> {:.2f}'.format(feature_names[i+n_keys],\n X_orig_ord[0,i+n_keys],\n X_cf_ord[0,i+n_keys]))", "_____no_output_____" ], [ "describe_instance(X, explanation)", "Original instance: <=50K -- proba: [0.6976237 0.30237624]\nCounterfactual instance: >50K -- proba: [0.49604183 0.5039582 ]\n\nCounterfactual perturbations...\n\nCategorical:\n\nNumerical:\nCapital Gain: -1.00 --> -0.88\n" ] ], [ [ "The person's incomce is predicted to be above $50k by increasing his or her capital gain.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a4026b214dd40e3f17e7b7844bd5ade2a48a7d5
11,931
ipynb
Jupyter Notebook
examples/feedback/metrics-server/README.ipynb
ashrafgt/seldon-core
ec1235789120b2a0418819801048f7384258b542
[ "Apache-2.0" ]
null
null
null
examples/feedback/metrics-server/README.ipynb
ashrafgt/seldon-core
ec1235789120b2a0418819801048f7384258b542
[ "Apache-2.0" ]
null
null
null
examples/feedback/metrics-server/README.ipynb
ashrafgt/seldon-core
ec1235789120b2a0418819801048f7384258b542
[ "Apache-2.0" ]
null
null
null
27.239726
571
0.516051
[ [ [ "# Stateful Model Feedback Metrics Server\nIn this example we will add statistical performance metrics capabilities by levering the Seldon metrics server.\n\nDependencies\n* Seldon Core installed\n* Ingress provider (Istio or Ambassador)\n\nAn easy way is to run `examples/centralized-logging/full-kind-setup.sh` and then:\n```bash\n helm delete seldon-core-loadtesting\n helm delete seldon-single-model\n```\n \nThen port-forward to that ingress on localhost:8003 in a separate terminal either with:\n\nAmbassador:\n\n kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080\n\nIstio:\n\n kubectl port-forward -n istio-system svc/istio-ingressgateway 8003:80\n\n\n", "_____no_output_____" ] ], [ [ "!kubectl create namespace seldon || echo \"namespace already created\"", "Error from server (AlreadyExists): namespaces \"seldon\" already exists\nnamespace already created\n" ], [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon", "Context \"kind-ansible\" modified.\n" ], [ "!mkdir -p config", "_____no_output_____" ] ], [ [ "### Create a simple model\nWe create a multiclass classification model - iris classifier.\n\nThe iris classifier takes an input array, and returns the prediction of the 4 classes.\n\nThe prediction can be done as numeric or as a probability array.", "_____no_output_____" ] ], [ [ "%%bash\nkubectl apply -f - << END\napiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: multiclass-model\nspec:\n predictors:\n - graph:\n children: []\n implementation: SKLEARN_SERVER\n modelUri: gs://seldon-models/v1.13.0-dev/sklearn/iris\n name: classifier\n logger:\n url: http://seldon-multiclass-model-metrics.seldon.svc.cluster.local:80/\n mode: all\n name: default\n replicas: 1\nEND", "seldondeployment.machinelearning.seldon.io/multiclass-model created\n" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=multiclass-model -o jsonpath='{.items[0].metadata.name}')", "Waiting for deployment \"multiclass-model-default-0-classifier\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"multiclass-model-default-0-classifier\" successfully rolled out\n" ] ], [ [ "#### Send test request", "_____no_output_____" ] ], [ [ "res=!curl -X POST \"http://localhost:8003/seldon/seldon/multiclass-model/api/v1.0/predictions\" \\\n -H \"Content-Type: application/json\" -d '{\"data\": { \"ndarray\": [[1,2,3,4]]}, \"meta\": { \"puid\": \"hello\" }}'\nprint(res)\nimport json\nj=json.loads(res[-1])\nassert(len(j[\"data\"][\"ndarray\"][0])==3)", "[' % Total % Received % Xferd Average Speed Time Time Time Current', ' Dload Upload Total Spent Left Speed', '', ' 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0', '100 266 100 202 100 64 15538 4923 --:--:-- --:--:-- --:--:-- 20461', '{\"data\":{\"names\":[\"t:0\",\"t:1\",\"t:2\"],\"ndarray\":[[0.0006985194531162835,0.00366803903943666,0.995633441507447]]},\"meta\":{\"puid\":\"hello\",\"requestPath\":{\"classifier\":\"seldonio/sklearnserver:1.12.0-dev\"}}}']\n" ] ], [ [ "### Metrics Server\nYou can create a kubernetes deployment of the metrics server with this:", "_____no_output_____" ] ], [ [ "%%writefile config/multiclass-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: seldon-multiclass-model-metrics\n namespace: seldon\n labels:\n app: seldon-multiclass-model-metrics\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: 
seldon-multiclass-model-metrics\n template:\n metadata:\n labels:\n app: seldon-multiclass-model-metrics\n spec:\n securityContext:\n runAsUser: 8888\n containers:\n - name: user-container\n image: seldonio/alibi-detect-server:1.13.0-dev\n imagePullPolicy: IfNotPresent\n args:\n - --model_name\n - multiclassserver\n - --http_port\n - '8080'\n - --protocol\n - seldonfeedback.http\n - --storage_uri\n - \"adserver.cm_models.multiclass_one_hot.MulticlassOneHot\"\n - --reply_url\n - http://message-dumper.default \n - --event_type\n - io.seldon.serving.feedback.metrics\n - --event_source\n - io.seldon.serving.feedback\n - MetricsServer\n env:\n - name: \"SELDON_DEPLOYMENT_ID\"\n value: \"multiclass-model\"\n - name: \"PREDICTIVE_UNIT_ID\"\n value: \"classifier\"\n - name: \"PREDICTIVE_UNIT_IMAGE\"\n value: \"seldonio/alibi-detect-server:1.13.0-dev\"\n - name: \"PREDICTOR_ID\"\n value: \"default\"\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: seldon-multiclass-model-metrics\n namespace: seldon\n labels:\n app: seldon-multiclass-model-metrics\nspec:\n selector:\n app: seldon-multiclass-model-metrics\n ports:\n - protocol: TCP\n port: 80\n targetPort: 8080", "Overwriting config/multiclass-deployment.yaml\n" ], [ "!kubectl apply -n seldon -f config/multiclass-deployment.yaml", "deployment.apps/seldon-multiclass-model-metrics created\nservice/seldon-multiclass-model-metrics created\n" ], [ "!kubectl rollout status deploy/seldon-multiclass-model-metrics", "deployment \"seldon-multiclass-model-metrics\" successfully rolled out\n" ], [ "import time\n\ntime.sleep(20)", "_____no_output_____" ] ], [ [ "### Send feedback", "_____no_output_____" ] ], [ [ "res=!curl -X POST \"http://localhost:8003/seldon/seldon/multiclass-model/api/v1.0/feedback\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"response\": {\"data\": {\"ndarray\": [[0.0006985194531162841,0.003668039039435755,0.9956334415074478]]}}, \"truth\":{\"data\": {\"ndarray\": [[0,0,1]]}}}'\nprint(res)\nimport json\nj=json.loads(res[-1])\nassert(\"data\" in j)", "[' % Total % Received % Xferd Average Speed Time Time Time Current', ' Dload Upload Total Spent Left Speed', '', ' 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0', '100 252 100 108 100 144 9000 12000 --:--:-- --:--:-- --:--:-- 21000', '{\"data\":{\"tensor\":{\"shape\":[0]}},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/sklearnserver:1.12.0-dev\"}}}']\n" ], [ "import time\n\ntime.sleep(3)", "_____no_output_____" ] ], [ [ "### Check that metrics are recorded", "_____no_output_____" ] ], [ [ "res=!kubectl logs $(kubectl get pods -l app=seldon-multiclass-model-metrics \\\n -n seldon -o jsonpath='{.items[0].metadata.name}') | grep \"PROCESSING Feedback Event\"\nprint(res)\nassert(len(res)>0)", "['[I 211208 11:08:09 cm_model:99] PROCESSING Feedback Event.']\n" ] ], [ [ "### Cleanup", "_____no_output_____" ] ], [ [ "!kubectl delete -n seldon -f config/multiclass-deployment.yaml", "deployment.apps \"seldon-multiclass-model-metrics\" deleted\nservice \"seldon-multiclass-model-metrics\" deleted\n" ], [ "!kubectl delete sdep multiclass-model", "seldondeployment.machinelearning.seldon.io \"multiclass-model\" deleted\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a4026bc15724903f31c8c87e5cdabf3fec8b349
10,997
ipynb
Jupyter Notebook
1Sentence splitter and Tokenization.ipynb
rahulsarkar906/Pretrained-
105f7347c1fd3f07d655bceadcbab608b8f120bd
[ "Unlicense" ]
1
2020-02-25T14:59:08.000Z
2020-02-25T14:59:08.000Z
1Sentence splitter and Tokenization.ipynb
rahulsarkar906/Pretrained-
105f7347c1fd3f07d655bceadcbab608b8f120bd
[ "Unlicense" ]
null
null
null
1Sentence splitter and Tokenization.ipynb
rahulsarkar906/Pretrained-
105f7347c1fd3f07d655bceadcbab608b8f120bd
[ "Unlicense" ]
null
null
null
19.922101
179
0.484405
[ [ [ "<img src=\"../Pics/MLSb-T.png\" width=\"160\">\n<br><br>\n<center><u><H1>Sentence splitter and Tokenization</H1></u></center>", "_____no_output_____" ], [ "## Sentence splitter:", "_____no_output_____" ] ], [ [ "string = 'Every one of us is, in the cosmic perspective, precious. If a human disagrees with you, let him live. In a hundred billion galaxies, you will not find another.'", "_____no_output_____" ], [ "from nltk.tokenize import sent_tokenize", "_____no_output_____" ], [ "_sent = sent_tokenize(string)", "_____no_output_____" ], [ "print(_sent)", "['Every one of us is, in the cosmic perspective, precious.', 'If a human disagrees with you, let him live.', 'In a hundred billion galaxies, you will not find another.']\n" ], [ "import nltk.tokenize.punkt\n#This tokenizer divides a text into a list of sentences,\n#by using an unsupervised algorithm to build a model for abbreviation\n#words, collocations, and words that start sentences. ", "_____no_output_____" ], [ "tokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()", "_____no_output_____" ], [ "tokenizer.tokenize(string)", "_____no_output_____" ] ], [ [ "## Tokenization:", "_____no_output_____" ], [ "### Word Tokenizing:", "_____no_output_____" ] ], [ [ "a = 'Hi NLTK students ! level s10'", "_____no_output_____" ], [ "# simplest tokenizer: uses white space as delimiter.\nprint(a.split())", "['Hi', 'NLTK', 'students', '!', 'level', 's10']\n" ], [ "from nltk.tokenize import word_tokenize", "_____no_output_____" ], [ "word_tokenize(a)", "_____no_output_____" ], [ "# Another method using TreebankWorldTokenizer\nfrom nltk.tokenize import TreebankWordTokenizer", "_____no_output_____" ], [ "tokenizer = TreebankWordTokenizer()\nprint(tokenizer.tokenize(a))", "['Hi', 'NLTK', 'students', '!', 'level', 's10']\n" ] ], [ [ "### Removing Noise", "_____no_output_____" ] ], [ [ "import re\n# Example of removing numbers:\n\ndef remove_numbers(text):\n return re.sub(r'\\d+', \"\", text)", "_____no_output_____" ], [ "txt = 'This a sample sentence in English, \\n with whitespaces and many numbers 123456!'", "_____no_output_____" ], [ "print('Removed numbers:', remove_numbers(txt))", "Removed numbers: This a sample sentence in English, \n with whitespaces and many numbers !\n" ], [ "# example of removing punctuation from text\nimport string\n\ndef remove_punctuation(text):\n words = word_tokenize(text)\n pun_removed = [w for w in words if w.lower() not in string.punctuation]\n return \" \".join(pun_removed)", "_____no_output_____" ], [ "b = 'This a great course of NLP using Python and NLTK!!! 
for this year 2017, isnt.?'\nprint(remove_punctuation(b))", "This a great course of NLP using Python and NLTK for this year 2017 isnt\n" ], [ "from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize", "_____no_output_____" ], [ "# + : one or more times | \\w : character or digit\nregexp_tokenize(b, pattern='\\w+')", "_____no_output_____" ], [ "regexp_tokenize(b, pattern='\\d+')", "_____no_output_____" ], [ "c = 'The capital is raising up to $100000'", "_____no_output_____" ], [ "regexp_tokenize(c, pattern='\\w+|\\$')", "_____no_output_____" ], [ "# + = one or more times\nregexp_tokenize(c, pattern='\\w+|\\$[\\d]')", "_____no_output_____" ], [ "regexp_tokenize(c, pattern='\\w+|\\$[\\d\\.]+|\\S+')", "_____no_output_____" ], [ "wordpunct_tokenize(b)", "_____no_output_____" ], [ "blankline_tokenize(b)", "_____no_output_____" ] ], [ [ "## References:\n\nhttps://docs.python.org/2/library/tokenize.html\n\nhttp://www.nltk.org/_modules/nltk/tokenize.html\n\nhttp://www.nltk.org/_modules/nltk/tokenize/punkt.html\n\nhttp://www.nltk.org/_modules/nltk/tokenize/treebank.html\n\nhttp://www.nltk.org/_modules/nltk/tokenize/regexp.html", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a404c02e44b6e1651f96ce72c18cd368a2bebf2
144,112
ipynb
Jupyter Notebook
P1.ipynb
alien2rv/SdcProject1
c1869bb0955eb55904feb7a0430effe2865a6467
[ "MIT" ]
null
null
null
P1.ipynb
alien2rv/SdcProject1
c1869bb0955eb55904feb7a0430effe2865a6467
[ "MIT" ]
null
null
null
P1.ipynb
alien2rv/SdcProject1
c1869bb0955eb55904feb7a0430effe2865a6467
[ "MIT" ]
null
null
null
223.776398
114,836
0.898773
[ [ [ "# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---", "_____no_output_____" ], [ "**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>", "_____no_output_____" ], [ "**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** ", "_____no_output_____" ], [ "## Import Packages", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nfrom processing import processing\nimport os\nimport timeit\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Read in an Image", "_____no_output_____" ] ], [ [ "#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n" ] ], [ [ "## Ideas for Lane Detection Pipeline", "_____no_output_____" ], [ "**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images \n`cv2.cvtColor()` to grayscale or change color \n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**", "_____no_output_____" ], [ "## Helper Functions", "_____no_output_____" ], [ "Below are some helper functions to help get you started. They should look familiar from the lesson!", "_____no_output_____" ] ], [ [ "import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. \n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)", "_____no_output_____" ] ], [ [ "## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**", "_____no_output_____" ] ], [ [ "import os\nos.listdir(\"test_images/\")", "_____no_output_____" ], [ "def perform_magic_py(img):\n image = mpimg.imread(img)\n img=os.path.basename(img)\n #printing out some stats and plotting\n print('This image is:', type(image), 'with dimensions:', image.shape)\n #grayscale image\n mod_image=processing.grayscale(image)\n paths=os.path.join(\"test_images\",\"results\",\"grayscale\",img)\n plt.imsave(paths,mod_image)\n #gaussian blur\n mod_image=processing.gaussian_blur(img=mod_image,kernel_size=5)\n #canny edge detection\n mod_image=processing.canny(img=mod_image,low_threshold=50, high_threshold=150)\n paths=os.path.join(\"test_images\",\"results\",\"canny\",img)\n plt.imsave(paths,mod_image)\n #masking unnecessary edges using triangular masking\n left_bottom = [100, 535]\n right_bottom = [900, 535]\n apex = [500, 300]\n #mask = np.zeros_like(mod_image) \n #ignore_mask_color = 255 \n #imshape = image.shape\n vertices = np.array([left_bottom, right_bottom, apex], dtype=np.int32)\n 
#cv2.fillPoly(mask, vertices, ignore_mask_color)\n #masked_edges = cv2.bitwise_and(mod_image, mask)\n masked_edges= processing.region_of_interest(img=mod_image, vertices=vertices)\n #hough transform\n mod_image=processing.hough_lines(img=masked_edges, rho=int(1), theta=int(np.pi/180), threshold=int(50), min_line_len=int(150), max_line_gap=int(150))\n #print('This image is:', type(grey), 'with dimensions:', grey.shape)\n #interopolate hough transformed image to main image\n mod_image = processing.weighted_img(mod_image,image)\n plt.imshow(mod_image)\n paths=os.path.join(\"test_images\",\"results\",\"hough_transform\",img)\n print(os.path.join(\"test_images\",\"results\",\"mod_image_\"+img))\n plt.imsave(paths,mod_image)", "_____no_output_____" ] ], [ [ "## Build a Lane Finding Pipeline\n\n", "_____no_output_____" ] ], [ [ "folder= 'test_images'\nfor filename in os.listdir(folder):\n if \".jpg\" in filename:\n print(f\"processing file : {filename}\")\n perform_magic_py(os.path.join(folder,filename))\n #print(os.path.join(folder,filename))", "processing file : solidWhiteCurve.jpg\nThis image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n" ] ], [ [ "Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.", "_____no_output_____" ] ], [ [ "# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.", "_____no_output_____" ] ], [ [ "## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. 
\nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**", "_____no_output_____" ] ], [ [ "# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML", "_____no_output_____" ], [ "def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n\n return result", "_____no_output_____" ] ], [ [ "Let's try the one with the solid white lane on the right first ...", "_____no_output_____" ] ], [ [ "white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)", "_____no_output_____" ] ], [ [ "Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.", "_____no_output_____" ] ], [ [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))", "_____no_output_____" ] ], [ [ "## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**", "_____no_output_____" ], [ "Now for the one with the solid yellow lane on the left. 
This one's more tricky!", "_____no_output_____" ] ], [ [ "yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))", "_____no_output_____" ] ], [ [ "## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n", "_____no_output_____" ], [ "## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!", "_____no_output_____" ] ], [ [ "challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
4a405cad299410816f0fe7edb134392c533003aa
27,538
ipynb
Jupyter Notebook
1._Introduction+Dealing_with_technical_noise/1.3_Batch_Correction.ipynb
YeoLab/single-cell-bioinformatics-scrm-2016
2ec3f4e8439b6574eee27a8cc0ed4bb8efbb1925
[ "BSD-3-Clause" ]
3
2016-10-23T09:17:16.000Z
2018-06-07T22:38:43.000Z
1._Introduction+Dealing_with_technical_noise/1.3_Batch_Correction.ipynb
zqfang/single-cell-bioinformatics-scrm-2016
2ec3f4e8439b6574eee27a8cc0ed4bb8efbb1925
[ "BSD-3-Clause" ]
null
null
null
1._Introduction+Dealing_with_technical_noise/1.3_Batch_Correction.ipynb
zqfang/single-cell-bioinformatics-scrm-2016
2ec3f4e8439b6574eee27a8cc0ed4bb8efbb1925
[ "BSD-3-Clause" ]
8
2016-06-16T12:50:40.000Z
2021-03-02T21:50:48.000Z
38.676966
533
0.618309
[ [ [ "# Batch correction\n\nWhat is batch correction? A \"Batch\" is when experiments have been performed at different times and there's some obvious difference between them. Single-cell experiments are often inherently \"batchy\" because you can only perform so many single cell captures at once, and you do multiple captures, over different days, with different samples. How do you correct for the technical noise without deleting the true biological signal?\n\n## Avoiding batch effects\n\nFirst things first, it's best to design your experiments to minimize batch effects. For example, if you can mix your samples such that there are multiple representations of samples per single-cell capture, then this will help because you will have representations of both biological and technical variance across batches, rather than BOTH biological and technical variance.\n\n![](figures/hicks_figure1.png)\n\n[Hicks et al, preprint](http://biorxiv.org/content/early/2015/12/27/025528)\n\n### Bad: Technical variance is the same as biological variance\n\n![Don't confound your biological and technical variance](figures/batch_effects-01.png)\n\nHere, when you try to correct for batch effects between captures, it's impossible to know whether you're removing the technical noise of the different captures, or the biological signal of the data.\n\n### Good: Technical variance is different from biological variance\nThe idea here is that you would ahead of time, mix the cells from your samples in equal proportions and then perform cell capture on the mixed samples, so you would get different technical batches, but they wouldn't be counfounded by the biological signals.\n\n![Spread your biological signal across technical replicates](figures/batch_effects-02.png)\n\nHere, when you correct for batch effects, the technical batches and biological signals are separate.\n\n### If it's completely impossible to do multiple biological samples in the same technical replicate...\n\nFor example, if you have to harvest your cells at parcticular timepoints, here are some ways that you can try to mitigate the batch effects:\n\n* Repeat the timepoint \n* Save an aliquot of cells from each timepoint and run another experiment with the mixed aliquots", "_____no_output_____" ], [ "## Correcting batch effects\nOkay so say your data are such that you couldn't have mixed your biological samples ahead of time. What do you do?\n\n\nThere's two main ways to approach batch correction: using groups of samples or groups of features (genes).\n\n### Sample-batchy\n\nThis is when you have groups of samples that may have some biological difference between them, but also have technical differences between them. Say, you performed single-cell capture on several different days from different mice, of somewhat overlapping ages. You know that you have the biological signal from the different mice and the different ages, but you *also* have the technical signal from the different batches. BUT there's no getting around that you had to sacrifice the mice and collect their cells in one batch\n\n### Feature-batchy\n\nThis is when you think particular groups of genes are contributing to the batch effects.\n\nHow to find these features:\n\n* Numerical feature (e.g. RIN) associated with each sample\n* Cell cycle genes (??Buetttner 2015?)\n* (RUVseq) - Use an external dataset (e.g. 
bulk samples) to find non-differentially expressed genes and use them to correct between groups", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\n# Interactive Python (IPython - now Jupyter) widgets for interactive exploration\nimport ipywidgets\n\n# Numerical python library\nimport numpy as np\n\n# PLotting library\nimport matplotlib.pyplot as plt\n\n# Dataframes in python\nimport pandas as pd\n\n# Linear model correction\nimport patsy \n\n# Even better plotting\nimport seaborn as sns\n\n# Batch effect correction\n# This import statement only works because there's a folder called \"combat_py\" here, not that there's a module installed\nfrom combat_py.combat import combat\n\n\n# Use the styles and colors that I like\nsns.set(style='white', context='talk', palette='Set2')\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Feature-batchy", "_____no_output_____" ] ], [ [ "np.random.seed(2016)\n\nn_samples = 10\nn_genes = 20\n\nhalf_genes = int(n_genes/2)\nhalf_samples = int(n_samples/2)\nsize = n_samples * n_genes\n\ngenes = ['Gene_{}'.format(str(i+1).zfill(2)) for i in range(n_genes)]\nsamples = ['Sample_{}'.format(str(i+1).zfill(2)) for i in range(n_samples)]\n\ndata = pd.DataFrame(np.random.randn(size).reshape(n_samples, n_genes), index=samples, columns=genes)\n\n# Add biological variance\ndata.iloc[:half_samples, :half_genes] += 1\ndata.iloc[:half_samples, half_genes:] += -1\ndata.iloc[half_samples:, half_genes:] += 1\ndata.iloc[half_samples:, :half_genes] += -1\n\n# Biological samples\nmouse_groups = pd.Series(dict(zip(data.index, (['Mouse_01'] * int(n_samples/2)) + (['Mouse_02'] * int(n_samples/2)))), \n name=\"Mouse\")\nmouse_to_color = dict(zip(['Mouse_01', 'Mouse_02'], ['lightgrey', 'black']))\nmouse_colors = [mouse_to_color[mouse_groups[x]] for x in samples]\n\n# Gene colors\ngene_colors = sns.color_palette('husl', n_colors=n_genes)", "_____no_output_____" ] ], [ [ "### Plot original biological variance data", "_____no_output_____" ] ], [ [ "g = sns.clustermap(data, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5, \n col_colors=gene_colors,\n cbar_kws=dict(label='Normalized Expression'))\nplt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);", "_____no_output_____" ], [ "def make_tidy(data, sample_groups):\n tidy = data.unstack()\n tidy = tidy.reset_index()\n tidy = tidy.rename(columns={'level_0': 'Gene', 'level_1': \"Sample\", 0: \"Normalized Expression\"})\n tidy = tidy.join(sample_groups, on='Sample')\n return tidy", "_____no_output_____" ], [ "tidy = make_tidy(data, mouse_groups)", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nsns.boxplot(hue='Gene', y='Normalized Expression', data=tidy, x='Mouse')\nax.legend_.set_visible(False)", "_____no_output_____" ] ], [ [ "### Add technical noise", "_____no_output_____" ] ], [ [ "# Choose odd-numbered samples to be in batch1 and even numbered samples to be in batch 2\nbatch1_samples = samples[::2]\nbatch2_samples = data.index.difference(batch1_samples)\nbatches = pd.Series(dict((x, 'Batch_01') if x in batch1_samples else (x, \"Batch_02\") for x in samples), name=\"Batch\")\n\n# Add random noise for all genes except the last two in each batch\nnoisy_data = data.copy()\nnoisy_data.ix[batch1_samples, :-2] += np.random.normal(size=n_genes-2, scale=2)\nnoisy_data.ix[batch2_samples, :-2] += np.random.normal(size=n_genes-2, scale=2)\n\n\n# Assign colors for batches\nbatch_to_color = dict(zip([\"Batch_01\", \"Batch_02\"], sns.color_palette()))\nbatch_colors = 
[batch_to_color[batches[x]] for x in samples]\nrow_colors = [mouse_colors, batch_colors]\n\n\ng = sns.clustermap(noisy_data, row_colors=row_colors, col_cluster=False, row_cluster=False, linewidth=0.5, \n col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))\nplt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);", "_____no_output_____" ] ], [ [ "We can see that there's some batch effect - for batch1 (light grey), `Gene_15` is in general lower, and `Gene_01` is in general higher. And for batch2 (black), `Gene_16` is in general higher.\n\nBut, Gene_19 and Gene_20 are unaffected.", "_____no_output_____" ] ], [ [ "tidy_noisy = make_tidy(noisy_data, mouse_groups)\ntidy_noisy = tidy_noisy.join(batches, on='Sample')\ntidy_noisy.head()", "_____no_output_____" ] ], [ [ "Lets plot the boxplots of data the same way, with the x-axis as the mouse they came from and the y-axis ad the genes", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nsns.boxplot(hue='Gene', y='Normalized Expression', data=tidy_noisy, x='Mouse')\nax.legend_.set_visible(False)", "_____no_output_____" ] ], [ [ "We can see that compared to before, where we had clear differences in gene expression from genes 1-10 and 11-19 in the two mice, we don't see it as much with the noisy data.\n\nNow let's plot the data a different way, with the x-axis as the *batch*", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nsns.boxplot(hue='Gene', y='Normalized Expression', data=tidy_noisy, x='Batch')\nax.legend_.set_visible(False)", "_____no_output_____" ] ], [ [ "## How to quantify the batch effect?", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nsns.pointplot(hue='Batch', x='Normalized Expression', data=tidy_noisy, y='Gene', orient='horizontal', \n scale=0.5, palette=batch_colors)", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nsns.pointplot(hue='Batch', x='Normalized Expression', data=tidy_noisy, y='Gene', orient='horizontal', scale=0.5)\nsns.pointplot(x='Normalized Expression', data=tidy_noisy, y='Gene', orient='horizontal', scale=0.75, color='k', \n linestyle=None)", "_____no_output_____" ] ], [ [ "## How to get rid of the batch effect?\n\n\n### COMBAT\nWe will use \"COMBAT\" to get rid of the batch effect. 
What combat does is basically what we just did with our eyes and intuition - find genes whose gene expression varies greatly between batches, and adjust the expression of the gene so it's closer to the mean total expression across batches.\n\n\n(may need to whiteboard here)", "_____no_output_____" ], [ "Create metadata matrix", "_____no_output_____" ] ], [ [ "metadata = pd.concat([batches, mouse_groups], axis=1)\nmetadata", "_____no_output_____" ], [ "\ndef remove_batch_effects_with_combat(batch, keep_constant=None, cluster_on_correlations=False):\n if keep_constant is not None or keep_constant in metadata:\n # We'll use patsy (statistical models in python) to create a \"Design matrix\" which encodes the batch as \n # a boolean (0 or 1) value so the computer cna understand it.\n model = patsy.dmatrix('~ {}'.format(keep_constant), metadata, return_type=\"dataframe\")\n elif keep_constant == 'null' or keep_constant is None:\n model = None\n \n # --- Correct for batch effects --- #\n corrected_data = combat(noisy_data.T, metadata[batch], model)\n \n # Transpose so samples are the rows and the features are the columns\n corrected_data = corrected_data.T\n\n # --- Plot the heatmap --- #\n if cluster_on_correlations:\n g = sns.clustermap(corrected_data.T.corr(), row_colors=row_colors, col_cluster=True, row_cluster=True, linewidth=0.5, \n vmin=-1, vmax=1, col_colors=row_colors, cbar_kws=dict(label='Pearson R'))\n plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);\n else:\n g = sns.clustermap(corrected_data, row_colors=row_colors, col_cluster=False, row_cluster=False, linewidth=0.5, \n col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))\n plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);\n \n # Uncomment the line below to save the batch corrected heatmap\n # g.savefig('combat_batch_corrected_clustermap.pdf')\n \n # --- Quantification of the batch effect correction --- #\n # Create a \"tidy\" version of the dataframe for plotting\n tidy_corrected = make_tidy(corrected_data, mouse_groups)\n tidy_corrected = tidy_corrected.join(batches, on='Sample')\n tidy_corrected.head()\n\n # Set up the figure\n # 4 columns of figure panels\n figure_columns = 4\n width = 4.5 * figure_columns\n height = 4\n fig, axes = plt.subplots(ncols=figure_columns, figsize=(width, height))\n\n # PLot original data vs the corrected data\n ax = axes[0]\n ax.plot(data.values.flat, corrected_data.values.flat, 'o', \n # Everything in the next line is my personal preference so it looks nice\n alpha=0.5, markeredgecolor='k', markeredgewidth=0.5)\n ax.set(xlabel='Original (Batchy) data', ylabel='COMBAT corrected data')\n\n # PLot the mean gene expression within batch in colors, and the mean gene expression across both batches in black\n ax = axes[1]\n sns.pointplot(hue='Batch', x='Normalized Expression', data=tidy_corrected, y='Gene', orient='horizontal', scale=.5, ax=ax)\n sns.pointplot(x='Normalized Expression', data=tidy_corrected, y='Gene', orient='horizontal', \n scale=0.75, color='k', linestyle=None, ax=ax)\n\n # PLot the gene epxression distribution per mouse\n ax = axes[2]\n sns.boxplot(hue='Gene', y='Normalized Expression', data=tidy_corrected, x='Mouse', ax=ax, \n # Adjusting linewidth for my personal preference\n linewidth=1)\n # Don't show legend because it's too big\n ax.legend_.set_visible(False)\n \n \n # --- Plot boxplots of average difference between gene expression in batches --- #\n # Gete mean gene expression within batch for the original noisy data\n mean_batch_expression = 
noisy_data.groupby(batches).mean()\n noisy_batch_diff = (mean_batch_expression.loc['Batch_01'] - mean_batch_expression.loc['Batch_02']).abs()\n noisy_batch_diff.name = 'mean(|batch1 - batch2|)'\n noisy_batch_diff = noisy_batch_diff.reset_index()\n noisy_batch_diff['Data type'] = 'Noisy'\n\n # Get mean gene expression within batch for the corrected data\n mean_corrected_batch_expression = corrected_data.groupby(batches).mean()\n corrected_batch_diff = (mean_corrected_batch_expression.loc['Batch_01'] - mean_corrected_batch_expression.loc['Batch_02']).abs()\n corrected_batch_diff.name = 'mean(|batch1 - batch2|)'\n corrected_batch_diff = corrected_batch_diff.reset_index()\n corrected_batch_diff['Data type'] = 'Corrected'\n\n # Compile the two tables into one (concatenate)\n batch_differences = pd.concat([noisy_batch_diff, corrected_batch_diff])\n batch_differences.head()\n\n sns.boxplot(x='Data type', y='mean(|batch1 - batch2|)', data=batch_differences, ax=axes[3])\n\n # Remove right and top axes lines so it looks nicer\n sns.despine()\n\n # Magically adjust the figure panels (axes) so they fit nicely\n fig.tight_layout()\n\n # Uncomment the line below to save the figure of three panels\n # fig.savefig('combat_batch_corrected_panels.pdf')\n\n\nipywidgets.interact(\n remove_batch_effects_with_combat,\n batch=ipywidgets.Dropdown(options=['Mouse', 'Batch'], value=\"Batch\", description='Batch to correct for'), \n keep_constant=ipywidgets.Dropdown(value=None, options=[None, 'Mouse', 'Batch', 'Mouse + Batch'], \n description='Variable of interest'),\n cluster_on_correlations=ipywidgets.Checkbox(value=False, description=\"Cluster on (Pearson) correlations between samples\"));", "_____no_output_____" ] ], [ [ "Try doing these and see how they compare. Do you see similar trends to the original data? Do any of these create errors? Why would that be?\n\n1. Batch to correct for = Batch, Variable of interest = Mouse\n2. Batch to correct for = Mouse, Variable of interest = Batch\n3. Batch to correct for = Batch, Variable of interest = Mouse + Batch\n4. ... your own combinations!\n\nWith each of these try turning \"Cluster on (Pearson) correlations between samples\" on and off.\nThis is a nice way that we can visualize the improvement in reducing the batch-dependent signal.", "_____no_output_____" ], [ "\n\n## Feature-batchy\n\nWhat if there are specific genes or features that are contributing to the batches?\n\nThis is the idea behind correcting for cell-cycle genes or some other feature that you know is associated with the data, e.g. the RNA Integrity Number (RIN).\n\nLet's add some feature-batchy noise to our original data", "_____no_output_____" ] ], [ [ "metadata['RIN'] = np.arange( len(samples)) + 0.5\nmetadata", "_____no_output_____" ] ], [ [ "Add noise and plot it. 
Use first and last genes as controls that don't have any noise", "_____no_output_____" ] ], [ [ "# rin_noise = metadata['RIN'].apply(lambda x: pd.Series(np.random.normal(loc=x, size=n_genes), index=genes))\nrin_noise = metadata['RIN'].apply(lambda x: pd.Series(np.ones(n_genes-2)*x, index=genes[1:-1]))\nrin_noise = rin_noise.reindex(columns=genes)\nrin_noise = rin_noise.fillna(0)\n\ng = sns.clustermap(rin_noise, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5, \n                   col_colors=gene_colors, cbar_kws=dict(label='RIN Noise'))\nplt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);", "_____no_output_____" ] ], [ [ "Add the noise to the data and re-center so that each gene's mean is approximately zero.", "_____no_output_____" ] ], [ [ "rin_batchy_data = data + rin_noise\nrin_batchy_data\n\n# Renormalize the data so genes are 0-centered\n\nrin_batchy_data = (rin_batchy_data - rin_batchy_data.mean())/rin_batchy_data.std()\n\ng = sns.clustermap(rin_batchy_data, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5, \n                   col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))\nplt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);", "_____no_output_____" ] ], [ [ "If we plot RIN vs. the RIN-batchy gene expression, we'll see that expression increases with this one variable! Of course, we could also have created a variable that linearly decreases expression. ", "_____no_output_____" ] ], [ [ "\ntidy_rin_batchy = make_tidy(rin_batchy_data, mouse_groups)\ntidy_rin_batchy = tidy_rin_batchy.join(metadata['RIN'], on='Sample')\n\n\ng = sns.FacetGrid(tidy_rin_batchy, hue='Gene')\ng.map(plt.plot, 'RIN', 'Normalized Expression', alpha=0.5)", "_____no_output_____" ] ], [ [ "### Use RIN to predict gene expression\n\nWe will use linear regression with RIN as the explanatory variable to predict gene expression. 
Then we'll create a new, corrected matrix, with the influence of RIN removed.", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport six\nfrom sklearn import linear_model\n\nregressor = linear_model.LinearRegression()\nregressor\n\n# Use RIN as the \"X\" - the explanatory variable, the one you expect your gene expression to vary with.\n\nregressor.fit(metadata['RIN'].to_frame(), rin_batchy_data)\n\n# Use RIN to predict gene expression\nrin_dependent_data = pd.DataFrame(regressor.predict(metadata['RIN'].to_frame()), columns=genes, index=samples)\nrin_dependent_data\n\nfrom sklearn.metrics import r2_score\n\n# explained_variance = r2_score(rin_batchy_data, rin_dependent_data)\n# six.print_(\"Explained variance by RIN:\", explained_variance)\n\nrin_corrected_data = rin_batchy_data - rin_dependent_data\nrin_corrected_data\n\n# Somewhat contrived, but try to predict the newly corrected data with RIN\n\nr2_score(rin_corrected_data, rin_dependent_data)\n\ntidy_rin_corrected = make_tidy(rin_corrected_data, mouse_groups)\ntidy_rin_corrected = tidy_rin_corrected.join(metadata['RIN'], on=\"Sample\")\ntidy_rin_corrected.head()\n\ng = sns.FacetGrid(tidy_rin_corrected, hue='Gene')\ng.map(plt.plot, 'RIN', 'Normalized Expression', alpha=0.5)\n\ng = sns.clustermap(rin_corrected_data, row_colors=mouse_colors, col_cluster=False, row_cluster=False, linewidth=0.5, \n                   col_colors=gene_colors, cbar_kws=dict(label='Normalized Expression'))\nplt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);\n\nsns.clustermap(rin_corrected_data.T.corr(), row_colors=mouse_colors, linewidth=0.5, \n               col_colors=mouse_colors, cbar_kws=dict(label='Pearson R'))\nplt.setp(g.ax_heatmap.get_yticklabels(), rotation=0);", "_____no_output_____" ] ], [ [ "Now the data doesn't vary with RIN! But... now we over-corrected and removed the biological signal as well.", "_____no_output_____" ], [ "### Other options to talk about\n\nAs you have seen, dealing with batch effects in single-cell data is supremely difficult and the best thing you can do for yourself is design your experiment nicely so you don't have to.\n\n* [SVA](http://www.biostat.jhsph.edu/~jleek/papers/sva.pdf)\n  * Can specify that you want to correct for something (like RIN) but don't correct for what you're interested in. But... often in single-cell data you're trying to find new populations so you don't know *a priori* what you want to not be corrected for\n* [RUVseq](http://www.nature.com/nbt/journal/v32/n9/full/nbt.2931.html)\n  * \"RUV\" = \"Remove unwanted variation\"\n  * With the \"RUVg\" version can specify a set of control genes that you know aren't supposed to change between groups (maybe from a bulk experiment) but they say in their manual not to use the normalized counts for differential expression, only for exploration, because you may have corrected for something you actually *DID* want but didn't know\n* [scLVM](https://github.com/PMBio/scLVM)\n  * This method claims to account for differences in cell cycle stage and help to put all cells onto the same scale, so you can then do pseudotime ordering and clustering and all that jazz.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a4060423b065dc57d6e07a56720052918e398fa
1,040,130
ipynb
Jupyter Notebook
src/InserimentoFeatures_BanNumCatData.ipynb
EnricoPittini/Box-office-prediction
e6c635f0d5bb8469967942ae89d241e3f104a62e
[ "MIT" ]
null
null
null
src/InserimentoFeatures_BanNumCatData.ipynb
EnricoPittini/Box-office-prediction
e6c635f0d5bb8469967942ae89d241e3f104a62e
[ "MIT" ]
null
null
null
src/InserimentoFeatures_BanNumCatData.ipynb
EnricoPittini/Box-office-prediction
e6c635f0d5bb8469967942ae89d241e3f104a62e
[ "MIT" ]
null
null
null
260.553607
37,044
0.909795
[ [ [ "# **PROGETTO**", "_____no_output_____" ], [ "# FEATURES NUMERICHE, CATEGORIALI, DATA", "_____no_output_____" ], [ "In questo notebook tratto ed introduco le features numeriche, categoriali e di tipo data. Le varie features verranno aggiunte in modo incrementale. Nel successivo notebook verranno introdotte ulteriore features: di tipo insiemistico e di tipo testuale.\n\nSpesso è necessario valutare se vale la pena aggiungere certe features o è necessario capire che alternativa è migliore nella lavorazione di certe features : uso lo score sul validation set per capire cosa è meglio fare e per orientarmi tra le varie possibili scelte. \n\nVerranno presi in considerazione 4 tipi di algoritmi di learning : kNN, regressione lineare, albero di decisione, random forest. Dunque per ogni possibile alternativa ho 4 modelli diversi e dunque ho 4 score sul validation set diversi. Scelgo l'alternativa e il modello che hanno score sul validation minore : questo è il modello migliore fino a quel momento ottenuto. Dunque la mia guida è sempre lo score sul validation e scelgo ciò che minimizza ciò.", "_____no_output_____" ], [ "Nel valutare questi 4 algoritmi faccio tuning su certi iperparametri.\n1. Per kNN faccio tuning sul numero di vicini (lo indichiamo con k)\n2. Per decision tree faccio tuning sul numero massimo di foglie (lo indichiamo con k)\n3. Per random forest faccio tuning sul numero di alberi (lo indichiamo con k)\n\nLinear regression invece non faccio tuning.", "_____no_output_____" ], [ "### FUNZIONI VALUTAZIONE E SELEZIONE MODELLI\n\nImporto le **funzioni per la valutazione e selezione dei modelli**. \n\nLe funzioni compute_train_val_test e model_selection_TrainValTest effettuano la valutazione tramite gli score su training/validation/test, rispettivamente su un modello solo o su una lista di modelli. Le funzioni compute_bias_variance_erroe e model_selection_BiasVarianceError effettuano la valutazione tramite il calcolo di bias/variance/error, rispettivamente su un modello solo o su una lista di modelli.\n\nNel progetto uso lo **score sul validation** come misura principale per selezionare un modello. Uso il calcolo di bias/variance/error come misura ulteriore di bontà, in particolare per capire come poter migliorare il modello stesso.", "_____no_output_____" ] ], [ [ "from valutazione_modelli import compute_train_val_test, model_selection_TrainValTest, compute_bias_variance_error, \\\n model_selection_BiasVarianceError", "_____no_output_____" ] ], [ [ "# PRIMA LETTURA E FEATURES NUMERICHE", "_____no_output_____" ], [ "Per prima cosa effettuiamo la prima lettura del dataset e aggiungiamo nel modello le features numeriche: \"budget\", \"popularity\", \"runtime\".\n\nLa funzione **cleaning_data_numeric** effettua la prima lettura e lavora ed estrae le features numeriche. Ritorna:\n- *dataframe*, che è il dataframe pandas grezzo e completo di tutte le features. L'unica operazione che ho eseguito è quella di rimuovere le istanze con valori non significativi di \"revenue\". (*dataframe* mi serve perchè di fatto contiene tutto il dataset).\n- *df*, che è il dataframe pandas che ha solo le features esplicative (X) fin'ora prese in considerazione e lavorate. Dunque per ora contiene solo le features numeriche lavorate opportunamente, come specificato nel notebook di analisi del dataset. Il dataframe *df* ha dunque 4 features: \"budget\", \"budget_dummy\", \"popularity\", \"runtime\".\n- *y*, che è l'array numpy relativo a solo \"revenue\". 
I valori sono stati opportunamente scalati tramite MinMaxMScaler. \n\nIn tutto il progetto uso sia dataframe pandas (come *dataframe* e *df*) che array numpy (come *y*). I dataframe li uso per gestire, lavorare e visualizzare meglio il dataset e le features selezionate. Gli array numpy li uso per valutare i modelli.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import cleaning_data_numeric\n\ndataframe, df, y = cleaning_data_numeric()", "_____no_output_____" ], [ "df.info()\ndf.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 budget 2943 non-null float64\n 1 budget_dummy 2943 non-null int32 \n 2 popularity 2943 non-null float64\n 3 runtime 2943 non-null float64\ndtypes: float64(3), int32(1)\nmemory usage: 103.5 KB\n" ] ], [ [ "Definiamo l'array numpy *X*. *X* è semplicemente la versione numpy di *df*. Come detto, gli array numpy li uso per valutare i modelli. Dunque valuterò i modelli sempre passando *X* e *y*.", "_____no_output_____" ] ], [ [ "X = df.values ", "_____no_output_____" ] ], [ [ "# PREDITTORE BANALE : revenue come funzione lineare del solo budget.\n\nIl primo modello che prendiamo in considerazione è un modello che considera solo \"budget\" come feature per spiegare \"revenue\". Predittore banale. In particolare consideriamo la regressione lineare tra \"revenue\" e \"budget\".", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\n# Calcolo gli score su training/validation/test della regressione lineare con solo \"budget\".\ntrain, val, test = compute_train_val_test1(model, X[:,0:1], y)\n\nprint(\"MSE : \",val)", "MSE : 0.003941546138667958\n" ] ], [ [ "Questo è il nostro primo score ottenuto.", "_____no_output_____" ], [ "# SOLO FEATURES NUMERICHE ", "_____no_output_____" ], [ "Consideriamo ora tutte e 4 le features numeriche messe in X. 
Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.003274847961645496 | k: 9\n" ] ], [ [ "Meglio del predittore banale.", "_____no_output_____" ], [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0037196055971774763 | k: 51\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.003266990786261166\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.003157106076117885 | k: 34\n" ] ], [ [ "Tutti e 4 modelli sono migliori del predittore banale. Il modello migliore è random forest, con un MSE sul validation di 0.00316. (34 alberi)", "_____no_output_____" ], [ "# AGGIUNTA FEATURES CATEGORIALI", "_____no_output_____" ], [ "Aggiungiamo ora le features categoriali: \"belongs_to_collection\", \"homepage\", \"original_language\".\n\nLe features \"belongs_to_collection\" e \"homepage\" ho già descritto come le tratto. Invece abbiamo due alternative su come trattare \"original_language\". Per prima cosa allora aggiungiamo \"belongs_to_collection\" e \"homepage\".\n\nLa funzione **add_categorial** prende il dataframe completo (*dataframe*) e ritorna newdf, che è il datframe con le sole features categoriali selezionate e da aggiungere (appunto \"belongs_to_collection\" e \"homepage\"). Ritorna dunque solo le feature oggetto di studio, lavorate e processate. \n\nConcatendando *df* e *newdf* in *df_tmp* otteniamo di fatto il dataframe con tutte le features fin'ora lavorate e selzionate. 
Sovrascriveremo *df* con *df_tmp* solo nel caso in cui ne valga la pena, ovvero solo nel caso in cui l'aggiunta di tali features migliora effettivamente il modello.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_categorial\nimport pandas as pd\n\nnewdf = add_categorial(dataframe) # newdf è il dataframe delle sole feature categoriali in questione, processate e lavorate.\n\ndf_tmp = pd.concat([df,newdf],axis=1)", "_____no_output_____" ] ], [ [ "Ora quindi abbiamo 6 features in tutto. Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf.info())\nnewdf.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 2 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 belongs_to_collection 2943 non-null int32\n 1 homepage 2943 non-null int32\ndtypes: int32(2)\nmemory usage: 46.0 KB\nNone\n" ], [ "X = df_tmp.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.0030900271402999587 | k: 8\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.003636012580774361 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.003027217124926855\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.00291691202078572 | k: 49\n" ] ], [ [ "kNN, albero di decisione e regressione lineare rimangono piuttosto stabili rispetto a prima. La random forest migliora il suo MSE sul validation. Il modello migliore ora risulta la random forest con anche le features categoriali. MSE : 0.00292. 
(49 alberi).\n\nDunque **aggiungiamo tali features**: riportiamo ciò su df.", "_____no_output_____" ] ], [ [ "df = df_tmp", "_____no_output_____" ] ], [ [ "**ORIGINAL_LANGUAGE**\n\nAggiungiamo ora la feature \"original_language\". Abbiamo due alternative su come trattare \"original_language\". La funzione **add_language_1** esegue la prima alternativa, mentre la funzione **add_language_2** esegue la seconda alternativa. \n\nEntrambe le funzioni prendono in input il dataframe completo (*dataframe*) e ritornano *newdf*, ovvero il datframe delle features selezionate e lavorate. add_language_1 --> *newdf_1* ; add_language_2 --> *newdf_2*. \n\nConcateniamo *df* con *newdf_1* in *df_tmp_1* e *df* con *newdf_2* in *df_tmp_2*. Valutiamo quale alternativa è migliore e sovrascriviamo sulla base di ciò *df*.", "_____no_output_____" ], [ "**Alternativa 1**", "_____no_output_____" ], [ "\"original_language\" diventa semplicemente una feature dummy : vale 1 se il film è in lingua inglese, 0 altrimenti.\nAggiungiamo dunque un ulteriore feature. ", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_language_1\nimport pandas as pd\n\nnewdf_1 = add_language_1(dataframe)\n\ndf_tmp_1 = pd.concat([df,newdf_1],axis=1)", "_____no_output_____" ] ], [ [ "Ora abbiamo 7 features in tutto. Vediamo la feature aggiunta.", "_____no_output_____" ] ], [ [ "print(newdf_1.info())\nnewdf1.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 1 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 original_language 2943 non-null int32\ndtypes: int32(1)\nmemory usage: 34.5 KB\nNone\n" ] ], [ [ "Andiamo a valutare.", "_____no_output_____" ] ], [ [ "X = df_tmp_1.values", "_____no_output_____" ] ], [ [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.00305841484563747 | k: 8\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0036360125807743614 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.0030279721615750987\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , 
best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.002868495673118281 | k: 39\n" ] ], [ [ "C'è un miglioramento complessivo dei modelli. Ed in particolare la random forest migliora molto il suo MSE sul validation. Dunque ora il modello migliore risulta la random forest con anche \"original_language\" trattata nella prima alternativa. MSE : 0.002868. (39 alberi)", "_____no_output_____" ], [ "**Alternativa 2**", "_____no_output_____" ], [ "Le prime 7 lingue rispetto al revenue medio le tengo come valori categoriali distinti. Tutte le altre lingue le accorpo nella categoria \"other_language\". Ho dunque una variabile categoriale con 8 valori distinti: tale feature la trasformo in 8 variabili binarie(dummy).\n\nAggiungo in totale 8 feature in più.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_language_2\nimport pandas as pd\n\nnewdf_2 = add_language_2(dataframe)\n\ndf_tmp_2 = pd.concat([df,newdf_2],axis=1)", "_____no_output_____" ] ], [ [ "Abbiamo in totale 14 features. Vediamo le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_2.info())\nnewdf_2.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 8 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 zh 2943 non-null uint8\n 1 en 2943 non-null uint8\n 2 tr 2943 non-null uint8\n 3 ja 2943 non-null uint8\n 4 cn 2943 non-null uint8\n 5 hi 2943 non-null uint8\n 6 de 2943 non-null uint8\n 7 other_language 2943 non-null uint8\ndtypes: uint8(8)\nmemory usage: 46.0 KB\nNone\n" ], [ "X = df_tmp_2.values", "_____no_output_____" ] ], [ [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.0030794267012323588 | k: 8\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.003636012580774361 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.0030354477918669346\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ 
"from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.002890429954409866 | k: 37\n" ] ], [ [ "Gli score sono molto simili a quelli dell'alternativa 1. In ogni caso, lo score sul validation della random forest è peggiorato: dunque il MSE migliore c'era con l'alternativa 1. **Scegliamo l'alternativa 1.**", "_____no_output_____" ] ], [ [ "df = df_tmp_1", "_____no_output_____" ] ], [ [ "# AGGIUNTA FEATURES DATA", "_____no_output_____" ], [ "Aggiungiamo l'unica feature di tipo data: \"release_date\". Come visto, estraiamo da tale feature sia l'anno che il mese. Per l'anno lo trattiamo banalmente come feature numerica, per il mese invece abbiamo 6 alternative diverse da valutare.\n\nPer ogni diversa alternativa c'è una diversa funzione. Ogni funzione prende in input il dataframe completo (*dataframe*) e ritornano *newdf*, ovvero il datframe delle features selezionate e lavorate. Siccome abbiamo 6 diverse funzioni, otteniamo 6 diversi *newdf* : *newdf_1* *newdf_2* *newdf_3* *newdf_4* *newdf_5* *newdf_6*.\n\nConcatendo *df* con i vari *newdf* otteniamo 6 diversi *df_tmp_i*. Valutiamo quale alternativa è migliore e sovrascriviamo sulla base di ciò *df*.", "_____no_output_____" ], [ "## ALTERNATIVA 1", "_____no_output_____" ], [ "Considero il mese semplicemente come una variabile categoriale a 12 livelli: da ciò 12 features dummy binarie.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_data_1\nimport pandas as pd\n\nnewdf_1 = add_data_1(dataframe) \n\ndf_tmp_1 = pd.concat([df,newdf_1],axis=1)", "_____no_output_____" ] ], [ [ "Ora quindi abbiamo 20 features in tutto. 
Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_1.info())\nnewdf_1.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 13 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 year 2943 non-null int64\n 1 gen 2943 non-null uint8\n 2 feb 2943 non-null uint8\n 3 mar 2943 non-null uint8\n 4 apr 2943 non-null uint8\n 5 may 2943 non-null uint8\n 6 jun 2943 non-null uint8\n 7 jul 2943 non-null uint8\n 8 aug 2943 non-null uint8\n 9 sep 2943 non-null uint8\n 10 oct 2943 non-null uint8\n 11 nov 2943 non-null uint8\n 12 dec 2943 non-null uint8\ndtypes: int64(1), uint8(12)\nmemory usage: 80.5 KB\nNone\n" ], [ "X = df_tmp_1.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.004054620541045222 | k: 2\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0035631307899930743 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.0030141282297215825\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.0026986017100130806 | k: 43\n" ] ], [ [ "Lo score migliore è, come sempre, quello della random forest. MSE : 0.002699 (43 alberi). Lo score è migliorato rispetto al modello senza features della data.", "_____no_output_____" ], [ "## ALTERNATIVA 2", "_____no_output_____" ], [ "Tengo come valori distinti solo i primi 5 mesi rispetto alla numerosità di film: tutti gli altri film li accorpo nel livello \"other_month\". 
Ottengo quindi 6 livelli possibili --> 6 nuove features dummy.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_data_2\nimport pandas as pd\n\nnewdf_2 = add_data_2(dataframe) \n\ndf_tmp_2 = pd.concat([df,newdf_2],axis=1)", "_____no_output_____" ] ], [ [ "Ora quindi abbiamo 14 features in tutto. Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_2.info())\nnewdf_2.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 7 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 year 2943 non-null int64\n 1 sep 2943 non-null int32\n 2 oct 2943 non-null int32\n 3 dec 2943 non-null int32\n 4 aug 2943 non-null int32\n 5 apr 2943 non-null int32\n 6 other_month 2943 non-null int64\ndtypes: int32(5), int64(2)\nmemory usage: 126.5 KB\nNone\n" ], [ "X = df_tmp_2.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.00362015988930369 | k: 2\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0035614476350532887 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.0030094827336334144\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.0026956274045434924 | k: 41\n" ] ], [ [ "Lo score minore (sempre di random forest) è migliore rispetto all'alternativa 1. MSE : 0.002696(41 alberi).", "_____no_output_____" ], [ "## ALTERNATIVA 3", "_____no_output_____" ], [ "Considero come valori possibili solo i primi 5 mesi con media revenue maggiore. Tutti gli altri mesi li accorpo nel valore \"other_month\". 
Ottengo quindi 6 livelli possibili --> 6 nuove features dummy.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_data_3\nimport pandas as pd\n\nnewdf_3 = add_data_3(dataframe) \n\ndf_tmp_3 = pd.concat([df,newdf_3],axis=1)", "_____no_output_____" ] ], [ [ "Ora quindi abbiamo 14 features in tutto. Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_3.info())\nnewdf_3.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 7 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 year 2943 non-null int64\n 1 jun 2943 non-null int32\n 2 dec 2943 non-null int32\n 3 jul 2943 non-null int32\n 4 may 2943 non-null int32\n 5 nov 2943 non-null int32\n 6 other_month 2943 non-null int64\ndtypes: int32(5), int64(2)\nmemory usage: 126.5 KB\nNone\n" ], [ "X = df_tmp_3.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.0036721813546296738 | k: 4\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0036021422947714516 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.0030163765210121252\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.002686418614642118 | k: 41\n" ] ], [ [ "Lo score minore (sempre di random forest) è migliore rispetto all'alternativa 2: per ora l'alternativa 3 è la migliore. MSE : 0.002686 (41 alberi).", "_____no_output_____" ], [ "## ALTERNATIVA 4", "_____no_output_____" ], [ "Creiamo una sola feature categoriale binaria (dummy) : 1 se il mese del film è nei primi 6 mesi con revenue maggiore ; 0 se è nei 6 con revenue peggiore. 
", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_data_4\nimport pandas as pd\n\nnewdf_4 = add_data_4(dataframe) \n\ndf_tmp_4 = pd.concat([df,newdf_4],axis=1)", "_____no_output_____" ] ], [ [ "Ora quindi abbiamo 9 features in tutto. Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_4.info())\nnewdf_4.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 2 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 year 2943 non-null int64\n 1 month 2943 non-null int64\ndtypes: int64(2)\nmemory usage: 69.0 KB\nNone\n" ], [ "X = df_tmp_4.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.003198636098418034 | k: 4\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0036037666434928796 | k: 12\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.003003535664250761\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.0026956743803049936 | k: 36\n" ] ], [ [ "Lo score minore (sempre di random forest) non è migliore rispetto all'alternativa 3: l'alternativa 3 rimane la migliore.", "_____no_output_____" ], [ "## ALTERNATIVA 5", "_____no_output_____" ], [ "Dividiamo i mesi in 3 gruppi : il primo mese più rilevante ; gli altri 5 mesi più rilevanti ; i restanti 6 mesi più rilevanti.\nFeature categoriale con 3 livelli --> dunque 3 features dummy.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_data_5\nimport pandas as pd\n\nnewdf_5 = add_data_5(dataframe) \n\ndf_tmp_5 = pd.concat([df,newdf_5],axis=1)", "_____no_output_____" 
] ], [ [ "Ora quindi abbiamo 11 features in tutto. Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_5.info())\nnewdf_5.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 4 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 year 2943 non-null int64\n 1 month_group_1 2943 non-null uint8\n 2 month_group_2 2943 non-null uint8\n 3 month_group_3 2943 non-null uint8\ndtypes: int64(1), uint8(3)\nmemory usage: 54.6 KB\nNone\n" ], [ "X = df_tmp_5.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.0030331000820939306 | k: 4\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.003630437775036629 | k: 11\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.0030177435640194672\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.0026877408227620643 | k: 30\n" ] ], [ [ "Lo score minore (sempre di random forest) non è migliore rispetto all'alternativa 3: l'alternativa 3 rimane la migliore.", "_____no_output_____" ], [ "## ALTERNATIVA 6", "_____no_output_____" ], [ "Dividiamo i mesi in 3 gruppi : primi 4 mesi migliori rispetto a revenue; successivi 4 mesi migliori ; ultimi 4 mesi. Dunque sempre 3 livelli, ma questa volta più bilanciati. 
Dunque abbiamo 3 livelli possibili per la features categorica mese: da ciò 3 features dummy binarie.", "_____no_output_____" ] ], [ [ "from lavorazione_dataset_NumCatData import add_data_6\nimport pandas as pd\n\nnewdf_6 = add_data_6(dataframe) \n\ndf_tmp_6 = pd.concat([df,newdf_6],axis=1)", "_____no_output_____" ] ], [ [ "Ora quindi abbiamo 11 features in tutto. Ecco le features aggiunte.", "_____no_output_____" ] ], [ [ "print(newdf_6.info())\nnewdf_6.describe()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 4 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 year 2943 non-null int64\n 1 month_group_1 2943 non-null uint8\n 2 month_group_2 2943 non-null uint8\n 3 month_group_3 2943 non-null uint8\ndtypes: int64(1), uint8(3)\nmemory usage: 54.6 KB\nNone\n" ], [ "X = df_tmp_6.values", "_____no_output_____" ] ], [ [ "Valutiamo i 4 algoritmi di learning su tale dataset.", "_____no_output_____" ], [ "### **1) KNN (con preprocessing)**", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\n\nmodels = [KNeighborsRegressor(n_neighbors=k) for k in range(1,50)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X ,y ,scale=True, plotta=True, plottaTrain=True, \n plottaTest=True, xvalues=range(1,50), xlabel=\"Numero vicini\", \n title=\"Valutazione modelli kNN con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,50))[best_model])", "MSE migliore: 0.0032985225754586693 | k: 8\n" ] ], [ [ "### 2) DECISION TREE REGRESSOR", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\n\nmodels = [DecisionTreeRegressor(max_leaf_nodes=k) for k in range(2,52)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(2,52), xlabel=\"Numero massimo foglie\",\n title=\"Valutazione modelli decision tree con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(2,52))[best_model])", "MSE migliore: 0.0037353947899219026 | k: 9\n" ] ], [ [ "### 3) LINEAR REGRESSION", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression(fit_intercept=True) # Modello regressione lineare\n\ntrain, val, test = compute_train_val_test(model, X, y)\n\nprint(\"MSE : \",val)", "MSE : 0.002990533213107489\n" ] ], [ [ "### 4) RANDOM FOREST", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\nmodels = [RandomForestRegressor(n_estimators=k) for k in range(1,51)]\n\nlist_trainValTest , best_model = model_selection_TrainValTest(models, X, y, plotta=True, plottaTrain=True, plottaTest=True,\n xvalues=range(1,51), xlabel=\"Numero alberi\",\n title=\"Valutazione modelli random forest con score sul training/validation/test\" )\n\nprint(\"MSE migliore: \",list_trainValTest[best_model][1],\" | k: \",list(range(1,51))[best_model])", "MSE migliore: 0.0026721842311812276 | k: 39\n" ] ], [ [ "Lo score minore (sempre di random forest) è migliore rispetto all'alternativa 3: l'alternativa 6 è la migliore. **In definitiva dunque scegliamo l'alternativa 6.** MSE : 0.002672 (39 alberi).\n\nRiportiamo ciò su *df*.", "_____no_output_____" ] ], [ [ "df = df_tmp_6", "_____no_output_____" ] ], [ [ "**Il dataset lavorato ottenuto fin'ora è dunque il seguente. 
11 features.**", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2943 entries, 0 to 2999\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 budget 2943 non-null float64\n 1 budget_dummy 2943 non-null int32 \n 2 popularity 2943 non-null float64\n 3 runtime 2943 non-null float64\n 4 belongs_to_collection 2943 non-null int32 \n 5 homepage 2943 non-null int32 \n 6 original_language 2943 non-null int32 \n 7 year 2943 non-null int64 \n 8 month_group_1 2943 non-null uint8 \n 9 month_group_2 2943 non-null uint8 \n 10 month_group_3 2943 non-null uint8 \ndtypes: float64(3), int32(4), int64(1), uint8(3)\nmemory usage: 169.6 KB\n" ] ], [ [ "**Il migliore modello risulta random forest con 39 alberi. MSE : 0.002672.**\n\nNel notebook successivo si introducono le features di tipo insiemistico e testuale.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a406a42e812691235e4c0dce3d5729267d9d675
3,838
ipynb
Jupyter Notebook
notebooks/beginner/exercises/strings_exercise.ipynb
hapoyige/learn-python3
da587a7445269a6cc04c2224cd489128fbc5acbe
[ "MIT" ]
null
null
null
notebooks/beginner/exercises/strings_exercise.ipynb
hapoyige/learn-python3
da587a7445269a6cc04c2224cd489128fbc5acbe
[ "MIT" ]
null
null
null
notebooks/beginner/exercises/strings_exercise.ipynb
hapoyige/learn-python3
da587a7445269a6cc04c2224cd489128fbc5acbe
[ "MIT" ]
null
null
null
21.322222
181
0.53752
[ [ [ "# 1. Fill missing pieces\nFill `____` pieces below to have correct values for `lower_cased`, `stripped` and `stripped_lower_case` variables.", "_____no_output_____" ] ], [ [ "original = ' Python strings are COOL! '\nlower_cased = original.lower()\nstripped = original.strip()\nstripped_lower_cased = original.lower().strip()", "_____no_output_____" ] ], [ [ "Let's verify that the implementation is correct by running the cell below. `assert` will raise `AssertionError` if the statement is not true. ", "_____no_output_____" ] ], [ [ "assert lower_cased == ' python strings are cool! '\nassert stripped == 'Python strings are COOL!'\nassert stripped_lower_cased == 'python strings are cool!'", "_____no_output_____" ] ], [ [ "# 2. Prettify ugly string\nUse `str` methods to convert `ugly` to wanted `pretty`.", "_____no_output_____" ] ], [ [ "ugly = ' tiTle of MY new Book\\n\\n'", "_____no_output_____" ], [ "# Your implementation:\npretty = ugly.strip().title()", "_____no_output_____" ] ], [ [ "Let's make sure that it does what we want. `assert` raises [`AssertionError`](https://docs.python.org/3/library/exceptions.html#AssertionError) if the statement is not `True`.", "_____no_output_____" ] ], [ [ "print('pretty: {}'.format(pretty))\nassert pretty == 'Title Of My New Book'", "pretty: Title Of My New Book\n" ] ], [ [ "# 3. Format string based on existing variables\nCreate `sentence` by using `verb`, `language`, and `punctuation` and any other strings you may need.", "_____no_output_____" ] ], [ [ "verb = 'is'\nlanguage = 'Python'\npunctuation = '!'", "_____no_output_____" ], [ "# Your implementation:\nsentence = 'Learning {} {} fun{}'.format(language, verb, punctuation)", "_____no_output_____" ], [ "print('sentence: {}'.format(sentence))\nassert sentence == 'Learning Python is fun!'", "sentence: Learning Python is fun!\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a4071e76b602c0a149a34d07daaf14b51cf809f
788,863
ipynb
Jupyter Notebook
start_1a.ipynb
NicholasGoh/nndl
bc6540bb6a90796b8e75eda5cba9aeb5a101bb07
[ "MIT" ]
null
null
null
start_1a.ipynb
NicholasGoh/nndl
bc6540bb6a90796b8e75eda5cba9aeb5a101bb07
[ "MIT" ]
null
null
null
start_1a.ipynb
NicholasGoh/nndl
bc6540bb6a90796b8e75eda5cba9aeb5a101bb07
[ "MIT" ]
null
null
null
532.296221
49,268
0.941011
[ [ [ "import tensorflow\nimport pandas as pd\nimport time\nimport numpy as np\n\n# ignore all info and warnings but not error messages\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\n# tensorflow libraries\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\n\n# sklearn libraries are useful for preprocessing, performance measures, etc.\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ] ], [ [ "# Read Data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('./features_30_sec.csv')\ndf.head()", "_____no_output_____" ], [ "df['label'].value_counts()", "_____no_output_____" ] ], [ [ "Split and scale dataset", "_____no_output_____" ] ], [ [ "columns_to_drop = ['label','filename', 'length']\n\ndef prepare_dataset(df, columns_to_drop, test_size, random_state):\n\n # Encode the labels from 0 to n_classes-1 \n label_encoder = preprocessing.LabelEncoder()\n df['label'] = label_encoder.fit_transform(df['label'])\n \n # devide data to train and test\n df_train, df_test = train_test_split(df, test_size=test_size, random_state=random_state)\n \n # scale the training inputs\n x_train = df_train.drop(columns_to_drop,axis=1)\n y_train = df_train['label'].to_numpy()\n \n standard_scaler = preprocessing.StandardScaler()\n x_train_scaled = standard_scaler.fit_transform(x_train)\n\n #scale and prepare testing data\n x_test = df_test.drop(columns_to_drop,axis=1)\n x_test_scaled = standard_scaler.transform(x_test)\n y_test = df_test['label'].to_numpy() \n \n return x_train_scaled, y_train, x_test_scaled, y_test", "_____no_output_____" ], [ "x_train, y_train, x_test, y_test = prepare_dataset(df, columns_to_drop, test_size=0.3, random_state=0)\n\nprint(x_train.shape, y_train.shape)\nprint(x_test.shape, y_test.shape)", "(700, 57) (700,)\n(300, 57) (300,)\n" ] ], [ [ "# Q1", "_____no_output_____" ], [ "For all parts, `train`, `valid`, `test` will be defined as follows:\n\n`train`:\n- data that network weights are updated after seeing\n\n`valid`:\n- data that network weights are **NOT** updated after seeing\n\n`test`:\n- data that network will only see only once to evaluate generability\n - either with `model.evaluate()`\n - or training on (`train` + `valid`), validating on `test` after cross validation\n - to get better estimate of model performance with chosen hyperparameters", "_____no_output_____" ], [ "As seen below,\n```python\ny_valid[2] == 2 # integer\n```\ninstead of something like\n```python\ny_valid == [0 1 0 0 0] # one hot\n```\nso `sparse_categorical_crossentropy` should be used as opposed to `categorical_crossentropy`", "_____no_output_____" ] ], [ [ "x_valid = x_test\ny_valid = y_test\nclasses = len(df['label'].unique())\ny_valid[2]", "_____no_output_____" ], [ "def run_network(\n train=(x_train, y_train),\n valid=(x_valid, y_valid),\n dropout=.3,\n neurons=16,\n batch_size=1,\n epochs=50,\n verbose=0,\n callbacks=[],\n summary=False\n):\n # clear previous models\n tf.keras.backend.clear_session()\n \n model = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=(x_train.shape[1])),\n tf.keras.layers.Dense(neurons, 'relu'),\n tf.keras.layers.Dropout(dropout),\n tf.keras.layers.Dense(classes)\n ])\n if summary:\n return model.summary()\n \n model.compile(\n tf.optimizers.Adam(),\n metrics='accuracy',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # 
for efficiency\n )\n return model.fit(\n x=train[0],\n y=train[1],\n verbose=verbose,\n epochs=epochs,\n batch_size=batch_size,\n callbacks=callbacks,\n validation_data=valid\n )", "_____no_output_____" ] ], [ [ "Model architecture", "_____no_output_____" ] ], [ [ "history = run_network(summary=True)", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 16) 928 \n_________________________________________________________________\ndropout (Dropout) (None, 16) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 170 \n=================================================================\nTotal params: 1,098\nTrainable params: 1,098\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "%%time\nhistory = run_network()", "CPU times: user 1min 8s, sys: 6.77 s, total: 1min 14s\nWall time: 41.5 s\n" ], [ "import matplotlib.pyplot as plt\ndef plot(history, suptitle_label=''):\n f, axes = plt.subplots(1, 2, figsize=(12, 4))\n f.subplots_adjust(top=.75 if suptitle_label == '' else .7)\n\n accuracy = history['accuracy']\n loss = history['loss']\n val_accuracy = history['val_accuracy']\n val_loss = history['val_loss']\n \n axes[1].plot(history['accuracy'])\n axes[1].plot(history['val_accuracy'])\n axes[1].set_title('Model accuracy')\n axes[1].set(ylabel = 'accuracy', xlabel = 'Epoch')\n axes[1].legend(['Train', 'Valid'], loc='upper left')\n axes[0].plot(history['loss'])\n axes[0].plot(history['val_loss'])\n axes[0].set_title('Model loss')\n axes[0].set(ylabel = 'Loss', xlabel = 'Epoch')\n axes[0].legend(['Train', 'Valid'], loc='upper left')\n axes[0].grid()\n axes[1].grid()\n \n title = (\n suptitle_label +\n 'Min Training loss: {:.{}f}\\n'.format(np.min(loss), 3) +\n 'Max Training accuracy: {:.{}f}\\n'.format(np.max(accuracy), 3) +\n 'Min Validation loss: {:.{}f}\\n'.format(np.min(val_loss), 3) +\n 'Max Validation accuracy: {:.{}f}\\n'.format(np.max(val_accuracy), 3)\n )\n f.suptitle(title)", "_____no_output_____" ] ], [ [ "Observations:\n- for both loss and accuracy:\n - `valid` diverges from `train` at around epoch 10\n- loss diverges a lot more than accuracy\n\nInsights:\n- model starts overfitting around epoch 10\n - memorizing `train`\n - learning information (weights updated) that does not generalise to predicting `valid`\n- large divergence in loss does not mean large divergence in accuracy\n - not inversely related (although this is intuitive)\n - predictions may become more uncertain\n - probability of predicting `1` when class is indeed `1` decreases but is still the most probable when compared against the other classes, so accuracy may plateau while loss increases", "_____no_output_____" ] ], [ [ "plot(history.history)", "_____no_output_____" ] ], [ [ "# Q2", "_____no_output_____" ], [ "- 30% of data that was `valid` is now `test`\n - will be withheld from training and validation until hyperparameters are chosen\n- 70% of data that was `train` will be split into 3 folds\n - each fold will be taken as `valid` once, with the other folds as `train`", "_____no_output_____" ] ], [ [ "x_test = x_valid\ny_test = y_valid\nrkfold = RepeatedKFold(n_splits=3, random_state=0)", "_____no_output_____" ] ], [ [ "RepeatedKFold is used\n- for each of the 6 `batch_sizes`\n - 3 fold is repeated 10 times\n- average will be taken across this 10 sets of 3 folds as 
seen in the next cell", "_____no_output_____" ] ], [ [ "%%time\nkfold_history = {}\nbatch_sizes = [1, 4, 8, 16, 32, 64]\n\nfor batch_size in batch_sizes:\n kfold_history[batch_size] = []\n for train, valid in rkfold.split(x_train):\n train_x, valid_x = x_train[train], x_train[valid]\n train_y, valid_y = y_train[train], y_train[valid]\n \n history = run_network(\n train=(train_x, train_y),\n valid=(valid_x, valid_y),\n batch_size=batch_size)\n kfold_history[batch_size].append(history)", "CPU times: user 47min 10s, sys: 4min 35s, total: 51min 45s\nWall time: 28min 31s\n" ] ], [ [ "Observations:\n- increasing `batch_size` results in poorer best performance throughout the 50 epochs\n- larger `batch_size` decreases divergence\n- `batch_size=64` has smallest divergence and thus is the optimal `batch_size`\n\nInsights:\n- convergence to global minimum of cost function is slower for larger `batch_size`\n- but useful information is learnt\n - weights updated to improve `train` is generalisable to `valid` as well", "_____no_output_____" ] ], [ [ "history = {}\n\nfor batch_size in batch_sizes:\n for key in kfold_history[1][0].history.keys():\n history[key] = np.mean(\n [h.history[key] for h in kfold_history[batch_size]],\n axis=0\n )\n plot(history, suptitle_label=f'batch_size: {batch_size}\\n')", "_____no_output_____" ], [ "import time\n\nclass EpochTime(keras.callbacks.Callback):\n def __init__(self, logs={}):\n self.time_taken = []\n def on_epoch_begin(self, epoch, logs=None):\n self.start_time = time.time()\n def on_epoch_end(self, epoch, logs=None):\n self.time_taken.append(time.time() - self.start_time)", "_____no_output_____" ], [ "%%time\n\net = EpochTime()\nfor batch_size in batch_sizes:\n for train, valid in rkfold.split(x_train):\n train_x, valid_x = x_train[train], x_train[valid]\n train_y, valid_y = y_train[train], y_train[valid]\n \n history = run_network(\n train=(train_x, train_y),\n valid=(valid_x, valid_y),\n epochs=1,\n batch_size=batch_size,\n callbacks=[et]\n )", "CPU times: user 2min 14s, sys: 4.46 s, total: 2min 19s\nWall time: 1min 52s\n" ] ], [ [ "- `batch_size=64` is the fastest (due to capitalization of optimization of matrix multiplication)\n - multi core cpu parallelization\n - gpu core parallelization\n- `batch_size=64` also results in better generalisation as described before\n\nThus `batch_size=64` is optimal", "_____no_output_____" ] ], [ [ "table = {k: [] for k in batch_sizes}\nfor i, t in enumerate(et.time_taken):\n table[batch_sizes[i // 30]].append(t)\n \npd.DataFrame.from_dict(table).apply(lambda x: [round(np.median(x), 2)])", "_____no_output_____" ] ], [ [ "Now that `batch_size=64` is chosen\n- retrain on `train` + `valid` and validate on `test` to get better estimate on model's performance as mentioned\n- usually before deployment of model, it will be retrained on all of the data, with no data set aside for `valid` or `test`\n - since estimate of model's performance is acceptable", "_____no_output_____" ] ], [ [ "x_valid = x_test\ny_valid = y_test", "_____no_output_____" ], [ "%%time\nhistory = run_network(batch_size=64)", "CPU times: user 2.91 s, sys: 179 ms, total: 3.09 s\nWall time: 2.1 s\n" ], [ "plot(history.history, suptitle_label=f'optimal batch_size: 64\\n')", "_____no_output_____" ] ], [ [ "minibatch:\n- capitalises on vectorisation to train faster (matrix multiplication of `batch_size` number of samples at one go)\n\nstochastic:\n- converges/diverges faster but often at local minima instead of global minima\n\nmodel training:\n- stochastic gradient 
descent is almost never used", "_____no_output_____" ], [ "# Q3", "_____no_output_____" ] ], [ [ "%%time\nkfold_history = {}\nneurons = [8, 16, 32, 64]\n\nfor neuron in neurons:\n kfold_history[neuron] = []\n for train, valid in rkfold.split(x_train):\n train_x, valid_x = x_train[train], x_train[valid]\n train_y, valid_y = y_train[train], y_train[valid]\n \n history = run_network(\n train=(train_x, train_y),\n valid=(valid_x, valid_y),\n batch_size=32,\n neurons=neuron\n )\n kfold_history[neuron].append(history)", "CPU times: user 6min 58s, sys: 38.3 s, total: 7min 36s\nWall time: 4min 51s\n" ] ], [ [ "Observations:\n- `neurons=8`\n - smallest divergence and thus is the optimal `neurons`\n - similar to `batch_size=64`, valid accuracy is better than train accuracy.\n - likely due to dropout activating only in training, thus valid outperforms train\n - could also be that for `neurons=8`, `dropout=0.3` is too high\n \nInsights:\n- useful information is learnt\n - weights updated to improve `train` is generalisable to `valid` as well", "_____no_output_____" ] ], [ [ "history = {}\n\nfor neuron in neurons:\n for key in kfold_history[8][0].history.keys():\n history[key] = np.mean(\n [h.history[key] for h in kfold_history[neuron]],\n axis=0\n )\n plot(history, suptitle_label=f'neurons: {neuron}\\n')", "_____no_output_____" ], [ "%%time\nhistory = run_network(\n batch_size=64,\n neurons=8\n)", "CPU times: user 3.16 s, sys: 234 ms, total: 3.4 s\nWall time: 2.4 s\n" ], [ "plot(history.history, suptitle_label=f'optimal neurons: 8\\n')", "_____no_output_____" ] ], [ [ "Other things that can be done:\n- hidden layers\n - vary number of hidden layers\n \n- learning rate\n - start with `lr>1e-3` which is the default for `Adam`\n - use a learning rate scheduler to decay learning rate as epochs progress\n - big steps towards global minima at early epochs as randomly initialised weights are likely far from good\n - slowly reduce steps as epochs increase as global minima is likely close, want to give chance to find it\n \n- optimizer\n - change optimizer\n - tweak optimizer params like `beta_1` for `Adam`\n - usually small improvements and/or not worth the time tweaking\n \n- others\n - many other ways that will not be listed here", "_____no_output_____" ], [ "# Q4", "_____no_output_____" ] ], [ [ "def run_2_hidden_network(\n train=(x_train, y_train),\n valid=(x_valid, y_valid),\n dropout=.3,\n neurons=16,\n batch_size=1,\n epochs=50,\n verbose=0,\n callbacks=[],\n summary=False\n):\n # clear previous models\n tf.keras.backend.clear_session()\n \n model = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=(x_train.shape[1])),\n tf.keras.layers.Dense(neurons, 'relu'),\n tf.keras.layers.Dropout(dropout),\n tf.keras.layers.Dense(neurons, 'relu'),\n tf.keras.layers.Dropout(dropout),\n tf.keras.layers.Dense(classes)\n ])\n if summary:\n return model.summary()\n \n model.compile(\n tf.optimizers.Adam(),\n metrics='accuracy',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n )\n return model.fit(\n x=train[0],\n y=train[1],\n verbose=verbose,\n epochs=epochs,\n batch_size=batch_size,\n callbacks=callbacks,\n validation_data=valid\n )", "_____no_output_____" ], [ "%%time\ntwo_hidden_layer = run_2_hidden_network(summary=True)", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 16) 928 
\n_________________________________________________________________\ndropout (Dropout) (None, 16) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 16) 272 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 16) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 170 \n=================================================================\nTotal params: 1,370\nTrainable params: 1,370\nNon-trainable params: 0\n_________________________________________________________________\nCPU times: user 37.7 ms, sys: 0 ns, total: 37.7 ms\nWall time: 35.5 ms\n" ], [ "%%time\ntwo_hidden_layer = run_2_hidden_network(\n neurons=8\n)\none_hidden_layer = run_network(\n batch_size=64,\n neurons=8\n)", "CPU times: user 1min 28s, sys: 8.09 s, total: 1min 36s\nWall time: 54.4 s\n" ] ], [ [ "Observations:\n- two hidden layer\n - loss and accuracy is more unstable\n - but better performance loss-wise and accuracy-wise\n - possibly due to stochastic nature\n- one hidden layer\n - little loss and accuracy divergence\n - stable (little fluctuation as opposed to two hidden layer)\n \nInsights:\n- this suggests 2 hidden layers overfits on `train` more as more layers in allows for the model to memorize the `train` data and not generalize to `valid` data", "_____no_output_____" ] ], [ [ "plot(one_hidden_layer.history, suptitle_label=f'1 hidden layer\\n')\nplot(two_hidden_layer.history, suptitle_label=f'2 hidden layers\\n')", "_____no_output_____" ] ], [ [ "# Q5", "_____no_output_____" ] ], [ [ "%%time\nno_dropout = run_network(dropout=0)\ndropout = run_network()", "CPU times: user 2min 32s, sys: 14.3 s, total: 2min 46s\nWall time: 1min 31s\n" ] ], [ [ "- larger divergence without dropout\n - When dropout is removed, the lack of regularisation causes the model to overfit even more on `train`", "_____no_output_____" ] ], [ [ "plot(no_dropout.history, suptitle_label=f'No Dropout\\n')\nplot(dropout.history, suptitle_label=f'Dropout\\n')", "_____no_output_____" ] ], [ [ "Another approach to combat overfitting is `tf.keras.layers.BatchNormalization()`\n\nNormalize output after first hidden layer to have each dimension in a similar scale", "_____no_output_____" ], [ "# Summary", "_____no_output_____" ], [ "Instead of using hand crafted features, deep features can be used. For example, audio files can be converted to Mel spectrograms (sample attached below). This can now be approached as a computer vision problem, where (deep) visual features are learnt from the convolutional layers and used for classification instead.\n\nThis is much more scalable as deep features are implicitly learnt; in other words, no engineer is needed to think up ways/features to describe each audio file such as harmonics, as these will be implicitly learnt in theory\n\n![mel_sample](mel_spectrogram.png)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
4a4076728c4da9903df04d7bfad9520de9a4dc9c
11,151
ipynb
Jupyter Notebook
module1-aws-sagemaker/LS_DS_331_AWS_SageMaker.ipynb
wjarvis2/DS-Unit-3-Sprint-3-Big-Data
4706362317be8eae6ac888003390dfac40060bec
[ "MIT" ]
null
null
null
module1-aws-sagemaker/LS_DS_331_AWS_SageMaker.ipynb
wjarvis2/DS-Unit-3-Sprint-3-Big-Data
4706362317be8eae6ac888003390dfac40060bec
[ "MIT" ]
null
null
null
module1-aws-sagemaker/LS_DS_331_AWS_SageMaker.ipynb
wjarvis2/DS-Unit-3-Sprint-3-Big-Data
4706362317be8eae6ac888003390dfac40060bec
[ "MIT" ]
null
null
null
20.804104
165
0.530715
[ [ [ "_Lambda School Data Science — Big Data_\n\n# AWS SageMaker\n\n### Links\n\n#### AWS\n- The Open Guide to Amazon Web Services: EC2 Basics _(just this one short section!)_ https://github.com/open-guides/og-aws#ec2-basics\n- AWS in Plain English https://www.expeditedssl.com/aws-in-plain-english\n- Amazon SageMaker » Create an Amazon SageMaker Notebook Instance https://docs.aws.amazon.com/sagemaker/latest/dg/gs-setup-working-env.html\n- Amazon SageMaker » Install External Libraries https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-add-external.html \n\n`conda install -n python3 bokeh dask datashader fastparquet numba python-snappy`\n\n#### Dask\n- Why Dask? https://docs.dask.org/en/latest/why.html\n- Use Cases https://docs.dask.org/en/latest/use-cases.html\n- User Interfaces https://docs.dask.org/en/latest/user-interfaces.html\n\n#### Numba\n- A ~5 minute guide http://numba.pydata.org/numba-doc/latest/user/5minguide.html", "_____no_output_____" ], [ "## 1. Estimate pi\nhttps://en.wikipedia.org/wiki/Approximations_of_π#Summing_a_circle's_area", "_____no_output_____" ], [ "### With plain Python", "_____no_output_____" ] ], [ [ "import random\n\ndef monte_carlo_pi(nsamples):\n acc = 0\n for _ in range(int(nsamples)):\n x = random.random()\n y = random.random()\n if (x**2 + y**2) < 1.0:\n acc += 1\n return 4.0 * acc / nsamples", "_____no_output_____" ], [ "%%time\nmonte_carlo_pi(1e7)", "_____no_output_____" ] ], [ [ "### With Numba\nhttp://numba.pydata.org/", "_____no_output_____" ] ], [ [ "from numba import njit", "_____no_output_____" ] ], [ [ "## 2. Loop a slow function", "_____no_output_____" ], [ "### With plain Python", "_____no_output_____" ] ], [ [ "from time import sleep\n\ndef slow_square(x):\n sleep(1)\n return x**2", "_____no_output_____" ], [ "%%time\n[slow_square(n) for n in range(16)]", "_____no_output_____" ] ], [ [ "### With Dask\n- https://examples.dask.org/delayed.html\n- http://docs.dask.org/en/latest/setup/single-distributed.html", "_____no_output_____" ] ], [ [ "from dask import compute, delayed", "_____no_output_____" ] ], [ [ "## 3. 
Analyze millions of Instacart orders", "_____no_output_____" ], [ "### Download data\nhttps://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2", "_____no_output_____" ] ], [ [ "!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz", "_____no_output_____" ], [ "!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz", "_____no_output_____" ], [ "%cd instacart_2017_05_01", "_____no_output_____" ], [ "!ls -lh *.csv", "_____no_output_____" ] ], [ [ "### With Pandas", "_____no_output_____" ], [ "#### Load & merge data", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "%%time\norder_products = pd.concat([\n pd.read_csv('order_products__prior.csv'), \n pd.read_csv('order_products__train.csv')])\n\norder_products.info()", "_____no_output_____" ], [ "order_products.head()", "_____no_output_____" ], [ "products = pd.read_csv('products.csv')\nproducts.info()", "_____no_output_____" ], [ "products.head()", "_____no_output_____" ], [ "%%time\norder_products = pd.merge(order_products, products[['product_id', 'product_name']])", "_____no_output_____" ], [ "order_products.head()", "_____no_output_____" ] ], [ [ "#### Most popular products?", "_____no_output_____" ], [ "#### Organic?", "_____no_output_____" ], [ "### With Dask\nhttps://examples.dask.org/dataframe.html", "_____no_output_____" ] ], [ [ "import dask.dataframe as dd\nfrom dask.distributed import Client", "_____no_output_____" ] ], [ [ "#### Load & merge data\nhttps://examples.dask.org/dataframes/01-data-access.html#Read-CSV-files", "_____no_output_____" ], [ "http://docs.dask.org/en/latest/dataframe-performance.html#persist-intelligently", "_____no_output_____" ], [ "#### Most popular products?", "_____no_output_____" ], [ "#### Organic?", "_____no_output_____" ], [ "## 4. Fit a machine learning model", "_____no_output_____" ], [ "### Load data", "_____no_output_____" ] ], [ [ "%cd ../ds-predictive-modeling-challenge", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\n\ntrain_features = pd.read_csv('train_features.csv')\ntrain_labels = pd.read_csv('train_labels.csv')\n\nX_train_numeric = train_features.select_dtypes(np.number)\ny_train = train_labels['status_group']", "_____no_output_____" ] ], [ [ "### With 2 cores (like Google Colab)\nhttps://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html", "_____no_output_____" ] ], [ [ "model = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=2, random_state=42, verbose=1)\nmodel.fit(X_train_numeric, y_train)\nprint('Out-of-bag score:', model.oob_score_)", "_____no_output_____" ] ], [ [ "### With 16 cores (on AWS m4.4xlarge)", "_____no_output_____" ], [ "## ASSIGNMENT\n\nRevisit a previous assignment or project that had slow speeds or big data.\n\nMake it better with what you've learned today!\n\nYou can use `wget` or Kaggle API to get data. Some possibilities include:\n\n- https://www.kaggle.com/c/ds1-predictive-modeling-challenge\n- https://www.kaggle.com/ntnu-testimon/paysim1\n- https://github.com/mdeff/fma\n- https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2 \n\n\n\nAlso, you can play with [Datashader](http://datashader.org/) and its [example datasets](https://github.com/pyviz/datashader/blob/master/examples/datasets.yml)!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a40800ef647a512286a69622b1ed03b1d9e22cb
18,624
ipynb
Jupyter Notebook
KWOC .ipynb
basilisk-2001/Statistics-and-Econometrics-for-Data-Science
7a3f74d5245c8e6e3601d6b8eaa4f63f8d0fb5e9
[ "MIT" ]
null
null
null
KWOC .ipynb
basilisk-2001/Statistics-and-Econometrics-for-Data-Science
7a3f74d5245c8e6e3601d6b8eaa4f63f8d0fb5e9
[ "MIT" ]
null
null
null
KWOC .ipynb
basilisk-2001/Statistics-and-Econometrics-for-Data-Science
7a3f74d5245c8e6e3601d6b8eaa4f63f8d0fb5e9
[ "MIT" ]
null
null
null
32
89
0.278243
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "df=pd.read_csv(\"Dataset.csv\")", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df1=pd.get_dummies(df['Irrigation'])\ndf2=pd.concat([df1,df],axis=1)", "_____no_output_____" ], [ "df2", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
4a408900c2d36fcbc52f1278ac567a86577717a1
1,356
ipynb
Jupyter Notebook
chapter Five/Lesson 3 Music model/Basic code structure of machine_learning model training.ipynb
TimzOwen/Full-Python-Course
0e2451ee374caa0e553a49d87e2f87f15e770ba7
[ "MIT" ]
1
2022-01-29T10:52:02.000Z
2022-01-29T10:52:02.000Z
chapter Five/Lesson 3 Music model/Basic code structure of machine_learning model training.ipynb
TimzOwen/Full-Python-Course
0e2451ee374caa0e553a49d87e2f87f15e770ba7
[ "MIT" ]
null
null
null
chapter Five/Lesson 3 Music model/Basic code structure of machine_learning model training.ipynb
TimzOwen/Full-Python-Course
0e2451ee374caa0e553a49d87e2f87f15e770ba7
[ "MIT" ]
1
2021-04-06T07:31:27.000Z
2021-04-06T07:31:27.000Z
22.983051
56
0.55236
[ [ [ "# import pandas lib\nimport pandas as pd\n\n# import decision tree from sciklearn\nfrom sklearn.tree import DecisionTreeClassifier\n\n#load the data into pandas\nmusic_data = pd.read_csv('music.csv')\n\n# split the data (\"cleaning the data\").\n# separate the output from other columns\nX = music_data.drop(columns = ['genre'])\n\n# now create an output dataset\ny = music_data['genre']\n\n#create an instance of the model \nmodel = DecisionTreeClassifier()\n\n# feed the model with training data\nmodel.fit(X, y)\n\n# now let the model predict the data\nmodel_prediction = model.predict(X_test)\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
4a409c65db27f1437f1b2f5734dfe2688da11336
724,160
ipynb
Jupyter Notebook
neural_networks/nn_1_lesson.ipynb
QuackingKnox/geekbrains
1f95d970429729513667981df7f6edc805a6509e
[ "MIT" ]
null
null
null
neural_networks/nn_1_lesson.ipynb
QuackingKnox/geekbrains
1f95d970429729513667981df7f6edc805a6509e
[ "MIT" ]
null
null
null
neural_networks/nn_1_lesson.ipynb
QuackingKnox/geekbrains
1f95d970429729513667981df7f6edc805a6509e
[ "MIT" ]
null
null
null
451.752963
217,364
0.93282
[ [ [ "# Введение в искусственные нейронные сети\n# Урок 1. Основы обучения нейронных сетей", "_____no_output_____" ], [ "## Содержание методического пособия:\n\n\n<ol>\n <li>Общие сведения о искусственных нейронных сетях</li>\n <li>Место искусственных нейронных сетей в современном мире</li>\n <li>Области применения</li>\n <li>Строение биологической нейронной сети</li>\n <li>История искусственных нейронных сетей</li>\n <li>Небольшой пример по обучению простой нейронной сети</li>\n <li>Основы обучения нейронных сетей</li>\n <li>Инициализация весов. Функции активации</li>\n <li>Обратное распространение ошибки и градиентный спуск</li>\n <li>Пример построения двухслойной нейронной сети на numpy</li> \n \n</ol>", "_____no_output_____" ], [ "## Общие сведения об искусственных нейронных сетях\n\nВ даннном разделе мы узнаем, что такое нейронные сети и из каких компонентов они состоят.\n\nИскусственнная нейронная сеть - это программная реализация математической модели биологической нейронной сети. Ее цель — извлечь пользу из переданных ей данных. Например, сказать, что на фотографии или сделать какой либо прогноз.\n\nНейронные сети состоят из входных, внутренних и внешних слоев. Внутренние слои отвественны за обучение. Входные слои позволяют ввести данные во внутренние слои, а внешние слои позволяют вывести полезный вывод из данных. Сами слои состоят из отдельных нейронов, которые связаны с другими нейронами и по определенным алгоритмам способны усиливать с ними связь.\n\nЧерез нейронные сети в процессе обучения многократно пропускают какие-либо данные, например изображения и указывают, что на них. За счет работы различных алгоритмов в нейронной сети вырабатывается такая система связей между нейронами, которая позволяет в последствии при переданных ей новых данных получить ответ, что на фотографии.\n\nНо распознавание объектов на фотографиях далеко не единственная сфера применения нейронных сетей. Существует множество видов различных нейронных сетей для разных задач. Основные из этих видов мы разберем в данном курсе. Детальное рассмотрение процесса обучения нейронной сети и базовые сведения об ее архитекутуре будут разобраны во второй половине данного методического пособия.\n\n\n", "_____no_output_____" ], [ "## Место искусственных нейронных сетей в современном мире\n\nВ данном разделе мы разберем какие новшества принесли искусственные нейронные сети в современный мир. Долгое время создание искусственого интеллекта оставалось частью научной фантастики и несмотря на многие предсказания прошлого времени, что искусственный интеллект вот-вот будет создан, искусственный интеллект был создан совсем не давно. По крайней мере люди стали говорить, что они пользуются искусственным интеллектом. Как миним можно сказать, что искусственный интеллект создан с маркетинговой точки зрения. Но, не только с маркетинговой. Отчасти со сторого научной точки зрения он также создан — в 2014 году был пройден Тест Тьюрига.Тем не менее сейчас мы можем говорить только лишь о том, что создан специализированный искусственный интеллект, способный выполнять только определенные задачи, которые раньше было свойственно выполнять только человеку. Т.е. создан так называеймый «узкий искусственный интеллект». Создание общего искусственного ителлекта, т. е. 
программы, которая могла бы мыслить как человек, по прежнему остается фантастикой.\n\nИскусственные нейронные сети, которые являются предметом изучения данного курса являются основой революции в области «узкого искусственного интеллекта» и являются одной из главных надежд для создания общего искусственного интеллекта в будущем. Однако искусственвенный интеллект может строиться не только на базе искусственных нейронных сетей. Более того как мы увидем в разделе посвященным истории, данных подход долгое время считался неперспективным. Так почему же революция совершена именно благодаря им, а не другим подходам? Дело в том, что компьютерные программы давно могут автоматизировать различную работу человека. Им можно дать определенный алгоритм работы и они будут его надежно выполнять. Программы как известно получают на вход данные, обрабатывают их с помощью определенного алгоритма и на выходе опять выдают данные.\nНо возьмем к примеру завод. На нем есть много труда, который можно было автоматизировать. Не трудно составить алгоритм, например, для промышленного манипулятора, который будет переносить определенные тяжелые предметы. С помощью технологий перевода данных обработанных процессором в движения манипулятора можно заставить его совершить определенную полезное работу. Но как ввести данные для обработки? Как дать понять манипулятору какой предмет ему нужно взять? Здесь на помощь может прийти камера. \n\nДолгое время различные алгоритмы компьютерного зрения справлялись с определенными несложными задачами наподобее определения предметов через цвет или его геометрическую форму.\nНо что если нужно работать с предметами разного цвета и формы, но объеденных другими общими свойствами? Например сортировать яблоки и груши. Они могут быть разной формы и цвета, но тем не менее нужно отделить отдно от другого. Человеку не трудно справиться с подобной задачей но для искусственного интеллекта долгое время это оставалось не посильной задачей. Однако с применением глубоких искусственных нейронных сетей начиная с 2012 года выполнение этой задачи стало реальностью. Но не только в сфере распознования образов важную роль сыграли нейронные сети. Также в сфере распознования речи. И не только в сфере распознования, сейчас есть нейронные сети которые могут создавать изображения и синтезировать голос. Также они применяются в многих других сферах.\n\nПодобный переворот случился благодаря тому, что нейронные сети стали повторять собою нейронные сети человеческого мозга, который как раз может легко справляться с подобной задачей. Об этом мы подробнее поговорим в разделе «Строение билогической нейронной сети».\nСейчас же давайте перейдем к разделу «Применение нейронных сетей», чтобы понять какие задачи они могут выполнять и к решению каких проблем реального мира нужно быть готовым специалистам по нейронным сетям.\n\n", "_____no_output_____" ], [ "## Применение нейронных сетей\n\nВ нашем курсе мы будем проходить различные виды нейронных сетей и будем рассматривать в соотвествующих уроках применение каждого вида отдельно. Здесь же в общих чертах нарисуем картину их применения.\n\nНейронные сети применяются в области компьютерного зрения. Начиная с обычных программ распознающих предметы в производстве или распознование личности на телефоне, заканчивая крупными комплексами компьютерного зрения, которые применяются в беспилотных автомобилях.\n\nНейронные сети применяются для распознования голоса — Siri, Google помощник, Alexa, Алиса и прочие голосовые помощники распознают голос человек с помощью нейронных сетей. 
Их применение не ограничивается только лишь распознованием голоса. В последние годы достигнут сущесветнный прогресс в синтезе голоса. Применяются они также в области машинного перевода. Прогресс в этой области как раз связан с ними. \n\nКомбинация из нескольких нейронных сетей позволяет описать содержание фотографии.\nНейронные сети применяются в медицине — для анализа снимков и заключений о болезни человека в некоторых областях лучше чем человек.\n\nНейронные сети также применяются для анализа поведения покупателей, а также на бирже. Находят они применения также в прогнозах погоды.\n\nНейронные сети находят также применение для творчества. Уже сейчас есть художники которые продают картины которые рисуют нейронные сети.\n\nЕсть и другие интересные области применения нейронных сетей. Например они используются для придания цвета старым фотографиям или фильмам. В улучшении качетства изображений. Для чтения по губам. Для генерации фотографий и много другого.\n", "_____no_output_____" ], [ "## Строение биологической нейронной сети\n\nТермин «Нейрон» был введен Г. В. Вальдейером в 1891 г. Что из себя предствляет человечекий нейрон? Часто можно услышать его сравнение с транизистором. Однаком более убедительным является точка зрения нейрофизиологов утверждающих, что нейрон это полноценный компьютер. Человеческий нейрон также как и компьютер призван обрабатывать сигналы. \nТо, что человеческое сознание является результатом работы нейронов было впервые отмечено в работах Александра Бейна (1873) и Уильяма Джеймса (1890). В работах Бейна впервые акт запоминания был сформулирован как акт укрепление нейронных связей.\n\nВ нейронах мозга есть аксоны, через которые в него попадают сигналы, у искусственных нейронов также есть подобные входы. У нейронов мозга есть выходной компонент дендрит и искуственных нейронов также есть выходные компоненты. У нейронов человеческого мозга насчитывается порядка 10 000 связей с другими нейронами у искусственных нейронов их также может быть много. И биологической нейронной сети и в искусственной в случае если сумма поступающих сигналов в нейрон превышает определенный порог то сигнал передается следующему нейрону.\n\nИскусственные нейронные сети работают подобно биологическим — через них пропускаются данные и те связи которые ведут к нужным результатам укрепляются. Но если в человеческом мозге проихсходит засчет утолщения этих связей, то в программных нейронных сетях это происходит за счет увеличение чисел символизирующих эти связи.\n\n\n", "_____no_output_____" ], [ "## История исскусственных нейронных сетей\n\n\nПредставление о нейронных сетях как главном способе создания искусственного интеллекта сложилось далеко не сразу и в данном разделе мы осветим основные вехи этой технологии.\n\nМатематическую модель искусственного нейрона была предложена У. Маккалоком и У. Питтсом в 50-х годах 20 века.\n\nВ виде компьютерной системы нейронная сеть была впервые реализована Фрэнком Розенблаттом в 1960 году. Фрэнк Розенблат создал «Марк-1», который являлся программно-аппаратным комлексом и воплащал в себе простую нейронную сеть в один слой. Можно отметить, что современные сети (на момент ноября 2019 г.) включают в себя 50 — 150 слоев, а экспериментальные достигают размера более 1000 слоев.\n\nОднако волна энтузиазма 60-х годов в отношении искусственных нейронных сетей сменилась скепсисом в отношении них из-за трудностей в совершении определенных логических операций и невозможности получать практические результаты. 
Доминирующим подходом для создания искусственного интеллекта стали экспертные системы. Они являлись по сути продвинутой энциклопедией знаний в той или иной сфере.\n\nОднако в 2012 г. появились глубокие нейронные сети. Т.е. сети с количеством внутренних слоев больше одного. Сначала кол-во слоев таких нейронных сетях было примерно 3-10. Но в силу эффективности подхода по увеличению кол-ва слоев, быстро появились нейронные сети кол-во слоев которых стало исчисляться десятками.\n\nИменно с 2012 г. нейросети стали считаться доминирующим способом в решении многих задач искусственного интеллекта. Стоит задаться вопросом: почему именно в этот период? Прорыв был обеспечен более продвинутой архитектурой, возросшими количеством хранимых компьютерами данных для тренировки нейронных сетей, а также возросшей вычислительной мощностью компьютеров. Также можно отметить появления в эту пору возможности использовать видеокарты для тренировки нейронных сетей, которые оказались лучше приспособленными для решения этих задач чем классические CPU. \n", "_____no_output_____" ], [ "\n\n\n\n", "_____no_output_____" ], [ "## Общие сведения о том, как происходит обучение нейронных сетей", "_____no_output_____" ], [ "Давайте возьмем какую-нибудь типичную задачу для нейросети и попробуем разобрать как она могла бы ее решить. В качества примера задачи можем взять типичный пример — определить на фотографии находиться кот или собака. \n\nСначала давайте подумаем как это данная задача решалась бы без нейронной сети. Вспомним, что фотография это набор пикселей. Пиксели в компьютере репрезентуются матрицей чисел. Если бы все коты были синего цвета, а собаки красного мы могли бы просто детектировать числа отвественные за данные цвета в изображении и на основаннии этого делать выводы о том кот или собака расположены на фотографии. В действительности как мы понимаем это не так. У кота очень много отличительных черт как и у собаки. Перечесление этих уникальных свойсвт займет долгое время. Но стоит учесть вот какое обстоятесльво - усики и лапки и шерстка есть у многих животных. И скорее всего все что нам остается делать это описывать размеры этих усиков, лапок и т. д. Изучать их угла наклона и т. д. Попытки делать это вручную были, как раз до эпохи нейронных сетей. Но результаты были не высокие. Как вы понимаете объем признаков слишком большой. \n\nТаким образом задача нейронной сети содержать в себе необходимый набор признаков которые позволяет ей отличить один объект от другого. В данном уроке будет разобран пример разработки нейронной сети, которая будет обучаться различать разновидности цветка ириса. Данные виды похожи, они состоят из одинаковых элементов, но эти элементы имеют разные размеры в случаи каждого отдельного вида. На этом простом примере мы сможем понять как работают более сложные нейронные сети.\nНо встает вопрос как поместить в нейронную сеть необходимый набор признаков? Ответ на этот вопрос будет даваться в течении всего данного курса и всеравно не будет исчерпывающим. Поскольку для это придется обучать нейронную сеть, а ее обучение порой преобретает характер искусства нежели набор предписаний.\nТем не менее в процессе обучения всегда нужно пройти ряд этапов, которые будут общими для любого процесса обучения.\n\nНам определиться с архитектурой нейронной сети. От удачно выбранной архитектуры будет зависеть насколько быстро мы сможем обучить нейронную сеть, насколько точной она будет, а также сможем ли мы ее обучить в принципе. \nАрхитектура нейронной сети как вы догадываетесь зависит от задачи. 
В зависимости от задачи нам нужно будет выбрать:\n\n- Количество слоев из которых будет состоять нейронная сеть\n- Сколько будет нейронов в этих слоях\n\nКроме этого нам нужно будет подобрать верным образом компоненты, за счет которых будет обучаться нейронная сеть (подробнее о них можно будет узнать в следующих разделах данного мет. пособия)\n\n- Систему инициализации весов нейронов \n- Функцию активации весов нейронов\n- Алгоритм корректировки весов нейронов\n\nЭто безусловно список самых общих компонентов с которыми нам нужно будет определиться, но на данной стадии обучения надо полагать нам их будет достаточно. \nАрхитектуры нейронных сетей мы будем изучать на отдельных уроках этого курса. В этом же уроке мы разберем следующие неотъемлимые компоненты обучения нейронной сети любой архитектуры — инициализация весов, их корректировка, рассмотрим виды функций активации весов нейронной сети, разберем, что такое градиентный спуск и метод обратного распространения ошибки.", "_____no_output_____" ], [ "## Инициализация весов. Функции активации", "_____no_output_____" ], [ "![vesa.png](attachment:vesa.png)", "_____no_output_____" ], [ "Мы с вами выяснили, что в процессе обучения нейронной сети в ней должны вырабататься признаки по которым она сможет определять, что на фотографии. Но как эти признаки будут репрезентованы в нейронной сети? Все эти признаки будут состовлять определенную сложную матрицу чисел. Настолько сложную, что человеку не под силу ее проанализировать. Какие это будут признаки? Это тоже неизвестно. Их может быть так много и они могут быть такими специфическими, что и описать их будет трудно. Для этого и нужны нейросети, они берут на себя содержание и описание в себе этих признаков.\n\nОднако все эти признаки записываются через систему весов нейронов. Веса нейрона отражают толщину связи этого нейрона с др. нейроном. Из совокупности этих связей и состоят признаки. Т.е. другими словами обучить нейронную сеть значит найти нужные веса для ее нейронов. \n\nПервоначально веса можно задать случайными числами. И в процессе обучения они будут все больше и больше подходить под решения задачи. Программно это можно показать на примере следующего фрагмента кода.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n# генерации случайных чисел для инициализации весов\nnp.random.seed(1)\nsynaptic_weights = 2 * np.random.random((3, 1)) - 1\n\nw1 = 2 * np.random.random((1, 2)) - 1\nw2 = 2 * np.random.random((2, 2)) - 1\n\n# можем посмотреть \n\nprint(w1)\nprint(w2)", "[[-0.39533485 -0.70648822]]\n[[-0.81532281 -0.62747958]\n [-0.30887855 -0.20646505]]\n" ] ], [ [ "Но мало просто присвоить определенные веса нейронам. Необходимо также определить функцию по которой будет активироваться нейроны. Выбранная функция активация будет одинаковой для всех нейронов. С помощью нее можно определять с какой силой нужно подействовать на нейрон, чтобы он активировался и передал сигнал дальше.", "_____no_output_____" ], [ "![formula.png](attachment:formula.png)", "_____no_output_____" ], [ "На картинке выше показан нейрон в виде формулы. Активация его как говорилась зависит от входных данных, его весов и некоего порогого значения.\n\nФункций таких существует очень много - сигмоида, линейная, ступенчатая, ReLu, tahn и т. д. Нам скорее сейчас нужно понять их суть изложенную выше нежели чем научиться их выбирать. Отметим лишь следующее. Для учебных целей часто можно встретить сигмоиду. 
Для реальных задач — ReLu.", "_____no_output_____" ], [ "![formula2.png](attachment:formula2.png)", "_____no_output_____" ], [ "В данном фрагменте кода мы можем посмотреть как программно реализовать функцию сигмоид -", "_____no_output_____" ] ], [ [ "# вычисление сигмоид функции\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))", "_____no_output_____" ], [ "# для картинки\nD = 10\n\nX = np.linspace(0-D,0+D,20)\nY = sigmoid(X)\n\nplt.plot(X , Y ,'-g',label = 'сигмоид' )\nplt.legend()\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.show()", "_____no_output_____" ] ], [ [ "А вот, как например на Python, можно реализовать упомянутую выше функцию активации Relu - ", "_____no_output_____" ] ], [ [ "# вычисление Relu функции\n\ndef ReLU(x):\n return x * (x > 0)\n", "_____no_output_____" ], [ "# для картинки\nD = 10\n\nX = np.linspace(0-D,0+D,21)\nY = ReLU(X)\n\nplt.plot(X , Y ,'-g',label = 'ReLU' )\nplt.legend()\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.show()", "_____no_output_____" ] ], [ [ "## Обратное распространение ошибки и градиентный спуск", "_____no_output_____" ], [ "Давайте прежде чем начнем разбирать обратное распространение и градиентный спуск, скажем несколько слов о тех типах архитектур нейронной сети, которые будут использованы в данном уроке. Это простые нейронные сети. С точки зрения архитектуры их можно отнести к полносвязным нейронным сетям так как все нейроны связаны между собой. С другой стороны их можно отнести к нейронным сетям прямого распространения(feed forward). Сети прямого распространения подразумевает распространения сигналы от вход к выходу, в отличии от реккурентных нейронных сетей где во внутренних слоях сигналы могут ходить по циклу. О плюсах и минусах сетей полносвязных мы поговорим в уроке посвященном Сверточным нейронным сетям, поскольку данные нейронные сети отошли от этого подхода и нам нужно будет выяснить почему.\n\nПосле того как на выходе нейронной сети мы получили определенное цифровое значение нам нужно сравнить его с искомым. Мы можем посчитать насколько в количественом выражение ошиблась нейросеть. Задача обратного распространения ошибки пройтись от выхода ко входу и скорректировать веса нейронов. Это процесс происходит множество раз в процесс обучения.\n\nМожно сказать что процесс обучения нейронной сети это попытка оптимизировать веса \nнейронной сети до тех пор пока не будет достигнута минимальная степень ошибки. Для этого хорошо подходит такой алгоритм как градиентный спуск. Суть данного метода заключается в том, чтобы искать такие числовые параметры при которых значение ошибки достигнет нуля. Градиентным он называется потому что это процесс пошаговый, требующий одно вычисление за другим. Спуском он называется потому что значение ошибки должно быть как можно меньше.\n\nОбратите внимание на следующий график из него видно, что есть определенное число по шкале весов которому соотсвествует минимальное значение по шкале Error. 
Это число и нужно находить в процессе обучения нейронных сетей.", "_____no_output_____" ], [ "![formula3.png](attachment:formula3.png)", "_____no_output_____" ], [ "Давайте попробуем реализовать программного градиентный спуск, чтобы лучше понять как он работает.", "_____no_output_____" ] ], [ [ "''' \nИсходный код к уроку 1.\nДемонстрация работы градиентного спуска \n'''\n\n# первоначальное точка\nstart_point = 1\n\n# размер шага(learning rate)\nlearn_r = 0.01\n\n# установка первоначальной точности\nprecision = 0.0001\n\n# функция градиента для y = X**4 - 3 * X**3 \ngr_func = lambda x: 4 * x**3 - 9 * x**2\n\n# для картинки\nD = 1\n\nX = np.linspace(2.2-D,2.2+D,20)\nY = X**4 - 3 * X**3 \n\n# начальная точка\nnext_point = start_point\n\niter = 0 \n\nx = []\nx.append(next_point)", "_____no_output_____" ], [ "\nplt.figure(figsize=(16,2))\nplt.plot(X, Y ,'r',label = 'Y(X)' )\n\n# количество итерация \nn = 150\nfor i in range(n):\n current_point = next_point\n\n # движение в негативную сторону вычисляемого градиента\n next_point = current_point - learn_r*gr_func(current_point)\n x.append(next_point)\n # print(next_point) \n\n iter += 1\n\n # остановка когда достигнута необходимая степень точности\n print(f\"Итерация: {iter}\")\n print(f\"Текущая точка {current_point}| След-я точка {next_point}\")\n print(f\"Дистан-я между текущей точк. и след. {abs(current_point - next_point)}\")\n print(\"--------------------------------------------------------\")\n \n \n if(abs(current_point - next_point) <= precision):\n break\n\nprint(f\"минимум {next_point}, количество затраченных итераций: {iter}\") \nX_grad = np.array(x)\nplt.plot(X_grad , (X_grad **4 - 3 * X_grad **3) ,'-*g',label = 'GD' )\nplt.legend()\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.show()", "Итерация: 19\nТекущая точка 2.046459608850202| След-я точка 2.0805566701861915\nДистан-я между текущей точк. и след. 0.034097061335989665\n--------------------------------------------------------\nИтерация: 20\nТекущая точка 2.0805566701861915| След-я точка 2.109895552692658\nДистан-я между текущей точк. и след. 0.029338882506466657\n--------------------------------------------------------\nИтерация: 21\nТекущая точка 2.109895552692658| След-я точка 2.1348434430078296\nДистан-я между текущей точк. и след. 0.024947890315171417\n--------------------------------------------------------\nИтерация: 22\nТекущая точка 2.1348434430078296| След-я точка 2.155836743721791\nДистан-я между текущей точк. и след. 0.02099330071396155\n--------------------------------------------------------\nИтерация: 23\nТекущая точка 2.155836743721791| След-я точка 2.173342190492916\nДистан-я между текущей точк. и след. 0.017505446771124866\n--------------------------------------------------------\nИтерация: 24\nТекущая точка 2.173342190492916| След-я точка 2.1878256603002413\nДистан-я между текущей точк. и след. 0.014483469807325289\n--------------------------------------------------------\nИтерация: 25\nТекущая точка 2.1878256603002413| След-я точка 2.1997297611221267\nДистан-я между текущей точк. и след. 0.011904100821885422\n--------------------------------------------------------\nИтерация: 26\nТекущая точка 2.1997297611221267| След-я точка 2.209459688560492\nДистан-я между текущей точк. и след. 0.009729927438365316\n--------------------------------------------------------\nИтерация: 27\nТекущая точка 2.209459688560492| След-я точка 2.217375933741103\nДистан-я между текущей точк. и след. 
0.007916245180611181\n--------------------------------------------------------\nИтерация: 28\nТекущая точка 2.217375933741103| След-я точка 2.223792116723167\nДистан-я между текущей точк. и след. 0.006416182982063923\n--------------------------------------------------------\nИтерация: 29\nТекущая точка 2.223792116723167| След-я точка 2.2289762995591555\nДистан-я между текущей точк. и след. 0.005184182835988427\n--------------------------------------------------------\nИтерация: 30\nТекущая точка 2.2289762995591555| След-я точка 2.233154411317629\nДистан-я между текущей точк. и след. 0.004178111758473602\n--------------------------------------------------------\nИтерация: 31\nТекущая точка 2.233154411317629| След-я точка 2.2365147549448667\nДистан-я между текущей точк. и след. 0.003360343627237583\n--------------------------------------------------------\nИтерация: 32\nТекущая точка 2.2365147549448667| След-я точка 2.2392128818310377\nДистан-я между текущей точк. и след. 0.002698126886170993\n--------------------------------------------------------\nИтерация: 33\nТекущая точка 2.2392128818310377| След-я точка 2.241376378323333\nДистан-я между текущей точк. и след. 0.0021634964922951916\n--------------------------------------------------------\nИтерация: 34\nТекущая точка 2.241376378323333| След-я точка 2.2431093013321735\nДистан-я между текущей точк. и след. 0.0017329230088405367\n--------------------------------------------------------\nИтерация: 35\nТекущая точка 2.2431093013321735| След-я точка 2.244496134188636\nДистан-я между текущей точк. и след. 0.001386832856462572\n--------------------------------------------------------\nИтерация: 36\nТекущая точка 2.244496134188636| След-я точка 2.2456052210274837\nДистан-я между текущей точк. и след. 0.0011090868388476949\n--------------------------------------------------------\nИтерация: 37\nТекущая точка 2.2456052210274837| След-я точка 2.246491690629864\nДистан-я между текущей точк. и след. 0.0008864696023804797\n--------------------------------------------------------\nИтерация: 38\nТекущая точка 2.246491690629864| След-я точка 2.247199909522326\nДистан-я между текущей точк. и след. 0.0007082188924618649\n--------------------------------------------------------\nИтерация: 39\nТекущая точка 2.247199909522326| След-я точка 2.247765517431017\nДистан-я между текущей точк. и след. 0.0005656079086908683\n--------------------------------------------------------\nИтерация: 40\nТекущая точка 2.247765517431017| След-я точка 2.248217101873276\nДистан-я между текущей точк. и след. 0.00045158444225901206\n--------------------------------------------------------\nИтерация: 41\nТекущая точка 2.248217101873276| След-я точка 2.2485775668\nДистан-я между текущей точк. и след. 0.0003604649267239246\n--------------------------------------------------------\nИтерация: 42\nТекущая точка 2.2485775668| След-я точка 2.2488652454412037\nДистан-я между текущей точк. и след. 0.0002876786412038257\n--------------------------------------------------------\nИтерация: 43\nТекущая точка 2.2488652454412037| След-я точка 2.249094801517584\nДистан-я между текущей точк. и след. 0.00022955607638008857\n--------------------------------------------------------\nИтерация: 44\nТекущая точка 2.249094801517584| След-я точка 2.2492779567507686\nДистан-я между текущей точк. и след. 0.0001831552331847952\n--------------------------------------------------------\nИтерация: 45\nТекущая точка 2.2492779567507686| След-я точка 2.2494240766814335\nДистан-я между текущей точк. и след. 
0.00014611993066493412\n--------------------------------------------------------\nИтерация: 46\nТекущая точка 2.2494240766814335| След-я точка 2.249540641457304\nДистан-я между текущей точк. и след. 0.00011656477587029812\n--------------------------------------------------------\nИтерация: 47\nТекущая точка 2.249540641457304| След-я точка 2.249633623584228\nДистан-я между текущей точк. и след. 9.29821269242126e-05\n--------------------------------------------------------\nминимум 2.249633623584228, количество затраченных итераций: 47\n" ] ], [ [ "Метод обратного распрасранения (backpropogation) самый популярный способ обучения нейронных сетей, однако у него есть несколько альтернатив - Метод упругого распространения (Resilient propagation или Rprop) и генетический алгоритм (Genetic Algorithm). Rprop для корректировки весов и смещений использует знак градиента, а не его значение, а генетический алгоритм для задач оптимизации и моделирования использует случайный подбор.\n", "_____no_output_____" ], [ "## Небольшой пример по обучению простой нейронной сети\n\n\nРассмотрим создание простой нейронной сети. Данная нейронная сеть будет обучаться предсказывать 4 число на основании первых трех. Для этого мы ей передадим обучающую набор данных из трех последовательностей чисел. В каждой последовательности чисел будет три первых числа в качетсве входных данных и 4 число которое следуюет за этими данными.\nПосле обучения нейронной сети мы просим пользователя ввести 3 числа и программа выдаст 4 число в качестве предсказания.\n\nВ данном примере будут использоваться следующие обучающие последовательсноти:\n\nДанные 1| 0\t 0\t1\t 0\n\nДанные 2| 1\t 1\t1\t 1\n\nДанные 3| 1\t 0\t1\t 1\n\nДанные 4| 0\t 1\t1\t 0\n\n\nЗдесь можно заметить, что четвертое число всегда соотвествует первому. Эту закономерность и должна благодаря коду ниже научиться обнаруживать нейронная сеть, а затем на произвольных данных введенных пользователем выдать правильное предсказание.\nЭтот пример позволяет создать и запустить простейшую нейронную сеть, но уже в этом примере присутсвуют все необходимые атрибуты создания нейронных сетей: подготовка данных для обучения модели, конфигурация модели, запуск модели.\nДальше мы разберем, что такое функция активации, обратное распространение, внутренние слои нейронной сети и прочие аспекты создания нейронных сетей.\nОбратите внимание, что здесь используется библиотека numpy для получения дополнительных команд при работе с массивами. 
Более подробно данную библиотеку мы рассмотрим позже.", "_____no_output_____" ] ], [ [ "'''\nИсходный код к уроку 1.\nПример простой нейронной сети на numpy\n'''\n\nimport numpy as np\n\n# генерации случайных чисел для инициализации весов\nnp.random.seed(1)\nsynaptic_weights = 2 * np.random.random((3, 1)) - 1\n\n\n# вычисление сигмоид функции\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n# вычисление производной от сигмоид функции\ndef sigm_deriv(x):\n return x * (1 - x)\n \n# вычисление Relu функции\n\ndef ReLU(x):\n return x * (x > 0)\n# вычисление производной от Relu функции\ndef relu_deriv(x):\n return x>0", "_____no_output_____" ], [ "# для картинки\nD = 10\n\nX = np.linspace(0-D,0+D,210)\nY = sigmoid(X)\ndY = sigm_deriv(sigmoid(X))\n\nplt.figure(figsize =(16,4))\nplt.subplot(1,2,1)\nplt.plot(X , Y ,'-g',label = 'сигмоид' )\nplt.plot(X , dY ,'-r',label = 'd сигмоид' )\nplt.grid('On')\nplt.legend()\nplt.xlabel('X')\nplt.ylabel('Y')\n\nYr = ReLU(X)\ndYr = relu_deriv(ReLU(X))\nplt.subplot(1,2,2)\nplt.plot(X , Yr ,'-g',label = 'ReLU' )\nplt.plot(X , dYr ,'-r',label = 'd ReLU' )\nplt.legend()\nplt.grid('On')\nplt.xlabel('X')\nplt.ylabel('Yr')\nplt.show()", "_____no_output_____" ], [ "# тренировка нейронной сети\ndef train_nn(training_inputs, training_outputs, training_iterations):\n global synaptic_weights\n for iteration in range(training_iterations):\n # перекачивание данных через нейрон\n output = run_nn(training_inputs)\n\n # вычисление ошибки через обратное распространение back-propagation\n error = training_outputs - output\n \n # выполнение корректировки весов\n adjustments = np.dot(training_inputs.T, error * sigm_deriv(output))\n\n synaptic_weights += adjustments\n\n\n# пропускание входных данных через нейрон и получение предсказания\n# конвертация значений во floats\ndef run_nn(inputs):\n global synaptic_weights\n inputs = inputs.astype(float)\n output = sigmoid(np.dot(inputs, synaptic_weights))\n return output", "_____no_output_____" ], [ "# создание данных для обучения\ntraining_inputs = np.array([[0,0,1], [1,1,1], [1,0,1], [0,1,1]])\ntraining_outputs = np.array([[0,1,1,0]]).T\n\n# запуск тренировки нейронной сети \ntrain_nn(training_inputs, training_outputs, 10000)\nprint(\"веса после завершения обучения: \")\nprint(synaptic_weights)\n\n# получение трех чисел от пользователя\nuser_inp1 = str(input(\"Первое число(0 или 1): \"))\nuser_inp2 = str(input(\"Второе число(0 или 1): \"))\nuser_inp3 = str(input(\"Третье число(0 или 1): \"))\n\nprint(f\"Проверка на новых данных: {user_inp1} {user_inp2} {user_inp3}\")\nprint(\"Предсказание нейронной сети: \")\nprint(run_nn(np.array([user_inp1, user_inp2, user_inp3])))\n", "веса после завершения обучения: \n[[10.38040701]\n [-0.20641179]\n [-4.98452047]]\nПервое число(0 или 1): 1\nВторое число(0 или 1): 1\nТретье число(0 или 1): 1\nПроверка на новых данных: 1 1 1\nПредсказание нейронной сети: \n[0.99445597]\n" ], [ "print(run_nn(np.array([0,0,0])))", "[0.5]\n" ] ], [ [ "## Пример построения двухслойной нейронной сети на numpy", "_____no_output_____" ] ], [ [ "'''\nИсходный код к уроку 1.\nПостроение двухслойной нейронный сети для классификации цветков ириса\n'''\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# sklearn здесь только, чтобы разделить выборку на тренировочную и тестовую\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "### Шаг 1. 
Определение функций, которые понадобяться для обучения\n# преобразование массива в бинарный вид результатов\ndef to_one_hot(Y):\n n_col = np.amax(Y) + 1\n binarized = np.zeros((len(Y), n_col))\n for i in range(len(Y)):\n binarized[i, Y[i]] = 1.\n return binarized\n\n# преобразование массива в необходимый вид\ndef from_one_hot(Y):\n arr = np.zeros((len(Y), 1))\n\n for i in range(len(Y)):\n l = Y[i]\n for j in range(len(l)):\n if(l[j] == 1):\n arr[i] = j+1\n return arr\n\n# сигмоида и ее производная\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\ndef sigmoid_deriv(x):\n return (x)*(1 - (x))\n\n# нормализация массива\ndef normalize(X, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(X, order, axis))\n l2[l2 == 0] = 1\n return X / np.expand_dims(l2, axis)", "_____no_output_____" ], [ "\n### Шаг 2. Подготовка тренировочных данных\n# получения данных из csv файла. укажите здесь путь к файлу Iris.csv\niris_data = pd.read_csv(\"Iris.csv\")\n# print(iris_data.head()) # расскоментируйте, чтобы посмотреть структуру данных\n\n# репрезентация данных в виде графиков\ng = sns.pairplot(iris_data.drop(\"Id\", axis=1), hue=\"Species\")\nplt.show() # расскоментируйте, чтобы посмотреть\n\n# замена текстовых значений на цифровые\niris_data['Species'].replace(['Iris-setosa', 'Iris-virginica', 'Iris-versicolor'], [0, 1, 2], inplace=True)\n\n# формирование входных данных\ncolumns = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']\nx = pd.DataFrame(iris_data, columns=columns)\n#x = normalize(x.as_matrix())\nx = normalize(x.values)\n\n# формирование выходных данных(результатов)\ncolumns = ['Species']\ny = pd.DataFrame(iris_data, columns=columns)\n#y = y.as_matrix()\ny = y.values\ny = y.flatten()\ny = to_one_hot(y)\n\n# Разделение данных на тренировочные и тестовые\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33)", "_____no_output_____" ], [ "y", "_____no_output_____" ], [ "def neural_network(neuron_numb, l_r, epoches):\n \n w0 = 2*np.random.random((4, neuron_numb)) - 1 # для входного слоя - 4 входа, 3 выхода\n w1 = 2*np.random.random((neuron_numb, 3)) - 1 # для внутреннего слоя - 5 входов, 3 выхода\n \n errors = []\n \n for i in range(epoches):\n\n # прямое распространение(feed forward)\n layer0 = X_train\n layer1 = sigmoid(np.dot(layer0, w0))\n layer2 = sigmoid(np.dot(layer1, w1))\n \n # обратное распространение(back propagation) с использованием градиентного спуска\n layer2_error = y_train - layer2 # производная функции потерь = производная квадратичных потерь \n layer2_delta = layer2_error * sigmoid_deriv(layer2)\n \n layer1_error = layer2_delta.dot(w1.T)\n layer1_delta = layer1_error * sigmoid_deriv(layer1)\n \n w1 += layer1.T.dot(layer2_delta) * l_r\n w0 += layer0.T.dot(layer1_delta) * l_r\n \n # метрика модели\n error = np.mean(np.abs(layer2_error))\n errors.append(error)\n accuracy = (1 - error) * 100\n \n# plt.figure(figsize = (16,5))\n# plt.plot(errors)\n# plt.xlabel('Обучение')\n# plt.ylabel('Ошибка')\n# plt.show() # расскоментируйте, чтобы посмотреть \n \n# N = 50\n# plt.figure(figsize = (16,5))\n# plt.plot(layer2[:N,1], 'r',label = 'Y new')\n# plt.plot(y_train[:N,1],'g', label = 'Y train')\n# plt.xlabel('№ примера')\n# plt.ylabel('выход сети и целевой')\n# plt.legend( )\n# plt.show() # расскоментируйте, чтобы посмотреть \n \n print(\"Аккуратность нейронной сети \" + str(round(accuracy,2)) + \"%\")\n \n# return round(accuracy,2)\n\n # прямое распространение(feed forward)\n layer0_t = X_test\n layer1_t = sigmoid(np.dot(layer0_t, w0))\n layer2_t = 
sigmoid(np.dot(layer1_t, w1))\n layer2_error_t = y_test - layer2_t\n \n \n# N = 50\n# plt.figure(figsize = (16,5))\n# plt.plot(layer2_t[:N,1], 'r',label = 'Y new')\n# plt.plot(y_test[:N,1],'g', label = 'Y train')\n# plt.xlabel('№ примера')\n# plt.ylabel('выход сети и целевой')\n# plt.legend( )\n# plt.show() # расскоментируйте, чтобы посмотреть\n \n # метрика модели\n error_t = np.mean(np.abs(layer2_error_t))\n accuracy_t = (1 - error_t) * 100\n print(\"Аккуратность нейронной сети на тесте \" + str(round(accuracy_t,2)) + \"%\")", "_____no_output_____" ], [ "neural_network(neuron_numb=5, l_r=0.1, epoches=3000)", "Аккуратность нейронной сети 95.43%\nАккуратность нейронной сети на тесте 98.11%\n" ], [ "neural_network(neuron_numb=10, l_r=0.1, epoches=3000)", "Аккуратность нейронной сети 95.31%\nАккуратность нейронной сети на тесте 98.49%\n" ], [ "neural_network(neuron_numb=50, l_r=0.1, epoches=3000)", "Аккуратность нейронной сети 95.28%\nАккуратность нейронной сети на тесте 98.45%\n" ], [ "neural_network(neuron_numb=7, l_r=0.4, epoches=3000)", "Аккуратность нейронной сети 95.8%\nАккуратность нейронной сети на тесте 98.32%\n" ], [ "neural_network(neuron_numb=5, l_r=1, epoches=3000)", "Аккуратность нейронной сети 76.53%\nАккуратность нейронной сети на тесте 79.87%\n" ], [ "neural_network(neuron_numb=5, l_r=5, epoches=3000)", "Аккуратность нейронной сети 66.67%\nАккуратность нейронной сети на тесте 66.67%\n" ], [ "neural_network(neuron_numb=5, l_r=1, epoches=300)", "Аккуратность нейронной сети 76.43%\nАккуратность нейронной сети на тесте 82.29%\n" ], [ "neural_network(neuron_numb=5, l_r=1, epoches=30000)", "Аккуратность нейронной сети 96.28%\nАккуратность нейронной сети на тесте 99.19%\n" ] ], [ [ "### Вывод:\nДля улучшения точности нейронной сети нужно выбирать адекватные параметры, для этой модели значения в 5 скрытых нейронов, шаг обучения 1 и самое большое количество эпох (30000) показало лучший результат.\n\nКоличество скрытых нейронов должно лежать в промежутке от 3 до 7. При увеличении числа нейронов точность не сильно изменяется, однако, параметров становится на порядки больше, что ведет к увеличению затрат мощности.\n\nСкорость обучения показала хорошие результаты в рамках [0.1; 1]. При увеличении скорости обучения алгоритм не сходится в самой нижней точке на поверхности ошибок, что не позволяет дойти до минимальной ошибки.\n\nС каждой эпохой величина весов усиливается, что дает более точное предсказание. При очень больших количествах эпох сеть может переобучится, выкручивая в максимум и минимум веса, что неоднозначно скажется на сложных задачах.", "_____no_output_____" ] ], [ [ "### Шаг 3. 
Обученние нейронной сети\n\n# определим число нейронов скрытого слоя\nneuron_numb = 5\n\n\n# присваевание случайных весов\n\n\n\nw0 = 2*np.random.random((4, neuron_numb)) - 1 # для входного слоя - 4 входа, 3 выхода\nw1 = 2*np.random.random((neuron_numb, 3)) - 1 # для внутреннего слоя - 5 входов, 3 выхода\n\n# скорость обучения (learning rate)\nn = 0.1\n\n# массив для ошибок, чтобы потом построить график\nerrors = []\n\n# процесс обучения\nfor i in range(3000):\n\n # прямое распространение(feed forward)\n layer0 = X_train\n layer1 = sigmoid(np.dot(layer0, w0))\n layer2 = sigmoid(np.dot(layer1, w1))\n\n # обратное распространение(back propagation) с использованием градиентного спуска\n layer2_error = y_train - layer2 # производная функции потерь = производная квадратичных потерь \n layer2_delta = layer2_error * sigmoid_deriv(layer2)\n \n layer1_error = layer2_delta.dot(w1.T)\n layer1_delta = layer1_error * sigmoid_deriv(layer1)\n \n w1 += layer1.T.dot(layer2_delta) * n\n w0 += layer0.T.dot(layer1_delta) * n\n # метрика модели\n error = np.mean(np.abs(layer2_error))\n errors.append(error)\n accuracy = (1 - error) * 100\n\n\n### Шаг 4. Демонстрация полученных результатов\n# черчение диаграммы точности в зависимости от обучения\nplt.figure(figsize = (16,5))\nplt.plot(errors)\nplt.xlabel('Обучение')\nplt.ylabel('Ошибка')\nplt.show() # расскоментируйте, чтобы посмотреть \n\nN = 50\nplt.figure(figsize = (16,5))\nplt.plot(layer2[:N,1], 'r',label = 'Y new')\nplt.plot(y_train[:N,1],'g', label = 'Y train')\nplt.xlabel('№ примера')\nplt.ylabel('выход сети и целевой')\nplt.legend( )\nplt.show() # расскоментируйте, чтобы посмотреть \n \nprint(\"Аккуратность нейронной сети \" + str(round(accuracy,2)) + \"%\")\n\n", "_____no_output_____" ], [ "# прямое распространение(feed forward)\nlayer0_t = X_test\nlayer1_t = sigmoid(np.dot(layer0_t, w0))\nlayer2_t = sigmoid(np.dot(layer1_t, w1))\nlayer2_error_t = y_test - layer2_t\n \n \nN = 50\nplt.figure(figsize = (16,5))\nplt.plot(layer2_t[:N,1], 'r',label = 'Y new')\nplt.plot(y_test[:N,1],'g', label = 'Y train')\nplt.xlabel('№ примера')\nplt.ylabel('выход сети и целевой')\nplt.legend( )\nplt.show() # расскоментируйте, чтобы посмотреть\n\n# метрика модели\nerror_t = np.mean(np.abs(layer2_error_t))\naccuracy_t = (1 - error_t) * 100\nprint(\"Аккуратность нейронной сети на тесте \" + str(round(accuracy_t,2)) + \"%\")", "_____no_output_____" ] ], [ [ "В этом уроке мы с вами рассматривали как сделать простые нейронные сети без использования специальных фреймворков и библиотек для этого. В следующих уроках мы с вами познакомимся как делать нейронные сети с помощью Keras и TensorFlow.", "_____no_output_____" ], [ "## Домашнее задание\n\n1. Попробуйте видоизменить параметры разобранной на уроке двухслойной нейронной сети таким образом, чтобы улучшить ее точность (число нейронов, число эпох , можно изменять число слоев).\n2. Проведите анализ — что приводит к ухудшению точности нейронной сети? Что приводит к увеличению ее точности?", "_____no_output_____" ], [ "Мы разобрались с основами обучения нейронных сетей и получили некоторое представление об архитектурах простых нейронных сетей. Давайте попробуем закрепить эти знания на практике. Кроме того на примере который будет изложен ниже возможно проясняться какие-либо оставшиеся вопросы.\n\nВ данном примере мы сделаем нейронную сеть которая будет отличать различные виды ириса между собой. Надо полагать данный датасет вам уже знаком. 
The logic of this code will be the same as in the first worked example, only all of its components will be somewhat more complex.", "_____no_output_____" ], [ "## Additional materials\n\n<ol>\n <li>https://medium.com/topic/machine-learning</li>\n</ol>", "_____no_output_____" ], [ "## References\n\nThe following resources were used to prepare this teaching material:\n<ol>\n <li>Deep Learning - S. I. Nikolenko, Kadurin, 2018</li>\n <li>N. Shukla - Machine Learning and TensorFlow, 2019</li>\n <li>Asifullah Khan, Anabia Sohail, Umme Zahoora, Aqsa Saeed Qureshi - A Survey of the Recent Architectures of Deep Convolutional Neural Networks 2019</li>\n <li>A direct adaptive method for faster backpropagation learning: the RPROP algorithm - Neural Networks, 1993</li>\n <li>Wikipedia</li>\n \n</ol>", "_____no_output_____" ] ] ]
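As a rough illustration of the homework above, here is a minimal sketch of a hyperparameter sweep. It assumes the `neural_network(neuron_numb, l_r, epoches)` helper defined earlier in this notebook is modified to return its test accuracy instead of only printing it; that return value is an assumption, not part of the original code.

```python
# Minimal hyperparameter sweep for the homework (illustrative sketch).
# Assumption: neural_network(...) has been changed to return the test accuracy.
results = []
for neuron_numb in (3, 5, 7):
    for l_r in (0.1, 0.5, 1):
        for epoches in (300, 3000, 30000):
            acc = neural_network(neuron_numb=neuron_numb, l_r=l_r, epoches=epoches)
            results.append((acc, neuron_numb, l_r, epoches))

# Show the five best configurations, most accurate first
for acc, neuron_numb, l_r, epoches in sorted(results, reverse=True)[:5]:
    print(f"accuracy={acc:.2f}%  neurons={neuron_numb}  lr={l_r}  epochs={epoches}")
```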
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a40b3d5e4460d2e3e69da67c3552025e540ca23
7,018
ipynb
Jupyter Notebook
Anshoo/LiveModelTesting.ipynb
GrayFlash/TUNEX
9ef0f592eda0dea447b823f9e02e819bb0bf4d01
[ "MIT" ]
7
2020-11-21T03:39:47.000Z
2022-02-14T16:59:44.000Z
Anshoo/LiveModelTesting.ipynb
GrayFlash/TUNEX
9ef0f592eda0dea447b823f9e02e819bb0bf4d01
[ "MIT" ]
null
null
null
Anshoo/LiveModelTesting.ipynb
GrayFlash/TUNEX
9ef0f592eda0dea447b823f9e02e819bb0bf4d01
[ "MIT" ]
3
2021-01-02T19:19:02.000Z
2022-02-13T17:20:51.000Z
28.184739
117
0.475064
[ [ [ "import numpy as np\nimport cv2\nimport tensorflow as tf\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nmodel = tf.keras.models.load_model(\"/home/d3adsh0t/Tunex/8\")\n# EMOTIONS = [\"angry\" ,\"disgust\",\"scared\", \"happy\", \"sad\", \"surprised\",\"neutral\"]\n# EMOTIONS=[\"angry\",\n# \"disgust\",\n# \"happy\",\n# \"neutral\",\n# \"sad\",\n# \"surprise\"]\nEMOTIONS = [\"afraid\",\"angry\",\"disgust\",\"happy\",\"neutral\",\"sad\",\"surprised\"]", "_____no_output_____" ], [ "def prepare(ima):\n IMG_SIZE = 48 # image size\n img_array = cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)\n img_array=img_array/255.0 \n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing\n return new_array.reshape(-1,IMG_SIZE, IMG_SIZE,1)", "_____no_output_____" ] ], [ [ "# Static Test", "_____no_output_____" ] ], [ [ "image=cv2.imread(\"afraid.jpeg\")\n# faces = face_cascade.detectMultiScale(image, 1.3, 5)\n# faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]\n# (x,y,w,h)=faces\n# roi = image[y-40:y+h+40, x:x+w]\nprediction = model.predict([prepare(image)])\npreds = prediction[0]\nlabel = EMOTIONS[preds.argmax()]\nprint(label)\n# image = cv2.rectangle(image,(x,y-40),(x+w,y+h+40),(255,0,0),2)\ncv2.imshow(\"image\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "afraid\n" ] ], [ [ "# Live Test", "_____no_output_____" ] ], [ [ "# cap=cv2.VideoCapture(\"test3.mp4\")\ncap=cv2.VideoCapture(0)\n# result = cv2.VideoWriter('1testface.avi',cv2.VideoWriter_fourcc(*'MJPG'), 30, (540, 960)) \nwhile True:\n ret, img=cap.read()\n# print(img.shape)\n# img = cv2.resize(img, (540, 960))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n canvas = np.zeros((256,256,3), dtype=\"uint8\")\n frameclone=img\n try:\n faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]\n (x,y,w,h)=faces\n img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi = img[y:y+h, x:x+w]\n cv2.imshow('img2',roi)\n prediction = (model.predict([prepare(roi)]))\n preds = prediction[0]\n label = EMOTIONS[preds.argmax()]\n for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):\n text = \"{}: {:.2f}%\".format(emotion, prob*100)\n w = int(prob*300)\n cv2.rectangle(canvas, (7, (i*35)+5), (w, (i*35)+35),(0,0,255), -1)\n cv2.putText(canvas, text, (10, (i*35) +23), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255,255,255), 2)\n cv2.imshow(\"Probabilities\", canvas)\n \n cv2.putText(img,label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n result.write(img)\n except:\n pass\n cv2.imshow('img',img)\n cv2.waitKey(1)\n if cv2.waitKey(1) & cv2.waitKey(1) == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()", "_____no_output_____" ] ], [ [ "# Test on static Validation data", "_____no_output_____" ] ], [ [ "\nfor j in range(0,7):\n right_count=0\n wrong_count=0\n for i in range(1,50):\n# try:\n img=cv2.imread(\"/home/arjun/DM/Face/validation/\"+str(j)+\"/\"+str(i)+\".jpg\")\n # cv2.imshow(\"image\",img)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n# faces = face_cascade.detectMultiScale(img, 1.3, 5)\n# print(faces)\n# faces = sorted(faces, reverse=True, key = lambda x: (x[2]-x[0]) *(x[3]-x[1]))[0]\n# (x,y,w,h)=faces\n# roi = image[y-20:y+h, x:x+w]\n pr=model.predict([prepare(img)])\n preds=pr[0]\n label = EMOTIONS[preds.argmax()]\n if(label==EMOTIONS[j]):\n right_count+=1\n else:\n wrong_count+=1\n# except:\n# pass\n print(EMOTIONS[j])\n print(\"Right 
\"+str(right_count)+\" Wrong \"+str(wrong_count))", "angry\nRight 25 Wrong 24\ndisgust\nRight 33 Wrong 16\nfear\nRight 18 Wrong 31\nhappy\nRight 36 Wrong 13\nneutral\nRight 33 Wrong 16\nsad\nRight 31 Wrong 18\nsurprise\nRight 40 Wrong 9\n" ] ], [ [ "\n", "_____no_output_____" ] ], [ [ "angry\nRight 20 Wrong 29\ndisgust\nRight 30 Wrong 19\nfear\nRight 23 Wrong 26\nhappy\nRight 40 Wrong 9\nneutral\nRight 26 Wrong 23\nsad\nRight 32 Wrong 17\nsurprise\nRight 34 Wrong 15\n\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a40b8aa98a02d87675cbab4e9772d7e92bc11c7
585,253
ipynb
Jupyter Notebook
teachopencadd/talktorials/T005_compound_clustering/talktorial.ipynb
jesperswillem/teachopencadd_a2a
cbc7f3e076bc000ec1d34e53c50fbffe8698c78a
[ "CC-BY-4.0" ]
null
null
null
teachopencadd/talktorials/T005_compound_clustering/talktorial.ipynb
jesperswillem/teachopencadd_a2a
cbc7f3e076bc000ec1d34e53c50fbffe8698c78a
[ "CC-BY-4.0" ]
null
null
null
teachopencadd/talktorials/T005_compound_clustering/talktorial.ipynb
jesperswillem/teachopencadd_a2a
cbc7f3e076bc000ec1d34e53c50fbffe8698c78a
[ "CC-BY-4.0" ]
1
2021-11-08T11:55:34.000Z
2021-11-08T11:55:34.000Z
395.976319
131,792
0.936906
[ [ [ "# T005 · Compound clustering\n\nAuthors:\n\n- Gizem Spriewald, CADD Seminar, 2017, Charité/FU Berlin\n- Calvinna Caswara, CADD Seminar, 2018, Charité/FU Berlin\n- Jaime Rodríguez-Guerra, 2019-2020, [Volkamer lab](https://volkamerlab.org), Charité", "_____no_output_____" ], [ "__Talktorial T005__: This talktorial is part of the TeachOpenCADD pipeline described in the [first TeachOpenCADD paper](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0351-x), comprising of talktorials T001-T010.", "_____no_output_____" ], [ "## Aim of this talktorial\n\n<!-- TODO: The wording of this paragraph is confusing -->\n\nSimilar compounds might bind to the same targets and show similar effects. \nBased on this similar property principle, compound similarity can be used to build chemical groups via clustering. \nFrom such a clustering, a diverse set of compounds can also be selected from a larger set of screening compounds for further experimental testing.", "_____no_output_____" ], [ "### Contents in _Theory_\n\n* Introduction to clustering and Jarvis-Patrick algorithm\n* Detailed explanation of Butina clustering\n* Picking diverse compounds", "_____no_output_____" ], [ "### Contents in _Practical_\n\n* Clustering with the Butina algorithm\n* Visualizing the clusters\n* Picking the final list of compounds\n* Bonus: analysis of run times", "_____no_output_____" ], [ "### References\n\n* Butina, D. Unsupervised Data Base Clustering Based on Daylight’s Fingerprint and Tanimoto Similarity: A Fast and Automated Way To Cluster Small and Large Data Set. _J. Chem. Inf. Comput. Sci._ (1999)\n* Leach, Andrew R., Gillet, Valerie J. An Introduction to Chemoinformatics (2003)\n* [Jarvis-Patrick Clustering](http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm)\n* [TDT Tutorial](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb)\n* [RDKit clustering documentation](http://rdkit.org/docs/Cookbook.html#clustering-molecules)", "_____no_output_____" ], [ "## Theory", "_____no_output_____" ], [ "### Introduction to clustering and Jarvis-Patrick algorithm\n\n[Clustering](https://en.wikipedia.org/wiki/Cluster_analysis) can be defined as _the task of grouping a set of objects in such a way that objects in the same group (called a cluster) are more similar (in some sense) to each other than to those in other groups (clusters)_.\n\nCompound clustering in pharmaceutical research is often based on chemical or structural similarity between compounds to find groups that share properties as well as to design a diverse and representative set for further analysis. \n\nGeneral procedure: \n\n* Methods are based on clustering data by similarity between neighboring points. \n* In cheminformatics, compounds are often encoded as molecular fingerprints and similarity can be described by the Tanimoto similarity (see **Talktorial T004**).\n\n> Quick reminder:\n> \n> * Fingerprints are binary vectors where each bit indicates the presence or absence of a particular substructural fragment within a molecule. \n> * Similarity (or distance) matrix: The similarity between each pair of molecules represented by binary fingerprints is most frequently quantified using the Tanimoto coefficient, which measures the number of common features (bits). 
\n> * The value of the Tanimoto coefficient ranges from zero (no similarity) to one (high similarity).\n\nThere are a number of clustering algorithms available, with the [Jarvis-Patrick clustering](http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm) being one of the most widely used algorithms in the pharmaceutical context.\n\nJarvis-Patrick clustering algorithm is defined by two parameters $K$ and $K_{min}$:\n\n* Calculate the set of $K$ nearest neighbors for each molecule. \n* Two molecules cluster together if \n * they are in each others list of nearest neighbors\n * they have at least $K_{min}$ of their $K$ nearest neighbors in common.\n\nThe Jarvis-Patrick clustering algorithm is deterministic and able to deal with large sets of molecules in a matter of a few hours. However, a downside lies in the fact that this method tends to produce large heterogeneous clusters (see _Butina clustering_, referenced above). \n\nMore clustering algorithms can also be found in the [scikit-learn clustering module](http://scikit-learn.org/stable/modules/clustering.html).", "_____no_output_____" ], [ "### Detailed explanation of Butina clustering\n\nButina clustering ([*J. Chem. Inf. Model.* (1999), **39** (4), 747](https://pubs.acs.org/doi/abs/10.1021/ci9803381)) was developed to identify smaller but homogeneous clusters, with the prerequisite that (at least) the cluster centroid will be more similar than a given threshold to every other molecule in the cluster.\n\nThese are the key steps in this clustering approach (see flowchart below):\n\n#### 1. Data preparation and compound encoding\n\n* To identify chemical similarities, the compounds in the input data (e.g. given as SMILES) will be encoded as molecular fingerprints, e.g., RDK5 fingerprint which is a subgraph-based fingerprint similar to the well known [Daylight Fingerprint](http://www.daylight.com/dayhtml/doc/theory/theory.finger.html) (which was used in the original publication).\n\n\n#### 2. Tanimoto similarity (or distance) matrix\n\n* The similarity between two fingerprints is calculated using the Tanimoto coefficient.\n* Matrix with Tanimoto similarities between all possible molecule/fingerprint pairs ($n*n$ similarity matrix with $n$=number of molecules, upper triangle matrix used only).\n* Equally, the distances matrix can be calculated ($1 - similarity$).\n\n#### 3. Clustering molecules: Centroids and exclusion spheres \n\n> Note: Molecules will be clustered together, if they have a maximum distance below a specified cut-off from the cluster centroid (if distance matrix is used) or if they have a minimum similarity above the specified cut-off (if similarity matrix is used). \n\n* **Identification of potential cluster centroids**\n * The cluster centroid is the molecule within a given cluster which has the largest number of neighbors.\n * Annotate neighbors: For each molecule count all molecules with a Tanimoto distance below a given threshold.\n * Sort the molecules by their number of neighbors in descending order, so that potential cluster centroids (i.e. the compounds with the largest number of neighbors) are placed at the top of the file. 
\n\n* **Clustering based on the exclusion spheres**\n * Starting with the first molecule (centroid) in the sorted list.\n * All molecules with a Tanimoto index above or equal to the cut-off value used for clustering then become members of that cluster (in case of similarity).\n * Each molecule that has been identified as a member of the given cluster is flagged and removed from further comparisons. Thus, flagged molecules cannot become either another cluster centroid or a member of another cluster. This process is like putting an exclusion sphere around the newly formed cluster.\n * Once the first compound in the list has found all its neighbors, the first available (i.e. not flagged) compound at the top of the list becomes the new cluster centroid.\n * The same process is repeated for all other unflagged molecules down the list.\n * Molecules that have not been flagged by the end of the clustering process become singletons.\n * Note that some molecules assigned as singletons can have neighbors at the given Tanimoto similarity index, but those neighbors have been excluded by a stronger cluster centroid.", "_____no_output_____" ] ], [ [ "from IPython.display import IFrame\n\nIFrame(\"images/butina_full.pdf\", width=800, height=500)", "_____no_output_____" ] ], [ [ "*Figure 1:* Theoretical example of the Butina clustering algorithm, drawn by Calvinna Caswara.", "_____no_output_____" ], [ "### Picking diverse compounds\n\nFinding representative sets of compounds is a concept often used in pharmaceutical industry.\n\n* Let's say, we applied a virtual screening campaign but only have a limited amount of resources to experimentally test a few compounds in a confirmatory assay. \n* In order to obtain as much information as possible from this screen, we want to select a diverse set. Thus, we pick one representative of each chemical series in our list of potentially active compounds.\n\nAnother scenario would be to select one series to gain information about the structure-activity relationship; i.e., how small structural changes in the molecule affect the _in vitro_ activity.", "_____no_output_____" ], [ "## Practical", "_____no_output_____" ], [ "### Clustering with the Butina algorithm\n\nApplication is following the example of the [TDT tutorial notebook by S. Riniker and G. Landrum](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb).", "_____no_output_____" ], [ "#### Load data and calculate fingerprints\nIn this part the data is prepared and fingerprints are calculated.", "_____no_output_____" ] ], [ [ "import time\nimport random\nfrom pathlib import Path\n\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\nfrom rdkit import Chem\nfrom rdkit import DataStructs\nfrom rdkit.ML.Cluster import Butina\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem import rdFingerprintGenerator\n\nfrom teachopencadd.utils import seed_everything\n\nseed_everything() # fix seed to get deterministic outputs", "_____no_output_____" ], [ "HERE = Path(_dh[-1])\nDATA = HERE / \"data\"", "_____no_output_____" ], [ "# Load and have a look into data\n# Filtered data taken from **Talktorial T002**\ncompound_df = pd.read_csv(\n HERE / \"../T002_compound_adme/data/A2A_compounds_lipinski.csv\",\n index_col=0,\n)\nprint(\"Dataframe shape:\", compound_df.shape)\ncompound_df.head()", "Dataframe shape: (3545, 10)\n" ], [ "# Create molecules from SMILES and store in array\ncompounds = []\n# .itertuples() returns a (index, column1, column2, ...) 
tuple per row\n# we don't need index so we use _ instead\n# note how we are slicing the dataframe to only the two columns we need now\nfor _, chembl_id, smiles in compound_df[[\"molecule_chembl_id\", \"smiles\"]].itertuples():\n compounds.append((Chem.MolFromSmiles(smiles), chembl_id))\ncompounds[:5]", "_____no_output_____" ], [ "# Create fingerprints for all molecules\nrdkit_gen = rdFingerprintGenerator.GetRDKitFPGenerator(maxPath=5)\nfingerprints = [rdkit_gen.GetFingerprint(mol) for mol, idx in compounds]\n\n# How many compounds/fingerprints do we have?\nprint(\"Number of compounds converted:\", len(fingerprints))\nprint(\"Fingerprint length per compound:\", len(fingerprints[0]))\n# NBVAL_CHECK_OUTPUT", "Number of compounds converted: 3545\nFingerprint length per compound: 2048\n" ] ], [ [ "#### Tanimoto similarity and distance matrix\nNow that we generated fingerprints, we move on to the next step: The identification of potential cluster centroids. For this, we define functions to calculate the Tanimoto similarity and distance matrix.", "_____no_output_____" ] ], [ [ "def tanimoto_distance_matrix(fp_list):\n \"\"\"Calculate distance matrix for fingerprint list\"\"\"\n dissimilarity_matrix = []\n # Notice how we are deliberately skipping the first and last items in the list\n # because we don't need to compare them against themselves\n for i in range(1, len(fp_list)):\n # Compare the current fingerprint against all the previous ones in the list\n similarities = DataStructs.BulkTanimotoSimilarity(fp_list[i], fp_list[:i])\n # Since we need a distance matrix, calculate 1-x for every element in similarity matrix\n dissimilarity_matrix.extend([1 - x for x in similarities])\n return dissimilarity_matrix", "_____no_output_____" ] ], [ [ "See also [[Rdkit-discuss] BulkTanimotoSimilarity](https://sourceforge.net/p/rdkit/mailman/rdkit-discuss/thread/663770d4-b809-c599-e379-31f57380a1d0%40gmail.com/#msg36335970).", "_____no_output_____" ] ], [ [ "# Example: Calculate single similarity of two fingerprints\n# NBVAL_CHECK_OUTPUT\nsim = DataStructs.TanimotoSimilarity(fingerprints[0], fingerprints[1])\nprint(f\"Tanimoto similarity: {sim:.2f}, distance: {1-sim:.2f}\")", "Tanimoto similarity: 0.93, distance: 0.07\n" ], [ "# Example: Calculate distance matrix (distance = 1-similarity)\ntanimoto_distance_matrix(fingerprints)[0:5]", "_____no_output_____" ], [ "# Side note: That looked like a list and not a matrix.\n# But it is a triangular similarity matrix in the form of a list\nn = len(fingerprints)\n\n# Calculate number of elements in triangular matrix via n*(n-1)/2\nelem_triangular_matr = (n * (n - 1)) / 2\nprint(\n f\"Elements in the triangular matrix ({elem_triangular_matr:.0f}) ==\",\n f\"tanimoto_distance_matrix(fingerprints) ({len(tanimoto_distance_matrix(fingerprints))})\",\n)\n# NBVAL_CHECK_OUTPUT", "Elements in the triangular matrix (6281740) == tanimoto_distance_matrix(fingerprints) (6281740)\n" ] ], [ [ "#### Clustering molecules: Centroids and exclusion spheres\nIn this part, we cluster the molecules and look at the results.", "_____no_output_____" ], [ "Define a clustering function.", "_____no_output_____" ] ], [ [ "def cluster_fingerprints(fingerprints, cutoff=0.2):\n \"\"\"Cluster fingerprints\n Parameters:\n fingerprints\n cutoff: threshold for the clustering\n \"\"\"\n # Calculate Tanimoto distance matrix\n distance_matrix = tanimoto_distance_matrix(fingerprints)\n # Now cluster the data with the implemented Butina algorithm:\n clusters = Butina.ClusterData(distance_matrix, 
len(fingerprints), cutoff, isDistData=True)\n clusters = sorted(clusters, key=len, reverse=True)\n return clusters", "_____no_output_____" ] ], [ [ "Cluster the molecules based on their fingerprint similarity.", "_____no_output_____" ] ], [ [ "# Run the clustering procedure for the dataset\nclusters = cluster_fingerprints(fingerprints, cutoff=0.3)\n\n# Give a short report about the numbers of clusters and their sizes\nnum_clust_g1 = sum(1 for c in clusters if len(c) == 1)\nnum_clust_g5 = sum(1 for c in clusters if len(c) > 5)\nnum_clust_g25 = sum(1 for c in clusters if len(c) > 25)\nnum_clust_g100 = sum(1 for c in clusters if len(c) > 100)\n\nprint(\"total # clusters: \", len(clusters))\nprint(\"# clusters with only 1 compound: \", num_clust_g1)\nprint(\"# clusters with >5 compounds: \", num_clust_g5)\nprint(\"# clusters with >25 compounds: \", num_clust_g25)\nprint(\"# clusters with >100 compounds: \", num_clust_g100)\n# NBVAL_CHECK_OUTPUT", "total # clusters: 456\n# clusters with only 1 compound: 206\n# clusters with >5 compounds: 120\n# clusters with >25 compounds: 32\n# clusters with >100 compounds: 2\n" ], [ "# Plot the size of the clusters\nfig, ax = plt.subplots(figsize=(15, 4))\nax.set_xlabel(\"Cluster index\")\nax.set_ylabel(\"Number of molecules\")\nax.bar(range(1, len(clusters) + 1), [len(c) for c in clusters], lw=5);", "_____no_output_____" ] ], [ [ "#### How to pick a reasonable cutoff?\nSince the clustering result depends on the threshold chosen by the user, we will have a closer look on the choice of a cutoff.", "_____no_output_____" ] ], [ [ "for cutoff in numpy.arange(0.0, 1.0, 0.2):\n clusters = cluster_fingerprints(fingerprints, cutoff=cutoff)\n fig, ax = plt.subplots(figsize=(15, 4))\n ax.set_title(f\"Threshold: {cutoff:3.1f}\")\n ax.set_xlabel(\"Cluster index\")\n ax.set_ylabel(\"Number of molecules\")\n ax.bar(range(1, len(clusters) + 1), [len(c) for c in clusters], lw=5)\n display(fig)", "_____no_output_____" ] ], [ [ "As you can see, the higher the threshold (distance cutoff), the more molecules are considered as similar and, therefore, clustered into less clusters.\nThe lower the threshold, the more small clusters and \"singletons\" appear.\n\n> The smaller the distance value cut-off, the more similar the compounds are required to be to belong to one cluster.\n\nLooking at the plots above, we decided to choose a distance threshold of `0.2`. 
There are not many singletons and the cluster sizes don't have an extreme but smooth distribution.", "_____no_output_____" ] ], [ [ "cutoff = 0.2\nclusters = cluster_fingerprints(fingerprints, cutoff=cutoff)\n\n# Plot the size of the clusters - save plot\nfig, ax = plt.subplots(figsize=(15, 4))\nax.set_xlabel(\"Cluster index\")\nax.set_ylabel(\"# molecules\")\nax.bar(range(1, len(clusters) + 1), [len(c) for c in clusters])\nax.set_title(f\"Threshold: {cutoff:3.1f}\")\nfig.savefig(\n DATA / f\"cluster_dist_cutoff_{cutoff:4.2f}.png\",\n dpi=300,\n bbox_inches=\"tight\",\n transparent=True,\n)\n\nprint(\n f\"Number of clusters: {len(clusters)} from {len(compounds)} molecules at distance cut-off {cutoff:.2f}\"\n)\nprint(\"Number of molecules in largest cluster:\", len(clusters[0]))\nprint(\n f\"Similarity between two random points in same cluster: {DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]], fingerprints[clusters[0][1]]):.2f}\"\n)\nprint(\n f\"Similarity between two random points in different cluster: {DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]], fingerprints[clusters[1][0]]):.2f}\"\n)", "Number of clusters: 770 from 3545 molecules at distance cut-off 0.20\nNumber of molecules in largest cluster: 118\nSimilarity between two random points in same cluster: 0.82\nSimilarity between two random points in different cluster: 0.78\n" ] ], [ [ "### Visualizing the clusters", "_____no_output_____" ], [ "#### 10 examples from largest cluster\n\nNow, let's have a closer look at the first 10 molecular structures of the first/largest clusters.", "_____no_output_____" ] ], [ [ "print(\"Ten molecules from largest cluster:\")\n# Draw molecules\nDraw.MolsToGridImage(\n [compounds[i][0] for i in clusters[0][:10]],\n legends=[compounds[i][1] for i in clusters[0][:10]],\n molsPerRow=5,\n)", "Ten molecules from largest cluster:\n" ], [ "# Save molecules from largest cluster so other talktorials can use it\nsdf_path = str(DATA / \"molecule_set_largest_cluster.sdf\")\nsdf = Chem.SDWriter(sdf_path)\nfor index in clusters[0]:\n mol, label = compounds[index]\n # Add label to metadata\n mol.SetProp(\"_Name\", label)\n sdf.write(mol)\nsdf.close()", "_____no_output_____" ] ], [ [ "#### 10 examples from second largest cluster", "_____no_output_____" ] ], [ [ "print(\"Ten molecules from second largest cluster:\")\n# Draw molecules\nDraw.MolsToGridImage(\n [compounds[i][0] for i in clusters[1][:10]],\n legends=[compounds[i][1] for i in clusters[1][:10]],\n molsPerRow=5,\n)", "Ten molecules from second largest cluster:\n" ] ], [ [ "The first ten molecules in the respective clusters look indeed similar to each other and many share a common scaffold (visually detected). 
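As a quick, optional check of that visual impression, the maximum common substructure of the molecules drawn above can be computed with RDKit's `rdFMCS` module. This is only a sketch that reuses the `compounds` and `clusters` objects defined earlier; it is not part of the original talktorial.

```python
from rdkit.Chem import rdFMCS

# Sketch: maximum common substructure (MCS) of the ten molecules shown above
cluster_mols = [compounds[i][0] for i in clusters[0][:10]]
mcs = rdFMCS.FindMCS(cluster_mols)
print("MCS SMARTS:", mcs.smartsString)
print("Atoms/bonds in MCS:", mcs.numAtoms, mcs.numBonds)
```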
\n\nSee **Talktorial T006** for more information on how to calculate the maximum common substructure (MCS) of a set of molecules.", "_____no_output_____" ], [ "#### Examples from first 10 clusters\n\nFor comparison, we have a look at the cluster centers of the first 10 clusters.", "_____no_output_____" ] ], [ [ "print(\"Ten molecules from first 10 clusters:\")\n# Draw molecules\nDraw.MolsToGridImage(\n [compounds[clusters[i][0]][0] for i in range(10)],\n legends=[compounds[clusters[i][0]][1] for i in range(10)],\n molsPerRow=5,\n)", "Ten molecules from first 10 clusters:\n" ] ], [ [ "Save cluster centers from first 3 clusters as SVG file.", "_____no_output_____" ] ], [ [ "# Generate image\nimg = Draw.MolsToGridImage(\n [compounds[clusters[i][0]][0] for i in range(0, 3)],\n legends=[f\"Cluster {i}\" for i in range(1, 4)],\n subImgSize=(200, 200),\n useSVG=True,\n)\n\n# Patch RAW svg data: convert non-transparent to transparent background and set font size\nmolsvg = img.data.replace(\"opacity:1.0\", \"opacity:0.0\").replace(\"12px\", \"20px\")\n\n# Save altered SVG data to file\nwith open(DATA / \"cluster_representatives.svg\", \"w\") as f:\n f.write(molsvg)", "_____no_output_____" ] ], [ [ "While still some similarity is visible, clearly, the centroids from the different clusters look more dissimilar then the compounds within one cluster.", "_____no_output_____" ], [ "#### Intra-cluster Tanimoto similarities\n\nWe can also have a look at the intra-cluster Tanimoto similarities.", "_____no_output_____" ] ], [ [ "def intra_tanimoto(fps_clusters):\n \"\"\"Function to compute Tanimoto similarity for all pairs of fingerprints in each cluster\"\"\"\n intra_similarity = []\n # Calculate intra similarity per cluster\n for cluster in fps_clusters:\n # Tanimoto distance matrix function converted to similarity matrix (1-distance)\n intra_similarity.append([1 - x for x in tanimoto_distance_matrix(cluster)])\n return intra_similarity", "_____no_output_____" ], [ "# Recompute fingerprints for 10 first clusters\nmol_fps_per_cluster = []\nfor cluster in clusters[:10]:\n mol_fps_per_cluster.append([rdkit_gen.GetFingerprint(compounds[i][0]) for i in cluster])\n\n# Compute intra-cluster similarity\nintra_sim = intra_tanimoto(mol_fps_per_cluster)", "_____no_output_____" ], [ "# Violin plot with intra-cluster similarity\n\nfig, ax = plt.subplots(figsize=(10, 5))\nindices = list(range(10))\nax.set_xlabel(\"Cluster index\")\nax.set_ylabel(\"Similarity\")\nax.set_xticks(indices)\nax.set_xticklabels(indices)\nax.set_yticks(numpy.arange(0.6, 1.0, 0.1))\nax.set_title(\"Intra-cluster Tanimoto similarity\", fontsize=13)\nr = ax.violinplot(intra_sim, indices, showmeans=True, showmedians=True, showextrema=False)\nr[\"cmeans\"].set_color(\"red\")\n# mean=red, median=blue", "_____no_output_____" ] ], [ [ "### Picking the final list of compounds\n\nIn the following, we are going to pick a final list of **max. 1000 compounds** as a **diverse** subset. \n\nFor this, we take the cluster centroid from each cluster (i.e. the first molecule of each cluster) and then for each cluster (starting with the largest one) we take the 10 molecules (or 50% if less than 10 molecules are left in the cluster) that are most similar to the centroid, until we have selected max. 1000 compounds. Thus, we have representatives of each cluster. \n\nAim of this compound picking is to ensure the diversity for a smaller set of compounds which are proposed for testing in a confirmatory assay. 
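For comparison only, and not the procedure used in this talktorial: a diverse subset can also be picked directly from the fingerprints with RDKit's MaxMin picker, which greedily selects maximally dissimilar compounds. This hedged sketch reuses the `fingerprints` and `compounds` lists from above.

```python
from rdkit.SimDivFilters.rdSimDivPickers import MaxMinPicker

# Sketch: pick 1000 mutually diverse compounds straight from the fingerprints
picker = MaxMinPicker()
picked_indices = picker.LazyBitVectorPick(fingerprints, len(fingerprints), 1000)
diverse_subset = [compounds[i] for i in picked_indices]
print("Picked", len(diverse_subset), "diverse compounds")
```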
\n\n> Picking procedure was adapted from [TDT tutorial notebook by S. Riniker and G. Landrum](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb). \n\nAs described there: the idea behind this approach is to ensure diversity (representatives of each cluster) while getting some SAR (structure-activity relationship) from the results of the confirmatory assay (groups of quite similar molecules from larger clusters retained).", "_____no_output_____" ], [ "Get cluster centers.", "_____no_output_____" ] ], [ [ "# Get the cluster center of each cluster (first molecule in each cluster)\ncluster_centers = [compounds[c[0]] for c in clusters]\n# How many cluster centers/clusters do we have?\nprint(\"Number of cluster centers:\", len(cluster_centers))\n# NBVAL_CHECK_OUTPUT", "Number of cluster centers: 770\n" ] ], [ [ "Sort clusters by size and molecules in each cluster by similarity.", "_____no_output_____" ] ], [ [ "# Sort the molecules within a cluster based on their similarity\n# to the cluster center and sort the clusters based on their size\nsorted_clusters = []\nfor cluster in clusters:\n if len(cluster) <= 1:\n continue # Singletons\n # else:\n # Compute fingerprints for each cluster element\n sorted_fingerprints = [rdkit_gen.GetFingerprint(compounds[i][0]) for i in cluster]\n # Similarity of all cluster members to the cluster center\n similarities = DataStructs.BulkTanimotoSimilarity(\n sorted_fingerprints[0], sorted_fingerprints[1:]\n )\n # Add index of the molecule to its similarity (centroid excluded!)\n similarities = list(zip(similarities, cluster[1:]))\n # Sort in descending order by similarity\n similarities.sort(reverse=True)\n # Save cluster size and index of molecules in clusters_sort\n sorted_clusters.append((len(similarities), [i for _, i in similarities]))\n # Sort in descending order by cluster size\n sorted_clusters.sort(reverse=True)", "_____no_output_____" ] ], [ [ "Pick a maximum of 1000 compounds.", "_____no_output_____" ] ], [ [ "# Count selected molecules, pick cluster centers first\nselected_molecules = cluster_centers.copy()\n# Take 10 molecules (or a maximum of 50%) of each cluster starting with the largest one\nindex = 0\npending = 1000 - len(selected_molecules)\nwhile pending > 0 and index < len(sorted_clusters):\n # Take indices of sorted clusters\n tmp_cluster = sorted_clusters[index][1]\n # If the first cluster is > 10 big then take exactly 10 compounds\n if sorted_clusters[index][0] > 10:\n num_compounds = 10\n # If smaller, take half of the molecules\n else:\n num_compounds = int(0.5 * len(tmp_cluster)) + 1\n if num_compounds > pending:\n num_compounds = pending\n # Write picked molecules and their structures into list of lists called picked_fps\n selected_molecules += [compounds[i] for i in tmp_cluster[:num_compounds]]\n index += 1\n pending = 1000 - len(selected_molecules)\nprint(\"# Selected molecules:\", len(selected_molecules))\n# NBVAL_CHECK_OUTPUT", "# Selected molecules: 1000\n" ] ], [ [ "This set of diverse molecules could now be used for experimental testing.", "_____no_output_____" ], [ "### Bonus: analysis of run times\n\nAt the end of the talktorial, we can play with the size of the dataset and see how the Butina clustering run time changes.", "_____no_output_____" ] ], [ [ "# Reuse old dataset\nsampled_mols = compounds.copy()", "_____no_output_____" ] ], [ [ "Note that you can try out larger datasets, but data sizes larger than 10000 data points already start to consume quite some memory and time (that's why we 
stopped there). ", "_____no_output_____" ] ], [ [ "# Helper function for time computation\ndef measure_runtime(sampled_mols):\n start_time = time.time()\n sampled_fingerprints = [rdkit_gen.GetFingerprint(m) for m, idx in sampled_mols]\n # Run the clustering with the dataset\n sampled_clusters = cluster_fingerprints(sampled_fingerprints, cutoff=0.3)\n return time.time() - start_time", "_____no_output_____" ], [ "len(sampled_mols)\n# NBVAL_CHECK_OUTPUT", "_____no_output_____" ], [ "sample_sizes = [100, 500, 1000, 2000]\nruntimes = []\n# Take random samples with replacement\nfor size in sample_sizes:\n time_taken = measure_runtime(random.sample(sampled_mols, size))\n print(f\"Dataset size {size}, time {time_taken:4.2f} seconds\")\n runtimes.append(time_taken)", "Dataset size 100, time 0.04 seconds\nDataset size 500, time 0.19 seconds\nDataset size 1000, time 0.41 seconds\nDataset size 2000, time 0.99 seconds\n" ], [ "fig, ax = plt.subplots()\nax.set_title(\"Runtime measurement of Butina Clustering with different dataset sizes\")\nax.set_xlabel(\"# Molecules in data set\")\nax.set_ylabel(\"Runtime in seconds\")\nax.plot(sample_sizes, runtimes, \"g^\");", "_____no_output_____" ] ], [ [ "Notice how the runtime is not exactly proportional to the sample size! It grows faster!", "_____no_output_____" ], [ "## Discussion\n\nWe have introduced the Butina algorithm to cluster a compound dataset and discussed how to pick a reasonable clustering threshold. The clustering was rationalized by looking at example compounds from different clusters and by checking intra-cluster similarities. Finally, the clusters were used to pick a divers subset of compounds.", "_____no_output_____" ], [ "## Quiz\n* Why is clustering of molecules important?\n* Which algorithms can you use to cluster a set of molecules and what is the general idea behind the algorithm?\n* Do you know other clustering algorithms?", "_____no_output_____" ] ] ]
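To make the exclusion-sphere procedure from the _Theory_ section concrete, here is a deliberately naive sketch of the same idea in plain Python. It is illustrative only, assumes the `fingerprints` list and the `DataStructs` import from this talktorial, and scales poorly; the talktorial itself relies on the optimized `Butina.ClusterData` implementation.

```python
def butina_sketch(fingerprints, cutoff=0.2):
    """Naive illustration of Butina's exclusion-sphere clustering (small sets only)."""
    n = len(fingerprints)
    # 1. For every molecule, list all neighbors within the distance cutoff
    neighbors = {
        i: [j for j in range(n) if j != i
            and 1 - DataStructs.TanimotoSimilarity(fingerprints[i], fingerprints[j]) <= cutoff]
        for i in range(n)
    }
    # 2. Visit potential centroids in order of decreasing neighbor count
    order = sorted(neighbors, key=lambda i: len(neighbors[i]), reverse=True)
    flagged, found_clusters = set(), []
    # 3. Exclusion spheres: each unflagged centroid absorbs its unflagged neighbors
    for centroid in order:
        if centroid in flagged:
            continue
        members = [centroid] + [j for j in neighbors[centroid] if j not in flagged]
        flagged.update(members)
        found_clusters.append(members)
    return found_clusters

# For example, on a small sample: butina_sketch(fingerprints[:200], cutoff=0.2)
```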
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
4a40bf0c601f4440c270d42d8b1d76b2dacdf3ed
16,564
ipynb
Jupyter Notebook
notebooks/tutorial/Getting Started.ipynb
edwardcwang/magmathon
f9a1e9d93823d0a90e77c968d952d05b553a6a86
[ "MIT" ]
null
null
null
notebooks/tutorial/Getting Started.ipynb
edwardcwang/magmathon
f9a1e9d93823d0a90e77c968d952d05b553a6a86
[ "MIT" ]
null
null
null
notebooks/tutorial/Getting Started.ipynb
edwardcwang/magmathon
f9a1e9d93823d0a90e77c968d952d05b553a6a86
[ "MIT" ]
null
null
null
36.087146
272
0.56659
[ [ [ "## Getting Started\n\n[`Magma`](https://github.com/phanrahan/magma) is a hardware construction language written in `Python 3`. The central abstraction in `Magma` is a `Circuit`, which is analagous to a verilog module. A circuit is a set of functional units that are wired together.\n\n`Magma` is designed to work with [`Mantle`](https://github.com/phanrahan/mantle), a library of hardware building blocks including logic and arithmetic units, registers, memories, etc. \n\nThe [`Loam`](https://github.com/phanrahan/loam) system builds upon the `Magma` `Circuit` abstraction to represent *parts* and *boards*. A board consists of a set of parts that are wired together. `Loam` makes it is easy to setup a board such as the Lattice IceStick.", "_____no_output_____" ], [ "### Lattice IceStick\n\nIn this tutorial, we will be using the Lattice IceStick.\nThis breakout board contains a ICE40HX FPGA with 1K 4-input LUTs. \nThe board has several useful peripherals including an FTDI USB interface \nwith an integrated JTAG interface which is used to program the FPGA\nand a USART which is used to communicate with the host.\nThe board also contains 5 LEDs, \na PMOD interface, \nand 2 10-pin headers (J1 and J3). \nThe 10-pin headers bring out 8 GPIO pins, \nas well as power and ground.\nThis board is inexpensive ($25), can be plugged into the USB port on your laptop,\nand, best of all, can be\nprogrammed using an open source software toolchain.\n\n![icestick](images/icestick.jpg)\n\nAdditional information about the IceStick Board can be found in the \n[IceStick Programmers Guide](http://www.latticesemi.com/~/media/LatticeSemi/Documents/UserManuals/EI/icestickusermanual.pdf)", "_____no_output_____" ], [ "### Blink\n\nAs a first example,\nlet's write a `Magma` program that blinks an LED on the Icestick Board.\n\nFirst, we import `Magma` as the module `m`.\nNext, we import `Counter` from `Mantle`.\nBefore doing the import we configure mantle to use the ICE40 as the target device.", "_____no_output_____" ] ], [ [ "import magma as m\nm.set_mantle_target(\"ice40\")", "_____no_output_____" ] ], [ [ "The next step is to setup the IceStick board. We import the class `IceStick` from `Loam`. \nWe then create an instance of an `IceStick`.\nThis board instance has member variables \nthat store the configuration of all the parts on the board.\nThe blink program will use the Clock and the LED D5. \nTurning *on* the Clock and the LED D5 sets up the build environment \nto use the associated ICE40 GPIO pins.", "_____no_output_____" ] ], [ [ "from loam.boards.icestick import IceStick\n\n# Create an instance of an IceStick board\nicestick = IceStick()\n\n# Turn on the Clock \n# The clock must turned on because we are using a synchronous counter\nicestick.Clock.on()\n\n# Turn on the LED D5\nicestick.D5.on();", "import lattice ice40\nimport lattice mantle40\n" ] ], [ [ "Now that the IceStick setup is done, \nwe create a `main` program that runs on the Lattice ICE40 FPGA. \nThis main program becomes the top level module.\n\nWe create a simple circuit inside `main`. \nThe circuit has a a 22-bit counter wired to D5. \nThe crystal connected to the ICE40 has a frequency of 12 Mhz. 
\nso the counter will increment at that rate.\nWiring the most-significant bit of the counter to D5\nwill cause the LED to blink roughly 3 times per second.\n`D5` is accessible via `main`.\nIn a similar way, the output of the counter is accesible via `counter.O`,\nand since this an array of bits we can access the MSB using Python's standard list indexing syntax.", "_____no_output_____" ] ], [ [ "from mantle import Counter\n\nN = 22\n\n# Define the main Magma Circuit on the FPGA on the IceStick\nmain = icestick.DefineMain()\n\n# Instance a 22-bit counter\ncounter = Counter(N)\n\n# Wire bit 21 of the counter's output to D5.\nm.wire(counter.O[N-1], main.D5)\n\n# End main\nm.EndDefine()", "_____no_output_____" ] ], [ [ "We then compile the program to verilog. This step also creates a PCF (physical constraints file).", "_____no_output_____" ] ], [ [ "m.compile('build/blink', main)", "compiling FullAdder\ncompiling Add22Cout\ncompiling Register22\ncompiling Counter22\ncompiling main\n" ] ], [ [ "Now we run the open source tools for the Lattice ICE40. \n`yosys` synthesizes the input verilog file (`blink.v`) \nto produce an output netlist (`blink.blif`).\n`arachne-pnr` runs the place and router and generates the bitstream as a text file.\n`icepack` creates a binary bitstream file that can be downloaded to the FPGA. `iceprog` uploads the bitstream to the device. Once the device has been programmed, you should see the center, green LED blinking.", "_____no_output_____" ] ], [ [ "%%bash\ncd build\nyosys -q -p 'synth_ice40 -top main -blif blink.blif' blink.v\narachne-pnr -q -d 1k -o blink.txt -p blink.pcf blink.blif \nicepack blink.txt blink.bin\niceprog blink.bin", "/Users/hanrahan/git/magmathon/notebooks/tutorial/build\n" ] ], [ [ "You can view the verilog file generated by `Magma`.", "_____no_output_____" ] ], [ [ "%cat build/blink.v", "module FullAdder (input I0, input I1, input CIN, output O, output COUT);\r\nwire inst0_O;\r\nwire inst1_CO;\r\nSB_LUT4 #(.LUT_INIT(16'h9696)) inst0 (.I0(I0), .I1(I1), .I2(CIN), .I3(1'b0), .O(inst0_O));\r\nSB_CARRY inst1 (.I0(I0), .I1(I1), .CI(CIN), .CO(inst1_CO));\r\nassign O = inst0_O;\r\nassign COUT = inst1_CO;\r\nendmodule\r\n\r\nmodule Add22Cout (input [21:0] I0, input [21:0] I1, output [21:0] O, output COUT);\r\nwire inst0_O;\r\nwire inst0_COUT;\r\nwire inst1_O;\r\nwire inst1_COUT;\r\nwire inst2_O;\r\nwire inst2_COUT;\r\nwire inst3_O;\r\nwire inst3_COUT;\r\nwire inst4_O;\r\nwire inst4_COUT;\r\nwire inst5_O;\r\nwire inst5_COUT;\r\nwire inst6_O;\r\nwire inst6_COUT;\r\nwire inst7_O;\r\nwire inst7_COUT;\r\nwire inst8_O;\r\nwire inst8_COUT;\r\nwire inst9_O;\r\nwire inst9_COUT;\r\nwire inst10_O;\r\nwire inst10_COUT;\r\nwire inst11_O;\r\nwire inst11_COUT;\r\nwire inst12_O;\r\nwire inst12_COUT;\r\nwire inst13_O;\r\nwire inst13_COUT;\r\nwire inst14_O;\r\nwire inst14_COUT;\r\nwire inst15_O;\r\nwire inst15_COUT;\r\nwire inst16_O;\r\nwire inst16_COUT;\r\nwire inst17_O;\r\nwire inst17_COUT;\r\nwire inst18_O;\r\nwire inst18_COUT;\r\nwire inst19_O;\r\nwire inst19_COUT;\r\nwire inst20_O;\r\nwire inst20_COUT;\r\nwire inst21_O;\r\nwire inst21_COUT;\r\nFullAdder inst0 (.I0(I0[0]), .I1(I1[0]), .CIN(1'b0), .O(inst0_O), .COUT(inst0_COUT));\r\nFullAdder inst1 (.I0(I0[1]), .I1(I1[1]), .CIN(inst0_COUT), .O(inst1_O), .COUT(inst1_COUT));\r\nFullAdder inst2 (.I0(I0[2]), .I1(I1[2]), .CIN(inst1_COUT), .O(inst2_O), .COUT(inst2_COUT));\r\nFullAdder inst3 (.I0(I0[3]), .I1(I1[3]), .CIN(inst2_COUT), .O(inst3_O), .COUT(inst3_COUT));\r\nFullAdder inst4 (.I0(I0[4]), .I1(I1[4]), .CIN(inst3_COUT), 
.O(inst4_O), .COUT(inst4_COUT));\r\nFullAdder inst5 (.I0(I0[5]), .I1(I1[5]), .CIN(inst4_COUT), .O(inst5_O), .COUT(inst5_COUT));\r\nFullAdder inst6 (.I0(I0[6]), .I1(I1[6]), .CIN(inst5_COUT), .O(inst6_O), .COUT(inst6_COUT));\r\nFullAdder inst7 (.I0(I0[7]), .I1(I1[7]), .CIN(inst6_COUT), .O(inst7_O), .COUT(inst7_COUT));\r\nFullAdder inst8 (.I0(I0[8]), .I1(I1[8]), .CIN(inst7_COUT), .O(inst8_O), .COUT(inst8_COUT));\r\nFullAdder inst9 (.I0(I0[9]), .I1(I1[9]), .CIN(inst8_COUT), .O(inst9_O), .COUT(inst9_COUT));\r\nFullAdder inst10 (.I0(I0[10]), .I1(I1[10]), .CIN(inst9_COUT), .O(inst10_O), .COUT(inst10_COUT));\r\nFullAdder inst11 (.I0(I0[11]), .I1(I1[11]), .CIN(inst10_COUT), .O(inst11_O), .COUT(inst11_COUT));\r\nFullAdder inst12 (.I0(I0[12]), .I1(I1[12]), .CIN(inst11_COUT), .O(inst12_O), .COUT(inst12_COUT));\r\nFullAdder inst13 (.I0(I0[13]), .I1(I1[13]), .CIN(inst12_COUT), .O(inst13_O), .COUT(inst13_COUT));\r\nFullAdder inst14 (.I0(I0[14]), .I1(I1[14]), .CIN(inst13_COUT), .O(inst14_O), .COUT(inst14_COUT));\r\nFullAdder inst15 (.I0(I0[15]), .I1(I1[15]), .CIN(inst14_COUT), .O(inst15_O), .COUT(inst15_COUT));\r\nFullAdder inst16 (.I0(I0[16]), .I1(I1[16]), .CIN(inst15_COUT), .O(inst16_O), .COUT(inst16_COUT));\r\nFullAdder inst17 (.I0(I0[17]), .I1(I1[17]), .CIN(inst16_COUT), .O(inst17_O), .COUT(inst17_COUT));\r\nFullAdder inst18 (.I0(I0[18]), .I1(I1[18]), .CIN(inst17_COUT), .O(inst18_O), .COUT(inst18_COUT));\r\nFullAdder inst19 (.I0(I0[19]), .I1(I1[19]), .CIN(inst18_COUT), .O(inst19_O), .COUT(inst19_COUT));\r\nFullAdder inst20 (.I0(I0[20]), .I1(I1[20]), .CIN(inst19_COUT), .O(inst20_O), .COUT(inst20_COUT));\r\nFullAdder inst21 (.I0(I0[21]), .I1(I1[21]), .CIN(inst20_COUT), .O(inst21_O), .COUT(inst21_COUT));\r\nassign O = {inst21_O,inst20_O,inst19_O,inst18_O,inst17_O,inst16_O,inst15_O,inst14_O,inst13_O,inst12_O,inst11_O,inst10_O,inst9_O,inst8_O,inst7_O,inst6_O,inst5_O,inst4_O,inst3_O,inst2_O,inst1_O,inst0_O};\r\nassign COUT = inst21_COUT;\r\nendmodule\r\n\r\nmodule Register22 (input [21:0] I, output [21:0] O, input CLK);\r\nwire inst0_Q;\r\nwire inst1_Q;\r\nwire inst2_Q;\r\nwire inst3_Q;\r\nwire inst4_Q;\r\nwire inst5_Q;\r\nwire inst6_Q;\r\nwire inst7_Q;\r\nwire inst8_Q;\r\nwire inst9_Q;\r\nwire inst10_Q;\r\nwire inst11_Q;\r\nwire inst12_Q;\r\nwire inst13_Q;\r\nwire inst14_Q;\r\nwire inst15_Q;\r\nwire inst16_Q;\r\nwire inst17_Q;\r\nwire inst18_Q;\r\nwire inst19_Q;\r\nwire inst20_Q;\r\nwire inst21_Q;\r\nSB_DFF inst0 (.C(CLK), .D(I[0]), .Q(inst0_Q));\r\nSB_DFF inst1 (.C(CLK), .D(I[1]), .Q(inst1_Q));\r\nSB_DFF inst2 (.C(CLK), .D(I[2]), .Q(inst2_Q));\r\nSB_DFF inst3 (.C(CLK), .D(I[3]), .Q(inst3_Q));\r\nSB_DFF inst4 (.C(CLK), .D(I[4]), .Q(inst4_Q));\r\nSB_DFF inst5 (.C(CLK), .D(I[5]), .Q(inst5_Q));\r\nSB_DFF inst6 (.C(CLK), .D(I[6]), .Q(inst6_Q));\r\nSB_DFF inst7 (.C(CLK), .D(I[7]), .Q(inst7_Q));\r\nSB_DFF inst8 (.C(CLK), .D(I[8]), .Q(inst8_Q));\r\nSB_DFF inst9 (.C(CLK), .D(I[9]), .Q(inst9_Q));\r\nSB_DFF inst10 (.C(CLK), .D(I[10]), .Q(inst10_Q));\r\nSB_DFF inst11 (.C(CLK), .D(I[11]), .Q(inst11_Q));\r\nSB_DFF inst12 (.C(CLK), .D(I[12]), .Q(inst12_Q));\r\nSB_DFF inst13 (.C(CLK), .D(I[13]), .Q(inst13_Q));\r\nSB_DFF inst14 (.C(CLK), .D(I[14]), .Q(inst14_Q));\r\nSB_DFF inst15 (.C(CLK), .D(I[15]), .Q(inst15_Q));\r\nSB_DFF inst16 (.C(CLK), .D(I[16]), .Q(inst16_Q));\r\nSB_DFF inst17 (.C(CLK), .D(I[17]), .Q(inst17_Q));\r\nSB_DFF inst18 (.C(CLK), .D(I[18]), .Q(inst18_Q));\r\nSB_DFF inst19 (.C(CLK), .D(I[19]), .Q(inst19_Q));\r\nSB_DFF inst20 (.C(CLK), .D(I[20]), .Q(inst20_Q));\r\nSB_DFF inst21 (.C(CLK), .D(I[21]), 
.Q(inst21_Q));\r\nassign O = {inst21_Q,inst20_Q,inst19_Q,inst18_Q,inst17_Q,inst16_Q,inst15_Q,inst14_Q,inst13_Q,inst12_Q,inst11_Q,inst10_Q,inst9_Q,inst8_Q,inst7_Q,inst6_Q,inst5_Q,inst4_Q,inst3_Q,inst2_Q,inst1_Q,inst0_Q};\r\nendmodule\r\n\r\nmodule Counter22 (output [21:0] O, output COUT, input CLK);\r\nwire [21:0] inst0_O;\r\nwire inst0_COUT;\r\nwire [21:0] inst1_O;\r\nAdd22Cout inst0 (.I0(inst1_O), .I1({1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b0,1'b1}), .O(inst0_O), .COUT(inst0_COUT));\r\nRegister22 inst1 (.I(inst0_O), .O(inst1_O), .CLK(CLK));\r\nassign O = inst1_O;\r\nassign COUT = inst0_COUT;\r\nendmodule\r\n\r\nmodule main (output D5, input CLKIN);\r\nwire [21:0] inst0_O;\r\nwire inst0_COUT;\r\nCounter22 inst0 (.O(inst0_O), .COUT(inst0_COUT), .CLK(CLKIN));\r\nassign D5 = inst0_O[21];\r\nendmodule\r\n\r\n" ] ], [ [ "Notice that the top-level module contains two arguments (ports),\n`D5` and `CLKIN`. \n`D5` has been configured as an output,\nand `CLKIN` as an input.\n\nThe mapping from these named arguments to pins is contained in the\nPCF (physical constraint file).", "_____no_output_____" ] ], [ [ "%cat build/blink.pcf", "set_io D5 95\r\nset_io CLKIN 21\r\n" ] ], [ [ "`D5` is connected to pin 95 and `CLKIN` is connected to pin 21. ", "_____no_output_____" ] ] ]
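As a back-of-the-envelope check of the "roughly 3 times per second" claim made earlier, the blink rate of D5 follows directly from the 12 MHz crystal and the 22-bit counter. The short sketch below only reproduces that arithmetic; it is not part of the build flow.

```python
# Rough check of the blink rate of D5, which is driven by bit N-1 of the counter
clock_hz = 12e6                # IceStick crystal frequency
N = 22                         # counter width used above
blink_hz = clock_hz / 2 ** N   # one full on/off cycle of the counter's MSB
print(f"LED D5 blinks at about {blink_hz:.2f} Hz")  # ~2.86 Hz, i.e. roughly 3 times per second
```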
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a40c2fb2f179456fae86634279977e1649d120a
597,179
ipynb
Jupyter Notebook
Data_Leadership/Final-Tahoe-Healthcare-Jonathan-Hilgart.ipynb
jonhilgart22/glavanize-projects
39fd6aea1b42975a5cf7c3b15512c732ea12d60f
[ "MIT" ]
null
null
null
Data_Leadership/Final-Tahoe-Healthcare-Jonathan-Hilgart.ipynb
jonhilgart22/glavanize-projects
39fd6aea1b42975a5cf7c3b15512c732ea12d60f
[ "MIT" ]
null
null
null
Data_Leadership/Final-Tahoe-Healthcare-Jonathan-Hilgart.ipynb
jonhilgart22/glavanize-projects
39fd6aea1b42975a5cf7c3b15512c732ea12d60f
[ "MIT" ]
1
2020-05-09T09:38:03.000Z
2020-05-09T09:38:03.000Z
124.646003
57,796
0.840204
[ [ [ "# Tahoe Healthcare\n## How to reduce readmissions to each hospital\n- The goal of this case is exploratory data analysis to understand what factors are the biggest indicator or readmissions. This way, instead of rolling out 'Care Tracker' to every patient ( which costs `$1,200` per patient), only the groups of patients most at risk of being re-admitted will be enrolled into the Care Tracker program.\n- The first section will be some basic exploratory data analysis to understand the makeup of the patient data.\n- The second section will look into clustering from both a manegerial and statistical perspective.\n- The third second will work on fitting different supervised marchine learning classification models (based on RMSE) to predict readmission.\n- The fourth section will outline the most important variables to predict readmission, total money saved, as well as recommendations to Tahoe Healthcare.", "_____no_output_____" ], [ "## Exploratory data analysis", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom itertools import product\nfrom collections import defaultdict\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom model_testing import Model_Testing_Regression\nfrom scipy.spatial.distance import euclidean\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom operator import itemgetter\nfrom sklearn.preprocessing import StandardScaler\n%pylab inline", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "tahoe_df = pd.read_csv('Final Project Data_Case.csv')\n", "_____no_output_____" ], [ "tahoe_df.tail()", "_____no_output_____" ] ], [ [ "- Rename the columns to make them easier to work with\n", "_____no_output_____" ] ], [ [ "tahoe_df['age']=tahoe_df['age']\ntahoe_df['severity_score']=tahoe_df['severity score']\ntahoe_df['comorbidity_score'] = tahoe_df['comorbidity score']\ntahoe_df.drop(['age','severity score','comorbidity score'],axis=1,inplace=True)", "_____no_output_____" ], [ "tahoe_df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4382 entries, 0 to 4381\nData columns (total 7 columns):\nfemale 4382 non-null int64\nflu_season 4382 non-null int64\ned_admit 4382 non-null int64\nreadmit30 4382 non-null int64\nage 4382 non-null int64\nseverity_score 4382 non-null int64\ncomorbidity_score 4382 non-null int64\ndtypes: int64(7)\nmemory usage: 239.7 KB\n" ], [ "# 4,382 patient records", "_____no_output_____" ], [ "tahoe_df.describe()", "_____no_output_____" ] ], [ [ "- Age varies from 65 to 105\n- Slightly more males\n- More admits outside of flue season, \n- Majority are emergency room admits\n- Most admits have fairly high severity score and comordibity score\n- About 23% readmit after 30 days", "_____no_output_____" ] ], [ [ "tahoe_corr_matrix = tahoe_df.corr()\ntahoe_corr_matrix", "_____no_output_____" ], [ "sns.heatmap(tahoe_corr_matrix);", "_____no_output_____" ] ], [ [ "- Interested in correlations with readmit30\n - High correlation with comorbidty score, severity score, and to a lesser extend flu season and age\n- Next, plot the distributions of these variables", 
"_____no_output_____" ] ], [ [ "sns.distplot(tahoe_df.loc[:,'age']);", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n" ], [ "sns.distplot(tahoe_df.loc[:,'female']);", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n" ], [ "sns.distplot(tahoe_df.loc[:,'flu_season']);", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n" ], [ "sns.distplot(tahoe_df.loc[:,'ed_admit']);", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n" ], [ "sns.distplot(tahoe_df.loc[:,'severity_score']);", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n" ], [ "sns.distplot(tahoe_df.loc[:,'comorbidity_score']);", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/statsmodels/nonparametric/kdetools.py:20: VisibleDeprecationWarning: using a non-integer number instead of an integer will result in an error in the future\n y = X[:m/2+1] + np.r_[0,X[m/2+1:],0]*1j\n" ] ], [ [ "## Cluster the patients\n- First, managerial clustering\n - Cluster on the percentiles of comorbidty score, severity score, and flu season. There are four quartiles for each of the first two varaibles (and two for the second for a total of 32 `bclusters\n- Second, statistical clustering\n - K-means will be used on all of the varaibles to determine the optimal clustering strategy", "_____no_output_____" ] ], [ [ "tahoe_quartiles = tahoe_df.describe()", "_____no_output_____" ], [ "severity_score_quartiles = [i for i in tahoe_quartiles['severity_score'].iloc[4:7]]\nseverity_score_quartiles.append(33) ## to account for last quartile\ncomorbidity_score_quartiles = [i for i in tahoe_quartiles['comorbidity_score'].iloc[4:7]]\ncomorbidity_score_quartiles.append(132)## to account for last quartile\nflu_season=[.1,.9] # greater than or less than this (i.e. 
0 or 1)", "_____no_output_____" ], [ "tahoe_quartiles.loc[:,('severity_score','comorbidity_score','flu_season')]", "_____no_output_____" ], [ "tahoe_df.head()", "_____no_output_____" ], [ "tahoe_df['severity_quantile'] = pd.qcut(tahoe_df['severity_score'], 4, labels=False)\ntahoe_df['comorbidity_quantile'] = pd.qcut(tahoe_df['comorbidity_score'], 4, labels=False)\n", "_____no_output_____" ], [ "severity_score_quartiles", "_____no_output_____" ], [ "comorbidity_score_quartiles ", "_____no_output_____" ], [ "def assign_managerial_clusters(input_dataframe):\n \"\"\"Assign managerial clusters given the severity socre, comorbidty score, and fluseason indicator.\n This assumes that the input dataframe already has indicators for the bins for each columns\"\"\"\n count = 1\n df = input_dataframe.copy()\n list_of_df = []\n count = 0\n df['managerial_cluster']=np.nan\n final_df = pd.DataFrame(columns = ['female', 'flu_season', 'ed_admit', 'readmit30', 'age',\n 'severity_score', 'comorbidity_score', 'severity_quantile',\n 'comorbidity_quantile','managerial_cluster'])\n count = 0\n row_of_assignments = []\n cluster_assignments =defaultdict(int)\n for comordibty_q in range(4):\n for severity_q in range(4):\n for flu_h in range(2):\n cluster = df[(df['comorbidity_quantile'] == comordibty_q) & \\\n (df['severity_quantile'] == severity_q) &\\\n (df['flu_season'] == flu_h)]\n cluster['managerial_cluster'] = count\n final_df = pd.concat([final_df,cluster]) ## add to a final DF\n cluster_assignments[comordibty_q,severity_q,flu_h]=count\n\n count +=1 \n return final_df,cluster_assignments\n \n \n \n \n \n ", "_____no_output_____" ], [ "tahoe_mang_cluster_df, custer_assignments = assign_managerial_clusters(tahoe_df)", "/Users/jonathanhilgart/anaconda/lib/python3.5/site-packages/ipykernel/__main__.py:19: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n" ] ], [ [ "- Next, determine the probability of re-admittance per managerial cluster", "_____no_output_____" ] ], [ [ "## total number of readmission per managerial cluster\nreadmission_per_cluster = tahoe_mang_cluster_df.groupby(['managerial_cluster'])['readmit30'].sum().reset_index()", "_____no_output_____" ], [ "readmission_per_cluster.head()", "_____no_output_____" ], [ "# divide by the total number to get the probability of re-admission per cluster\n\npatients_per_cluster = tahoe_mang_cluster_df.groupby(['managerial_cluster'])['readmit30'].count().reset_index()\n", "_____no_output_____" ], [ "## probability of readmission per cluster\nprobability_readmission_per_cluster = (readmission_per_cluster.readmit30/patients_per_cluster.readmit30).reset_index()\n#sorted_probability_readmission_per_cluster = probability_readmission_per_cluster.sort_values(ascending=False).reset_index()\nprobability_readmission_per_cluster['probability_of_readmit'] =probability_readmission_per_cluster['readmit30'] ", "_____no_output_____" ], [ "probability_readmission_per_cluster['managerial_cluster']=probability_readmission_per_cluster['index']", "_____no_output_____" ], [ "probability_readmission_per_cluster['patients_per_cluster']=patients_per_cluster['readmit30']\nprobability_readmission_per_cluster['readmit_per_cluster'] = readmission_per_cluster['readmit30']", "_____no_output_____" ], [ "# sort by top readmit 
clusters\nprobability_readmission_per_cluster.sort_values('probability_of_readmit',ascending=False,inplace=True)", "_____no_output_____" ], [ "### add in the probability of readmit with caretracker (lowers it by 40%)\nprobability_readmission_per_cluster['probability_readmit_caretracker'] = \\\nprobability_readmission_per_cluster.probability_of_readmit*.6", "_____no_output_____" ] ], [ [ "- Next, determine the cost of adding Care Tracker per cluster per person in each cluster\n", "_____no_output_____" ] ], [ [ "def find_minimum_cost(dataframe_input,cost_of_readmit=8000,cost_of_caretracker=1200):\n \"\"\"find the minimum combination between using the caretracker for each person in a cluster, or the cost\n of readmission $8k per person in that cluter.\n \n The formula is The Cost of readmittance * Number of patients who have be readmitted , compared to\n \n The probability of readmittance given that a patient is using care tracker, \n times the number of patients in that cluster, time the cost of readmittance, plus the cost of care tracker times\n the number of patients in that cluster\n \"\"\"\n dataframe_i = dataframe_input.copy()\n list_of_options = []\n min_cost_per_option = []\n alternative_cost = []\n \n for idx, row in dataframe_i.iterrows():\n if (row['probability_readmit_caretracker'] *row['patients_per_cluster'] * cost_of_readmit + \\\n cost_of_caretracker *row['patients_per_cluster']) \\\n < (cost_of_readmit *row['readmit_per_cluster']):\n \n list_of_options.append(1) ## assign to caretracker program\n min_cost_per_option.append(row['probability_readmit_caretracker']\\\n *row['patients_per_cluster'] * cost_of_readmit + \\\n cost_of_caretracker *row['patients_per_cluster'])\n alternative_cost.append(cost_of_readmit *row['probability_readmit_caretracker']\\\n *row['patients_per_cluster'])\n else:\n list_of_options.append(0) ## don't assign to caretracker program\n min_cost_per_option.append(cost_of_readmit *row['readmit_per_cluster'])\n alternative_cost.append(row['probability_readmit_caretracker']\\\n *row['patients_per_cluster'] * cost_of_readmit + \\\n cost_of_caretracker *row['patients_per_cluster'])\n \n dataframe_i['min_cost']=min_cost_per_option\n dataframe_i['option']=list_of_options # 1 = assign to caretracker 0 = don't assign to caretracker\n dataframe_i['alternative_cost'] = alternative_cost\n return dataframe_i \n \n ", "_____no_output_____" ], [ "min_cost_tahoe_prob_df = find_minimum_cost(probability_readmission_per_cluster)", "_____no_output_____" ], [ "# This is the cost of assigning everyone to care tracker\nmin_cost_tahoe_prob_df['care_tracker_cost_cluster'] = \\\n min_cost_tahoe_prob_df['patients_per_cluster']*1200 + \\\n min_cost_tahoe_prob_df['probability_readmit_caretracker']*min_cost_tahoe_prob_df['patients_per_cluster']*8000\n\n # This is the cost of assigning no one to care tracker\nmin_cost_tahoe_prob_df['readmit_cost_cluster'] = \\\n min_cost_tahoe_prob_df['readmit_per_cluster']*8000 ", "_____no_output_____" ], [ "# Find the savings per cluster", "_____no_output_____" ], [ "savings_over_readmit= sum(min_cost_tahoe_prob_df.readmit_cost_cluster -min_cost_tahoe_prob_df.min_cost )\nsavings_over_care_tracker = sum(min_cost_tahoe_prob_df.care_tracker_cost_cluster-min_cost_tahoe_prob_df.min_cost )\ntotal_cost_caretracker = sum(min_cost_tahoe_prob_df.care_tracker_cost_cluster)\ntotal_cost_readmitt_no_caretracker = sum(min_cost_tahoe_prob_df.readmit_cost_cluster)\nmanagerial_min_cost = sum(min_cost_tahoe_prob_df.min_cost )", "_____no_output_____" ], [ "print(\"Tahoe 
will save {:20,.2f} compared to not assigning anyone to care tracker\".format(savings_over_readmit))\nprint(\"Tahoe will save {:20,.2f} compared to assigning everyone to care tracker\".format(savings_over_care_tracker))\nbaseline_readmittance = sum(min_cost_tahoe_prob_df.readmit_per_cluster)/sum(min_cost_tahoe_prob_df.patients_per_cluster)\nbaseline_noreadmittance = 1-baseline_readmittance\nprint(\"The total cost of assigning everyone to caretracker is {:20,.2f}\".format(total_cost_caretracker))\nprint(\"The total cost of assigning noone to caretracker {:20,.2f}\".format(total_cost_readmitt_no_caretracker ))\nprint(\"The total cost of using maangerial clusters and assigning to caretracker from there is {:20,.2f}\".format(managerial_min_cost))\nprint(\"The baseline probability of re-admittance is {:.2%}\".format(\n sum(min_cost_tahoe_prob_df.readmit_per_cluster)/sum(min_cost_tahoe_prob_df.patients_per_cluster)))\nprint(\"The baseline of no readmittance is {:.2%}\".format(baseline_noreadmittance))", "Tahoe will save 426,400.00 compared to not assigning anyone to care tracker\nTahoe will save 2,491,200.00 compared to assigning everyone to care tracker\nThe total cost of assigning everyone to caretracker is 10,048,800.00\nThe total cost of assigning noone to caretracker 7,984,000.00\nThe total cost of using maangerial clusters and assigning to caretracker from there is 7,557,600.00\nThe baseline probability of re-admittance is 22.77%\nThe baseline of no readmittance is 77.23%\n" ] ], [ [ "- Graph the probability of readmission per cluster", "_____no_output_____" ] ], [ [ "mang_cluster_and_prob_readmit = tahoe_mang_cluster_df.groupby('managerial_cluster')['readmit30'].mean().reset_index()\nmang_cluster_and_prob_readmit['probability_of_readmission']=mang_cluster_and_prob_readmit['readmit30']", "_____no_output_____" ], [ "mang_cluster_and_prob_readmit=mang_cluster_and_prob_readmit.sort_values('probability_of_readmission',ascending=False)", "_____no_output_____" ], [ "plt.figure(figsize=(12,5))\nplt.title('Readmission per patient quantile')\nsns.barplot(x=mang_cluster_and_prob_readmit.managerial_cluster, y=mang_cluster_and_prob_readmit.probability_of_readmission)", "_____no_output_____" ], [ "min_cost_tahoe_prob_df.head()", "_____no_output_____" ], [ "sum(min_cost_tahoe_prob_df [(min_cost_tahoe_prob_df.managerial_cluster>=0) &\n (min_cost_tahoe_prob_df.managerial_cluster<=15)]['readmit_per_cluster'])*8000", "_____no_output_____" ], [ "cluster_one = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==0]['patients_per_cluster'])*8000*.02", "_____no_output_____" ], [ "cluster_two = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==1]['patients_per_cluster'])*8000*.1", "_____no_output_____" ], [ "cluster_three = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==2]['patients_per_cluster'])*8000*.05", "_____no_output_____" ], [ "cluster_four = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==3]['patients_per_cluster'])*8000*.1", "_____no_output_____" ], [ "cluster_five = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==4]['patients_per_cluster'])*8000*.08", "_____no_output_____" ], [ "cluster_six = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==5]['patients_per_cluster'])*8000*.11", "_____no_output_____" ], [ "cluster_seven = sum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==6]['patients_per_cluster'])*8000*.11", "_____no_output_____" ], [ "cluster_nine 
=\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==8]['patients_per_cluster'])*8000*.09", "_____no_output_____" ], [ "cluster_ten =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==9]['patients_per_cluster'])*8000*.085", "_____no_output_____" ], [ "cluster_eleven =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==10]['patients_per_cluster'])*8000*.08", "_____no_output_____" ], [ "cluster_twelve =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==11]['patients_per_cluster'])*8000*.18", "_____no_output_____" ], [ "cluster_thirteen =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==12]['patients_per_cluster'])*8000*.12", "_____no_output_____" ], [ "cluster_fourteen =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==13]['patients_per_cluster'])*8000*.18", "_____no_output_____" ], [ "cluster_eight =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==7]['patients_per_cluster'])*8000*.21", "_____no_output_____" ], [ "cluster_fifteen =\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==14]['patients_per_cluster'])*8000*.23", "_____no_output_____" ], [ "cluster_sixteen=\\\nsum(min_cost_tahoe_prob_df [min_cost_tahoe_prob_df.managerial_cluster==14]['patients_per_cluster'])*8000*.3", "_____no_output_____" ], [ "print(\"Expected cost for first 16 cluster {}\".format(sum([cluster_one,cluster_two,cluster_three,\\\n cluster_four,cluster_five,\\\n cluster_six,\\\n cluster_seven,\\\n cluster_eight,\\\n cluster_nine,cluster_ten,cluster_eleven,cluster_twelve,cluster_thirteen,cluster_fourteen,cluster_fifteen])))", "Expected cost for first 16 cluster 1800840.0\n" ], [ "fig = sns.barplot(x=['everyone caretracker','no one caretracker','managerial decision rule'],\\\n y=[total_cost_caretracker,total_cost_readmitt_no_caretracker,managerial_min_cost])\nfig.get_yaxis().set_major_formatter(\n matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))\nplt.title('Cost Comparison')\nplt.ylabel(\"Dollars\")", "_____no_output_____" ], [ "# How many patients to assign to caretracker\nsum(min_cost_tahoe_prob_df[min_cost_tahoe_prob_df.option==1]['patients_per_cluster'])", "_____no_output_____" ] ], [ [ "### Following the advice from managerial clustering on which segments to assign to care tracker, we would save `$2,491,200` compared to assigning everyone to care tracker and will save `$426,000` compared to not assigning anyone to care tracker.\n- This is assuming our sample of patients represents the entire population of patients\n- Managerial is typically easier for businesses to implement compared to an algorithm\n- However, some risks inlude the fact the this is a static model that does not evolve over time.\n\n\n> We should assign segments, 31,29,25,27,30, and 28 to care tracker\n- 31: comorbidity_score above 131, severity_score above 32, in flu season\n- 29: comorbidity_score above 131, severity_score 19-32, in flu season\n- 25: comorbidity_score above 131, severity_score below 8, in flu season\n- 27: comorbidity_score above 131, severity_score 8 to 19, in flu season\n- 30: comorbidity_score above 131, severity_score above 32, not in flu season\n- 28: comorbidity_score above 131, severity_score 19-32, not in flu season", "_____no_output_____" ], [ "# Next, investigate statistical clustering and machine learning to determine the best option.\n- Start with k-means\n - To find the ideal number of clusters, run a silhouette 
score model on different cluster sizes\n - Don't cluster on readmit rate, we don't know this when a patient comes in\n- Then, investigate a couple supervised classification models", "_____no_output_____" ] ], [ [ "def euclidean_distance(a,b):\n \"\"\"Expects numpy array and returns the euclidan distance between them\"\"\"\n return sqrt(sum((a-b)**2))", "_____no_output_____" ], [ "clustering_df = tahoe_df.iloc[:,:5]\nclustering_df.head()", "_____no_output_____" ], [ "\ndef sample_silhouette_score(dataframe_input,max_cluster_size=100):\n \"\"\"Run a three fold CV on 10,000 samples from the dataframe to determine the ideal number of clusters.\n Output is the ideal number of clusters of 3 CV folds with 10k samples.\"\"\"\n silhouette_score_l = []\n predicted_labels = defaultdict(list)\n for clusters in range(2,max_cluster_size):\n knn_classifier = KMeans(clusters)\n silhouette_scores_for_this_cluster = []\n fit_knn = knn_classifier.fit(dataframe_input)\n predicted_labels[clusters]=fit_knn.labels_\n predicted_lab = fit_knn.labels_\n silhouette_score_l.append(silhouette_score(X=dataframe_input,labels=predicted_lab))\n print('Finished iteration {}'.format(clusters))\n number_of_clusters = [i for i in range(2,max_cluster_size)]\n plt.plot([i for i in range(2,max_cluster_size)],silhouette_score_l)\n plt.title('Ideal Number of Clusters')\n plt.ylabel('Silhouette score')\n plt.xlabel('Number of clusters')\n print('The best number of clusters is {}'.format(number_of_clusters[np.argmax(np.array(silhouette_score_l))]))\n return predicted_labels[number_of_clusters[np.argmax(np.array(silhouette_score_l))]]\n ", "_____no_output_____" ], [ "clustered_labels = sample_silhouette_score(clustering_df)", "Finished iteration 2\nFinished iteration 3\nFinished iteration 4\nFinished iteration 5\nFinished iteration 6\nFinished iteration 7\nFinished iteration 8\nFinished iteration 9\nFinished iteration 10\nFinished iteration 11\nFinished iteration 12\nFinished iteration 13\nFinished iteration 14\nFinished iteration 15\nFinished iteration 16\nFinished iteration 17\nFinished iteration 18\nFinished iteration 19\nFinished iteration 20\nFinished iteration 21\nFinished iteration 22\nFinished iteration 23\nFinished iteration 24\nFinished iteration 25\nFinished iteration 26\nFinished iteration 27\nFinished iteration 28\nFinished iteration 29\nFinished iteration 30\nFinished iteration 31\nFinished iteration 32\nFinished iteration 33\nFinished iteration 34\nFinished iteration 35\nFinished iteration 36\nFinished iteration 37\nFinished iteration 38\nFinished iteration 39\nFinished iteration 40\nFinished iteration 41\nFinished iteration 42\nFinished iteration 43\nFinished iteration 44\nFinished iteration 45\nFinished iteration 46\nFinished iteration 47\nFinished iteration 48\nFinished iteration 49\nFinished iteration 50\nFinished iteration 51\nFinished iteration 52\nFinished iteration 53\nFinished iteration 54\nFinished iteration 55\nFinished iteration 56\nFinished iteration 57\nFinished iteration 58\nFinished iteration 59\nFinished iteration 60\nFinished iteration 61\nFinished iteration 62\nFinished iteration 63\nFinished iteration 64\nFinished iteration 65\nFinished iteration 66\nFinished iteration 67\nFinished iteration 68\nFinished iteration 69\nFinished iteration 70\nFinished iteration 71\nFinished iteration 72\nFinished iteration 73\nFinished iteration 74\nFinished iteration 75\nFinished iteration 76\nFinished iteration 77\nFinished iteration 78\nFinished iteration 79\nFinished iteration 80\nFinished iteration 81\nFinished 
iteration 82\nFinished iteration 83\nFinished iteration 84\nFinished iteration 85\nFinished iteration 86\nFinished iteration 87\nFinished iteration 88\nFinished iteration 89\nFinished iteration 90\nFinished iteration 91\nFinished iteration 92\nFinished iteration 93\nFinished iteration 94\nFinished iteration 95\nFinished iteration 96\nFinished iteration 97\nFinished iteration 98\nFinished iteration 99\nThe best number of clusters is 2\n" ] ], [ [ "##### The ideal number of clusters is two, which means that there is not a lot of difference in the people being clustered. We will skip the clustering here and move onto a machine learning algorithm\n- Test random forest\n- Logistic regression\n- Gradient Boosting\n- SVMs\n\n\n", "_____no_output_____" ] ], [ [ "admitted_df =tahoe_df[tahoe_df.readmit30==1]\n", "_____no_output_____" ], [ "not_admitted_df = tahoe_df[tahoe_df.readmit30==0]", "_____no_output_____" ], [ "len(admitted_df)", "_____no_output_____" ] ], [ [ "### Build a random forest model", "_____no_output_____" ] ], [ [ "# not balanced\ntahoe_X = tahoe_df.loc[:,('female', 'flu_season', 'ed_admit', 'age',\\\n 'severity_score', 'comorbidity_score')]\ntahoe_y = tahoe_df.loc[:,'readmit30']\ntahoe_X_labels = ['female', 'flu_season', 'ed_admit', 'age',\n 'severity_score', 'comorbidity_score']\ntahoe_y_labels = 'readmit30'", "_____no_output_____" ], [ "X_train_rf, X_test_rf, y_train_rf, y_test_rf = train_test_split(tahoe_X,tahoe_y,random_state=42)", "_____no_output_____" ], [ "rf_params = {'n_estimators':[i for i in range(100,500,50)],'max_depth':[i for i in range(1,5)]}", "_____no_output_____" ], [ "rf_grid = GridSearchCV( RandomForestClassifier(),rf_params)", "_____no_output_____" ], [ "rf_grid.fit(X_train_rf,y_train_rf)", "_____no_output_____" ], [ "# Create RF model\nbest_rf_model = rf_grid.best_estimator_", "_____no_output_____" ], [ "# Best random forest\nbest_rf_model.fit( X_train_rf, y_train_rf)\n\n# compute the metrics on this model\nbest_rf_rmse =euclidean(y_test_rf,best_rf_model.predict(X_test_rf))/sqrt(len(y_test_rf))\nprint('Best RF RMSE :{}'.format(best_rf_rmse ))\nprint()\nbest_rf_r2 =abs(r2_score(y_test_rf,best_rf_model.predict(X_test_rf)))\nprint('Best RF R^2 score :{:.2%}'.format(best_rf_r2))\nprint()\nrf_accuracy_score = accuracy_score(y_test_rf,best_rf_model.predict(X_test_rf))\nprint(\"Best accuracy score {:.2%}\".format(rf_accuracy_score))\nfeature_weights_rf = best_rf_model.feature_importances_[np.argsort(best_rf_model.feature_importances_)[::-1]]\n# print(regression_tahoe_readmit.x_labels) \nfeature_names_rf = np.array(tahoe_X.columns)[np.argsort(best_rf_model.feature_importances_)[::-1]]\n\nprint([('Feature:',i,'Importance:',f)\\\nfor f,i in zip(best_rf_model.feature_importances_[np.argsort(best_rf_model.feature_importances_)[::-1]],\\\n np.array(tahoe_X.columns)[np.argsort(best_rf_model.feature_importances_)[::-1]])])", "Best RF RMSE :0.4689248457363985\n\nBest RF R^2 score :19.64%\n\nBest accuracy score 78.01%\n[('Feature:', 'comorbidity_score', 'Importance:', 0.60334951115337243), ('Feature:', 'severity_score', 'Importance:', 0.24792139410724132), ('Feature:', 'flu_season', 'Importance:', 0.070846953778592431), ('Feature:', 'age', 'Importance:', 0.062665083276477462), ('Feature:', 'female', 'Importance:', 0.0082572764188916566), ('Feature:', 'ed_admit', 'Importance:', 0.0069597812654245998)]\n" ], [ "fpr_rf,tpr_rf,threshold_rf = roc_curve(y_test_rf, [_[1] for _ in best_rf_model.predict_proba(X_test_rf)])", "_____no_output_____" ], [ "auc_rf = auc(fpr_rf,tpr_rf)", 
"_____no_output_____" ] ], [ [ "## Next, try logistic classification", "_____no_output_____" ] ], [ [ "scaler = StandardScaler()", "_____no_output_____" ], [ "logistic_params = {'penalty':['l1','l2'],'C':[i for i in np.linspace(.3,1,100)],\\\n 'max_iter':[i for i in range(5,50,5)]}", "_____no_output_____" ], [ "log_model = LogisticRegression()", "_____no_output_____" ], [ "logistic_m = GridSearchCV(LogisticRegression() ,logistic_params )\nlog_model= GridSearchCV(LogisticRegression() ,logistic_params )", "_____no_output_____" ], [ "X_train_log, X_test_log, y_train_log, y_test_log = train_test_split(np.array(tahoe_X.values.astype(float)),\n np.array(tahoe_y.values.astype(float)),random_state=42)", "_____no_output_____" ], [ "# scale features to compute variable importance\nscaled_x_train = scaler.fit_transform(X_train_log)\nscaled_x_test = scaler.fit_transform(X_test_log)\n\n", "_____no_output_____" ], [ "logistic_m.fit(scaled_x_train,y_train_log)", "_____no_output_____" ], [ "log_model.fit(X_train_log,y_train_log )", "_____no_output_____" ], [ "logistic_m.best_estimator_", "_____no_output_____" ], [ "# return the best estimator\nlogistic_model = log_model.best_estimator_", "_____no_output_____" ], [ "# scaled coefficients\nlog_m = logistic_m.best_estimator_", "_____no_output_____" ], [ "best_logistic_rmse =euclidean(y_test_log,logistic_model.predict(X_test_log))/sqrt(len(y_test_log))\nprint('Best logistic RMSE :{}'.format(best_logistic_rmse))\nprint()\nbest_logistic_r2 = abs(r2_score(y_test_log,logistic_model.predict(X_test_log)))\nprint('Best logistic R^2 score :{:.2%}'.format(best_logistic_r2))\nprint()\naccuracy_score_log = accuracy_score(y_test_log,logistic_model.predict(X_test_log))\nprint(\"Best logistic accuracy {:.1%}\".format(accuracy_score_log))\n\nfeature_weights_logistic = abs(logistic_model.coef_)[0][np.argsort(abs(log_m.coef_))[0][::-1]]\nfeature_names_logistic = np.array(tahoe_X.columns)[np.argsort(log_m.coef_)[0][::-1]]\n\nprint([('Feature:',i,'Importance:',f)\\\n for f,i in zip(feature_weights_logistic ,\\\n feature_names_logistic)])", "Best logistic RMSE :0.45409732388778046\n\nBest logistic R^2 score :12.19%\n\nBest logistic accuracy 79.4%\n[('Feature:', 'comorbidity_score', 'Importance:', 0.015933619382016952), ('Feature:', 'severity_score', 'Importance:', 0.027699241849915127), ('Feature:', 'flu_season', 'Importance:', 0.70037753566829231), ('Feature:', 'female', 'Importance:', 0.20281877112100571), ('Feature:', 'age', 'Importance:', 0.10556936248567193), ('Feature:', 'ed_admit', 'Importance:', 0.0098562878230878843)]\n" ], [ "fpr_log,tpr_log,threshold = roc_curve(y_test_log, [_[1] for _ in logistic_model.predict_proba(X_test_log)])\n", "_____no_output_____" ], [ "#area under the curve for the ROC curve\nauc_log = auc(fpr_log,tpr_log)", "_____no_output_____" ] ], [ [ "# Try gradient boosting as well", "_____no_output_____" ] ], [ [ "g_boost_params = {'max_depth':[i for i in range(1,5)],'n_estimators':[i for i in range(50,500,50)],\\\n 'loss':['deviance','exponential']}", "_____no_output_____" ], [ "X_train_gb, X_test_gb, y_train_gb, y_test_gb = train_test_split(tahoe_X,tahoe_y,random_state=42)", "_____no_output_____" ], [ "grid_gb = GridSearchCV(GradientBoostingClassifier(),g_boost_params)", "_____no_output_____" ], [ "grid_gb.fit(X_train_gb,y_train_gb)", "_____no_output_____" ], [ "grid_gb.best_estimator_", "_____no_output_____" ], [ "GBoostModel = grid_gb.best_estimator_", "_____no_output_____" ], [ "best_gb_rmse 
=euclidean(y_test_gb,GBoostModel.predict(X_test_gb))/sqrt(len(y_test_gb))\nprint('Best gb RMSE :{}'.format(best_gb_rmse))\nprint()\nbest_gb_r2 = abs(r2_score(y_test_gb,GBoostModel.predict(X_test_gb)))\nprint('Best gb R^2 score :{:.2%}'.format(best_gb_r2))\nprint()\naccuracy_score_gb = accuracy_score(y_test_gb,GBoostModel.predict(X_test_gb))\nprint(\"Best gb accuracy {:.1%}\".format(accuracy_score_gb))\n\nfeature_weights_gb = GBoostModel.feature_importances_[np.argsort(GBoostModel.feature_importances_)[::-1]]\nfeature_names_gb = np.array(tahoe_X.columns)[np.argsort(GBoostModel.feature_importances_)[::-1]]\n\nprint([('Feature:',i,'Importance:',f)\\\n for f,i in zip(feature_weights_gb ,\\\n feature_names_gb)])", "Best gb RMSE :0.46008567910421677\n\nBest gb R^2 score :15.17%\n\nBest gb accuracy 78.8%\n[('Feature:', 'comorbidity_score', 'Importance:', 0.48499999999999999), ('Feature:', 'severity_score', 'Importance:', 0.33500000000000002), ('Feature:', 'flu_season', 'Importance:', 0.12), ('Feature:', 'age', 'Importance:', 0.044999999999999998), ('Feature:', 'female', 'Importance:', 0.014999999999999999), ('Feature:', 'ed_admit', 'Importance:', 0.0)]\n" ], [ "GBoostModel.predict_proba(X_test_gb)", "_____no_output_____" ], [ "GBoostModel.classes_", "_____no_output_____" ], [ "fpr_gb,tpr_gb,threshold = roc_curve(np.array(y_test_gb), [_[1] for _ in GBoostModel.predict_proba(X_test_gb)])", "_____no_output_____" ], [ "auc_gb= auc(fpr_gb,tpr_gb)", "_____no_output_____" ] ], [ [ "# Finally, use support vector machines to predict readmission\n", "_____no_output_____" ] ], [ [ "svm_model = SVC(probability=True)", "_____no_output_____" ], [ "params_svm ={'C':[i for i in np.linspace(.0000001,2,10)],'gamma':[i for i in np.linspace(.0001,2,10)]}", "_____no_output_____" ], [ "best_svm_model = GridSearchCV(svm_model,params_svm)", "_____no_output_____" ], [ "X_train_svm, X_test_svm, y_train_svm, y_test_svm = train_test_split(tahoe_X,tahoe_y,random_state=42)", "_____no_output_____" ], [ "svm_m = best_svm_model.fit(X_train_svm,y_train_svm)", "_____no_output_____" ], [ "svm_model = svm_m.best_estimator_", "_____no_output_____" ], [ " svm_m.best_estimator_", "_____no_output_____" ], [ "#compute SVM metrics\nbest_svm_rmse =euclidean(y_test_svm,svm_model.predict(X_test_svm))/sqrt(len(y_test_svm))\nprint('Best svm RMSE :{}'.format(best_svm_rmse))\nprint()\nbest_svm_r2 = abs(r2_score(y_test_svm,svm_model.predict(X_test_svm)))\nprint('Best svm R^2 score :{:.2%}'.format(best_svm_r2))\nprint()\naccuracy_score_svm = accuracy_score(y_test_svm,svm_model.predict(X_test_svm))\nprint(\"Best svm accuracy {:.1%}\".format(accuracy_score_svm))\n", "Best svm RMSE :0.4728003235885118\n\nBest svm R^2 score :21.62%\n\nBest svm accuracy 77.6%\n" ], [ " best_svm_model.predict_proba(X_test_gb)", "_____no_output_____" ], [ "fpr_svm,tpr_svm,threshold_svm = roc_curve(y_test_svm,[_[1] for _ in best_svm_model.predict_proba(X_test_svm)],pos_label=1)", "_____no_output_____" ], [ "auc_svm = auc(fpr_svm,tpr_svm)", "_____no_output_____" ], [ "print(\"The area under the curve for logistic {}, random forest {}, gradient boosting {}, svm {}\".format(\n auc_log,auc_rf,auc_gb,auc_svm))", "The area under the curve for logistic 0.7891271854334632, random forest 0.7851707582208534, gradient boosting 0.7877434550231, svm 0.674218679228191\n" ] ], [ [ "## Compare each model using a ROC curve", "_____no_output_____" ] ], [ [ "# baseline for ROC curve\nbaseline_x = [ i for i in np.linspace(0,1,100)]\nbaseline_y = [ i for i in np.linspace(0,1,100)]", 
"_____no_output_____" ], [ "plt.figure(figsize=(10,5))\nplt.plot(fpr_log,tpr_log, label='LOG',color='yellow')\nplt.plot(fpr_rf,tpr_rf, label = 'RF')\nplt.plot(fpr_gb,tpr_gb,label='GB')\nplt.plot(fpr_svm,tpr_svm, label = 'SVM')\n\nplt.plot(baseline_x,baseline_y,label='BASELINE',linestyle='dashed')\nplt.title(\"ROC curve foreadmissions\")\nplt.ylabel(\"True Positive Rate\")\nplt.xlabel(\"False Postiive Rate\")\nplt.legend()", "_____no_output_____" ] ], [ [ "- Based upon this, use the Logistic Classification model\n- Run each row through the model, and generate a probability of readmittance\n- From this probability, determine where the threshold is to minimize cost\n", "_____no_output_____" ] ], [ [ "## only 465 positive cases from the default logistic regression (however, this does not distinguish between false\n## positive and true positive)\nsum(logistic_model.predict(tahoe_X))", "_____no_output_____" ], [ "# almost one thousand positive in reality\nsum(tahoe_y)\n", "_____no_output_____" ] ], [ [ "### Create a confusion matrix to understand th tp,fp,tn,fn for logistic regression\n- Compare the deafult threshold of .50 to the optimal threshold of .34 to see where patients are placed", "_____no_output_____" ] ], [ [ "## 00 is true negatives\n## false negative 10\n# true positive 1,1\n# false postiive 0,1\nlog_confusion_matrix = confusion_matrix(tahoe_y , logistic_model.predict(tahoe_X))", "_____no_output_____" ], [ "310*8000+310*1200+688*8000", "_____no_output_____" ], [ "log_confusion_matrix ", "_____no_output_____" ], [ "sns.heatmap(log_confusion_matrix);\nplt.xlabel(\"predicted\")\nplt.ylabel(\"actual\")", "_____no_output_____" ] ], [ [ "- Lot of True Negatives (0,0)\n- Also, a lot of false negatives (cases where the person was readmitted but we perdicted that there were not going to be readmitted. 
This means we are conservative in our guessing readmittance.\n - Adjust threshold to reduce the false negatives\n ", "_____no_output_____" ] ], [ [ "#logsitic_predictions_df['prob_readmit']=prob_readmit \n#logsitic_predictions_df['prob_noreadmit']=prob_noreadmit\n", "_____no_output_____" ], [ "#prob_readmit_per_managerial_cluster = min_cost_tahoe_prob_df.loc[:,('managerial_cluster',\"probability_of_readmit\")]", "_____no_output_____" ], [ "#prob_readmit_per_managerial_cluster.head()", "_____no_output_____" ] ], [ [ "## Next, combine this probability or readmit from logistic, with the probability of readmission per cluster from the managerial clusters", "_____no_output_____" ] ], [ [ "combo_df = tahoe_mang_cluster_df.join(prob_readmit_per_managerial_cluster,on='managerial_cluster',lsuffix='m_clust')", "_____no_output_____" ], [ "combo_df.drop('managerial_clusterm_clust',axis=1,inplace=True)\n", "_____no_output_____" ], [ "logistic_model.predict_proba(combo_df.loc[:,('female','flu_season','ed_admit',\n 'age','severity_score','comorbidity_score')])", "_____no_output_____" ], [ "prob_noreadmit, prob_readmit = zip(*logistic_model.predict_proba(combo_df.loc[:,('female','flu_season','ed_admit',\n 'age','severity_score','comorbidity_score')]))", "_____no_output_____" ], [ "combo_df['prob_readmit_logistic']=prob_readmit \ncombo_df['prob_noreadmit_logistic']=prob_noreadmit\ncombo_df['logistic_prediction']=logistic_model.predict(combo_df.loc[:,('female','flu_season','ed_admit',\n 'age','severity_score','comorbidity_score')])\n", "_____no_output_____" ], [ "combo_df.tail()", "_____no_output_____" ] ], [ [ "### Current threshold is at .50, find the number that maximizes the number of true positives and true negatives (and minimizes cost)\n- This will help compensate for the uneven number of people in each case\n", "_____no_output_____" ] ], [ [ "def find_threshold(dataframe_i,caretracker_cost=1200,readmit_cost=8000):\n \"\"\"Find the best threshold that minimizes cost for logistic classification.\n \n The formula is The Cost of readmittance * Number of patients who have be readmitted , compared to\n \n The probability of readmittance given that a patient is using care tracker, \n times the number of patients in that cluster, time the cost of readmittance, plus the cost of care tracker times\n the number of patients in that cluster.\n \n Returnsa list of tuples containing each threshold and the cost of that threshold\n \"\"\"\n dataframe_i = dataframe_i.copy()\n cost_per_threshold =[]\n list_of_thresholds = [i for i in np.linspace(.01,.99,60)]\n for threshold in list_of_thresholds:\n default_threshold = .5\n print(threshold,'current threshold')\n ## check if the probability prediction by logistic is greater than our threshold\n dataframe_i['predicted_outcome'] = dataframe_i.prob_readmit_logistic > threshold\n #dataframe_i['predicted_outcome_logistic'] = dataframe_i.prob_readmit_logistic > deafult_threshold\n expected_current_cost = 0\n ## based upon this threshold, go through each row and determine the cost of that patient\n for idx, row in dataframe_i.iterrows():\n if row['predicted_outcome']==1 and row['readmit30']==1:\n expected_current_cost += caretracker_cost + .6*readmit_cost\n ## caretracker lowers the chance of being readmitted by 40%\n ## our prediction was correct here\n #print(row)\n elif row['predicted_outcome']==1 and row['readmit30']==0:\n ## our algo was wrong\n expected_current_cost += caretracker_cost \n elif row['predicted_outcome']==0 and row['readmit30']==1:\n ### our algo was wrong, 
false negative\n expected_current_cost +=readmit_cost\n elif row['predicted_outcome']==0 and row['readmit30']==0:\n ## true negative does not add any cost to us\n continue\n cost_per_threshold.append((threshold,expected_current_cost))\n # get the default cost of logistic \n expected_default_cost = 0\n default_threshold = .5\n default_cost_per_threshold = []\n for idx, row in dataframe_i.iterrows():\n if row['logistic_prediction']==1 and row['readmit30']==1:\n expected_default_cost += caretracker_cost + .6*readmit_cost\n ## caretracker lowers the chance of being readmitted by 40%\n ## our prediction was correct here\n #print(row)\n elif row['logistic_prediction']==1 and row['readmit30']==0:\n ## our algo was wrong\n expected_default_cost += caretracker_cost\n elif row['logistic_prediction']==0 and row['readmit30']==1:\n ### our algo was wrong, false negative\n expected_default_cost +=readmit_cost\n elif row['logistic_prediction']==0 and row['readmit30']==0:\n ## true negative does not add any cost to us\n continue\n default_cost_per_threshold.append(([i for i in np.linspace(.01,.99,80)],\\\n [expected_default_cost for _ in range(len(dataframe_i))]) ) \n return cost_per_threshold,default_cost_per_threshold\n \n\n ", "_____no_output_____" ], [ "combo_df.head()", "_____no_output_____" ], [ "best_thresholds, default_threshold = find_threshold(combo_df)", "0.01 current threshold\n0.0266101694915 current threshold\n0.0432203389831 current threshold\n0.0598305084746 current threshold\n0.0764406779661 current threshold\n0.0930508474576 current threshold\n0.109661016949 current threshold\n0.126271186441 current threshold\n0.142881355932 current threshold\n0.159491525424 current threshold\n0.176101694915 current threshold\n0.192711864407 current threshold\n0.209322033898 current threshold\n0.22593220339 current threshold\n0.242542372881 current threshold\n0.259152542373 current threshold\n0.275762711864 current threshold\n0.292372881356 current threshold\n0.308983050847 current threshold\n0.325593220339 current threshold\n0.342203389831 current threshold\n0.358813559322 current threshold\n0.375423728814 current threshold\n0.392033898305 current threshold\n0.408644067797 current threshold\n0.425254237288 current threshold\n0.44186440678 current threshold\n0.458474576271 current threshold\n0.475084745763 current threshold\n0.491694915254 current threshold\n0.508305084746 current threshold\n0.524915254237 current threshold\n0.541525423729 current threshold\n0.55813559322 current threshold\n0.574745762712 current threshold\n0.591355932203 current threshold\n0.607966101695 current threshold\n0.624576271186 current threshold\n0.641186440678 current threshold\n0.657796610169 current threshold\n0.674406779661 current threshold\n0.691016949153 current threshold\n0.707627118644 current threshold\n0.724237288136 current threshold\n0.740847457627 current threshold\n0.757457627119 current threshold\n0.77406779661 current threshold\n0.790677966102 current threshold\n0.807288135593 current threshold\n0.823898305085 current threshold\n0.840508474576 current threshold\n0.857118644068 current threshold\n0.873728813559 current threshold\n0.890338983051 current threshold\n0.906949152542 current threshold\n0.923559322034 current threshold\n0.940169491525 current threshold\n0.956779661017 current threshold\n0.973389830508 current threshold\n0.99 current threshold\n" ], [ "plt.plot(*zip(*best_thresholds))\nf = plt.plot(default_threshold[0][0],[default_threshold[0][1][0] for _ in np.linspace(.01,99,80)])\nplt.ylabel('Total 
cost MM')\nplt.xlabel('Prob. of readmission threshold')\nplt.title(\"Optimal Threshold vs. Default Treshold - Logistic Classification\");\nf.get_yaxis().set_major_formatter(\n matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))", "_____no_output_____" ], [ "\nmin_threshold, min_cost_optimal_threshold = min(best_thresholds, key=itemgetter(1))", "_____no_output_____" ], [ "min_threshold", "_____no_output_____" ], [ "min_cost_optimal_threshold", "_____no_output_____" ], [ "log_confusion_matrix ", "_____no_output_____" ], [ "top_threshold_matrix ", "_____no_output_____" ], [ "combo_df.head()", "_____no_output_____" ], [ "combo_df['new_threshold(.34)_prediction']=combo_df['prob_readmit_logistic']>.34", "_____no_output_____" ], [ "top_threshold_matrix = confusion_matrix(tahoe_y, combo_df['new_threshold(.34)_prediction'])", "_____no_output_____" ], [ "top_threshold_matrix ", "_____no_output_____" ], [ "log_confusion_matrix", "_____no_output_____" ], [ "sns.heatmap(top_threshold_matrix);\nplt.xlabel(\"predicted\")\nplt.ylabel(\"actual\")\nplt.title('Confusion Matrix with .34 threshold')", "_____no_output_____" ], [ "sns.heatmap(log_confusion_matrix,annot_kws=True);\nplt.xlabel(\"predicted\")\nplt.ylabel(\"actual\")\nplt.title('Confusion matrix with default(.50) threshold')", "_____no_output_____" ] ], [ [ "#### The number of true positives decreased while the number of false negatives increased. Let us explore why\n- Cost of true positive is caretracker_cost (`$1,200`) + $.6*$readmit_cost(`$8,000`)\n- Cost of false negative is readmit_cost(`$8,000`)\n- Therefore, only want to assign people to caretracker if you are VERY sure they will be readmitted. We are willing to accept more flase negatives since they are generally less expensive. \n- Find where the cross over threshold is", "_____no_output_____" ] ], [ [ "1200+.05*8000", "_____no_output_____" ], [ "combo_df.head()", "_____no_output_____" ], [ "combo_df['new_threshold(.69)_prediction']=combo_df['prob_readmit_logistic']>.69\ndefault_cost_logistic = default_threshold[0][1][0]\n", "_____no_output_____" ], [ "default_cost_logistic", "_____no_output_____" ], [ "print(\"The total ACTUAL cost of assigning everyone to caretracker is {:20,.2f}\".format(total_cost_caretracker))\nprint(\"The total ACTUAL cost of assigning noone to caretracker {:20,.2f}\".format(total_cost_readmitt_no_caretracker ))\nprint(\"The total EXPECTED cost of using managerial clusters and assigning to caretracker from there is {:20,.2f}\".format(managerial_min_cost))\n\nprint(\"The EXPECTED cost of using logistic with the default threshold {:20,.2f}\".format(default_cost_logistic))\nprint(\"The EXPECTED cost of using logistic with the optimal (.34) threshold {:20,.2f}\".format(min_cost_optimal_threshold ))\nprint(\"The savings of using the optimal logistic model is {:20,.2f}\".format(total_cost_readmitt_no_caretracker-min_cost_optimal_threshold))", "The total ACTUAL cost of assigning everyone to caretracker is 10,048,800.00\nThe total ACTUAL cost of assigning noone to caretracker 7,984,000.00\nThe total EXPECTED cost of using managerial clusters and assigning to caretracker from there is 7,557,600.00\nThe EXPECTED cost of using logistic with the default threshold 7,550,000.00\nThe EXPECTED cost of using logistic with the optimal (.34) threshold 7,492,400.00\nThe savings of using the optimal logistic model is 491,600.00\n" ] ], [ [ "- Plot all of the costs against eachother", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,5))\nfig = sns.barplot(x=['everyone 
caretracker','no one caretracker','managerial decision rule',\\\n 'default logistic','optimal logistic'],\\\n y=[total_cost_caretracker,total_cost_readmitt_no_caretracker,managerial_min_cost, \n default_cost_logistic,min_cost_optimal_threshold ],ci='.9')\nfig.get_yaxis().set_major_formatter(\n matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))\nplt.title('Cost Comparison')\nplt.ylabel(\"Dollars\")\n", "_____no_output_____" ], [ "# Error range for optimal logistic given that accuracy is only 80%\n\"{:20,.2f} -{:20,.2f} \".format(min_cost_optimal_threshold*.2+min_cost_optimal_threshold, \\\n min_cost_optimal_threshold-min_cost_optimal_threshold*.2,)", "_____no_output_____" ] ], [ [ "### Finally, compre the number of people enrolled in caretracker across policies", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,5))\nsns.barplot(x=['everyone caretracker','managerial decision rule',\\\n 'default logistic','optimal logistic'],\n y=[4382,850,465,937])\nplt.title('Caretracker Enrollment across Policies')\nplt.ylabel('Number of patients')", "_____no_output_____" ] ], [ [ "### Recommendation\n- Even though the 'optimal' threshold for logistic regression will save 8k over the default logistic regression, it is too sensitive to cluster probabilities\n - Given that we have a very small sample size, these probabilities are bound to change\n- The recommendation is to use the default logistic regression, which assigns 310 people to caretracker (vs. 210 for the optimal logistic)\n - Still have savings of $.48 MM", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a40cee69ef49a772ebed84db41fe027947cdbc1
12,185
ipynb
Jupyter Notebook
rep/2-more-about-functions.ipynb
vafaei-ar/medical_datascience
1fc9982e21799766de7f45adf0bd905d9a0466d3
[ "MIT" ]
null
null
null
rep/2-more-about-functions.ipynb
vafaei-ar/medical_datascience
1fc9982e21799766de7f45adf0bd905d9a0466d3
[ "MIT" ]
null
null
null
rep/2-more-about-functions.ipynb
vafaei-ar/medical_datascience
1fc9982e21799766de7f45adf0bd905d9a0466d3
[ "MIT" ]
null
null
null
19.590032
857
0.455068
[ [ [ "### Recursive functions", "_____no_output_____" ] ], [ [ "def sigma(k):\n if(k>0):\n r = k+sigma(k-1)\n# print(r)\n else:\n r = 0\n return r\n\nsigma(3)", "_____no_output_____" ], [ "def factorial(k):\n if(k>1):\n r = k*factorial(k-1)\n# print(r)\n else:\n r = 1\n return r\n\nfactorial(3)", "_____no_output_____" ] ], [ [ "### \\*args and \\**kwargs", "_____no_output_____" ] ], [ [ "def add(x,y):\n return x+y\nadd(1,2)", "_____no_output_____" ], [ "def add(*numbers):\n n = 0\n for i in numbers:\n n += i\n return n\nadd(1,2,3,5,11)", "_____no_output_____" ], [ "def my_function(*args):\n for i in args:\n print(i)\n\nmy_function(\"Emil\", \"Tobias\", \"Linus\")", "Emil\nTobias\nLinus\n" ], [ "def my_function(*args):\n for i in args:\n print(i)\n\nmy_function([\"Emil\", \"Tobias\"], \"Linus\") ", "['Emil', 'Tobias']\nLinus\n" ], [ "def my_function(name,*args):\n for i in args:\n if name==i:\n print(i)\n break\n\nmy_function(\"Emil\", \"Tobias\", \"Linus\", \"Emil\")", "Emil\n" ], [ "def math_operate(mode,*numbers):\n if mode=='add':\n n = 0\n for i in numbers:\n n += i\n return n\n elif mode=='mul':\n n = 1\n for i in numbers:\n n *= i \n return n\n else:\n print('entered more is not recognized!')\n \nmath_operate('mul',1,2,3,5,11)", "_____no_output_____" ], [ "def bar(**kwargs):\n for a in kwargs:\n print(a, kwargs[a])\n\nbar(name='one', age=27)", "name one\nage 27\n" ] ], [ [ "### lambda functions", "_____no_output_____" ] ], [ [ "lambda x:x+1", "_____no_output_____" ], [ "(lambda x:x+1)(1)", "_____no_output_____" ], [ "fun1 = lambda x:x+1\nfun1(1)", "_____no_output_____" ], [ "x = lambda a : a + 10\nprint(x(5))", "15\n" ], [ "x = lambda a, b : a * b\nprint(x(5, 6))", "30\n" ] ], [ [ "### functions as objects", "_____no_output_____" ] ], [ [ "def add(x=1,y=2):\n return x+y\n\ndef mul(x=1,y=2):\n return x*y\n\ndef myshow(fun,x,y):\n print('This function is ran using myshow',fun(x,y))", "_____no_output_____" ], [ "myshow(add,1,2)", "This function is ran using myshow 3\n" ], [ "myshow(mul,1,2)", "This function is ran using myshow 2\n" ], [ " myshow(lambda x,y:x**y,3,2)", "This function is ran using myshow 9\n" ], [ "def general_times_maker(n):\n return lambda a : a * n\n\ntimes2 = general_times_maker(2)\ntimes3 = general_times_maker(3)", "_____no_output_____" ], [ "times2", "_____no_output_____" ], [ "print(times2(11))\nprint(times3(11)) ", "22\n33\n" ], [ "myfunc(2)(11)", "_____no_output_____" ] ], [ [ "### function scope", "_____no_output_____" ] ], [ [ "def testing_scope1():\n z = 1\n print(z)\n \nz = 10\ntesting_scope1()\nprint(z)", "1\n10\n" ], [ "def testing_scope2():\n print(z)\nz = 10\ntesting_scope2()", "10\n" ], [ "def testing_scope2():\n print(z)\n z = 1\nz = 10\ntesting_scope2()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a40ea5d51d3bdd956da3f4974b23dec05e29618
1,973
ipynb
Jupyter Notebook
draft_notebooks/wikidown.ipynb
ayarov/SubjectMatterExpertise
d7e5cbfa42c2bc92bf528e213d361c209e741f1b
[ "MIT" ]
null
null
null
draft_notebooks/wikidown.ipynb
ayarov/SubjectMatterExpertise
d7e5cbfa42c2bc92bf528e213d361c209e741f1b
[ "MIT" ]
null
null
null
draft_notebooks/wikidown.ipynb
ayarov/SubjectMatterExpertise
d7e5cbfa42c2bc92bf528e213d361c209e741f1b
[ "MIT" ]
null
null
null
23.211765
86
0.534719
[ [ [ "import re\nfrom urllib import request, parse\nfrom urllib.parse import urljoin\nfrom bs4 import BeautifulSoup", "_____no_output_____" ], [ "# # dump_date = '20180820'\n# dump_url = r'https://dumps.wikimedia.org/enwiki/latest'\n# dump_html_url = urljoin(dump_url, dump_date)\n# print(dump_html_url)\n# print(urljoin(dump_html_url, 'a'))", "_____no_output_____" ], [ "dump_url = r'https://dumps.wikimedia.org/enwiki/latest'\nwith request.urlopen(dump_url) as response:\n soup = BeautifulSoup(response.read(), 'html.parser')", "_____no_output_____" ], [ "with open(r'D:\\data\\enwiki\\latest\\stub-meta-history.txt', mode='a') as f:\n match = soup.find_all('a', {'href': re.compile(r'stub-meta-history')})\n if len(match) > 0:\n for m in match:\n if str(m.getText()).endswith('gz-rss.xml'):\n continue\n f.write(parse.urljoin(dump_url + '/', m.getText()))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a40ee0e2c6210a1e684421ecc9dea1e5094d8eb
5,345
ipynb
Jupyter Notebook
portopt/notebooks/exploratory/1.0-jujbates-S&P500-PO_problem_identification.ipynb
jujbates/Portfolio-Optimization
405be097662e19263854a88432e58d64c59df51c
[ "MIT" ]
null
null
null
portopt/notebooks/exploratory/1.0-jujbates-S&P500-PO_problem_identification.ipynb
jujbates/Portfolio-Optimization
405be097662e19263854a88432e58d64c59df51c
[ "MIT" ]
null
null
null
portopt/notebooks/exploratory/1.0-jujbates-S&P500-PO_problem_identification.ipynb
jujbates/Portfolio-Optimization
405be097662e19263854a88432e58d64c59df51c
[ "MIT" ]
null
null
null
46.885965
522
0.653321
[ [ [ "\n# **The Data Science Method** \n\n\n1. [**Problem Identification**](https://medium.com/@aiden.dataminer/the-data-science-method-problem-identification-6ffcda1e5152)\n\n2. [Data Wrangling](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-data-collection-organization-and-definitions-d19b6ff141c4) \n * Data Organization - Using cookiecutter template with some modifications to start.\n * Data Collection - Collected data from wikipedia and yahoo finance price dataset. The wikipedia showed us the current S&P 500 companies and then used their ticker symbols to query yahoo finance adj. close prices.\n - Load the S&P 500 tickers from wikipedia page\n - Get S&P 500 Index (^GSPC) as a Bench Mark\n - Use S&P Symbols to Get Adj Close from Yahoo Finance\n * Data Cleaning - The S&P 500 data from yahoo finance price is almost clean and ready for analysis use. Need to remove tickers that IPO or die mid year, creating usable Nan values.\n * Basic Data Visualizations \n * Data Definition \n\n \n3. [Exploratory Data Analysis](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-exploratory-data-analysis-bc84d4d8d3f9)\n * Build data profile tables and plots\n - Cumulative Return\n - Annualized Return\n - Daily Return\n - Mean Daily Return\n - Standard Deviation Daily Return\n - Simple Moving Average\n - Exponential Moving Average\n - Moving Average Convergence Divergence\n - Adj. Close & Daily Return Covariance\n - Adj. Close & Daily Return Correlation\n - Sharpe Ratio\n - Skew \n - Kurtosis\n * Explore data relationships\n * Identification and creation of features \n\n4. [Pre-processing and Training Data Development](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-pre-processing-and-training-data-development-fd2d75182967)\n * Create dummy or indicator features for categorical variables\n * Standardize the magnitude of numeric features\n * Split into testing and training datasets\n * Apply scalar to the testing set\n5. [Modeling](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-modeling-56b4233cad1b)\n * Create dummy or indicator features for categorical variable\n * Fit Models with Training Data Set\n * Review Model Outcomes — Iterate over additional models as needed.\n * Identify the Final Model\n\n6. [Documentation](https://medium.com/@aiden.dataminer/the-data-science-method-dsm-documentation-c92c28bd45e6)\n\n * Review the Results\n * Present and share your findings - storytelling\n * Finalize Code \n * Finalize Documentation\n\n", "_____no_output_____" ], [ "# Problem Identification", "_____no_output_____" ], [ "The world of financial asset management is difficult to understand. It takes time and money to research and analyze assets. Some of the most interesting and high profile assets are assets in the form of securities. In the United States, financial securities are simply tradable assets such as debts, equities, and derivatives. Nowadays, the financial asset that most U.S. citizens think of is equities and more specifically common stocks. They have begun to believe it is the best indicator of economic growth. \n\nSure you can buckle down and evaluate stock after stock, company after company, CEO after CEO, quarter after quarter. But is there an easier way for common folks without the financial know how or even just time. ETFs, Exchange-Traded Funds are portfolios that are traded on stock exchanges just like any other stock. ETF portfolios can contain assets such as stocks, bonds, currencies, and/or commodities. 
This makes it much easier for an everyday investor to invest with lower risk and little to no supervision. \n\nThe most common ETFs follow the S&P 500 index (^GSPC), such as The Vanguard Group's VOO, iShares' IVV, and State Street Corporation's SPY. The S&P 500 is a capitalization-weighted index, which means each constituent is weighted by its market capitalization, that is, the stock’s share price multiplied by its number of outstanding shares.\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
4a40f84e813aafc8e2fd32436242a21d68034e1e
47,757
ipynb
Jupyter Notebook
sagemaker/tf-deploy/1.mnist_train.ipynb
gonsoomoon-ml/aws-ai-ml-workshop-kr
cf5653364d4f24066b7ad371bd0f7230758ab49c
[ "MIT-0" ]
null
null
null
sagemaker/tf-deploy/1.mnist_train.ipynb
gonsoomoon-ml/aws-ai-ml-workshop-kr
cf5653364d4f24066b7ad371bd0f7230758ab49c
[ "MIT-0" ]
null
null
null
sagemaker/tf-deploy/1.mnist_train.ipynb
gonsoomoon-ml/aws-ai-ml-workshop-kr
cf5653364d4f24066b7ad371bd0f7230758ab49c
[ "MIT-0" ]
null
null
null
59.621723
1,430
0.629876
[ [ [ "# SageMaker Tensorflow를 이용한 MNIST 학습\n\nMNIST는 필기 숫자 분류하는 문제로 이미지 처리의 테스트용으로 널리 사용되는 데이터 세트입니다. 28x28 픽셀 그레이스케일로 70,000개의 손으로 쓴 숫자 이미지가 레이블과 함께 구성됩니다. 데이터 세트는 60,000개의 훈련 이미지와 10,000개의 테스트 이미지로 분할됩니다. 0~9까지 10개의 클래스가 있습니다. 이 튜토리얼은 SageMaker에서 Tensorflow V2를 이용하여 MNIST 분류 모델을 훈련하는 방법을 보여줍니다.\n", "_____no_output_____" ] ], [ [ "import sagemaker \nsagemaker.__version__", "_____no_output_____" ], [ "import os\nimport json\n\nimport sagemaker\nfrom sagemaker.tensorflow import TensorFlow\nfrom sagemaker import get_execution_role\n\nsess = sagemaker.Session()\n\nrole = get_execution_role()\n\noutput_path='s3://' + sess.default_bucket() + '/tensorflow/mnist'", "_____no_output_____" ] ], [ [ "## TensorFlow Estimator\n\nTensorflow 클래스를 사용하면 SageMaker의 컨테이너 환경에서 학습 스크립트를 실행할 수 있습니다. \n다음 파라미터 설정을 통해 환경을 셋업합니다.\n\n\n- entry_point: 트레이닝 컨테이너에서 신경망 학습을 위해 사용하는 사용자 정의 파이썬 파일. 다음 섹션에서 다시 논의됩니다.\n- role: AWS 자원에 접근하기 위한 IAM 역할(role) \n- instance_type: 스크립트를 실행하는 SAGEMAKER 인스턴스 유형. 본 노트북을 실행하기 위해 사용중인 SageMaker 인스턴스에서 훈련 작업을 실행하려면`local`로 설정하십시오.\n- model_dir: 학습중에 체크 포인트 데이터와 모델을 내보내는 S3 Bucket URI. (default : None). 이 매개변수가 스크립트에 전달되는 것을 막으려면 `model_dir`=False 로 설정하 수 있습니다.\n- instance count: 학습작업이 실행될 인스턴스의 갯수. 분산 학습을 위해서는 1 이상의 값이 필요합니다. \n- output_path: 학습의 결과물 (모델 아티팩트와 out 파일)을 내보내는 S3 Bucket URI. \n- framework_version: 사용하는 프레임워크의 버전\n- py_version: 파이썬 버전\n\n보다 자세한 내용은 [the API reference](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html#sagemaker.estimator.EstimatorBase)를 참조합니다.\n\n", "_____no_output_____" ], [ "## 학습을 위한 entry point 스트립트 작성\n\n`entrypoint`를 통해 Tensorflow 모델을 학습하기 위한 Python 코드를 Enstimator (Tensroflow 클래스)에 제공합니다. \n\nSageMaker Tensorflow Estimator는 AWS의 관리환경으로 Tensorflow 실행환경이 저장된 도커 이미지를 가져올 것입니다. Estimator 클래스를 초기화할 때 사용한 파라미터 설정에 따라 스크립트를 실행합니다. \n\n실행되는 훈련 스크립트는 Amazon SageMaker 외부에서 실행될 수있는 훈련 스크립트와 매우 유사하지만 교육 이미지에서 제공하는 환경 변수에 액세스 하는 설정 등이 추가될 수 있습니다. 사용가능한 환경변수의 리스트를 확인하려면 다음 리소스 [the short list of environment variables provided by the SageMaker service](https://sagemaker.readthedocs.io/en/stable/frameworks/mxnet/using_mxnet.html?highlight=entry%20point)를 참고하십시오. 
환경변수의 풀셋은 다음 링크 [the complete list of environment variables](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md)에서 확인할 수 있습니다.\n\n본 예제에서는 `code/train.py` 스크립트를 사용합니다.\n", "_____no_output_____" ] ], [ [ "!pygmentize 'code/train.py'", "\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36m__future__\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m print_function\n\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36margparse\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mlogging\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mos\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mjson\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mgzip\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mnumpy\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnp\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtraceback\u001b[39;49;00m\n\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtensorflow\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mtf\u001b[39;49;00m\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mtensorflow\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mkeras\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mlayers\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m Dense, Flatten, Conv2D\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mtensorflow\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mkeras\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m Model\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\u001b[37m# Define the model object\u001b[39;49;00m\n\n\u001b[34mclass\u001b[39;49;00m \u001b[04m\u001b[32mSmallConv\u001b[39;49;00m(Model):\n \u001b[34mdef\u001b[39;49;00m \u001b[32m__init__\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m):\n \u001b[36msuper\u001b[39;49;00m(SmallConv, \u001b[36mself\u001b[39;49;00m).\u001b[32m__init__\u001b[39;49;00m()\n \u001b[36mself\u001b[39;49;00m.conv1 = Conv2D(\u001b[34m32\u001b[39;49;00m, \u001b[34m3\u001b[39;49;00m, activation=\u001b[33m'\u001b[39;49;00m\u001b[33mrelu\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[36mself\u001b[39;49;00m.flatten = Flatten()\n \u001b[36mself\u001b[39;49;00m.d1 = Dense(\u001b[34m128\u001b[39;49;00m, activation=\u001b[33m'\u001b[39;49;00m\u001b[33mrelu\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n \u001b[36mself\u001b[39;49;00m.d2 = Dense(\u001b[34m10\u001b[39;49;00m)\n \n \u001b[34mdef\u001b[39;49;00m \u001b[32mcall\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m, x):\n x = \u001b[36mself\u001b[39;49;00m.conv1(x)\n x = \u001b[36mself\u001b[39;49;00m.flatten(x)\n x = \u001b[36mself\u001b[39;49;00m.d1(x)\n \u001b[34mreturn\u001b[39;49;00m \u001b[36mself\u001b[39;49;00m.d2(x)\n\n\n\u001b[37m# Decode and preprocess data\u001b[39;49;00m\n\u001b[34mdef\u001b[39;49;00m \u001b[32mconvert_to_numpy\u001b[39;49;00m(data_dir, images_file, labels_file):\n \u001b[33m\"\"\"Byte string to numpy arrays\"\"\"\u001b[39;49;00m\n \u001b[34mwith\u001b[39;49;00m gzip.open(os.path.join(data_dir, images_file), \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n images = np.frombuffer(f.read(), np.uint8, offset=\u001b[34m16\u001b[39;49;00m).reshape(-\u001b[34m1\u001b[39;49;00m, \u001b[34m28\u001b[39;49;00m, \u001b[34m28\u001b[39;49;00m)\n \n \u001b[34mwith\u001b[39;49;00m gzip.open(os.path.join(data_dir, 
labels_file), \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n labels = np.frombuffer(f.read(), np.uint8, offset=\u001b[34m8\u001b[39;49;00m)\n\n \u001b[34mreturn\u001b[39;49;00m (images, labels)\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mmnist_to_numpy\u001b[39;49;00m(data_dir, train):\n \u001b[33m\"\"\"Load raw MNIST data into numpy array\u001b[39;49;00m\n\u001b[33m \u001b[39;49;00m\n\u001b[33m Args:\u001b[39;49;00m\n\u001b[33m data_dir (str): directory of MNIST raw data. \u001b[39;49;00m\n\u001b[33m This argument can be accessed via SM_CHANNEL_TRAINING\u001b[39;49;00m\n\u001b[33m \u001b[39;49;00m\n\u001b[33m train (bool): use training data\u001b[39;49;00m\n\u001b[33m\u001b[39;49;00m\n\u001b[33m Returns:\u001b[39;49;00m\n\u001b[33m tuple of images and labels as numpy array\u001b[39;49;00m\n\u001b[33m \"\"\"\u001b[39;49;00m\n\n \u001b[34mif\u001b[39;49;00m train:\n images_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mtrain-images-idx3-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n labels_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mtrain-labels-idx1-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n \u001b[34melse\u001b[39;49;00m:\n images_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mt10k-images-idx3-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n labels_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mt10k-labels-idx1-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n\n \u001b[34mreturn\u001b[39;49;00m convert_to_numpy(data_dir, images_file, labels_file)\n\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mnormalize\u001b[39;49;00m(x, axis):\n eps = np.finfo(\u001b[36mfloat\u001b[39;49;00m).eps\n\n mean = np.mean(x, axis=axis, keepdims=\u001b[34mTrue\u001b[39;49;00m)\n \u001b[37m# avoid division by zero\u001b[39;49;00m\n std = np.std(x, axis=axis, keepdims=\u001b[34mTrue\u001b[39;49;00m) + eps\n \u001b[34mreturn\u001b[39;49;00m (x - mean) / std\n\n\u001b[37m# Training logic\u001b[39;49;00m\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mtrain\u001b[39;49;00m(args):\n \u001b[37m# create data loader from the train / test channels\u001b[39;49;00m\n x_train, y_train = mnist_to_numpy(data_dir=args.train, train=\u001b[34mTrue\u001b[39;49;00m)\n x_test, y_test = mnist_to_numpy(data_dir=args.test, train=\u001b[34mFalse\u001b[39;49;00m)\n\n x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)\n\n \u001b[37m# normalize the inputs to mean 0 and std 1\u001b[39;49;00m\n x_train, x_test = normalize(x_train, (\u001b[34m1\u001b[39;49;00m, \u001b[34m2\u001b[39;49;00m)), normalize(x_test, (\u001b[34m1\u001b[39;49;00m, \u001b[34m2\u001b[39;49;00m))\n\n \u001b[37m# expand channel axis\u001b[39;49;00m\n \u001b[37m# tf uses depth minor convention\u001b[39;49;00m\n x_train, x_test = np.expand_dims(x_train, axis=\u001b[34m3\u001b[39;49;00m), np.expand_dims(x_test, axis=\u001b[34m3\u001b[39;49;00m)\n \n \u001b[37m# normalize the data to mean 0 and std 1\u001b[39;49;00m\n train_loader = tf.data.Dataset.from_tensor_slices(\n (x_train, y_train)).shuffle(\u001b[36mlen\u001b[39;49;00m(x_train)).batch(args.batch_size)\n\n test_loader = tf.data.Dataset.from_tensor_slices(\n (x_test, y_test)).batch(args.batch_size)\n\n model = SmallConv()\n model.compile()\n loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=\u001b[34mTrue\u001b[39;49;00m)\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=args.learning_rate, \n beta_1=args.beta_1,\n beta_2=args.beta_2\n )\n\n\n train_loss = 
tf.keras.metrics.Mean(name=\u001b[33m'\u001b[39;49;00m\u001b[33mtrain_loss\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\u001b[33m'\u001b[39;49;00m\u001b[33mtrain_accuracy\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n\n test_loss = tf.keras.metrics.Mean(name=\u001b[33m'\u001b[39;49;00m\u001b[33mtest_loss\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\u001b[33m'\u001b[39;49;00m\u001b[33mtest_accuracy\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n\n\n \u001b[90m@tf\u001b[39;49;00m.function\n \u001b[34mdef\u001b[39;49;00m \u001b[32mtrain_step\u001b[39;49;00m(images, labels):\n \u001b[34mwith\u001b[39;49;00m tf.GradientTape() \u001b[34mas\u001b[39;49;00m tape:\n predictions = model(images, training=\u001b[34mTrue\u001b[39;49;00m)\n loss = loss_fn(labels, predictions)\n grad = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\u001b[36mzip\u001b[39;49;00m(grad, model.trainable_variables))\n \n train_loss(loss)\n train_accuracy(labels, predictions)\n \u001b[34mreturn\u001b[39;49;00m \n \n \u001b[90m@tf\u001b[39;49;00m.function\n \u001b[34mdef\u001b[39;49;00m \u001b[32mtest_step\u001b[39;49;00m(images, labels):\n predictions = model(images, training=\u001b[34mFalse\u001b[39;49;00m)\n t_loss = loss_fn(labels, predictions)\n test_loss(t_loss)\n test_accuracy(labels, predictions)\n \u001b[34mreturn\u001b[39;49;00m\n \n \u001b[36mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mTraining starts ...\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\n \u001b[34mfor\u001b[39;49;00m epoch \u001b[35min\u001b[39;49;00m \u001b[36mrange\u001b[39;49;00m(args.epochs):\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()\n \n \u001b[34mfor\u001b[39;49;00m batch, (images, labels) \u001b[35min\u001b[39;49;00m \u001b[36menumerate\u001b[39;49;00m(train_loader):\n train_step(images, labels)\n \n \u001b[34mfor\u001b[39;49;00m images, labels \u001b[35min\u001b[39;49;00m test_loader:\n test_step(images, labels)\n \n \u001b[36mprint\u001b[39;49;00m(\n \u001b[33mf\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\u001b[33mEpoch \u001b[39;49;00m\u001b[33m{\u001b[39;49;00mepoch + \u001b[34m1\u001b[39;49;00m\u001b[33m}\u001b[39;49;00m\u001b[33m, \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n \u001b[33mf\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\u001b[33mLoss: \u001b[39;49;00m\u001b[33m{\u001b[39;49;00mtrain_loss.result()\u001b[33m}\u001b[39;49;00m\u001b[33m, \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n \u001b[33mf\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\u001b[33mAccuracy: \u001b[39;49;00m\u001b[33m{\u001b[39;49;00mtrain_accuracy.result() * \u001b[34m100\u001b[39;49;00m\u001b[33m}\u001b[39;49;00m\u001b[33m, \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n \u001b[33mf\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\u001b[33mTest Loss: \u001b[39;49;00m\u001b[33m{\u001b[39;49;00mtest_loss.result()\u001b[33m}\u001b[39;49;00m\u001b[33m, \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n \u001b[33mf\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\u001b[33mTest Accuracy: \u001b[39;49;00m\u001b[33m{\u001b[39;49;00mtest_accuracy.result() * \u001b[34m100\u001b[39;49;00m\u001b[33m}\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n )\n\n \u001b[37m# Save the model\u001b[39;49;00m\n \u001b[37m# A version number is needed for the serving container\u001b[39;49;00m\n \u001b[37m# to load the model\u001b[39;49;00m\n version = 
\u001b[33m'\u001b[39;49;00m\u001b[33m00000000\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n ckpt_dir = os.path.join(args.model_dir, version)\n \u001b[34mif\u001b[39;49;00m \u001b[35mnot\u001b[39;49;00m os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n model.save(ckpt_dir)\n \u001b[34mreturn\u001b[39;49;00m\n\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mparse_args\u001b[39;49;00m():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--batch-size\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m32\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--epochs\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m1\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--learning-rate\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mfloat\u001b[39;49;00m, default=\u001b[34m1e-3\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--beta_1\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mfloat\u001b[39;49;00m, default=\u001b[34m0.9\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--beta_2\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mfloat\u001b[39;49;00m, default=\u001b[34m0.999\u001b[39;49;00m)\n \n \u001b[37m# Environment variables given by the training image\u001b[39;49;00m\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--model-dir\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_MODEL_DIR\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--train\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_CHANNEL_TRAINING\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--test\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_CHANNEL_TESTING\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--current-host\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_CURRENT_HOST\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--hosts\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mlist\u001b[39;49;00m, default=json.loads(os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_HOSTS\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m]))\n\n \u001b[34mreturn\u001b[39;49;00m parser.parse_args()\n\n\n\n\u001b[34mif\u001b[39;49;00m \u001b[31m__name__\u001b[39;49;00m == \u001b[33m'\u001b[39;49;00m\u001b[33m__main__\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n args = parse_args()\n train(args)\n" ] ], [ [ "### 하이퍼파리미터 설정\n\n추가로, Tensorflow Estimator는 명령라인 매개변수로 학습작업에서 사용할 하이퍼파라미터를 전달합니다.\n\n<span style=\"color:red\"> Note: SageMaker Studio 에서는 local mode가 지원되지 않습니다. 
</span>", "_____no_output_____" ] ], [ [ "# set local_mode if you want to run the training script on the machine that runs this notebook\n\ninstance_type='ml.c4.xlarge'\n \nest = TensorFlow(\n entry_point='train.py',\n source_dir='code', # directory of your training script\n role=role,\n framework_version='2.3.1',\n model_dir=False, # don't pass --model_dir to your training script\n py_version='py37',\n instance_type=instance_type,\n instance_count=1,\n output_path=output_path,\n hyperparameters={\n 'batch-size':512,\n 'epochs':10,\n 'learning-rate': 1e-3,\n 'beta_1' : 0.9,\n 'beta_2' : 0.999\n \n }\n)\n", "_____no_output_____" ] ], [ [ "학습 컨테이너는 아래와 같은 방식으로 하이퍼파라미터를 전달하고 스크립트를 실행할것입니다. \n\n```\npython train.py --batch-size 32 --epochs 10 --learning-rate 0.001\n --beta_1 0.9 --beta_2 0.999\n```", "_____no_output_____" ], [ "## 학습 & 테스트 데이터 채널 지정 \n\nTensorflow Estimator에게 학습 및 테스트 데이터셋을 찾을 수있는 위치를 알려야합니다. S3 버킷에 대한 링크 또는 로컬 모드를 사용하는 경우 로컬 파일 시스템의 경로가 될 수 있습니다. 이 예에서는 공용 S3 버킷에서 MNIST 데이터를 다운로드하고 기본 버킷에 업로드합니다.", "_____no_output_____" ] ], [ [ "import logging\nimport boto3\nfrom botocore.exceptions import ClientError\n# Download training and testing data from a public S3 bucket\n\ndef download_from_s3(data_dir='/tmp/data', train=True):\n \"\"\"Download MNIST dataset and convert it to numpy array\n \n Args:\n data_dir (str): directory to save the data\n train (bool): download training set\n \n Returns:\n None\n \"\"\"\n \n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n \n if train:\n images_file = \"train-images-idx3-ubyte.gz\"\n labels_file = \"train-labels-idx1-ubyte.gz\"\n else:\n images_file = \"t10k-images-idx3-ubyte.gz\"\n labels_file = \"t10k-labels-idx1-ubyte.gz\"\n \n with open('code/config.json', 'r') as f:\n config = json.load(f)\n\n # download objects\n s3 = boto3.client('s3')\n bucket = config['public_bucket']\n for obj in [images_file, labels_file]:\n key = os.path.join(\"datasets/image/MNIST\", obj)\n dest = os.path.join(data_dir, obj)\n if not os.path.exists(dest):\n s3.download_file(bucket, key, dest)\n return\n\n\ndownload_from_s3('/tmp/data', True)\ndownload_from_s3('/tmp/data', False)\n", "_____no_output_____" ], [ "# upload to the default bucket\n\nprefix = 'mnist'\nbucket = sess.default_bucket()\nloc = sess.upload_data(path='/tmp/data', bucket=bucket, key_prefix=prefix)\n\nchannels = {\n \"training\": loc,\n \"testing\": loc\n}\n", "_____no_output_____" ] ], [ [ "\n학습 실행시 `channels` 딕셔너리는 컨테이너 내에 `SM_CHANNEL_<key name>` 형태의 환경 변수를 만듭니다.\n\n본 사례에서는 `SM_CHANNEL_TRAINING`과 `SM_CHANNEL_TESTING` 이라는 이름으로 생성될 것입니다. `code/train.py` 에서 해당 값을 어떻게 참조하는지 살펴보십시오. 보다 자세한 내용은 [SM_CHANNEL_{channel_name}](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md#sm_channel_channel_name)를 참조합니다.\n\n필요시 다음과 같이 검증 채널을 추가할 수 있습니다.\n```\nchannels = {\n 'training': train_data_loc,\n 'validation': val_data_loc,\n 'test': test_data_loc\n }\n```\n위 코드에 의해서는 다음 채널이 스크립트에서 사용가능하게 될 것입니다. \n`SM_CHANNEL_VALIDATION`.", "_____no_output_____" ], [ "## SageMaker 학습작업 실행\n\n이제 훈련 컨테이너에는 교육용 스크립트를 실행할 수 있습니다. 
fit 명령을 호출하여 컨테이너를 시작할 수 있습니다\n", "_____no_output_____" ] ], [ [ "est.fit(inputs=channels)", "2021-07-16 13:52:08 Starting - Starting the training job...\n2021-07-16 13:52:32 Starting - Launching requested ML instancesProfilerReport-1626443528: InProgress\n...\n2021-07-16 13:53:07 Starting - Preparing the instances for training.........\n2021-07-16 13:54:33 Downloading - Downloading input data...\n2021-07-16 13:54:57 Training - Downloading the training image..\u001b[34m2021-07-16 13:55:19.238815: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34m2021-07-16 13:55:19.249118: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped.\u001b[0m\n\u001b[34m2021-07-16 13:55:19.630437: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34m2021-07-16 13:55:23,655 sagemaker-training-toolkit INFO Imported framework sagemaker_tensorflow_container.training\u001b[0m\n\u001b[34m2021-07-16 13:55:23,663 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m2021-07-16 13:55:24,157 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m2021-07-16 13:55:24,172 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m2021-07-16 13:55:24,187 sagemaker-training-toolkit INFO No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m2021-07-16 13:55:24,201 sagemaker-training-toolkit INFO Invoking user script\n\u001b[0m\n\u001b[34mTraining Env:\n\u001b[0m\n\u001b[34m{\n \"additional_framework_parameters\": {},\n \"channel_input_dirs\": {\n \"testing\": \"/opt/ml/input/data/testing\",\n \"training\": \"/opt/ml/input/data/training\"\n },\n \"current_host\": \"algo-1\",\n \"framework_module\": \"sagemaker_tensorflow_container.training:main\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"hyperparameters\": {\n \"batch-size\": 512,\n \"beta_1\": 0.9,\n \"beta_2\": 0.999,\n \"learning-rate\": 0.001,\n \"epochs\": 10\n },\n \"input_config_dir\": \"/opt/ml/input/config\",\n \"input_data_config\": {\n \"testing\": {\n \"TrainingInputMode\": \"File\",\n \"S3DistributionType\": \"FullyReplicated\",\n \"RecordWrapperType\": \"None\"\n },\n \"training\": {\n \"TrainingInputMode\": \"File\",\n \"S3DistributionType\": \"FullyReplicated\",\n \"RecordWrapperType\": \"None\"\n }\n },\n \"input_dir\": \"/opt/ml/input\",\n \"is_master\": true,\n \"job_name\": \"tensorflow-training-2021-07-16-13-52-08-478\",\n \"log_level\": 20,\n \"master_hostname\": \"algo-1\",\n \"model_dir\": \"/opt/ml/model\",\n \"module_dir\": \"s3://sagemaker-us-east-1-308961792850/tensorflow-training-2021-07-16-13-52-08-478/source/sourcedir.tar.gz\",\n \"module_name\": \"train\",\n \"network_interface_name\": \"eth0\",\n \"num_cpus\": 4,\n \"num_gpus\": 0,\n \"output_data_dir\": \"/opt/ml/output/data\",\n \"output_dir\": \"/opt/ml/output\",\n \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n \"resource_config\": {\n \"current_host\": \"algo-1\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"network_interface_name\": \"eth0\"\n },\n \"user_entry_point\": \"train.py\"\u001b[0m\n\u001b[34m}\n\u001b[0m\n\u001b[34mEnvironment 
variables:\n\u001b[0m\n\u001b[34mSM_HOSTS=[\"algo-1\"]\u001b[0m\n\u001b[34mSM_NETWORK_INTERFACE_NAME=eth0\u001b[0m\n\u001b[34mSM_HPS={\"batch-size\":512,\"beta_1\":0.9,\"beta_2\":0.999,\"epochs\":10,\"learning-rate\":0.001}\u001b[0m\n\u001b[34mSM_USER_ENTRY_POINT=train.py\u001b[0m\n\u001b[34mSM_FRAMEWORK_PARAMS={}\u001b[0m\n\u001b[34mSM_RESOURCE_CONFIG={\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"}\u001b[0m\n\u001b[34mSM_INPUT_DATA_CONFIG={\"testing\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"training\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}}\u001b[0m\n\u001b[34mSM_OUTPUT_DATA_DIR=/opt/ml/output/data\u001b[0m\n\u001b[34mSM_CHANNELS=[\"testing\",\"training\"]\u001b[0m\n\u001b[34mSM_CURRENT_HOST=algo-1\u001b[0m\n\u001b[34mSM_MODULE_NAME=train\u001b[0m\n\u001b[34mSM_LOG_LEVEL=20\u001b[0m\n\u001b[34mSM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main\u001b[0m\n\u001b[34mSM_INPUT_DIR=/opt/ml/input\u001b[0m\n\u001b[34mSM_INPUT_CONFIG_DIR=/opt/ml/input/config\u001b[0m\n\u001b[34mSM_OUTPUT_DIR=/opt/ml/output\u001b[0m\n\u001b[34mSM_NUM_CPUS=4\u001b[0m\n\u001b[34mSM_NUM_GPUS=0\u001b[0m\n\u001b[34mSM_MODEL_DIR=/opt/ml/model\u001b[0m\n\u001b[34mSM_MODULE_DIR=s3://sagemaker-us-east-1-308961792850/tensorflow-training-2021-07-16-13-52-08-478/source/sourcedir.tar.gz\u001b[0m\n\u001b[34mSM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"testing\":\"/opt/ml/input/data/testing\",\"training\":\"/opt/ml/input/data/training\"},\"current_host\":\"algo-1\",\"framework_module\":\"sagemaker_tensorflow_container.training:main\",\"hosts\":[\"algo-1\"],\"hyperparameters\":{\"batch-size\":512,\"beta_1\":0.9,\"beta_2\":0.999,\"epochs\":10,\"learning-rate\":0.001},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"testing\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"training\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"tensorflow-training-2021-07-16-13-52-08-478\",\"log_level\":20,\"master_hostname\":\"algo-1\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-308961792850/tensorflow-training-2021-07-16-13-52-08-478/source/sourcedir.tar.gz\",\"module_name\":\"train\",\"network_interface_name\":\"eth0\",\"num_cpus\":4,\"num_gpus\":0,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"},\"user_entry_point\":\"train.py\"}\u001b[0m\n\u001b[34mSM_USER_ARGS=[\"--batch-size\",\"512\",\"--beta_1\",\"0.9\",\"--beta_2\",\"0.999\",\"--epochs\",\"10\",\"--learning-rate\",\"0.001\"]\u001b[0m\n\u001b[34mSM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\u001b[0m\n\u001b[34mSM_CHANNEL_TESTING=/opt/ml/input/data/testing\u001b[0m\n\u001b[34mSM_CHANNEL_TRAINING=/opt/ml/input/data/training\u001b[0m\n\u001b[34mSM_HP_BATCH-SIZE=512\u001b[0m\n\u001b[34mSM_HP_BETA_1=0.9\u001b[0m\n\u001b[34mSM_HP_BETA_2=0.999\u001b[0m\n\u001b[34mSM_HP_LEARNING-RATE=0.001\u001b[0m\n\u001b[34mSM_HP_EPOCHS=10\u001b[0m\n\u001b[34mPYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/local/lib/python37.zip:/usr/local/lib/python3.7
:/usr/local/lib/python3.7/lib-dynload:/usr/local/lib/python3.7/site-packages\n\u001b[0m\n\u001b[34mInvoking script with the following command:\n\u001b[0m\n\u001b[34m/usr/local/bin/python3.7 train.py --batch-size 512 --beta_1 0.9 --beta_2 0.999 --epochs 10 --learning-rate 0.001\n\n\u001b[0m\n\u001b[34mTraining starts ...\u001b[0m\n\n2021-07-16 13:55:33 Training - Training image download completed. Training in progress.\u001b[34m[2021-07-16 13:55:27.833 ip-10-2-196-137.ec2.internal:24 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None\u001b[0m\n\u001b[34m[2021-07-16 13:55:28.122 ip-10-2-196-137.ec2.internal:24 INFO profiler_config_parser.py:102] User has disabled profiler.\u001b[0m\n\u001b[34m[2021-07-16 13:55:28.123 ip-10-2-196-137.ec2.internal:24 INFO json_config.py:91] Creating hook from json_config at /opt/ml/input/config/debughookconfig.json.\u001b[0m\n\u001b[34m[2021-07-16 13:55:28.124 ip-10-2-196-137.ec2.internal:24 INFO hook.py:199] tensorboard_dir has not been set for the hook. SMDebug will not be exporting tensorboard summaries.\u001b[0m\n\u001b[34m[2021-07-16 13:55:28.124 ip-10-2-196-137.ec2.internal:24 INFO hook.py:253] Saving to /opt/ml/output/tensors\u001b[0m\n\u001b[34m[2021-07-16 13:55:28.124 ip-10-2-196-137.ec2.internal:24 INFO state_store.py:75] The checkpoint config file /opt/ml/input/config/checkpointconfig.json does not exist.\u001b[0m\n\u001b[34m[2021-07-16 13:55:28.125 ip-10-2-196-137.ec2.internal:24 INFO hook.py:413] Monitoring the collections: metrics, sm_metrics, losses\u001b[0m\n\u001b[34mEpoch 1, Loss: 0.27551141381263733, Accuracy: 91.52999877929688, Test Loss: 0.11824200302362442, Test Accuracy: 96.58000183105469\u001b[0m\n\u001b[34mEpoch 2, Loss: 0.07692711800336838, Accuracy: 97.78333282470703, Test Loss: 0.07241345942020416, Test Accuracy: 97.64999389648438\u001b[0m\n\u001b[34mEpoch 3, Loss: 0.042056649923324585, Accuracy: 98.75166320800781, Test Loss: 0.05691447854042053, Test Accuracy: 98.11000061035156\u001b[0m\n\u001b[34mEpoch 4, Loss: 0.02515381947159767, Accuracy: 99.28333282470703, Test Loss: 0.05645830184221268, Test Accuracy: 98.1500015258789\u001b[0m\n\u001b[34mEpoch 5, Loss: 0.017273804172873497, Accuracy: 99.53166961669922, Test Loss: 0.05516675114631653, Test Accuracy: 98.33999633789062\u001b[0m\n\u001b[34mEpoch 6, Loss: 0.012449586763978004, Accuracy: 99.65499877929688, Test Loss: 0.06764646619558334, Test Accuracy: 98.12999725341797\u001b[0m\n\u001b[34mEpoch 7, Loss: 0.009526542387902737, Accuracy: 99.71666717529297, Test Loss: 0.05913087725639343, Test Accuracy: 98.29999542236328\u001b[0m\n\u001b[34mEpoch 8, Loss: 0.004901242908090353, Accuracy: 99.90333557128906, Test Loss: 0.05749906972050667, Test Accuracy: 98.36000061035156\u001b[0m\n\u001b[34mEpoch 9, Loss: 0.002548153977841139, Accuracy: 99.96833038330078, Test Loss: 0.056115925312042236, Test Accuracy: 98.5\u001b[0m\n\n2021-07-16 13:58:12 Uploading - Uploading generated training model\u001b[34mEpoch 10, Loss: 0.0011424849508330226, Accuracy: 99.99666595458984, Test Loss: 0.0559561550617218, Test Accuracy: 98.43000030517578\u001b[0m\n\u001b[34m2021-07-16 13:55:24.543481: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34m2021-07-16 13:55:24.543634: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. 
The timeline writer thread will not be started, future recorded events will be dropped.\u001b[0m\n\u001b[34m2021-07-16 13:55:24.571841: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34mWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\u001b[0m\n\u001b[34mInstructions for updating:\u001b[0m\n\u001b[34mThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\u001b[0m\n\u001b[34mWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\u001b[0m\n\u001b[34mInstructions for updating:\u001b[0m\n\u001b[34mThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\u001b[0m\n\u001b[34mWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\u001b[0m\n\u001b[34mInstructions for updating:\u001b[0m\n\u001b[34mThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\u001b[0m\n\u001b[34mWARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/training/tracking/tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\u001b[0m\n\u001b[34mInstructions for updating:\u001b[0m\n\u001b[34mThis property should not be used in TensorFlow 2.0, as updates are applied automatically.\u001b[0m\n\u001b[34m2021-07-16 13:58:09.140695: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\u001b[0m\n\u001b[34mINFO:tensorflow:Assets written to: /opt/ml/model/00000000/assets\u001b[0m\n\u001b[34mINFO:tensorflow:Assets written to: /opt/ml/model/00000000/assets\n\u001b[0m\n\u001b[34m2021-07-16 13:58:09,801 sagemaker-training-toolkit INFO Reporting training SUCCESS\u001b[0m\n\n2021-07-16 13:58:34 Completed - Training job completed\nProfilerReport-1626443528: NoIssuesFound\nTraining seconds: 229\nBillable seconds: 229\n" ] ], [ [ "## 저장된 모델 데이터 확인 \n\n이제 교육이 완료되면 모델 아티팩트가 `output_path`에 저장됩니다.", "_____no_output_____" ] ], [ [ "tf_mnist_model_data = est.model_data\nprint(\"Model artifact saved at:\\n\", tf_mnist_model_data)", "_____no_output_____" ] ], [ [ "\n이제 현재 노트북 커널에 변수 `model_data`를 저장합니다. 다음 노트북에서 모델 아티팩트를 검색하고 SageMaker 엔드 포인트에 배포하는 방법을 배우게됩니다.\n", "_____no_output_____" ] ], [ [ "%store tf_mnist_model_data", "Stored 'tf_mnist_model_data' (str)\n" ] ], [ [ "## 학습컨테이너에서 실행하기 전에 스크립트를 테스트하고 디버깅하기 \n\n앞서 사용한 `train.py`는 테스트가 완료된 코드이며, 바로 학습 컨테이너에서 실행할 수 있습니다. 하지만 해당 스크립트를 개발할 때에는, SageMaker로 보내기 전에 로컬 환경에서 컨테이너 환경을 시뮬레이션하고 테스트해야할 수 있습니다. 
컨테이너 환경에서 테스트와 디버깅을 하는 것이 번거롭다면 다음과 같은 코드를 참조하여 활용할 수 있습니다.", "_____no_output_____" ] ], [ [ "!pygmentize code/test_train.py", "\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mtrain\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m train, parse_args\n\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msys\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mos\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mboto3\u001b[39;49;00m\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mjson\u001b[39;49;00m\n\ndirname = os.path.dirname(os.path.abspath(\u001b[31m__file__\u001b[39;49;00m))\n\n\u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(os.path.join(dirname, \u001b[33m\"\u001b[39;49;00m\u001b[33mconfig.json\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m), \u001b[33m\"\u001b[39;49;00m\u001b[33mr\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n CONFIG = json.load(f)\n \n\u001b[34mdef\u001b[39;49;00m \u001b[32mdownload_from_s3\u001b[39;49;00m(data_dir=\u001b[33m'\u001b[39;49;00m\u001b[33m/tmp/data\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, train=\u001b[34mTrue\u001b[39;49;00m):\n \u001b[33m\"\"\"Download MNIST dataset and convert it to numpy array\u001b[39;49;00m\n\u001b[33m Args:\u001b[39;49;00m\n\u001b[33m data_dir (str): directory to save the data\u001b[39;49;00m\n\u001b[33m train (bool): download training set\u001b[39;49;00m\n\u001b[33m Returns:\u001b[39;49;00m\n\u001b[33m tuple of images and labels as numpy arrays\u001b[39;49;00m\n\u001b[33m \"\"\"\u001b[39;49;00m\n \n \u001b[34mif\u001b[39;49;00m \u001b[35mnot\u001b[39;49;00m os.path.exists(data_dir):\n os.makedirs(data_dir)\n \n \u001b[34mif\u001b[39;49;00m train:\n images_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mtrain-images-idx3-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n labels_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mtrain-labels-idx1-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n \u001b[34melse\u001b[39;49;00m:\n images_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mt10k-images-idx3-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n labels_file = \u001b[33m\"\u001b[39;49;00m\u001b[33mt10k-labels-idx1-ubyte.gz\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n\n \u001b[37m# download objects\u001b[39;49;00m\n s3 = boto3.client(\u001b[33m'\u001b[39;49;00m\u001b[33ms3\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n bucket = CONFIG[\u001b[33m\"\u001b[39;49;00m\u001b[33mpublic_bucket\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m]\n \u001b[34mfor\u001b[39;49;00m obj \u001b[35min\u001b[39;49;00m [images_file, labels_file]:\n key = os.path.join(\u001b[33m\"\u001b[39;49;00m\u001b[33mdatasets/image/MNIST\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, obj)\n dest = os.path.join(data_dir, obj)\n \u001b[34mif\u001b[39;49;00m \u001b[35mnot\u001b[39;49;00m os.path.exists(dest):\n s3.download_file(bucket, key, dest)\n \u001b[34mreturn\u001b[39;49;00m\n\n\u001b[34mclass\u001b[39;49;00m \u001b[04m\u001b[32mEnv\u001b[39;49;00m:\n \u001b[34mdef\u001b[39;49;00m \u001b[32m__init__\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m): \n \u001b[37m# simulate container env\u001b[39;49;00m\n os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_MODEL_DIR\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m] = \u001b[33m\"\u001b[39;49;00m\u001b[33m/tmp/tf/model\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n 
os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_CHANNEL_TRAINING\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m]=\u001b[33m\"\u001b[39;49;00m\u001b[33m/tmp/data\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_CHANNEL_TESTING\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m]=\u001b[33m\"\u001b[39;49;00m\u001b[33m/tmp/data\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_HOSTS\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m] = \u001b[33m'\u001b[39;49;00m\u001b[33m[\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33malgo-1\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33m]\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m\n os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_CURRENT_HOST\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m]=\u001b[33m\"\u001b[39;49;00m\u001b[33malgo-1\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_NUM_GPUS\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m] = \u001b[33m\"\u001b[39;49;00m\u001b[33m0\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\n \n \n\u001b[34mif\u001b[39;49;00m \u001b[31m__name__\u001b[39;49;00m==\u001b[33m'\u001b[39;49;00m\u001b[33m__main__\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n Env()\n download_from_s3()\n download_from_s3(train=\u001b[34mFalse\u001b[39;49;00m)\n args = parse_args()\n train(args)\n" ] ], [ [ "In [the next notebook](get_started_mnist_deploy.ipynb) you will see how to deploy your \ntrained model artifacts to a SageMaker endpoint. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a40fa9ecb4494b1d58b6d196a8deca8c2bb3109
1,819
ipynb
Jupyter Notebook
Untitled1.ipynb
iamaboy04/lalacsc102
e0fe42828cec81a0e2ba83cec0892f6ad788fd1b
[ "MIT" ]
null
null
null
Untitled1.ipynb
iamaboy04/lalacsc102
e0fe42828cec81a0e2ba83cec0892f6ad788fd1b
[ "MIT" ]
null
null
null
Untitled1.ipynb
iamaboy04/lalacsc102
e0fe42828cec81a0e2ba83cec0892f6ad788fd1b
[ "MIT" ]
null
null
null
19.55914
50
0.406817
[ [ [ "T=[[11,3,45,6],[54,3,78,2],[45,23,4,2]]\nT.insert(2,[5,2])\nfor r in T:\n for d in r:\n print (d, end=\" \")\n print()", "11 3 45 6 \n54 3 78 2 \n5 2 \n45 23 4 2 \n" ], [ "from numpy import*\nm=array([['Mon',4,6,8,2],['Tue',4,6,8,2],\n ['Wed',4,6,8,2],['Thr',4,6,8,2],\n ['Fri',4,6,8,2],['Sat',4,6,8,2],\n ['Sun',4,6,8,2]])\nm=delete(m,[2],0)\nprint(m)", "[['Mon' '4' '6' '8' '2']\n ['Tue' '4' '6' '8' '2']\n ['Thr' '4' '6' '8' '2']\n ['Fri' '4' '6' '8' '2']\n ['Sat' '4' '6' '8' '2']\n ['Sun' '4' '6' '8' '2']]\n" ] ], [ [ "# Matplotlib & Data Visualizations", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ] ]
4a410a92a9c7595507ab194510a334ee3c559ee5
33,218
ipynb
Jupyter Notebook
starter_code/VacationPy.ipynb
KenyanBoy/Python-API-Challenge
0c7baa82899aa2f60cbe94a830378c6e447314dc
[ "ADSL" ]
null
null
null
starter_code/VacationPy.ipynb
KenyanBoy/Python-API-Challenge
0c7baa82899aa2f60cbe94a830378c6e447314dc
[ "ADSL" ]
null
null
null
starter_code/VacationPy.ipynb
KenyanBoy/Python-API-Challenge
0c7baa82899aa2f60cbe94a830378c6e447314dc
[ "ADSL" ]
null
null
null
32.98709
232
0.352791
[ [ [ "# VacationPy\n----\n\n#### Note\n* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.\n\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n# Import API key\nfrom api_keys import g_key", "_____no_output_____" ] ], [ [ "### Store Part I results into DataFrame\n* Load the csv exported in Part I to a DataFrame", "_____no_output_____" ] ], [ [ "weather_data = pd.read_csv(\"../output_data/weather_data.csv\")\nweather_data\n", "_____no_output_____" ] ], [ [ "### Humidity Heatmap\n* Configure gmaps.\n* Use the Lat and Lng as locations and Humidity as the weight.\n* Add Heatmap layer to map.", "_____no_output_____" ] ], [ [ "\n# Configure gmaps\ngmaps.configure(api_key = g_key)\n\n# Store Humidity as weight, find maxiumum Humidity\nhumidity = weather_data[\"Humidity (%)\"]\nhumidity_max = float(humidity.max())\n\n# Store Latitude and Longitude as locations\nlocations = weather_data[[\"Latitude\", \"Longitude\"]]", "_____no_output_____" ], [ "# Plot Heatmap\nfig = gmaps.figure()\n\n# Create Heat layer\nheat_layer = gmaps.heatmap_layer(locations, weights = humidity, dissipating = False, max_intensity = humidity_max, point_radius = 3)\n\n# Add Heat layer and display\nfig.add_layer(heat_layer)\nfig", "_____no_output_____" ] ], [ [ "### Create new DataFrame fitting weather criteria\n* Narrow down the cities to fit weather conditions.\n* Drop any rows will null values.", "_____no_output_____" ] ], [ [ "# Find cities with max temperature between 21°C and 27°C, wind speed less than 5 m/s and 0 cloudiness\nnarrowed_weather_data = weather_data.loc[(weather_data[\"Max Temperature (°C)\"] > 21) & (weather_data[\"Max Temperature (°C)\"] < 27) & (weather_data[\"Cloudiness (%)\"] == 0) & (weather_data[\"Wind Speed (m/s)\"] < 5), :]\n\n# Drop any rows with null values\nnarrowed_weather_data = narrowed_weather_data.dropna(how='any')\nnarrowed_weather_data.reset_index(inplace=True)\ndel narrowed_weather_data['index']\n\n# Display narrowed down cities\nnarrowed_weather_data", "_____no_output_____" ] ], [ [ "### Hotel Map\n* Store into variable named `hotel_df`.\n* Add a \"Hotel Name\" column to the DataFrame.\n* Set parameters to search for hotels with 5000 meters.\n* Hit the Google Places API for each city's coordinates.\n* Store the first Hotel result into the DataFrame.\n* Plot markers on top of the heatmap.", "_____no_output_____" ] ], [ [ "\n# Store into hotel_df variable\nhotel_df = narrowed_weather_data\n\n# Add Hotel Name column\nhotel_df[\"Hotel Name\"] = \" \"\n\n# Display new DataFrame\nhotel_df", "_____no_output_____" ], [ "# Empty list to hold hotel names\nhotel_ls = []\n\n# Construct URL\nbase_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\n# Set parameters\nparams = {\"type\": \"hotel\",\n \"keyword\": \"hotel\",\n \"radius\": 5000,\n \"key\" : g_key}\n\n# Iterate through cities in hotel_df\nfor index, row in hotel_df.iterrows():\n \n # Find latitude and longitude for each city in DataFrame\n lat = row[\"Latitude\"]\n lng = row[\"Longitude\"]\n \n # Add location to parameters\n params[\"location\"] = f\"{lat},{lng}\"\n \n # Construct URL and make API request\n response = requests.get(base_url, 
params = params).json()\n    \n    try:\n        hotel_ls.append(response[\"results\"][0][\"name\"])\n    \n    except:\n        hotel_ls.append(\"NaN\")\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\nhotel_df[\"Hotel Name\"] = hotel_ls\nhotel_df", "_____no_output_____" ], [ "# NOTE: Do not change any of the code in this cell\n\n# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel Name}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\n# NOTE: be sure to update with your DataFrame name\nhotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]\nlocations = hotel_df[[\"Latitude\", \"Longitude\"]]", "_____no_output_____" ], [ "\n# Add marker layer on top of heat map\nmarkers = gmaps.marker_layer(locations, info_box_content = hotel_info)\nfig.add_layer(markers)\n\n# Display figure\nfig", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a4110bc94385c47e4056322827a706ecf1dfa05
140,139
ipynb
Jupyter Notebook
loading weather data.ipynb
Zumrannain/Python-Data
206feef7bc920657f2c853d349d1dd1aae234962
[ "MIT" ]
null
null
null
loading weather data.ipynb
Zumrannain/Python-Data
206feef7bc920657f2c853d349d1dd1aae234962
[ "MIT" ]
null
null
null
loading weather data.ipynb
Zumrannain/Python-Data
206feef7bc920657f2c853d349d1dd1aae234962
[ "MIT" ]
null
null
null
214.279817
42,536
0.861473
[ [ [ "import math\nimport collections\nimport urllib\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pp\n\n%matplotlib inline", "_____no_output_____" ], [ "import getweather", "_____no_output_____" ], [ "pasadena = getweather.getyear('PASADENA', {'TMIN', \"TMAX\"}, 2001)", "Using ('USC00046719', 34.1483, -118.1447, 263.3, 'CA', 'PASADENA', '', 'HCN', '').\nDownloading https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/all/USC00046719.dly...\n" ], [ "np.mean(pasadena['TMIN']), np.min(pasadena['TMIN']), np.max(pasadena['TMIN'])", "_____no_output_____" ], [ "pasadena['TMIN']", "_____no_output_____" ], [ "np.nan + 1\n#cant mondify nan by adding 1", "_____no_output_____" ], [ "np.isnan(pasadena['TMIN'])", "_____no_output_____" ], [ "False + True + True\n#this gives us 2", "_____no_output_____" ], [ "np.sum(np.isnan(pasadena['TMIN']))", "_____no_output_____" ], [ "np.nanmin(pasadena['TMIN']), np.nanmax(pasadena['TMAX'])", "_____no_output_____" ], [ "pasadena['TMIN'][np.isnan(pasadena['TMIN'])] = np.nanmean(pasadena['TMIN'])\npasadena['TMAX'][np.isnan(pasadena['TMAX'])] = np.nanmean(pasadena['TMAX'])\n", "_____no_output_____" ], [ "pasadena['TMIN']", "_____no_output_____" ], [ "pp.plot(pasadena['TMIN'])", "_____no_output_____" ], [ "xdata = np.array([0,1,4,5,7,8], 'd')\nydata = np.array([10,5,2,7,7.5,10], 'd')\npp.plot(xdata, ydata, '--o')", "_____no_output_____" ], [ "xnew = np.linspace(0,8,9)\n#9 points between 0 and 8\nynew = np.interp(xnew, xdata, ydata)\n\npp.plot(xdata, ydata, '--o', ms = 10)\npp.plot(xnew, ynew, 's')", "_____no_output_____" ], [ "xnew = xnew = np.linspace(0,8,30)\nynew = np.interp(xnew, xdata, ydata)\npp.plot(xdata, ydata, '--o', ms = 10)\npp.plot(xnew, ynew, 's')", "_____no_output_____" ], [ "pasadena = getweather.getyear('PASADENA', {'TMIN', 'TMAX'}, 2001)", "_____no_output_____" ], [ "good = ~np.isnan(pasadena['TMIN'])\nx = np.arange(0, 365)\n\nnp.interp(x, x[good], pasadena['TMIN'][good])", "_____no_output_____" ], [ "def filenans(array):\n good = ~np.isnan(array)\n x = np.arange(len(array))\n return np.interp(x, x[good], array[good])", "_____no_output_____" ], [ "pp.plot(filenans(pasadena['TMIN']))\npp.plot(filenans(pasadena['TMAX']))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a412daebd6ded800681bb9fba17ca1e95012f43
15,418
ipynb
Jupyter Notebook
tutorials/Certification_Trainings/Healthcare/21.Gender_Classifier.ipynb
iamvarol/spark-nlp-workshop
73a9064bd47d4dc0692f0297748eb43cd094aabd
[ "Apache-2.0" ]
null
null
null
tutorials/Certification_Trainings/Healthcare/21.Gender_Classifier.ipynb
iamvarol/spark-nlp-workshop
73a9064bd47d4dc0692f0297748eb43cd094aabd
[ "Apache-2.0" ]
null
null
null
tutorials/Certification_Trainings/Healthcare/21.Gender_Classifier.ipynb
iamvarol/spark-nlp-workshop
73a9064bd47d4dc0692f0297748eb43cd094aabd
[ "Apache-2.0" ]
null
null
null
15,418
15,418
0.692048
[ [ [ "![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)", "_____no_output_____" ], [ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/21.Gender_Classifier.ipynb)", "_____no_output_____" ], [ "# 21. Gender Classifier ", "_____no_output_____" ], [ "**Gender Classifier** detects the gender of the patient in the clinical document. \nIt can classify the documents into `Female`, `Male` and `Unknown`.\n\n\n-'**Classifierdl_gender_sbert**' (works with licensed `sbiobert_base_cased_mli`)\n\nIt has been trained on more than four thousands clinical documents (radiology reports, pathology reports, clinical visits etc.) which were annotated internally.", "_____no_output_____" ], [ "## Colab Setup", "_____no_output_____" ] ], [ [ "import json, os\nfrom google.colab import files\n\nif 'spark_jsl.json' not in os.listdir():\n license_keys = files.upload()\n os.rename(list(license_keys.keys())[0], 'spark_jsl.json')\n\nwith open('spark_jsl.json') as f:\n license_keys = json.load(f)\n\n# Defining license key-value pairs as local variables\nlocals().update(license_keys)\nos.environ.update(license_keys)", "_____no_output_____" ], [ "# Installing pyspark and spark-nlp\n! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION\n\n# Installing Spark NLP Healthcare\n! pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET", "_____no_output_____" ], [ "import json\nimport os\n\nimport sparknlp_jsl\nimport sparknlp\n\nfrom pyspark.ml import Pipeline,PipelineModel\nfrom pyspark.sql import SparkSession\n\nfrom sparknlp.annotator import *\nfrom sparknlp_jsl.annotator import *\nfrom sparknlp.base import *\n\nparams = {\"spark.driver.memory\":\"16G\",\n \"spark.kryoserializer.buffer.max\":\"2000M\",\n \"spark.driver.maxResultSize\":\"2000M\"}\n\nspark = sparknlp_jsl.start(license_keys['SECRET'],params=params)\n\nprint(\"Spark NLP Version :\", sparknlp.version())\nprint(\"Spark NLP_JSL Version :\", sparknlp_jsl.version())\n\nspark", "Spark NLP Version : 3.4.2\nSpark NLP_JSL Version : 3.5.0\n" ], [ "# if you want to start the session with custom params as in start function above\ndef start(secret):\n builder = SparkSession.builder \\\n .appName(\"Spark NLP Licensed\") \\\n .master(\"local[*]\") \\\n .config(\"spark.driver.memory\", \"16G\") \\\n .config(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\") \\\n .config(\"spark.kryoserializer.buffer.max\", \"2000M\") \\\n .config(\"spark.jars.packages\", \"com.johnsnowlabs.nlp:spark-nlp_2.11:\"+version) \\\n .config(\"spark.jars\", \"https://pypi.johnsnowlabs.com/\"+secret+\"/spark-nlp-jsl-\"+jsl_version+\".jar\")\n \n return builder.getOrCreate()\n\n#spark = start(secret)", "_____no_output_____" ] ], [ [ "\n\n# Gender Classifier Pipeline with **sbert**", "_____no_output_____" ] ], [ [ "document = DocumentAssembler()\\\n .setInputCol(\"text\")\\\n .setOutputCol(\"document\")\n\nsbert_embedder = BertSentenceEmbeddings().pretrained(\"sbiobert_base_cased_mli\", 'en', 'clinical/models')\\\n .setInputCols([\"document\"])\\\n .setOutputCol(\"sentence_embeddings\")\\\n .setMaxSentenceLength(512)\n\ngender_classifier = ClassifierDLModel.pretrained( 'classifierdl_gender_sbert', 'en', 'clinical/models') \\\n .setInputCols([\"document\", \"sentence_embeddings\"]) \\\n .setOutputCol(\"class\") \n\ngender_pred_pipeline_sbert = 
Pipeline(stages = [ \n document, \n sbert_embedder, \n gender_classifier \n ])\n\nempty_data = spark.createDataFrame([[\"\"]]).toDF(\"text\")\n\nmodel_sbert = gender_pred_pipeline_sbert.fit(empty_data)\n", "sbiobert_base_cased_mli download started this may take some time.\nApproximate size to download 384.3 MB\n[OK!]\nclassifierdl_gender_sbert download started this may take some time.\nApproximate size to download 22.2 MB\n[OK!]\n" ], [ "text =\"\"\"social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.family history: shows a family history of breast cancer.\"\"\"\n\ngender_pipeline_sbert = LightPipeline(model_sbert)\n\nresult = gender_pipeline_sbert.annotate(text)\n\nresult['class'][0]\n", "_____no_output_____" ] ], [ [ "### Sample Clinical Notes", "_____no_output_____" ] ], [ [ "text1 = '''social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.\nfamily history: shows a family history of breast cancer.'''\n\nresult = gender_pipeline_sbert.annotate(text1)\n\nresult['class'][0]", "_____no_output_____" ], [ "text2 = '''The patient is a 48- year-old, with severe mitral stenosis diagnosed by echocardiography, moderate\n aortic insufficiency and moderate to severe pulmonary hypertension who is being evaluated as a part of a preoperative \n workup for mitral and possible aortic valve repair or replacement.'''\n\nresult = gender_pipeline_sbert.annotate(text2)\n\nresult['class'][0]", "_____no_output_____" ], [ "text3 = '''HISTORY: The patient is a 57-year-old XX, who I initially saw in the office on 12/27/07, as a referral from the Tomball Breast Center.\nOn 12/21/07, the patient underwent image-guided needle core biopsy of a 1.5 cm lesion at the 7 o'clock position of the left breast (inferomedial). 
\nThe biopsy returned showing infiltrating ductal carcinoma high histologic grade.\nThe patient stated that xx had recently felt and her physician had felt a palpable mass in that area prior to her breast imaging.'''\n\nresult = gender_pipeline_sbert.annotate(text3)\n\nresult['class'][0]", "_____no_output_____" ], [ "text4 = '''The patient states that xx has been overweight for approximately 35 years and has tried multiple weight loss modalities in \nthe past including Weight Watchers, NutriSystem, Jenny Craig, TOPS, cabbage diet, grape fruit diet, Slim-Fast, Richard Simmons,\nas well as over-the-counter measures without any long-term sustainable weight loss.\nAt the time of presentation to the practice, xx is 5 feet 6 inches tall with a weight of 285.4 pounds and a body mass index of 46.\nxx has obesity-related comorbidities, which includes hypertension and hypercholesterolemia.'''\n\nresult = gender_pipeline_sbert.annotate(text4)\n\nresult['class'][0]", "_____no_output_____" ], [ "text5 = '''Prostate gland showing moderately differentiated infiltrating adenocarcinoma, \nGleason 3 + 2 extending to the apex involving both lobes of the prostate, mainly right.'''\n\nresult = gender_pipeline_sbert.annotate(text5)\n\nresult['class'][0]", "_____no_output_____" ], [ "text6 = '''SKIN: The patient has significant subcutaneous emphysema of the upper chest and \nanterior neck area although he states that the subcutaneous emphysema has improved significantly since yesterday.'''\n\nresult = gender_pipeline_sbert.annotate(text6)\n\nresult['class'][0]", "_____no_output_____" ], [ "text7 = '''INDICATION: The patient is a 42-year-old XX who is five days out from transanal excision of a benign anterior base lesion.\nxx presents today with diarrhea and bleeding. Digital exam reveals bright red blood on the finger.\nxx is for exam under anesthesia and control of hemorrhage at this time.\n'''\nresult = gender_pipeline_sbert.annotate(text7)\n\nresult['class'][0]", "_____no_output_____" ], [ "text8 = '''INDICATION: ___ year old patient with complicated medical history of paraplegia\nand chronic indwelling foley, recurrent MDR UTIs, hx Gallbladder fossa\nabscess,type 2 DM, HTN, CAD, DVT s/p left AKA complicated complicated by\nrespiratory failure requiring tracheostomy and PEG placement, right ischium\nosteomyelitis due to chronic pressure ulcers with acute shortness of breath...'''\n\nresult = gender_pipeline_sbert.annotate(text8)\n\nresult['class'][0]\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a413624afd15afb707fcd6f805ad0cc8ed6df91
8,227
ipynb
Jupyter Notebook
chap7.ipynb
dominhhai/rl-intro
9967ac93af34d6104ea0e0a2abb1127f1426a156
[ "MIT" ]
2
2018-07-26T07:46:09.000Z
2018-08-02T12:51:40.000Z
chap7.ipynb
dominhhai/rl-intro
9967ac93af34d6104ea0e0a2abb1127f1426a156
[ "MIT" ]
null
null
null
chap7.ipynb
dominhhai/rl-intro
9967ac93af34d6104ea0e0a2abb1127f1426a156
[ "MIT" ]
2
2018-08-28T04:16:59.000Z
2018-11-16T14:29:09.000Z
38.990521
194
0.486933
[ [ [ "# Chapter 7: n-step Bootstrapping", "_____no_output_____" ], [ "## 1. n-step TD Prediction\n- Generalize one-step TD(0) method\n- Temporal difference extends over n-steps\n\n![n-step methods](assets/7.1.n-step.png)\n\n- Want to update estimated value $v_\\pi(S_t)$ of state $S_t$ from:\n$$S_t,R_{t+1},S_{t+1},R_{t+1},...,R_T,S_T$$\n\n - for *MC*, target is complete return\n $$G_t = R_{t+1}+\\gamma R_{t+3}+\\gamma^2R_{t+3}+...+\\gamma^{T-t-1}R_T$$\n - for *TD*, one-step method\n $$G_{t:t+1} = R_{t+1}+\\gamma V_t(S_{t+1})$$\n - for *two-step TD*, one-step method\n $$G_{t:t+2} = R_{t+1}+\\gamma R_{t+2}+\\gamma^2V_{t+1}(S_{t+2})$$\n - for *n-step TD*, one-step method with $n\\ge 1, 0\\le t<T-n$\n $$\n \\begin{cases}\n G_{t:t+n} &= R_{t+1}+\\gamma R_{t+2}+...+\\gamma^{n-1}R_{t+n}+\\gamma^nV_{t+n-1}(S_{t+n})\n \\\\G_{t:t+n} &= G_t ~~~,\\text{if } t+n\\ge T\n \\end{cases}\n $$\n\n\n- Wait for $R_{t+n}, V_{t+n-1}$, until time $t+n$, then update estimate values:\n$$V_{t+n}(S_t) = V_{t+n-1}(S_t)+\\alpha\\big[G_{t:t+n}-V_{t+n-1}(S_t)\\big] ~~~, 0\\le t<T$$\n - all other states remain unchanged: $V_{t+n}(s)=V_{t+n-1}(s), \\forall s\\neq S_t$\n\n![n-step methods](assets/7.1.n-step-td.png)\n\n\n- **Error Reduction Property** of n-step returns:\n$$\\max_s\\big| E_\\pi[G_{t:t+n} | S_t=s]-v_\\pi(s)\\big| \\le \\gamma^n\\max_s\\big| V_{t+n-1}(s)-v_\\pi(s)\\big|, \\forall n\\ge 1$$\n- Can show formally that n-step TD methods converge to the correct predictions", "_____no_output_____" ], [ "## 2. n-step Sarsa\n- Switch states for actions (state-action pairs) and then use an ε-greedy policy\n\n![n-step SARSA](assets/7.2.n-step-sarsa.png)\n\n- n-step returns for action-value:\n$$G_{t:t+n}=R_{t+1}+\\gamma R_{t+2}+...+\\gamma^{n-1}R_{t+n}+\\gamma^nQ_{t+n-1}(S_{t+n},A_{t+n})~~~, n\\ge 1, 0\\le t<T-n$$\n with $G_{t:t+n}=G_t \\text{ if }t+n\\ge T$\n\n\n- **n-step Sarsa**:\n $$Q_{t+n}(S_t,A_t)=Q_{t+n-1}(S_t,A_t)+\\alpha\\big[G_{t:t+n}-Q_{t+n-1}(S_t,A_t)\\big]~~~,0\\le t<T$$\n \n \n- **n-step Expected Sarsa**:\n$$G_{t:t+n}=R_{t+1}+\\gamma R_{t+2}+...+\\gamma^{n-1}R_{t+n}+\\gamma^n\\overline V_{t+n-1}(S_{t+n},A_{t+n})~~~, t+n<T$$\n - where, *expected approximate value* of state $s$:\n $$\\overline V_t(s)=\\sum_a\\pi(a | s)Q_t(s,a) ~~~, \\forall s\\in\\mathcal S$$\n - if $s$ is terminal, then $\\overline V(s)=0$\n \n![n-step SARSA Pseudocode](assets/7.2.n-step-sarsa-pseudocode.png)", "_____no_output_____" ], [ "## 3. n-step Off-policy Learning\n- Use relative probability of just n actions:\n$$\\rho_{t:h}=\\prod_{k=t}^{\\min(h,T-1)}\\frac{\\pi(A_k | S_k)}{b(A_k | S_k)}$$\n\n- n-step TD:\n$$V_{t+n}(S_t)=V_{t+n-1}(S_t)+\\alpha\\color{blue}{\\rho_{t:t+n-1}}\\big[G_{t:t+n}-V_{t+n-1}(S_t)\\big]~~~,0\\le t<T$$\n\n- n-step Sarsa:\n$$Q_{t+n}(S_t,A_t)=V_{t+n-1}(S_t,A_t)+\\alpha\\color{blue}{\\rho_{t+1:t+n}}\\big[G_{t:t+n}-Q_{t+n-1}(S_t,A_1)\\big]~~~,0\\le t<T$$\n\n- n-step Expected Sarsa:\n$$Q_{t+n}(S_t,A_t)=V_{t+n-1}(S_t,A_t)+\\alpha\\color{blue}{\\rho_{t+1:t+n-1}}\\big[G_{t:t+n}-Q_{t+n-1}(S_t,A_1)\\big]~~~,0\\le t<T$$\n\n![n-step Off-policy](assets/7.3.n-step-off-policy.png)", "_____no_output_____" ], [ "## 4. 
Per-decision Methods with Control Variates\n- add *control variate* to **off-policy** of n-step return to reduce variance\n$$G_{t:h}=\\rho_t(R_{t+1}+\\gamma G_{t+1:h})+(1-\\rho_t)V_{h-1}(S_t) ~~~,t<h<T$$\n where, $G_{h:h}=V_{h-1}(S_h)$\n\n- if $\\rho_t=0$, then the target does not change\n- Includes on-policy when $\\rho_t=1$\n- for action values, the first action does not play a role in the importance sampling\n$$\n\\begin{aligned}\nG_{t:h} &= R_{t+1}+\\gamma\\big(\\rho_{t+1}G_{t+1:h}+\\overline V_{h-1}(S_{t+1})-\\rho_{t+1}Q_{h-1}(S_{t+1},A_{t+1})\\big)\n\\\\ &= R_{t+1}+\\gamma\\rho_{t+1}\\big(G_{t+1:h}-Q_{h-1}(S_{t+1},A_{t+1})\\big)+\\gamma\\overline V_{h-1}(S_{t+1})\n\\end{aligned}\n$$\n where, $t<h\\le T$, if $h<T$, then $G_{h:h}=Q_{h-1}(S_h,A_h)$, else $G_{T-1:h}=R_T$", "_____no_output_____" ], [ "## 5. Off-policy Learning Without Importance Sampling: The n-step Tree Backup Algorithm\n- Use **left nodes** to estimate action-values\n\n\n![example](assets/7.5.off-policy-wto-weight.png)\n\n\n- one-step return is them same as Expected Sarsa for $t<T-1$:\n$$G_{t:t+1}=R_{t+1}+\\gamma\\sum_a\\pi(a | S_{t+1})Q_t(S_{t+1},a)$$\n- two-step tree-backup for $t<T-2$:\n$$\n\\begin{aligned}\nG_{t:t+1} &= R_{t+1}+\\gamma\\sum_{a\\neq A_{t+1}}\\pi(a | S_{t+1})Q_{t+1}(S_{t+1},a)\n\\\\ & ~~~ +\\gamma\\pi(A_{t+1} | S_{t+1})\\big(R_{t+1:t+2}\\gamma\\sum_{a\\neq A_{t+1}}\\pi(a | S_{t+2})Q_{t+1}(S_{t+2},a)\\big)\n\\\\ &= R_{t+1}+\\gamma\\sum_{a\\neq A_{t+1}}\\pi(a | S_{t+1})Q_{t+1}(S_{t+1},a)+\\gamma\\pi(A_{t+1} | S_{t+1})Q_{t+1:t+2}\n\\end{aligned}\n$$\n\n- n-step tree-backup for $t<T-1,n\\ge 2$:\n$$G_{t:t+1} = R_{t+1}+\\gamma\\sum_{a\\neq A_{t+1}}\\pi(a | S_{t+1})Q_{t+1}(S_{t+1},a)+\\gamma\\pi(A_{t+1} | S_{t+1})Q_{t+1:t+n}$$\n\n- action-value update rule as usual from n-step Sarsa:\n$$Q_{t+n}(S_t,A_t)=Q_{t+n-1}(S_t,A_t)+\\alpha[G_{t:t+n}-Q_{t+n-1}(S_t,A_t)]$$\n for, $0\\le t < T$\n\n\n![n-step Tree Backup](assets/7.5.n-step-tree-backup.png)", "_____no_output_____" ], [ "## 6. A Unifying Algorithm: n-step Q(σ)\n\n![n-step-types](assets/7.6.n-step-types.png)\n\n- $\\sigma_t\\in[0,1]$ denote the degree of sampling on step $t$\n - $\\sigma=0$ for full sampling\n - $\\sigma=1$ for pure expection\n\n- Rewrite the n-step back-up tree as:\n$$\n\\begin{aligned}\nG_{t:h} &= R_{t+1}+\\gamma\\sum_{a\\neq A_{t+1}}\\pi(a | S_{t+1})Q_{h-1}(S_{t+1},a)+\\gamma\\pi(A_{t+1} | S_{t+1})G_{t+1:h}\n\\\\ &= R_{t+1}+\\gamma\\overline V_{h-1}(S_{t+1})-\\gamma\\pi(A_{t+1} | S_{t+1})Q_{h-1}(S_{t+1},A_{t+1})+\\gamma\\pi(A_{t+1} | S_{t+1})G_{t+1:h}\n\\\\ &= R_{t+1}+\\gamma\\pi(A_{t+1} | S_{t+1})\\big(G_{t+1:h}-Q_{h-1}(S_{t+1},A_{t+1})\\big)+\\gamma\\overline V_{h-1}(S_{t+1})\n\\end{aligned}\n$$\n\n- n-step $Q(\\sigma)$:\n$$G_{t:h}=R_{t+1}+\\gamma\\big(\\sigma_{t+1}\\rho_{t+1}+(1-\\sigma_{t+1})\\pi(A_{t+1} | S_{t+1})\\big)\\big(G_{t+1:h}-Q_{h-1}(S_{t+1},A_{t+1})\\big)+\\gamma\\overline V_{h-1}(S_{t+1})$$\n \n where, $t<h\\le T$\n - if $h<T$, then $G_{h:h}=Q_{h-1}(S_h,A_h)$\n - if $h=T$, then $G_{T-1:T}=R_T$\n\n\n![n-step-q](assets/7.6.n-step-q.png)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
4a41384ba598098a0d00a011245396e0caa61403
51,896
ipynb
Jupyter Notebook
analyses/molecular-subtyping-EPN/EPN_generate_alldata.ipynb
UCSC-Treehouse/OpenPBTA-analysis
56f82b9f23d369a5e208c7564ce8fc22fa0b64dd
[ "BSD-3-Clause" ]
2
2020-05-22T17:57:33.000Z
2020-11-03T04:00:55.000Z
analyses/molecular-subtyping-EPN/EPN_generate_alldata.ipynb
UCSC-Treehouse/OpenPBTA-analysis
56f82b9f23d369a5e208c7564ce8fc22fa0b64dd
[ "BSD-3-Clause" ]
null
null
null
analyses/molecular-subtyping-EPN/EPN_generate_alldata.ipynb
UCSC-Treehouse/OpenPBTA-analysis
56f82b9f23d369a5e208c7564ce8fc22fa0b64dd
[ "BSD-3-Clause" ]
null
null
null
37.228121
216
0.380164
[ [ [ " This script takes the notebook with RNA and DNA BSID's and collects information for the corresponding samples from fusion summary files, breakpoint density files, GISTIC CNA broad_values file and FPKM files", "_____no_output_____" ] ], [ [ "import argparse\nimport pandas as pd\nimport numpy as np \nimport zipfile\nimport statistics\nimport scipy\nfrom scipy import stats", "_____no_output_____" ], [ "# Reading all the input files \nzip=zipfile.ZipFile(\"/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/data/pbta-cnv-cnvkit-gistic.zip\")\nCNA=pd.read_csv(zip.open(\"2019-12-10-gistic-results-cnvkit/broad_values_by_arm.txt\"), sep=\"\\t\")\nCNA = CNA.set_index(\"Chromosome Arm\")\n\ngsva = pd.read_csv(\"/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/gene-set-enrichment-analysis/results/gsva_scores_stranded.tsv\", sep=\"\\t\")\ngsva_NFKB = gsva.loc[gsva['hallmark_name'] == \"HALLMARK_TNFA_SIGNALING_VIA_NFKB\"]\ngsva_NFKB = gsva_NFKB.set_index(\"Kids_First_Biospecimen_ID\")\n\nfpkm_df = pd.read_csv(\"/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/molecular-subtyping-EPN/epn-subset/epn-pbta-gene-expression-rsem-fpkm-collapsed.stranded.tsv.gz\", sep = \"\\t\")\nfpkm_df = fpkm_df.set_index(\"GENE\")\nzscore_fpkm_df = fpkm_df.apply(scipy.stats.zscore)\n\nfusion = pd.read_csv(\"/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/fusion-summary/results/fusion_summary_ependymoma_foi.tsv\", sep=\"\\t\")\nfusion = fusion.set_index(\"Kids_First_Biospecimen_ID\")\n\nbreakpoint_density = pd.read_csv(\"/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/chromosomal-instability/breakpoint-data/union_of_breaks_densities.tsv\", sep=\"\\t\")\nbreakpoint_density = breakpoint_density.set_index(\"samples\")\n\nEPN_notebook = pd.read_csv(\"/Users/kogantit/Documents/OpenPBTA/OpenPBTA-analysis/analyses/molecular-subtyping-EPN/results/EPN_molecular_subtype.tsv\", sep=\"\\t\")\n", "_____no_output_____" ], [ "# This function takes in a GISTIC broad_values \n# and a string (loss/gain) and returns 0/1 accordingly \ndef DNA_samples_fill_df(CNA_value, loss_gain):\n if CNA_value<0 and loss_gain==\"loss\":\n return(1)\n elif loss_gain==\"gain\" and CNA_value>0:\n return(1)\n else:\n return(0)", "_____no_output_____" ], [ "# Function to generate Z-scores column for every gene \ndef fill_df_with_fpkm_zscores(df,fpkmdf, column_name, gene_name):\n zscore_list = scipy.stats.zscore(np.array(df.apply(lambda x: fpkmdf.loc[gene_name, x[\"Kids_First_Biospecimen_ID_RNA\"]], axis=1)))\n df[column_name] = pd.Series(zscore_list)\n return(df)", "_____no_output_____" ], [ "# Input notebook before adding columns \nEPN_notebook.head()", "_____no_output_____" ], [ "# Input. CNA file \nCNA.head()", "_____no_output_____" ], [ "#Adding columns to EPN_notebook based on values from CNA file (boolean value)\n# Matching based on DNA BSID (row names in CNA file and column names in EPN_notebook) -> Look at row 4 below\n\nEPN_notebook[\"1q_loss\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"1q\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"loss\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\n\nEPN_notebook.head()", "_____no_output_____" ], [ "#. 
Similar to the above, adding more columns to EPN_notebook\nEPN_notebook[\"9p_loss\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"9p\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"loss\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\nEPN_notebook[\"9q_loss\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"9q\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"loss\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\nEPN_notebook[\"6p_loss\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"6p\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"loss\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\nEPN_notebook[\"6q_loss\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"6q\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"loss\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\nEPN_notebook[\"11q_loss\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"11q\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"loss\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\nEPN_notebook[\"11q_gain\"] = EPN_notebook.apply(lambda x: DNA_samples_fill_df(CNA.loc[\"11q\", x[\"Kids_First_Biospecimen_ID_DNA\"]], \"gain\") \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else 0,axis=1)\n\nEPN_notebook.head(4)\n", "_____no_output_____" ], [ "gsva_NFKB.head(3)\n# GSVA. score for NFKB score ", "_____no_output_____" ], [ "# Adding column for NFKB GSEA_score to EPN_notebook\n# If DNA sample BSID not found, then fill with \"NA\" \nEPN_notebook[\"breaks_density-chromosomal_instability\"] = EPN_notebook.apply(lambda x: breakpoint_density.loc[x[\"Kids_First_Biospecimen_ID_DNA\"], \"breaks_density\"] \n if x[\"Kids_First_Biospecimen_ID_DNA\"] is not np.nan else \"NA\", axis=1)\n\nEPN_notebook.head(3)", "_____no_output_____" ], [ "# Printing. FPKM dataframe \nfpkm_df.head(2)", "_____no_output_____" ], [ "# Adding FPKM for different genes to EPN_notebook using function fill_df_with_fpkm_zscores\n\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"RELA_expr_Z-scores\", \"RELA\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"L1CAM_expr_Zscore\", \"L1CAM\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"ARL4D_expr_Zscore\", \"ARL4D\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"CLDN1_expr_zscore\", \"CLDN1\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"CXorf67_expr_zscore\", \"CXorf67\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"TKTL1_expr_zscore\", \"TKTL1\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"GPBP1_expr_zscore\", \"GPBP1\")\nEPN_notebook = fill_df_with_fpkm_zscores(EPN_notebook, fpkm_df, \"IFT46_expr_zscore\", \"IFT46\")\n\n\nEPN_notebook.head(4)\n\n\n# Finally print out the dataframe to an output file \n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a413deff198316f7dbbffbbeed0d9781af5c983
1,530
ipynb
Jupyter Notebook
fetch-quickbooks.ipynb
astrojuanlu/orchest-quickbooks-oauth
12a626d72f2b805962b29c9e4e9f46805e435608
[ "Apache-2.0" ]
null
null
null
fetch-quickbooks.ipynb
astrojuanlu/orchest-quickbooks-oauth
12a626d72f2b805962b29c9e4e9f46805e435608
[ "Apache-2.0" ]
null
null
null
fetch-quickbooks.ipynb
astrojuanlu/orchest-quickbooks-oauth
12a626d72f2b805962b29c9e4e9f46805e435608
[ "Apache-2.0" ]
2
2021-09-28T17:49:41.000Z
2022-02-17T10:12:41.000Z
23.181818
90
0.573203
[ [ [ "import requests\n\nfrom utils import get_quickbooks_client", "_____no_output_____" ], [ "auth_client = get_quickbooks_client()", "_____no_output_____" ], [ "base_url = 'https://sandbox-quickbooks.api.intuit.com'\nurl = '{0}/v3/company/{1}/companyinfo/{1}'.format(base_url, auth_client.realm_id)\nauth_header = 'Bearer {0}'.format(auth_client.access_token)\nheaders = {\n 'Authorization': auth_header,\n 'Accept': 'application/json'\n}\nresponse = requests.get(url, headers=headers)\nprint(response.json())", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a414573ecd27b9cc4a089ebe1e855dcd09973fb
3,566
ipynb
Jupyter Notebook
Chapter4_Iterables/Tuples/named_tuple.ipynb
tomex74/UdemyPythonPro
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
[ "MIT" ]
null
null
null
Chapter4_Iterables/Tuples/named_tuple.ipynb
tomex74/UdemyPythonPro
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
[ "MIT" ]
null
null
null
Chapter4_Iterables/Tuples/named_tuple.ipynb
tomex74/UdemyPythonPro
b4b83483fa2d3337a2860d53ff38e68eb38b3ac4
[ "MIT" ]
null
null
null
26.61194
590
0.570667
[ [ [ "from collections import namedtuple", "_____no_output_____" ], [ "User = namedtuple('User', ['firstname', 'lastname', 'birthday'])", "_____no_output_____" ], [ "user_jan = User(firstname='Jan', lastname='Schaffranek', birthday='24.02.1994')\nuser_peter = User(firstname='Peter', lastname='Peterson', birthday='01.01.1990')", "_____no_output_____" ], [ "print(user_jan)\nprint(user_peter)", "User(firstname='Jan', lastname='Schaffranek', birthday='24.02.1994')\nUser(firstname='Peter', lastname='Peterson', birthday='01.01.1990')\n" ], [ "print(user_jan.birthday)\nprint(user_jan.firstname)\nprint(user_jan.lastname)", "24.02.1994\nJan\nSchaffranek\n" ], [ "print(dir(user_jan))", "['__add__', '__class__', '__contains__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getnewargs__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__rmul__', '__setattr__', '__sizeof__', '__slots__', '__str__', '__subclasshook__', '_asdict', '_field_defaults', '_fields', '_fields_defaults', '_make', '_replace', 'birthday', 'count', 'firstname', 'index', 'lastname']\n" ], [ "#user_jan.birthday = '24.03.1994'\nprint(class(user_jan))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a415fc60723233f54c36a4604b8166cff5a15d8
1,628
ipynb
Jupyter Notebook
matplotlib/gallery_jupyter/text_labels_and_annotations/mathtext_demo.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
matplotlib/gallery_jupyter/text_labels_and_annotations/mathtext_demo.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
matplotlib/gallery_jupyter/text_labels_and_annotations/mathtext_demo.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
30.148148
514
0.518428
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Mathtext Demo\n\n\nUse Matplotlib's internal LaTeX parser and layout engine. For true LaTeX\nrendering, see the text.usetex option.\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\n\nax.plot([1, 2, 3], label=r'$\\sqrt{x^2}$')\nax.legend()\n\nax.set_xlabel(r'$\\Delta_i^j$', fontsize=20)\nax.set_ylabel(r'$\\Delta_{i+1}^j$', fontsize=20)\nax.set_title(r'$\\Delta_i^j \\hspace{0.4} \\mathrm{versus} \\hspace{0.4} '\n r'\\Delta_{i+1}^j$', fontsize=20)\n\ntex = r'$\\mathcal{R}\\prod_{i=\\alpha_{i+1}}^\\infty a_i\\sin(2 \\pi f x_i)$'\nax.text(1, 1.6, tex, fontsize=20, va='bottom')\n\nfig.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
4a4160ba6c0fbe7387e031b0b88da75ff6302a44
421,277
ipynb
Jupyter Notebook
The Crowd Density Project/People-Counting-Crowd-Density-Detection-master/Contributions/CSRNet @ Sreekanth Zipsy/data_prep.ipynb
bharatnishant/UdacityOpenSource
abd641700f0878a2e083de7a392b837efdd244c4
[ "Apache-2.0" ]
2
2019-08-20T12:58:01.000Z
2019-08-20T13:13:28.000Z
The Crowd Density Project/People-Counting-Crowd-Density-Detection-master/Contributions/CSRNet @ Sreekanth Zipsy/data_prep.ipynb
bharatnishant/UdacityOpenSource
abd641700f0878a2e083de7a392b837efdd244c4
[ "Apache-2.0" ]
null
null
null
The Crowd Density Project/People-Counting-Crowd-Density-Detection-master/Contributions/CSRNet @ Sreekanth Zipsy/data_prep.ipynb
bharatnishant/UdacityOpenSource
abd641700f0878a2e083de7a392b837efdd244c4
[ "Apache-2.0" ]
1
2019-08-20T13:14:59.000Z
2019-08-20T13:14:59.000Z
81.944563
224,828
0.79141
[ [ [ "# importing libraries\nimport h5py\nimport scipy.io as io\nimport PIL.Image as Image\nimport numpy as np\nimport os\nimport glob\nfrom matplotlib import pyplot as plt\nfrom scipy.ndimage.filters import gaussian_filter\nimport scipy\nfrom scipy import spatial\nimport json\nfrom matplotlib import cm as CM\nfrom image import *\nfrom model import CSRNet\nimport torch\nfrom tqdm import tqdm\n%matplotlib inline", "_____no_output_____" ], [ "# function to create density maps for images\ndef gaussian_filter_density(gt):\n print (gt.shape)\n density = np.zeros(gt.shape, dtype=np.float32)\n gt_count = np.count_nonzero(gt)\n if gt_count == 0:\n return density\n\n pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0])))\n leafsize = 2048\n # build kdtree\n tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize)\n # query kdtree\n distances, locations = tree.query(pts, k=4)\n\n print ('generate density...')\n for i, pt in enumerate(pts):\n pt2d = np.zeros(gt.shape, dtype=np.float32)\n pt2d[pt[1],pt[0]] = 1.\n if gt_count > 1:\n sigma = (distances[i][1]+distances[i][2]+distances[i][3])*0.1\n else:\n sigma = np.average(np.array(gt.shape))/2./2. #case: 1 point\n density += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant')\n print ('done.')\n return density", "_____no_output_____" ], [ "#setting the root to the Shanghai dataset you have downloaded\n# change the root path as per your location of dataset\nroot = '../ShanghaiTech/'", "_____no_output_____" ], [ "part_A_train = os.path.join(root,'part_A/train_data','images')\npart_A_test = os.path.join(root,'part_A/test_data','images')\npart_B_train = os.path.join(root,'part_B/train_data','images')\npart_B_test = os.path.join(root,'part_B/test_data','images')\npath_sets = [part_A_train,part_A_test]", "_____no_output_____" ], [ "img_paths = []\nfor path in path_sets:\n for img_path in glob.glob(os.path.join(path, '*.jpg')):\n img_paths.append(img_path)", "_____no_output_____" ], [ "for img_path in img_paths:\n print (img_path)\n mat = io.loadmat(img_path.replace('.jpg','.mat').replace('images','ground-truth').replace('IMG_','GT_IMG_'))\n img= plt.imread(img_path)\n k = np.zeros((img.shape[0],img.shape[1]))\n gt = mat[\"image_info\"][0,0][0,0][0]\n for i in range(0,len(gt)):\n if int(gt[i][1])<img.shape[0] and int(gt[i][0])<img.shape[1]:\n k[int(gt[i][1]),int(gt[i][0])]=1\n k = gaussian_filter_density(k)\n with h5py.File(img_path.replace('.jpg','.h5').replace('images','ground-truth'), 'w') as hf:\n hf['density'] = k", "../ShanghaiTech/part_A/train_data\\images\\IMG_1.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_10.jpg\n(683, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_100.jpg\n(654, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_101.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_102.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_103.jpg\n(400, 400)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_104.jpg\n(405, 540)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_105.jpg\n(632, 990)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_106.jpg\n(675, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_107.jpg\n(517, 800)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_108.jpg\n(767, 
1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_109.jpg\n(681, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_11.jpg\n(686, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_110.jpg\n(498, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_111.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_112.jpg\n(327, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_113.jpg\n(390, 600)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_114.jpg\n(375, 500)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_115.jpg\n(611, 1021)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_116.jpg\n(640, 640)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_117.jpg\n(579, 840)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_118.jpg\n(651, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_119.jpg\n(594, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_12.jpg\n(680, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_120.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_121.jpg\n(373, 561)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_122.jpg\n(787, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_123.jpg\n(774, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_124.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_125.jpg\n(565, 849)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_126.jpg\n(576, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_127.jpg\n(300, 400)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_128.jpg\n(683, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_129.jpg\n(689, 998)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_13.jpg\n(747, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_130.jpg\n(680, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_131.jpg\n(498, 390)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_132.jpg\n(800, 600)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_133.jpg\n(683, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_134.jpg\n(1024, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_135.jpg\n(182, 420)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_136.jpg\n(500, 666)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_137.jpg\n(585, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_138.jpg\n(652, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_139.jpg\n(370, 545)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_14.jpg\n(645, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_140.jpg\n(768, 1024)\ngenerate 
density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_141.jpg\n(427, 640)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_142.jpg\n(455, 670)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_143.jpg\n(605, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_144.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_145.jpg\n(561, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_146.jpg\n(680, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_147.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_148.jpg\n(680, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_149.jpg\n(683, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_15.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_150.jpg\n(450, 970)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_151.jpg\n(449, 716)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_152.jpg\n(600, 800)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_153.jpg\n(400, 600)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_154.jpg\n(416, 624)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_155.jpg\n(409, 620)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_156.jpg\n(625, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_157.jpg\n(450, 299)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_158.jpg\n(671, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_159.jpg\n(267, 400)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_16.jpg\n(1024, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_160.jpg\n(540, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_161.jpg\n(480, 640)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_162.jpg\n(480, 720)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_163.jpg\n(615, 970)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_164.jpg\n(462, 650)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_165.jpg\n(274, 939)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_166.jpg\n(683, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_167.jpg\n(830, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_168.jpg\n(944, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_169.jpg\n(649, 800)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_17.jpg\n(478, 600)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_170.jpg\n(480, 722)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_171.jpg\n(351, 600)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_172.jpg\n(685, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_173.jpg\n(512, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_174.jpg\n(512, 
1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_175.jpg\n(360, 640)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_176.jpg\n(380, 594)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_177.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_178.jpg\n(688, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_179.jpg\n(616, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_A/train_data\\images\\IMG_18.jpg\n(793, 1024)\ngenerate density...\ndone.\n" ], [ "plt.imshow(Image.open(img_paths[0]))", "_____no_output_____" ], [ "gt_file = h5py.File(img_paths[0].replace('.jpg','.h5').replace('images','ground-truth'),'r')\ngroundtruth = np.asarray(gt_file['density'])\nplt.imshow(groundtruth,cmap=CM.jet)", "_____no_output_____" ], [ "np.sum(groundtruth)", "_____no_output_____" ], [ "path_sets = [part_B_train,part_B_test]\nimg_paths = []\nfor path in path_sets:\n for img_path in glob.glob(os.path.join(path, '*.jpg')):\n img_paths.append(img_path)\n \n# creating density map for part_b images\n\nfor img_path in img_paths:\n print (img_path)\n mat = io.loadmat(img_path.replace('.jpg','.mat').replace('images','ground-truth').replace('IMG_','GT_IMG_'))\n img= plt.imread(img_path)\n k = np.zeros((img.shape[0],img.shape[1]))\n gt = mat[\"image_info\"][0,0][0,0][0]\n for i in range(0,len(gt)):\n if int(gt[i][1])<img.shape[0] and int(gt[i][0])<img.shape[1]:\n k[int(gt[i][1]),int(gt[i][0])]=1\n k = gaussian_filter_density(k)\n with h5py.File(img_path.replace('.jpg','.h5').replace('images','ground-truth'), 'w') as hf:\n hf['density'] = k", "../ShanghaiTech/part_B/train_data\\images\\IMG_1.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_10.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_100.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_101.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_102.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_103.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_104.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_105.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_106.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_107.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_108.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_109.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_11.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_110.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_111.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_112.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_113.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_114.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_115.jpg\n(768, 1024)\ngenerate 
density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_116.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_117.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_118.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_119.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_12.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_120.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_121.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_122.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_123.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_124.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_125.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_126.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_127.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_128.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_129.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_13.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_130.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_131.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_132.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_133.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_134.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_135.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_136.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_137.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_138.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_139.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_14.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_140.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_141.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_142.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_143.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_144.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_145.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_146.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_147.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_148.jpg\n(768, 1024)\ngenerate 
density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_149.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_15.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_150.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_151.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_152.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_153.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_154.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_155.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_156.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_157.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_158.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_159.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_16.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_160.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_161.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_162.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_163.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_164.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_165.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_166.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_167.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_168.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_169.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_17.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_170.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_171.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_172.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_173.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_174.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_175.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_176.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_177.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_178.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_179.jpg\n(768, 1024)\ngenerate density...\ndone.\n../ShanghaiTech/part_B/train_data\\images\\IMG_18.jpg\n(768, 1024)\ngenerate density...\ndone.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a41729c47dd6359c2946c01be0353cf8e20091c
44,093
ipynb
Jupyter Notebook
Prediccion Salarios2.ipynb
jaarroyl/Caso-estudio-2.1-Prediccion-Salarios
30af4374e0f102186628335fb1323eb33ef59472
[ "MIT" ]
null
null
null
Prediccion Salarios2.ipynb
jaarroyl/Caso-estudio-2.1-Prediccion-Salarios
30af4374e0f102186628335fb1323eb33ef59472
[ "MIT" ]
null
null
null
Prediccion Salarios2.ipynb
jaarroyl/Caso-estudio-2.1-Prediccion-Salarios
30af4374e0f102186628335fb1323eb33ef59472
[ "MIT" ]
null
null
null
26.658404
143
0.382646
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "##### Cargar la data de salarios", "_____no_output_____" ] ], [ [ "data = pd.read_csv('../Datasets casos de estudio 2/Case study 1/cs2.1.csv')", "_____no_output_____" ] ], [ [ "##### Variables en dataset\n", "_____no_output_____" ] ], [ [ "data.head()", "_____no_output_____" ], [ "data.dtypes", "_____no_output_____" ] ], [ [ "##### Dimensiones del dataset", "_____no_output_____" ] ], [ [ "data.shape", "_____no_output_____" ] ], [ [ "##### Estadisticos principales", "_____no_output_____" ] ], [ [ "data.describe()", "_____no_output_____" ] ], [ [ "##### Linear Regression para el salario", "_____no_output_____" ] ], [ [ "data = data.rename(columns={'wage': 'salario'})", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics", "_____no_output_____" ], [ "lr = LinearRegression()", "_____no_output_____" ], [ "X = data.iloc[:,:11]\nY = data.iloc[:,11]", "_____no_output_____" ], [ "X.head()", "_____no_output_____" ], [ "Y.head()", "_____no_output_____" ] ], [ [ "##### Run Linear Specification and compute MSE and R^2", "_____no_output_____" ] ], [ [ "full_fit = lr.fit(X, Y)", "_____no_output_____" ], [ "y_predict = lr.predict(X)", "_____no_output_____" ], [ "p_fmla1 = X.shape\np_fmla1 = p_fmla1[1]\np_fmla1", "_____no_output_____" ], [ "#Crear formula para obtener R2\ndef adj_r2_score(model,y,yhat):\n adj = 1 - float(len(y)-1)/(len(y)-len(model.coef_)-1)*(1 - metrics.r2_score(y,yhat))\n return adj\n ", "_____no_output_____" ] ], [ [ "##### R2 y R2 ajustado", "_____no_output_____" ] ], [ [ "# R2 \nr2_fmla1 = lr.score(X,Y)\nr2_fmla1", "_____no_output_____" ], [ "# R2_adjusted\nr2_adj_fmla1 = adj_r2_score(lr,Y,y_predict)\nr2_adj_fmla1", "_____no_output_____" ] ], [ [ "##### MSE", "_____no_output_____" ] ], [ [ "# MSE\nmse_fmla1 = mean_squared_error(Y, y_predict)\nmse_fmla1", "_____no_output_____" ] ], [ [ "### Creacion de nuevas variables cuadraticas", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import PolynomialFeatures\npf = PolynomialFeatures(2, interaction_only=True)", "_____no_output_____" ], [ "X = X.drop(['mw','ne'], axis = 1)\nX.head()", "_____no_output_____" ], [ "interac_var = X.iloc[:,1:]\ninterac_var.head()", "_____no_output_____" ], [ "female = np.array(X.iloc[:,0])\nfemale", "_____no_output_____" ], [ "new_features = pf.fit_transform(interac_var)\nnew_features", "_____no_output_____" ], [ "print('female: {}'.format(female.shape))\nprint('new_features: {}'.format(new_features.shape))", "female: (3835,)\nnew_features: (3835, 37)\n" ], [ "X_poly = np.append(female.reshape(-1,1),new_features, axis = 1)\nX_poly.shape", "_____no_output_____" ], [ "interac_var2 = X.iloc[:,1:3]\ninterac_var2.head()", "_____no_output_____" ], [ "new_features2 = pf.fit_transform(interac_var2)\nnew_features2", "_____no_output_____" ], [ "X_poly_2var = np.append(female.reshape(-1,1),new_features2, axis = 1)\nX_poly_2var.shape", "_____no_output_____" ] ], [ [ "##### Run Quadratic specification and compute MSE an R^2", "_____no_output_____" ] ], [ [ "fmla2 = lr.fit(X_poly, Y)\np_fmla2 = X_poly.shape\np_fmla2 = p_fmla2[1]\ny_predict_fmla2 = lr.predict(X_poly)\n# R2 \nr2_fmla2 = lr.score(X_poly,Y)\n# R2_adjusted\nr2_adj_fmla2 = adj_r2_score(lr,Y,y_predict_fmla2)\n# MSE \nmse_fmla2 = mean_squared_error(Y,y_predict_fmla2)", "_____no_output_____" ], [ "p_fmla2", "_____no_output_____" ] ], [ [ "##### Resumen de los calculos", 
"_____no_output_____" ] ], [ [ "table = np.ndarray((2,4))\ntable[0,] = (p_fmla1, r2_fmla1, r2_adj_fmla1, mse_fmla1)\ntable[1,] = (p_fmla2, r2_fmla2, r2_adj_fmla2, mse_fmla2)\ntable = pd.DataFrame(table, columns=['p','R2','R2_adj','MSE'], index=['Basic reg','Flexible reg'])\ntable", "_____no_output_____" ], [ "##### Linear and Quadratic specifications with Sample Splitting", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ] ], [ [ "##### Basic reg & Flexbile split", "_____no_output_____" ] ], [ [ "x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.25, random_state = 42, shuffle = True)\nx_train_poly, x_test_poly, y_train_poly, y_test_poly = train_test_split(X_poly, Y, test_size = 0.25, random_state = 42, shuffle = True)\n", "_____no_output_____" ] ], [ [ "##### Basic", "_____no_output_____" ] ], [ [ "fmla1 = lr.fit(x_train, y_train)\np_fmla1 = x_train.shape\np_fmla1 = p_fmla1[1]\ny_pred_test = lr.predict(x_test)\ny_pred_train = lr.predict(x_train)\n\nprint('Calculo de scores para training')\n# R2 \nr2_fmla1_train = lr.score(x_train,y_train)\nprint('R^2:{}'.format(r2_fmla1_train))\n# R2_adjusted\nr2_adj_fmla1_train = adj_r2_score(lr, y_train, y_pred_train)\nprint('R^2_adj:{}'.format(r2_adj_fmla1_train))\n# MSE \nmse_fmla1_train = mean_squared_error(y_train, y_pred_train)\nprint('MSE:{}'.format(mse_fmla1_train))\n\nprint('Calculo de scores para test')\n# R2 \nr2_fmla1_test = lr.score(x_test,y_test)\nprint('R^2:{}'.format(r2_fmla1_test))\n# R2_adjusted\nr2_adj_fmla1_test = adj_r2_score(lr, y_test, y_pred_test)\nprint('R^2_adj:{}'.format(r2_adj_fmla1_test))\n# MSE \nmse_fmla1_test = mean_squared_error(y_test, y_pred_test)\nprint('MSE:{}'.format(mse_fmla1_test))", "Calculo de scores para training\nR^2:0.0957172775763746\nR^2_adj:0.09287759003212726\nMSE:159.23581245038653\nCalculo de scores para test\nR^2:0.08896184704009168\nR^2_adj:0.08032186455680479\nMSE:184.47564883513414\n" ] ], [ [ "##### Flexible", "_____no_output_____" ] ], [ [ "fmla1 = lr.fit(x_train_poly, y_train_poly)\np_fmla1_poly = x_train_poly.shape\np_fmla1_poly = p_fmla1_poly[1]\ny_pred_test_poly = lr.predict(x_test_poly)\ny_pred_train_poly = lr.predict(x_train_poly)\n\nprint('Calculo de scores para training')\n# R2 \nr2_fmla1_train_poly = lr.score(x_train_poly,y_train_poly)\nprint('R^2:{}'.format(r2_fmla1_train_poly))\n# R2_adjusted\nr2_adj_fmla1_train_poly = adj_r2_score(lr, y_train_poly, y_pred_train)\nprint('R^2_adj:{}'.format(r2_adj_fmla1_train_poly))\n# MSE \nmse_fmla1_train_poly = mean_squared_error(y_train_poly, y_pred_train_poly)\nprint('MSE:{}'.format(mse_fmla1_train_poly))\n\nprint('Calculo de scores para test')\n# R2 \nr2_fmla1_test_poly = lr.score(x_test_poly,y_test_poly)\nprint('R^2:{}'.format(r2_fmla1_test_poly))\n# R2_adjusted\nr2_adj_fmla1_test_poly = adj_r2_score(lr, y_test_poly, y_pred_test_poly)\nprint('R^2_adj:{}'.format(r2_adj_fmla1_test_poly))\n# MSE \nmse_fmla1_test_poly = mean_squared_error(y_test_poly, y_pred_test_poly)\nprint('MSE:{}'.format(mse_fmla1_test_poly))", "Calculo de scores para training\nR^2:0.10079725810843665\nR^2_adj:0.08360492528448249\nMSE:158.3412749266716\nCalculo de scores para test\nR^2:0.09876123070193364\nR^2_adj:0.06153615110049171\nMSE:182.49137665802562\n" ], [ "table = np.ndarray((4,4))\ntable[0,] = (p_fmla1, r2_fmla1_train, r2_adj_fmla1_train, mse_fmla1_train)\ntable[1,] = (p_fmla1, r2_fmla1_test, r2_adj_fmla1_test, mse_fmla1_test)\ntable[2,] = (p_fmla1_poly, r2_fmla1_train_poly, 
r2_adj_fmla1_train_poly, mse_fmla1_train_poly)\ntable[3,] = (p_fmla1_poly, r2_fmla1_test_poly, r2_adj_fmla1_test_poly, mse_fmla1_test_poly)\n\ntable = pd.DataFrame(table, columns=['p','R2','R2_adj','MSE'], index=['Basic reg train','Basic reg test',\n 'Flexible reg train','Flexible reg test'])\ntable", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a4172ab4adda6151f4882babaf50d74dc84d0fd
106,805
ipynb
Jupyter Notebook
research_development/Harshit/SwapNet_Experimentation.ipynb
Pherokung/VIRTUON
987cf4e37a72b214f02f0f7fbda68c0cc74e6de4
[ "MIT" ]
8
2020-11-20T17:51:59.000Z
2020-11-26T05:14:06.000Z
research_development/Harshit/SwapNet_Experimentation.ipynb
Pherokung/VIRTUON
987cf4e37a72b214f02f0f7fbda68c0cc74e6de4
[ "MIT" ]
3
2021-09-22T18:45:51.000Z
2022-02-10T09:09:23.000Z
research_development/Harshit/SwapNet_Experimentation.ipynb
Pherokung/VIRTUON
987cf4e37a72b214f02f0f7fbda68c0cc74e6de4
[ "MIT" ]
11
2020-11-28T04:09:29.000Z
2022-03-21T09:00:55.000Z
141.276455
30,118
0.775001
[ [ [ "<a href=\"https://colab.research.google.com/github/arjunparmar/VIRTUON/blob/main/Harshit/SwapNet_Experimentation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "## Imports\nimport os\nimport sys\nimport random\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom glob import glob\nimport tensorflow\nfrom tensorflow import keras\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.preprocessing import image\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.layers import concatenate, Concatenate\n\n## Seeding \nseed = 2019\nrandom.seed = seed\nnp.random.seed = seed\ntensorflow.seed = seed", "_____no_output_____" ], [ "def load_image(img_path, show=False):\n img = cv2.imread(img_path)\n img = cv2.resize(img, (128,128))\n img_tensor = image.img_to_array(img) # (height, width, channels)\n #|img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels) # imshow expects values in the range [0, 1]\n\n return img_tensor", "_____no_output_____" ], [ "!mkdir seg_train\n!cp -r /content/drive/Shareddrives/Virtuon/Clothing\\ Coparsing/dataset/seg_train/* /content/seg_train/\n!mkdir seg_test\n!cp -r /content/drive/Shareddrives/Virtuon/Clothing\\ Coparsing/dataset/seg_test/* /content/seg_test/\n!mkdir pos_train\n!cp -r /content/drive/Shareddrives/Virtuon/Clothing\\ Coparsing/dataset/pose_train/* /content/pos_train/\n!mkdir pos_test\n!cp -r /content/drive/Shareddrives/Virtuon/Clothing\\ Coparsing/dataset/pose_test/* /content/pos_test/\n", "mkdir: cannot create directory ‘pos_test’: File exists\n" ], [ "x = []\ny = []\n\ndef get_image(path):\n data =[]\n for subdir, dirs, files in os.walk(path):\n for f in files:\n path = os.path.join(subdir, f)\n img = load_image(path) \n # print(img.shape)\n data.append(img)\n return data\n\n\nx_1 = get_image(r'/content/pos_train') #BS\nx_2 = get_image(r'/content/seg_train') #CS \n\ny = get_image(r'/content/seg_train')\n\nx_1 = np.asarray(x_1)\nx_2 = np.asarray(x_2)\ny = np.asarray(y)\n", "_____no_output_____" ], [ "print(x_1.shape)\nprint(x_2.shape)\nprint(y.shape)", "(900, 128, 128, 3)\n(900, 128, 128, 3)\n(900, 128, 128, 3)\n" ], [ "def down_block(x, filters, kernel_size=(3, 3), padding=\"same\", strides=1):\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(x)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n p = keras.layers.MaxPool2D((2, 2), (2, 2))(c)\n return c, p\n\ndef up_block(x, skip, filters, kernel_size=(3, 3), padding=\"same\", strides=1):\n us = keras.layers.UpSampling2D((2, 2))(x)\n concat = keras.layers.Concatenate()([us, skip])\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(concat)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, 
strides=strides, activation=\"relu\")(c)\n return c\n\ndef bottleneck(x, filters, kernel_size=(3, 3), padding=\"same\", strides=1):\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(x)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n c = keras.layers.Conv2D(filters, kernel_size, padding=padding, strides=strides, activation=\"relu\")(c)\n return c", "_____no_output_____" ], [ "def res_block(u3):\n c1 = keras.layers.Conv2D(64, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(u3)\n c2 = keras.layers.Conv2D(32, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c1)\n c3 = keras.layers.Conv2D(32, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c2)\n \n c3 = keras.layers.Concatenate()([u3, c3])\n \n c4 = keras.layers.Conv2D(64, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c3)\n c5 = keras.layers.Conv2D(32, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c4)\n c6 = keras.layers.Conv2D(32, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c5)\n \n c6 = keras.layers.Concatenate()([u3, c3, c6])\n \n c7 = keras.layers.Conv2D(64, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c6)\n c8 = keras.layers.Conv2D(32, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c7)\n c9 = keras.layers.Conv2D(16, kernel_size= (3,3), padding=\"same\", strides=1, activation=\"relu\")(c8)\n return c9", "_____no_output_____" ], [ "K.clear_session()\ndef UNet():\n f = [16, 32, 64, 128, 256]\n inputs1 = keras.layers.Input((128,128, 3))\n inputs2 = keras.layers.Input((128,128, 3))\n \n p0 = inputs1\n c1, p1 = down_block(p0, f[0]) #128 -> 64\n c2, p2 = down_block(p1, f[1]) #64 -> 32\n c3, p3 = down_block(p2, f[2]) #32 -> 16\n bn1 = bottleneck(p3, f[3])\n print(bn1.shape)\n \n inputs2 = keras.layers.Input((128,128, 3))\n np0 = inputs2\n nc1, np1 = down_block(np0, f[0]) #128 -> 64\n nc2, np2 = down_block(np1, f[1]) #64 -> 32\n nc3, np3 = down_block(np2, f[2]) #32 -> 16\n bn2 = bottleneck(np3, f[3])\n print(bn2.shape)\n\n bn = keras.layers.Concatenate()([bn1, bn2])\n print(bn.shape)\n u1 = up_block(bn, nc3, f[2]) #16 -> 32\n u2 = up_block(u1, nc2, f[1]) #32 -> 64\n u3 = up_block(u2, nc1, f[0]) #64 -> 128\n print(u3.shape)\n \n #apply resblocks\n res = res_block(u3)\n \n \n outputs = keras.layers.Conv2D(3, (1, 1), padding=\"same\", activation=\"sigmoid\")(res)\n model = keras.models.Model([inputs1, inputs2], outputs)\n \n return model\n", "_____no_output_____" ], [ "model = UNet()", "(None, 16, 16, 128)\n(None, 16, 16, 128)\n(None, 16, 16, 256)\n(None, 128, 128, 16)\n" ], [ "\nmodel.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"acc\"])\nmodel.summary()", "Model: \"model\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 128, 128, 3) 0 \n__________________________________________________________________________________________________\ninput_3 (InputLayer) [(None, 128, 128, 3) 0 \n__________________________________________________________________________________________________\nconv2d (Conv2D) (None, 128, 128, 16) 448 input_1[0][0] 
\n__________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 128, 128, 16) 448 input_3[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 128, 128, 16) 2320 conv2d[0][0] \n__________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 128, 128, 16) 2320 conv2d_15[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 128, 128, 16) 2320 conv2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 128, 128, 16) 2320 conv2d_16[0][0] \n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 128, 128, 16) 2320 conv2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 128, 128, 16) 2320 conv2d_17[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 64, 64, 16) 0 conv2d_3[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_3 (MaxPooling2D) (None, 64, 64, 16) 0 conv2d_18[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 64, 64, 32) 4640 max_pooling2d[0][0] \n__________________________________________________________________________________________________\nconv2d_19 (Conv2D) (None, 64, 64, 32) 4640 max_pooling2d_3[0][0] \n__________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, 64, 64, 32) 9248 conv2d_4[0][0] \n__________________________________________________________________________________________________\nconv2d_20 (Conv2D) (None, 64, 64, 32) 9248 conv2d_19[0][0] \n__________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, 64, 64, 32) 9248 conv2d_5[0][0] \n__________________________________________________________________________________________________\nconv2d_21 (Conv2D) (None, 64, 64, 32) 9248 conv2d_20[0][0] \n__________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, 64, 64, 32) 9248 conv2d_6[0][0] \n__________________________________________________________________________________________________\nconv2d_22 (Conv2D) (None, 64, 64, 32) 9248 conv2d_21[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 32, 32, 32) 0 conv2d_7[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_4 (MaxPooling2D) (None, 32, 32, 32) 0 conv2d_22[0][0] \n__________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 32, 32, 64) 18496 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_23 (Conv2D) (None, 32, 32, 64) 18496 max_pooling2d_4[0][0] \n__________________________________________________________________________________________________\nconv2d_9 (Conv2D) 
(None, 32, 32, 64) 36928 conv2d_8[0][0] \n__________________________________________________________________________________________________\nconv2d_24 (Conv2D) (None, 32, 32, 64) 36928 conv2d_23[0][0] \n__________________________________________________________________________________________________\nconv2d_10 (Conv2D) (None, 32, 32, 64) 36928 conv2d_9[0][0] \n__________________________________________________________________________________________________\nconv2d_25 (Conv2D) (None, 32, 32, 64) 36928 conv2d_24[0][0] \n__________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 32, 32, 64) 36928 conv2d_10[0][0] \n__________________________________________________________________________________________________\nconv2d_26 (Conv2D) (None, 32, 32, 64) 36928 conv2d_25[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 16, 16, 64) 0 conv2d_11[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_5 (MaxPooling2D) (None, 16, 16, 64) 0 conv2d_26[0][0] \n__________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 16, 16, 128) 73856 max_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_27 (Conv2D) (None, 16, 16, 128) 73856 max_pooling2d_5[0][0] \n__________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 16, 16, 128) 147584 conv2d_12[0][0] \n__________________________________________________________________________________________________\nconv2d_28 (Conv2D) (None, 16, 16, 128) 147584 conv2d_27[0][0] \n__________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 16, 16, 128) 147584 conv2d_13[0][0] \n__________________________________________________________________________________________________\nconv2d_29 (Conv2D) (None, 16, 16, 128) 147584 conv2d_28[0][0] \n__________________________________________________________________________________________________\nconcatenate (Concatenate) (None, 16, 16, 256) 0 conv2d_14[0][0] \n conv2d_29[0][0] \n__________________________________________________________________________________________________\nup_sampling2d (UpSampling2D) (None, 32, 32, 256) 0 concatenate[0][0] \n__________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 32, 32, 320) 0 up_sampling2d[0][0] \n conv2d_26[0][0] \n__________________________________________________________________________________________________\nconv2d_30 (Conv2D) (None, 32, 32, 64) 184384 concatenate_1[0][0] \n__________________________________________________________________________________________________\nconv2d_31 (Conv2D) (None, 32, 32, 64) 36928 conv2d_30[0][0] \n__________________________________________________________________________________________________\nconv2d_32 (Conv2D) (None, 32, 32, 64) 36928 conv2d_31[0][0] \n__________________________________________________________________________________________________\nup_sampling2d_1 (UpSampling2D) (None, 64, 64, 64) 0 conv2d_32[0][0] \n__________________________________________________________________________________________________\nconcatenate_2 (Concatenate) (None, 64, 64, 96) 0 
up_sampling2d_1[0][0] \n conv2d_22[0][0] \n__________________________________________________________________________________________________\nconv2d_33 (Conv2D) (None, 64, 64, 32) 27680 concatenate_2[0][0] \n__________________________________________________________________________________________________\nconv2d_34 (Conv2D) (None, 64, 64, 32) 9248 conv2d_33[0][0] \n__________________________________________________________________________________________________\nconv2d_35 (Conv2D) (None, 64, 64, 32) 9248 conv2d_34[0][0] \n__________________________________________________________________________________________________\nup_sampling2d_2 (UpSampling2D) (None, 128, 128, 32) 0 conv2d_35[0][0] \n__________________________________________________________________________________________________\nconcatenate_3 (Concatenate) (None, 128, 128, 48) 0 up_sampling2d_2[0][0] \n conv2d_18[0][0] \n__________________________________________________________________________________________________\nconv2d_36 (Conv2D) (None, 128, 128, 16) 6928 concatenate_3[0][0] \n__________________________________________________________________________________________________\nconv2d_37 (Conv2D) (None, 128, 128, 16) 2320 conv2d_36[0][0] \n__________________________________________________________________________________________________\nconv2d_38 (Conv2D) (None, 128, 128, 16) 2320 conv2d_37[0][0] \n__________________________________________________________________________________________________\nconv2d_39 (Conv2D) (None, 128, 128, 64) 9280 conv2d_38[0][0] \n__________________________________________________________________________________________________\nconv2d_40 (Conv2D) (None, 128, 128, 32) 18464 conv2d_39[0][0] \n__________________________________________________________________________________________________\nconv2d_41 (Conv2D) (None, 128, 128, 32) 9248 conv2d_40[0][0] \n__________________________________________________________________________________________________\nconcatenate_4 (Concatenate) (None, 128, 128, 48) 0 conv2d_38[0][0] \n conv2d_41[0][0] \n__________________________________________________________________________________________________\nconv2d_42 (Conv2D) (None, 128, 128, 64) 27712 concatenate_4[0][0] \n__________________________________________________________________________________________________\nconv2d_43 (Conv2D) (None, 128, 128, 32) 18464 conv2d_42[0][0] \n__________________________________________________________________________________________________\nconv2d_44 (Conv2D) (None, 128, 128, 32) 9248 conv2d_43[0][0] \n__________________________________________________________________________________________________\nconcatenate_5 (Concatenate) (None, 128, 128, 96) 0 conv2d_38[0][0] \n concatenate_4[0][0] \n conv2d_44[0][0] \n__________________________________________________________________________________________________\nconv2d_45 (Conv2D) (None, 128, 128, 64) 55360 concatenate_5[0][0] \n__________________________________________________________________________________________________\nconv2d_46 (Conv2D) (None, 128, 128, 32) 18464 conv2d_45[0][0] \n__________________________________________________________________________________________________\nconv2d_47 (Conv2D) (None, 128, 128, 16) 4624 conv2d_46[0][0] \n__________________________________________________________________________________________________\nconv2d_48 (Conv2D) (None, 128, 128, 3) 51 conv2d_47[0][0] \n==================================================================================================\nTotal params: 
1,563,091\nTrainable params: 1,563,091\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ], [ "#Data augmentation to generate new data from the given data at the time of each batch\n# construct the training image generator for data augmentation\nbatch_size = 32\naug = ImageDataGenerator(rotation_range=20)\n# train the network\nmodel.fit_generator(aug.flow([x_1, x_2], y, batch_size=batch_size), steps_per_epoch=len(x_1) // batch_size, epochs=100)", "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1844: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n warnings.warn('`Model.fit_generator` is deprecated and '\n" ], [ "def plot(img): \n plt.imshow(img) \n plt.axis('off')\n plt.show()", "_____no_output_____" ], [ "p1 = r'/content/pos_test/0.jpg'\nimg1= cv2.imread(p1)\nplot(img1)\np2 = r'/content/seg_test/0.jpg'\nimg2= cv2.imread(p2)\nplot(img2)", "_____no_output_____" ], [ "img1 = load_image(p1)\nimg2 = load_image(p2)\nprint(img1.shape)\nprint(img2.shape)\nimg1 = np.expand_dims(img1, axis = 0)\nimg2 = np.expand_dims(img2, axis = 0)\nresult = model.predict([img1, img2])\n# result = np.resize(result, (128,128,3))", "(128, 128, 3)\n(128, 128, 3)\n" ], [ "result.shape", "_____no_output_____" ], [ "result = np.squeeze(result)", "_____no_output_____" ], [ "plt.imshow(result)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a41899f8d9686db37608d66feea7190b473cfc7
20,088
ipynb
Jupyter Notebook
LS_DS_223_assignment.ipynb
dataabyss/DS-Unit-2-Kaggle-Challenge
99f68210cc9788562e6dd9fa3903dea6d746a13d
[ "MIT" ]
null
null
null
LS_DS_223_assignment.ipynb
dataabyss/DS-Unit-2-Kaggle-Challenge
99f68210cc9788562e6dd9fa3903dea6d746a13d
[ "MIT" ]
null
null
null
LS_DS_223_assignment.ipynb
dataabyss/DS-Unit-2-Kaggle-Challenge
99f68210cc9788562e6dd9fa3903dea6d746a13d
[ "MIT" ]
null
null
null
42.025105
1,567
0.623158
[ [ [ "Lambda School Data Science\n\n*Unit 2, Sprint 2, Module 3*\n\n---", "_____no_output_____" ], [ "# Cross-Validation\n\n\n## Assignment\n- [x] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.\n- [x] Continue to participate in our Kaggle challenge. \n- [x] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.\n- [x] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)\n- [x] Commit your notebook to your fork of the GitHub repo.\n\n\nYou won't be able to just copy from the lesson notebook to this assignment.\n\n- Because the lesson was ***regression***, but the assignment is ***classification.***\n- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.\n\nSo you will have to adapt the example, which is good real-world practice.\n\n1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)\n2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`\n3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)\n4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))\n\n\n\n## Stretch Goals\n\n### Reading\n- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation\n- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)\n- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation\n- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)\n- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)\n\n### Doing\n- Add your own stretch goals!\n- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.\n- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). 
Experiment with these alternatives.\n- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for \"Grid-Searching Which Model To Use\" in Chapter 6:\n\n> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...\n\nThe example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?\n", "_____no_output_____" ], [ "### BONUS: Stacking!\n\nHere's some code you can use to \"stack\" multiple submissions, which is another form of ensembling:\n\n```python\nimport pandas as pd\n\n# Filenames of your submissions you want to ensemble\nfiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']\n\ntarget = 'status_group'\nsubmissions = (pd.read_csv(file)[[target]] for file in files)\nensemble = pd.concat(submissions, axis='columns')\nmajority_vote = ensemble.mode(axis='columns')[0]\n\nsample_submission = pd.read_csv('sample_submission.csv')\nsubmission = sample_submission.copy()\nsubmission[target] = majority_vote\nsubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)\n```", "_____no_output_____" ] ], [ [ "%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'", "_____no_output_____" ], [ "import pandas as pd\n\n# Merge train_features.csv & train_labels.csv\ntrain = pd.merge(pd.read_csv(DATA_PATH+'tanzania/train_features.csv'), \n pd.read_csv(DATA_PATH+'tanzania/train_labels.csv'))\n\n# Read test_features.csv & sample_submission.csv\ntest = pd.read_csv(DATA_PATH+'tanzania/test_features.csv')\nsample_submission = pd.read_csv(DATA_PATH+'tanzania/sample_submission.csv')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Merge train_features.csv & train_labels.csv\ntrain = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'), \n pd.read_csv('../data/tanzania/train_labels.csv'))\n\n# Read test_features.csv & sample_submission.csv\ntest = pd.read_csv('../data/tanzania/test_features.csv')\nsample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')\n\n# Split train into train & val\ntrain, val = train_test_split(train, train_size=0.80, test_size=0.20, \n stratify=train['status_group'], \n random_state=42)\n\n\ndef wrangle(X):\n \"\"\"Wrangle train, validate, and test sets in the same way\"\"\"\n \n # Prevent SettingWithCopyWarning\n X = X.copy()\n \n # About 3% of the time, latitude has small values near zero,\n # outside Tanzania, so we'll treat these values like zero.\n X['latitude'] = X['latitude'].replace(-2e-08, 0)\n \n # When columns have zeros and shouldn't, they are like null values.\n # So we will replace the zeros with nulls, and impute missing values later.\n # Also create a \"missing indicator\" column, because the fact that\n # values are 
missing may be a predictive signal.\n cols_with_zeros = ['longitude', 'latitude', 'construction_year', \n 'gps_height', 'population']\n for col in cols_with_zeros:\n X[col] = X[col].replace(0, np.nan)\n X[col+'_MISSING'] = X[col].isnull()\n \n # Drop duplicate columns\n duplicates = ['quantity_group', 'payment_type']\n X = X.drop(columns=duplicates)\n \n # Drop recorded_by (never varies) and id (always varies, random)\n unusable_variance = ['recorded_by', 'id']\n X = X.drop(columns=unusable_variance)\n \n # Convert date_recorded to datetime\n X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)\n \n # Extract components from date_recorded, then drop the original column\n X['year_recorded'] = X['date_recorded'].dt.year\n X['month_recorded'] = X['date_recorded'].dt.month\n X['day_recorded'] = X['date_recorded'].dt.day\n X = X.drop(columns='date_recorded')\n \n # Engineer feature: how many years from construction_year to date_recorded\n X['years'] = X['year_recorded'] - X['construction_year']\n X['years_MISSING'] = X['years'].isnull()\n \n # return the wrangled dataframe\n return X\n\ntrain = wrangle(train)\nval = wrangle(val)\ntest = wrangle(test)", "_____no_output_____" ], [ "# The status_group column is the target\ntarget = 'status_group'\n\n# Get a dataframe with all train columns except the target\ntrain_features = train.drop(columns=[target])\n\n# Get a list of the numeric features\nnumeric_features = train_features.select_dtypes(include='number').columns.tolist()\n\n# Get a series with the cardinality of the nonnumeric features\ncardinality = train_features.select_dtypes(exclude='number').nunique()\n\n# Get a list of all categorical features with cardinality <= 50\ncategorical_features = cardinality[cardinality <= 50].index.tolist()\n\n# Combine the lists \nfeatures = numeric_features + categorical_features\n\nX_train = train[features]\ny_train = train[target]\nX_val = val[features]\ny_val = val[target]\nX_test = test[features]", "_____no_output_____" ], [ "import numpy as np\nfrom sklearn.pipeline import make_pipeline\nimport category_encoders as ce\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.feature_selection import f_classif, SelectKBest\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.impute import SimpleImputer\nfrom scipy.stats import randint", "_____no_output_____" ], [ "pipeline = make_pipeline(\n ce.OrdinalEncoder(cols=['basin',\n 'region',\n 'public_meeting',\n 'scheme_management',\n 'permit',\n 'extraction_type',\n 'extraction_type_group',\n 'extraction_type_class',\n 'management',\n 'management_group',\n 'payment',\n 'water_quality',\n 'quality_group',\n 'quantity',\n 'source',\n 'source_type',\n 'source_class',\n 'waterpoint_type']),\n ce.OneHotEncoder(use_cat_names=True), \n SimpleImputer(), \n StandardScaler(),\n SelectKBest(f_classif, k=20),\n RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)\n)\n\n# Fit on train, score on val\npipeline.fit(X_train, y_train)\nprint('Train Accuracy', pipeline.score(X_train, y_train))\nprint('Validation Accuracy', pipeline.score(X_val, y_val))", "Train Accuracy 0.9940867003367003\nValidation Accuracy 0.7787878787878788\n" ], [ "k = 3\nscores = 
cross_val_score(pipeline, X_train, y_train, cv=k, \n scoring='accuracy')\nprint(f'MAE for {k} folds:', scores)", "MAE for 3 folds: [0.76051004 0.77643791 0.76829345]\n" ], [ "pipeline.get_params().keys()", "_____no_output_____" ], [ "param_distributions = {\n 'simpleimputer__strategy': ['mean', 'median'], \n 'selectkbest__k': range(1, len(X_train.columns)+1), \n 'randomforestclassifier__min_samples_leaf': [5, 20, 5],\n}\n\nsearch = RandomizedSearchCV(\n pipeline, \n param_distributions=param_distributions, \n n_iter=100, \n cv=5, \n scoring='accuracy', \n verbose=10, \n return_train_score=True, \n n_jobs=-1\n)\n\nsearch.fit(X_train, y_train);", "Fitting 5 folds for each of 100 candidates, totalling 500 fits\n" ], [ "print('Best hyperparameters', search.best_params_)\nprint()\nprint('Cross-validation MAE', search.best_score_)", "Best hyperparameters {'selectkbest__k': 38}\n\nCross-validation MAE 0.7921927609427609\n" ], [ "print('Best hyperparameters', search.best_params_)\nprint()\nprint('Cross-validation MAE', search.best_score_)", "Best hyperparameters {'simpleimputer__strategy': 'mean', 'selectkbest__k': 38, 'randomforestclassifier__min_samples_leaf': 5}\n\nCross-validation MAE 0.7974957912457913\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a4190dd8c35484f19a3c450746f5f5f0a591ed9
3,457
ipynb
Jupyter Notebook
Software Engineering Part 1/optimizing_code_holiday_gifts.ipynb
sauravraghuvanshi/AWS-Machine-Learning-Foundation-by-Udacity
00e2d4137cf435f4799fc41238178eeb791fb041
[ "MIT" ]
2
2020-06-01T21:19:34.000Z
2021-01-31T18:24:16.000Z
Software Engineering Part 1/optimizing_code_holiday_gifts.ipynb
sauravraghuvanshi/AWS-Machine-Learning-Foundation-by-Udacity
00e2d4137cf435f4799fc41238178eeb791fb041
[ "MIT" ]
null
null
null
Software Engineering Part 1/optimizing_code_holiday_gifts.ipynb
sauravraghuvanshi/AWS-Machine-Learning-Foundation-by-Udacity
00e2d4137cf435f4799fc41238178eeb791fb041
[ "MIT" ]
1
2021-07-17T22:55:45.000Z
2021-07-17T22:55:45.000Z
26.79845
428
0.575644
[ [ [ "# Optimizing Code: Holiday Gifts\nIn the last example, you learned that using vectorized operations and more efficient data structures can optimize your code. Let's use these tips for one more example.\n\nSay your online gift store has one million users that each listed a gift on a wish list. You have the prices for each of these gifts stored in `gift_costs.txt`. For the holidays, you're going to give each customer their wish list gift for free if it is under 25 dollars. Now, you want to calculate the total cost of all gifts under 25 dollars to see how much you'd spend on free gifts. Here's one way you could've done it.", "_____no_output_____" ] ], [ [ "import time\nimport numpy as np", "_____no_output_____" ], [ "with open('gift_costs.txt') as f:\n gift_costs = f.read().split('\\n')\n \ngift_costs = np.array(gift_costs).astype(int) # convert string to int", "_____no_output_____" ], [ "start = time.time()\n\ntotal_price = 0\nfor cost in gift_costs:\n if cost < 25:\n total_price += cost * 1.08 # add cost after tax\n\nprint(total_price)\nprint('Duration: {} seconds'.format(time.time() - start))", "32765421.24\nDuration: 5.287686109542847 seconds\n" ] ], [ [ "Here you iterate through each cost in the list, and check if it's less than 25. If so, you add the cost to the total price after tax. This works, but there is a much faster way to do this. Can you refactor this to run under half a second?", "_____no_output_____" ], [ "## Refactor Code\n**Hint:** Using numpy makes it very easy to select all the elements in an array that meet a certain condition, and then perform operations on them together all at once. You can them find the sum of what those values end up being.", "_____no_output_____" ] ], [ [ "start = time.time()\n\ntotal_price = (gift_costs[gift_costs < 25]).sum() * 1.08\nprint(total_price)\n\nprint('Duration: {} seconds'.format(time.time() - start))", "32765421.24\nDuration: 0.06973600387573242 seconds\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a41b26aef179d6c64f163f33d8d402b5bcc60d7
5,222
ipynb
Jupyter Notebook
Initial simulation with natural values.ipynb
xIa066/StochasticGradientDescentAlgorithm-MarkovChianCovidModelling
8c8b35c2099d7f639f04ce02c17a0fc0b80e5110
[ "MIT" ]
null
null
null
Initial simulation with natural values.ipynb
xIa066/StochasticGradientDescentAlgorithm-MarkovChianCovidModelling
8c8b35c2099d7f639f04ce02c17a0fc0b80e5110
[ "MIT" ]
null
null
null
Initial simulation with natural values.ipynb
xIa066/StochasticGradientDescentAlgorithm-MarkovChianCovidModelling
8c8b35c2099d7f639f04ce02c17a0fc0b80e5110
[ "MIT" ]
2
2020-06-02T02:12:43.000Z
2020-06-10T02:56:54.000Z
30.899408
105
0.451168
[ [ [ "import numpy as np", "_____no_output_____" ], [ "# Define Cost function, lambda function, p function, alpha function\ndef cost(theta:float) -> float:\n return theta\ndef la(theta:float) -> float:\n return 1/theta\ndef p(theta:float) -> float:\n return 1/theta\ndef al(theta:float)-> float:\n return theta\n# def L_al_la(la,al,x_n):\n# l=2*x_n *(la-al)\n# return l\n# def L_al_la_d_al(x_n):\n# \"\"\"\n# differential of L_al_la in terms of alpha\n# :param x_n:\n# :return:\n# \"\"\"\n# return 2 * x_n * -1\n# def L_al_la_d_la(x_n):\n# \"\"\"\n# differential of L_al_la in terms of lambda\n# :param x_n:\n# :return:\n# \"\"\"\n# return 2 * x_n\n# def update_lambda(la,al,e, x_n):\n# beta_1 = 0.023\n# la-= e*L_al_la(la,al,x_n)-beta_1*L_al_la_d_la(x_n)\n# if la < 0:\n# la = 0\n# return la\n# def update_alpha(la,al,e, x_n):\n# beta_2 = 10**8\n# al-= e*L_al_la(la,al,x_n)-beta_2*L_al_la_d_al(x_n)\n# # alpha cannot be negative\n# if al < 0:\n# al = 0\n# return al\n# @ param N: is the number of total population\n# @ param total_time: the time that we want to simulate the process (6 month)\n# @ param x_0: initial infected patients (greater than 0)\ndef simulation_infected(la, p, al, N, total_time, x_0):\n # initialize time and infected patient\n t_0 = 0\n # bind variables to initial conditions\n x_n = x_0\n assert(x_n!=0)\n t_n = t_0\n # initiate X_n process and corresponding T_n process\n X_n = [x_0]\n T_n = [t_0]\n # converge\n conv =0\n # loop\n # break condition\n # @ condition1: when the time exceeds the total time\n # @ condition2: when the infected patients go to zero\n # @ condition3: when the whole populations are infected! :(\n while (t_n<total_time and\n x_n != 0 and\n x_n < N\n ):\n i = x_n\n # q i _ i+1\n q_forward_i = la*p*2*x_n*(N-i)/(N*(N-1))\n # q i _ i-1\n q_backward_i = al*i\n # waiting time rate v_i = (q i _ i+1) + (q i _ i-1)\n v_i = q_forward_i + q_backward_i\n t_i = np.random.exponential(v_i)\n # jumping probability to STATE i+1 is (q i _ i+1)/v_i\n jump = np.random.binomial(n=1,p=(q_forward_i/v_i))\n if (jump ==1):\n x_n += 1\n else:\n x_n -= 1\n # increase time \n t_n = t_n+t_i\n # append Process\n X_n.append(x_n)\n T_n.append(t_n)\n\n if len(X_n) > 1 and abs(x_n - X_n[-2]) < 1:\n conv = 1\n break\n\n # update alpha and lambda\n e = 0.01\n def update_alpha(al):\n beta_2 = 10**8\n al-= e*v_i-beta_2* (p*2*x_n*(N-i)/(N*(N-1)))\n # alpha cannot be negative\n if al < 0:\n al = 0\n return al\n\n def update_lambda(la):\n beta_1 = 0.023\n la-= e*v_i-beta_1*i\n if la < 0:\n la = 0\n return la\n al = update_alpha(al)\n la = update_lambda(la)\n \n return X_n,T_n,conv, al,la\n\ne=0.01\npara_alpha = 10**(-8)\npara_lamda = 0.01\n\nX_n,_,conv, para_alpha, para_lamda = simulation_infected(para_lamda,0.2,para_alpha,20000,100,10)\nif conv ==1:\n print('converge to C')\nelse:\n print(\"couldn't converge\")\nprint(\"alpha={}, lambda={}\".format(para_alpha, para_lamda))", "couldn't converge\nalpha=39561.18805886545, lambda=0\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
4a41b2b1db0209f39755a78998a4069c26345bff
306,158
ipynb
Jupyter Notebook
.ipynb_checkpoints/pipeline-checkpoint.ipynb
zhou-wenbin/Udacity-Advanced-Lane-detection
b5ec3c50e3d3e340ecd24ed925665cb297fedb91
[ "MIT" ]
null
null
null
.ipynb_checkpoints/pipeline-checkpoint.ipynb
zhou-wenbin/Udacity-Advanced-Lane-detection
b5ec3c50e3d3e340ecd24ed925665cb297fedb91
[ "MIT" ]
null
null
null
.ipynb_checkpoints/pipeline-checkpoint.ipynb
zhou-wenbin/Udacity-Advanced-Lane-detection
b5ec3c50e3d3e340ecd24ed925665cb297fedb91
[ "MIT" ]
null
null
null
161.731643
174,624
0.760473
[ [ [ "import cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.image as mpimg\n%matplotlib inline\n", "_____no_output_____" ], [ "left_top=[585, 456]\nleft_bottom =[253, 697]\nright_top =[1061, 690]\nright_bottom =[700, 456]\n\ncorners = np.float32([left_top,left_bottom, right_top,right_bottom])\noffset = 150 #test the image to estimate a offset\nimg_size = (img.shape[1], img.shape[0])\nsrc = np.float32(\n [corners[0],\n corners[1],\n corners[2],\n corners[3]])\n\n#decide a place to place the birdviewed image, get these points by testing an image\ndst = np.float32([\n [offset, 0], \n [offset, img_size[1]], \n [img_size[0] - offset, img_size[1]], \n [img_size[0] - offset,0]]) \n\ngrad_thresh=[20,100]\ns_thresh=[170,255]", "_____no_output_____" ], [ "import pickle\n#load distortion matrix from camera_cal\nwith open('./camera_cal/wide_dist_pickle.p',mode='rb') as f:\n dist_pickle = pickle.load(f)\n mtx = dist_pickle['mtx']\n dist = dist_pickle[\"dist\"]\nprint(mtx)\nprint(dist)\nprint('loaded mtx matrix and distortion matrix from undistortion')", "[[1.15777930e+03 0.00000000e+00 6.67111054e+02]\n [0.00000000e+00 1.15282291e+03 3.86128938e+02]\n [0.00000000e+00 0.00000000e+00 1.00000000e+00]]\n[[-0.24688775 -0.02373132 -0.00109842 0.00035108 -0.00258571]]\nloaded mtx matrix and distortion matrix from undistortion\n" ], [ "import glob\nnx = 9\nny = 6\nobjp = np.zeros((nx*ny,3),np.float32)\nobjp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images\nobjpoints= []# 3d points in real world space\nimgpoints= []# 2d points in image space\n\n#make a list of calibration images\nimages = glob.glob('./camera_cal/calibration*.jpg')\n\n#Step through the list and search for chessboard corners\nfor frame in images:\n img = cv2.imread(frame)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)", "_____no_output_____" ], [ "def get_shresholded_img(image,grad_thresh,s_thresh):\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n #process the x direction gradient \n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n sxbinary = np.zeros_like(scaled_sobel)\n sxbinary[(scaled_sobel >= grad_thresh[0]) & (scaled_sobel <= grad_thresh[1])] = 1\n \n #process the HIS s channel \n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n\n# color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255\n# one can show it out to see the colored binary\n\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(sxbinary)\n combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1\n return combined_binary", "_____no_output_____" ], [ "def undistort_image(image, objectpoints, imagepoints):\n # Get image size\n img_size = (image.shape[1], image.shape[0])\n # Calibrate camera based on objectpoints, imagepoints, and image size\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectpoints, imagepoints, img_size, None, None)\n # Call cv2.undistort\n dst = cv2.undistort(image, mtx, dist, None, 
mtx)\n return dst", "_____no_output_____" ], [ "def warp_image_to_birdseye_view(image):\n image_size=(image.shape[1], image.shape[0])\n # Get perspective transform\n perspectiveTransform = cv2.getPerspectiveTransform(src, dst)\n # Warp perspective\n warped = cv2.warpPerspective(image, perspectiveTransform, image_size, flags=cv2.INTER_LINEAR)\n # Get the destination perspective transform\n Minv = cv2.getPerspectiveTransform(dst, src)\n return warped, Minv", "_____no_output_____" ], [ "\ndef find_lane_lines(warped_binary_image, testing=False):\n if testing == True:\n # Create an output image to draw on and visualize the result\n output_image = np.dstack((warped_binary_image, warped_binary_image, warped_binary_image))*255\n\n # Create histogram to find the lanes by identifying the peaks in the histogram\n histogram = np.sum(warped_binary_image[int(warped_binary_image.shape[0]/2):,:], axis=0)\n \n # Find the peak of the left and right halves of the histogram\n midpoint = np.int(histogram.shape[0]/2)\n left_x_base = np.argmax(histogram[:midpoint])\n right_x_base = np.argmax(histogram[midpoint:]) + midpoint\n \n # Choose the number of sliding windows\n number_of_windows = 9\n # Set height of windows\n window_height = np.int(warped_binary_image.shape[0]/number_of_windows)\n \n # Identify the x and y positions of all nonzero pixels in the image\n nonzero_pixels = warped_binary_image.nonzero()\n nonzero_y_pixels = np.array(nonzero_pixels[0])\n nonzero_x_pixels = np.array(nonzero_pixels[1])\n \n # Current positions to be updated for each window\n left_x_current = left_x_base\n right_x_current = right_x_base\n \n # Set the width of the windows +/- margin\n margin = 100\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n \n # Step through the windows one by one\n for window in range(number_of_windows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = warped_binary_image.shape[0] - (window+1)*window_height\n win_y_high = warped_binary_image.shape[0] - window*window_height\n win_x_left_low = left_x_current - margin\n win_x_left_high = left_x_current + margin\n win_x_right_low = right_x_current - margin\n win_x_right_high = right_x_current + margin\n \n if testing == True:\n # Draw the windows on the visualization image\n cv2.rectangle(output_image, (win_x_left_low,win_y_low), (win_x_left_high,win_y_high), (0,255,0), 2)\n cv2.rectangle(output_image, (win_x_right_low,win_y_low), (win_x_right_high,win_y_high), (0,255,0), 2)\n \n # Identify the nonzero pixels in x and y within the window\n left_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_left_low) & (nonzero_x_pixels < win_x_left_high)).nonzero()[0]\n right_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_right_low) & (nonzero_x_pixels < win_x_right_high)).nonzero()[0]\n \n # Append these indices to the lists\n left_lane_inds.append(left_inds)\n right_lane_inds.append(right_inds)\n \n # If you found > minpix pixels, recenter next window on their mean position\n if len(left_inds) > minpix:\n left_x_current = np.int(np.mean(nonzero_x_pixels[left_inds]))\n if len(right_inds) > minpix: \n right_x_current = np.int(np.mean(nonzero_x_pixels[right_inds]))\n \n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = 
np.concatenate(right_lane_inds)\n \n # Extract left and right line pixel positions\n left_x = nonzero_x_pixels[left_lane_inds]\n left_y = nonzero_y_pixels[left_lane_inds] \n right_x = nonzero_x_pixels[right_lane_inds]\n right_y = nonzero_y_pixels[right_lane_inds]\n \n # Fit a second order polynomial to each\n left_fit = np.polyfit(left_y, left_x, 2)\n right_fit = np.polyfit(right_y, right_x, 2)\n \n # Generate x and y values for plotting\n plot_y = np.linspace(0, warped_binary_image.shape[0]-1, warped_binary_image.shape[0] )\n left_fit_x = left_fit[0]*plot_y**2 + left_fit[1]*plot_y + left_fit[2]\n right_fit_x = right_fit[0]*plot_y**2 + right_fit[1]*plot_y + right_fit[2]\n \n # Get binary warped image size\n image_size = warped_binary_image.shape\n \n # Get max of plot_y\n y_eval = np.max(plot_y)\n \n # Define conversions in x and y from pixels space to meters\n y_m_per_pix = 30/720\n x_m_per_pix = 3.7/700\n \n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(left_y*y_m_per_pix, left_x*x_m_per_pix, 2)\n right_fit_cr = np.polyfit(right_y*y_m_per_pix, right_x*x_m_per_pix, 2)\n \n # Calculate radius of curve\n left_curve = ((1+(2*left_fit_cr[0]*y_eval*y_m_per_pix+left_fit_cr[1])**2)**1.5)/np.absolute(2*left_fit_cr[0])\n right_curve = ((1+(2*right_fit_cr[0]*y_eval*y_m_per_pix+right_fit_cr[1])**2)**1.5)/np.absolute(2*right_fit_cr[0])\n \n # Calculate lane deviation from center of lane\n scene_height = image_size[0] * y_m_per_pix\n scene_width = image_size[1] * x_m_per_pix\n \n # Calculate the intercept points at the bottom of our image\n left_intercept = left_fit_cr[0] * scene_height ** 2 + left_fit_cr[1] * scene_height + left_fit_cr[2]\n right_intercept = right_fit_cr[0] * scene_height ** 2 + right_fit_cr[1] * scene_height + right_fit_cr[2]\n center = (left_intercept + right_intercept) / 2.0\n \n # Use intercept points to calculate the lane deviation of the vehicle\n lane_deviation = (center - scene_width / 2.0)\n \n if testing == True:\n output_image[nonzero_y_pixels[left_lane_inds], nonzero_x_pixels[left_lane_inds]] = [255, 0, 0]\n output_image[nonzero_y_pixels[right_lane_inds], nonzero_x_pixels[right_lane_inds]] = [0, 0, 255]\n return left_fit_x, right_fit_x, plot_y, left_fit, right_fit, left_curve, right_curve, lane_deviation, output_image\n else:\n return left_fit_x, right_fit_x, plot_y, left_curve, right_curve, lane_deviation\n\n\n\n\n", "_____no_output_____" ], [ "def draw_lane_lines(warped_binary_image, undistorted_image, Minv):\n # Create a blank image to draw the lines on\n warp_zero = np.zeros_like(warped_binary_image).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n left_fit_x, right_fit_x, ploty, left_radius, right_radius, lane_deviation=find_lane_lines(warped_binary_image)\n \n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fit_x, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n \n # Draw the lane onto the warped blank image with green color\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n \n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n unwarp = cv2.warpPerspective(color_warp, Minv, (undistorted_image.shape[1], undistorted_image.shape[0])) \n \n # Combine the result with the original image\n result = cv2.addWeighted(undistorted_image, 1, unwarp, 0.3, 0)\n \n # Write text on image\n curvature_text = \"Curvature: Left = \" + 
str(np.round(left_radius, 2)) + \", Right = \" + str(np.round(right_radius, 2))\n font = cv2.FONT_HERSHEY_TRIPLEX \n cv2.putText(result, curvature_text, (30, 60), font, 1, (0,255,0), 2)\n deviation_text = \"Lane deviation from center = {:.2f} m\".format(lane_deviation) \n font = cv2.FONT_HERSHEY_TRIPLEX\n cv2.putText(result, deviation_text, (30, 90), font, 1, (0,255,0), 2)\n \n return result\n", "_____no_output_____" ], [ "def process_image(image):\n undistorted = undistort_image(image, objpoints, imgpoints)\n \n combined_binary = get_shresholded_img(undistorted,grad_thresh,s_thresh)\n \n binary_warped, Minv = warp_image_to_birdseye_view(combined_binary)\n\n lane_lines_img = draw_lane_lines(binary_warped, undistorted, Minv)\n \n return lane_lines_img", "_____no_output_____" ], [ "image=plt.imread('test_images/test5.jpg')\nresult=process_image(image)\nplt.imshow(result)", "_____no_output_____" ], [ "image=plt.imread('test_images/test5.jpg')\nwith open('./camera_cal/wide_dist_pickle.p',mode='rb') as f:\n dist_pickle = pickle.load(f)\n mtx = dist_pickle['mtx']\n dist = dist_pickle[\"dist\"]\ndst = cv2.undistort(image, mtx, dist, None, mtx)\ncombined_binary = get_shresholded_img(dst,grad_thresh,s_thresh)\ndef warp_image_to_birdseye_view(image):\n image_size=(image.shape[1], image.shape[0])\n # Get perspective transform\n perspectiveTransform = cv2.getPerspectiveTransform(src, dst)\n # Warp perspective\n warped = cv2.warpPerspective(image, perspectiveTransform, image_size, flags=cv2.INTER_LINEAR)\n # Get the destination perspective transform\n Minv = cv2.getPerspectiveTransform(dst, src)\n return warped, Minv\n\nwarped, Minv = warp_image_to_birdseye_view(combined_binary )\nplt.figure(figsize=(20,10))\nplt.imshow(warped)", "_____no_output_____" ], [ "from moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n!pip install moviepy", "Requirement already satisfied: moviepy in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (1.0.2)\nRequirement already satisfied: proglog<=1.0.0 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (0.1.9)\nRequirement already satisfied: decorator<5.0,>=4.0.2 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (4.4.2)\nRequirement already satisfied: numpy; python_version >= \"2.7\" in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (1.18.1)\nRequirement already satisfied: imageio<3.0,>=2.5; python_version >= \"3.4\" in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (2.8.0)\nRequirement already satisfied: requests<3.0,>=2.8.1 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (2.23.0)\nRequirement already satisfied: imageio-ffmpeg>=0.2.0; python_version >= \"3.4\" in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (0.4.1)\nRequirement already satisfied: tqdm<5.0,>=4.11.2 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from moviepy) (4.46.0)\nRequirement already satisfied: pillow in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from imageio<3.0,>=2.5; python_version >= \"3.4\"->moviepy) (7.1.2)\nRequirement already satisfied: idna<3,>=2.5 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from requests<3.0,>=2.8.1->moviepy) (2.9)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from 
requests<3.0,>=2.8.1->moviepy) (1.25.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from requests<3.0,>=2.8.1->moviepy) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /Users/zhouwenbin/anaconda3/envs/py36/lib/python3.6/site-packages (from requests<3.0,>=2.8.1->moviepy) (2020.4.5.1)\n" ], [ "\nvideo_output = \"output_images/project_video.mp4\"\nclip1 = VideoFileClip(\"project_video.mp4\")\nclip1_output = clip1.fl_image(process_image)\n%time clip1_output.write_videofile(video_output, audio=False)", "t: 28%|██▊ | 137/485 [38:27<04:14, 1.37it/s, now=None]\nt: 0%| | 0/1260 [00:00<?, ?it/s, now=None]\u001b[A" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(video_output))\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a41bae425fb3b7239aad11a077c34958c800575
46,002
ipynb
Jupyter Notebook
tutorial/08 - Graph and Network Plots.ipynb
strates-git/bokeh-notebooks
e4ee6f6be4d1d64a4846b65f6dc9187fafa67d1d
[ "BSD-3-Clause" ]
683
2015-01-12T05:10:04.000Z
2022-03-30T09:56:36.000Z
Bokeh_tuts/08 - Graph and Network Plots.ipynb
iitylerdurden/Data-Visualization-with-Python
80d3cff94cd81042a373a16f9af352a8f70f5735
[ "MIT" ]
86
2015-03-16T11:18:02.000Z
2021-05-07T23:06:29.000Z
Bokeh_tuts/08 - Graph and Network Plots.ipynb
iitylerdurden/Data-Visualization-with-Python
80d3cff94cd81042a373a16f9af352a8f70f5735
[ "MIT" ]
828
2015-01-21T11:48:18.000Z
2022-03-30T09:56:39.000Z
62.844262
6,225
0.54202
[ [ [ "<table style=\"float:left; border:none\">\n <tr style=\"border:none\">\n <td style=\"border:none\">\n <a href=\"https://bokeh.org/\"> \n <img \n src=\"assets/bokeh-transparent.png\" \n style=\"width:50px\"\n >\n </a> \n </td>\n <td style=\"border:none\">\n <h1>Bokeh Tutorial</h1>\n </td>\n </tr>\n</table>\n\n<div style=\"float:right;\"><h2>08. Graph and Network Plots</h2></div>", "_____no_output_____" ], [ "This chapter will cover how to plot network node/link graphs in Bokeh using NetworkX. For information on creating graph renderers from a low level, see [Visualizing Network Graphs](https://docs.bokeh.org/en/latest/docs/user_guide/graph.html)\n", "_____no_output_____" ] ], [ [ "from bokeh.io import show, output_notebook\nfrom bokeh.plotting import figure\n\noutput_notebook()", "_____no_output_____" ] ], [ [ "## Plotting from NetworkX\n\nThe easiest way to plot network graphs with Bokeh is to use the `from_networkx` function. This function accepts any NetworkX graph and returns a Bokeh `GraphRenderer` that can be added to a plot. The `GraphRenderer` has `node_renderer` and `edge_renderer` properties that contain the Bokeh renderers that draw the nodes and edges, respectively. \n\nThe example below shows a Bokeh plot of `nx.desargues_graph()`, setting some of the node and edge properties.", "_____no_output_____" ] ], [ [ "import networkx as nx\nfrom bokeh.models import Range1d, Plot\nfrom bokeh.plotting import from_networkx\n\nG = nx.desargues_graph()\n\n# We could use figure here but don't want all the axes and titles\nplot = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2, 2))\n\n# Create a Bokeh graph from the NetworkX input using nx.spring_layout\ngraph = from_networkx(G, nx.spring_layout, scale=1.8, center=(0,0))\nplot.renderers.append(graph)\n\n# Set some of the default node glyph (Circle) properties\ngraph.node_renderer.glyph.update(size=20, fill_color=\"orange\")\n\n# Set some edge properties too\ngraph.edge_renderer.glyph.line_dash = [2,2]\n\nshow(plot)", "_____no_output_____" ], [ "# Exercise: try a different NetworkX layout, and set some properies on `graph.edge_renderer.glyph` \n# and `graph.node_renderer.glyph`\n", "_____no_output_____" ] ], [ [ "## Adding Extra Data Columns.\n\nThe `node_renderer` and `edge_renderer` properties of the graph renderer each have a `data_source` that is a standard `ColumnDataSource` that you can add new data to, e.g. to drive a hover tool, or to specify colors for the renderer. The example below demonstates both.", "_____no_output_____" ] ], [ [ "from bokeh.models import HoverTool\nfrom bokeh.palettes import Category20_20\n\nG = nx.desargues_graph() # always 20 nodes\n\n# We could use figure here but don't want all the axes and titles\nplot = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2, 2))\n\n# Create a Bokeh graph from the NetworkX input using nx.spring_layout\ngraph = from_networkx(G, nx.spring_layout, scale=1.8, center=(0,0))\nplot.renderers.append(graph)\n\n# Add some new columns to the node renderer data source\ngraph.node_renderer.data_source.data['index'] = list(range(len(G)))\ngraph.node_renderer.data_source.data['colors'] = Category20_20\n\ngraph.node_renderer.glyph.update(size=20, fill_color=\"colors\")\n\nplot.add_tools(HoverTool(tooltips=\"index: @index\"))\n\nshow(plot)", "_____no_output_____" ], [ "# Exercise: Add your own columns for other node or edge properties e.g. 
fill_alpha or line_color,\n# or to show other fields in a tooltoip\n", "_____no_output_____" ] ], [ [ "## Inspection and Selection Policies\n\nBokeh graph renderers have `inspection_policy` and `selection_policy` properties. These can be used to control how hover inspections highlight the graph, or how selection tools make selections. These properties may be set to any of the inpection policies in `bokeh.graphs`. For instance, if a user hovers over a node, you may wish to highlight all the associated edges as well. This can be accomplished by setting the inspection policy:\n\n graph.inspection_policy = NodesAndLinkedEdges()\n \nas the example below demonstrates.", "_____no_output_____" ] ], [ [ "from bokeh.models.graphs import NodesAndLinkedEdges\nfrom bokeh.models import Circle, HoverTool, MultiLine\n\nG = nx.gnm_random_graph(15, 30)\n\n# We could use figure here but don't want all the axes and titles\nplot = Plot(x_range=Range1d(-2, 2), y_range=Range1d(-2 ,2))\n\n# Create a Bokeh graph from the NetworkX input using nx.spring_layout\ngraph = from_networkx(G, nx.spring_layout, scale=1.8, center=(0,0))\nplot.renderers.append(graph)\n\n# Blue circles for nodes, and light grey lines for edges\ngraph.node_renderer.glyph = Circle(size=25, fill_color='#2b83ba')\ngraph.edge_renderer.glyph = MultiLine(line_color=\"#cccccc\", line_alpha=0.8, line_width=2)\n\n# green hover for both nodes and edges\ngraph.node_renderer.hover_glyph = Circle(size=25, fill_color='#abdda4')\ngraph.edge_renderer.hover_glyph = MultiLine(line_color='#abdda4', line_width=4)\n\n# When we hover over nodes, highlight adjecent edges too\ngraph.inspection_policy = NodesAndLinkedEdges()\n\nplot.add_tools(HoverTool(tooltips=None))\n\nshow(plot)", "_____no_output_____" ], [ "# Exercise: try a different inspection (or selection) policy like NodesOnly or EdgesAndLinkedNodes\n", "_____no_output_____" ] ], [ [ "# Next Section", "_____no_output_____" ], [ "Click on this link to go to the next notebook: [09 - Geographic Plots](09%20-%20Geographic%20Plots.ipynb).\n\nTo go back to the overview, click [here](00%20-%20Introduction%20and%20Setup.ipynb).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
4a41c441695b9524d64327deb45dc2606e27c717
35,879
ipynb
Jupyter Notebook
notebooks/FoldX.ipynb
victorfica/Master-thesis
5390d8d2df50300639d860a8d17ccd54445cf3a3
[ "MIT" ]
1
2021-03-20T04:56:56.000Z
2021-03-20T04:56:56.000Z
notebooks/FoldX.ipynb
victorfica/Master-thesis
5390d8d2df50300639d860a8d17ccd54445cf3a3
[ "MIT" ]
null
null
null
notebooks/FoldX.ipynb
victorfica/Master-thesis
5390d8d2df50300639d860a8d17ccd54445cf3a3
[ "MIT" ]
null
null
null
51.550287
1,721
0.48867
[ [ [ "from pathlib import Path\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport pandas as pd\n\n\nnames_rows_stability = [\n ['dg', 1], # totalEnergy\n ['backbone_hbond', 2],\n ['sidechain_hbond', 3],\n ['van_der_waals', 4],\n ['electrostatics', 5],\n ['solvation_polar', 6],\n ['solvation_hydrophobic', 7],\n ['van_der_waals_clashes', 8],\n ['entropy_sidechain', 9],\n ['entropy_mainchain', 10],\n ['sloop_entropy', 11],\n ['mloop_entropy', 12],\n ['cis_bond', 13],\n ['torsional_clash', 14],\n ['backbone_clash', 15],\n ['helix_dipole', 16],\n ['water_bridge', 17],\n ['disulfide', 18],\n ['electrostatic_kon', 19],\n ['partial_covalent_bonds', 20],\n ['energy_ionisation', 21],\n ['entropy_complex', 22],\n ['number_of_residues', 23],\n ['interface_residues', 24],\n ['interface_residues_clashing', 25],\n ['interface_residues_vdw_clashing', 26],\n ['interface_residues_bb_clashing', 27]\n]\n\nnames_rows_stability_complex = ([\n ['intraclashes_energy_1', 3],\n ['intraclashes_energy_2', 4],\n] + [[x[0], x[1] + 4] for x in names_rows_stability])\nnames_stability_complex = list(next(zip(*names_rows_stability_complex)))\nnames_stability_complex_wt = [name + '_wt'\n for name in names_stability_complex[:-5]] + \\\n ['number_of_residues', 'interface_residues_wt', 'interface_residues_clashing_wt',\n 'interface_residues_vdw_clashing_wt', 'interface_residues_bb_clashing_wt']\nnames_stability_complex_mut = [name + '_mut'\n for name in names_stability_complex[:-5]] + \\\n ['number_of_residues', 'interface_residues_mut', 'interface_residues_clashing_mut',\n 'interface_residues_vdw_clashing_mut', 'interface_residues_bb_clashing_mut']\n\n\ndef _export_foldxpath(path_to_export):\n # export PATH=$PATH:/path/to/folder\n if str(path_to_export) not in os.environ[\"PATH\"]:\n os.environ[\"PATH\"] += \":\" + str(path_to_export)\n print(\"foldx path exported\")\n else:\n print(\"foldx bin folder already in PATH\")\n\n\ndef _rotabase_symlink(rotabase_path):\n # rotabase symlink\n sym_rotabase = Path(\"rotabase.txt\")\n if not sym_rotabase.is_symlink():\n sym_rotabase.symlink_to(rotabase_path)\n print(\"Symlink to rotabase.txt create on working dir\")\n else:\n print(\"rotabase.txt symlink already exist on working dir\")\n\n\ndef read_analyse_complex(output_file):\n df = pd.read_csv(output_file, sep='\\t', index_col=False, skiprows=8)\n # Format dataframe\n df = df.rename(columns=lambda s: s.lower().replace(' ', '_'))\n #logger.debug(df.head())\n assert df.shape[0] == 1\n result = df.drop(pd.Index(['pdb', 'group1', 'group2']), axis=1).iloc[0].tolist()\n return result\n\n\ndef convert_features_to_differences(df, keep_mut=False):\n \"\"\"Convert `_wt` and `_mut` columns into `_wt` and `_change` columns.\n Create a new set of features (ending in `_change`) that describe the difference between values\n of the wildtype (features ending in `_wt`) and mutant (features ending in `_mut`) features.\n If `keep_mut` is `False`, removes all mutant features (features ending in `_mut`).\n \"\"\"\n column_list = []\n for column_name, column in df.iteritems():\n if ('_mut' in column_name and column_name.replace('_mut', '_wt') in df.columns and\n df[column_name].dtype != object):\n if keep_mut:\n column_list.append(column)\n new_column = column - df[column_name.replace('_mut', '_wt')]\n if 'secondary_structure' in column_name:\n new_column = new_column.apply(lambda x: 1 if x else 0)\n new_column.name = column_name.replace('_mut', '_change')\n column_list.append(new_column)\n else:\n column_list.append(column)\n new_df = 
pd.concat(column_list, axis=1)\n return new_df\n\nfoldx_exe = \"/mnt/d/Python_projects/AbPred/libs/foldx5Linux64/\"\nclass FoldX:\n\n def __init__(self, foldx_dir=None, verbose=True):\n\n self._tempdir = Path(foldx_exe)\n _export_foldxpath(self._tempdir)\n #self.verbose = verbose\n self.pdbfile = None\n\n def _run(self, cmd, **options):\n\n \"\"\" ********************************************\n *** ***\n *** FoldX 4 (c) ***\n *** ***\n *** code by the FoldX Consortium ***\n *** ***\n *** Jesper Borg, Frederic Rousseau ***\n *** Joost Schymkowitz, Luis Serrano ***\n *** Peter Vanhee, Erik Verschueren ***\n *** Lies Baeten, Javier Delgado ***\n *** and Francois Stricher ***\n *** and any other of the 9! permutations ***\n *** based on an original concept by ***\n *** Raphael Guerois and Luis Serrano ***\n ********************************************\n\n FoldX program options:\n\n\n Basic OPTIONS:\n -v [ --version ] arg (=Version beta 4)\n print version string\n -h [ --help ] produce help message\n -c [ --command ] arg Choose your FoldX Command:\n\n AlaScan\n AnalyseComplex\n BuildModel\n CrystalWaters\n Dihedrals\n DNAContact\n DNAScan\n LoopReconstruction\n MetalBinding\n Optimize\n PDBFile\n PepX\n PositionScan\n PrintNetworks\n Pssm\n QualityAssessment\n ReconstructSideChains\n RepairPDB\n Rmsd\n SequenceDetail\n SequenceOnly\n Stability\n\n -f [ --config ] arg config file location\n -d [ --debug ] arg Debug, produces more output\n\n Generic OPTIONS:\n --pdb arg (=\"\")\n --pdb-list arg (=\"\") File with a list of PDB files\n --pdb-dir arg (=\"./\") PDB directory\n --output-dir arg (=\"./\") OutPut directory\n --output-file arg (=\"\") OutPut file\n --queue arg cluster queue: fast, normal, infinity,\n highmem, all.q\n --clean-mode arg (=0) FoldX clean mode: none, all, output or\n pdb\n --max-nr-retries arg (=1) Maximum number of retries of a FoldX\n command if not finished successfully.\n Especially important to set at least to\n two when working on a cluster and file\n transfers often fail.\n --skip-build arg (=0) Skip the build step in the algorithm\n\n FoldX OPTIONS:\n\n input:\n --fixSideChains arg allows FoldX to complete missing\n sidechains at read-time, defaults to\n true\n --rotabaseLocation arg set the location of the rotabase,\n defaults to rotabase.txt\n --noCterm arg set whether the last residue in a list\n of peptides (ex:ABC) shouldn't be\n considered as the C-terminal (i.e.,\n have an OXT), defaults to none\n --noNterm arg set whether the first residue in a list\n peptides (ex: ABC) shouldn't be\n considered as the N-Terminal (i.e.,\n have a third proton on the N), defaults\n to none\n\n output:\n --screen arg (=1) sets screen output, defaults to true\n --overwriteBatch arg (=1) set to overwrite or not the specific\n name given as the first value in a\n command, defaults to true\n --noHeader arg (=0) remove standard FoldX Header from\n outputs, defaults to false\n\n PDB output:\n --out-pdb arg (=1) set to output PDBs when doing\n mutations, defaults to true\n --pdbHydrogens arg (=0) output the hydrogens we place in the\n generated pdbs, defaults to false\n --pdbWaters arg (=0) output the predicted water bridges in\n the generated pdbs, defaults to false\n --pdbIons arg (=0) output the predicted metal ions in the\n generated pdbs, defaults to false\n --pdbDummys arg (=0) output the the dummy atoms we use (for\n N and C caps of helixes as well as the\n free orbitals) in the generated pdbs,\n defaults to false\n --pdbIsoforms arg (=0) output the isoforms of the His in the\n 
generated pdbs, defaults to false\n\n physico chemical parameters:\n --temperature arg set the temperature (in K) of the\n calculations, defaults to 298 K\n --pH arg set the pH of the calculations,\n defaults to 7\n --ionStrength arg set the ionic strength of the\n calculations, defaults to 0.05\n\n force-field:\n --vdwDesign arg set VdWDesign of the experiment,\n defaults to 2 ( 0 very soft, 1 medium\n soft, 2 strong used for design )\n --clashCapDesign arg set maximun penalty per atom of the van\n der waals' clashes, defaults set to 5.0\n --backBoneAtoms arg consider only backbone atoms for all\n energy calculations, defaults to false\n --dipoles arg set to consider helices dipoles,\n defaults to true\n --complexClashes arg set the threshold (in kcal/mol) for\n counting clashing aminoacids at the\n interface, defaults to 1.\n\n entropy calculations:\n --fullMainEntropy arg set to maximally penalize the main\n chain of ligand and protein (usefull\n when comparing peptide data with\n poly-Alanine backbones), defaults to\n false\n\n water and ion evaluations:\n --water arg set how FoldX considers waters:\n -CRYSTAL (read the pdb waters) -PREDICT\n (predict water bridges from sratch)\n -IGNORE (don't consider waters)\n -COMPARE, defaults to -IGNORE\n\n complex options:\n --complexWithDNA arg set to consider only two groups in a\n protein-DNA complex, DNA + protein,\n defaults to false\n\n algorithm specific parameters:\n --moveNeighbours arg set to move neighbours when we mutate,\n defaults to true\n --numberOfRuns arg set the number of runs done in\n BuidModel, defaults to 1\n --fitAtoms arg set atoms involved in the RMSD command\n BB(backbone atoms), CA(Calpha),\n CA_CB(both Calpha and Cbeta),\n N_CA_O(N,Calpha and O), defaults to BB\n --rmsdPDB arg print out the rotated target of the\n RMSD command, defaults to true\n --repair_Interface arg set to limit RepairPDB when applying to\n a complex: ALL(repair all residues\n including interface), ONLY(repair only\n the interface), NONE(no repair of the\n interface), defaults to ALL\n --burialLimit arg set a burial limit under which a\n residue is not repaired, defaults to 1.\n (inactive)\n --bFactorLimit arg set a relative bFactor limit above\n which a residue is not repaired,\n defaults to 0. 
(inactive)\"\"\"\n\n if options:\n for key, value in options.items():\n cmd.extend([\"--\" + key, value])\n p = subprocess.Popen(shlex.split(cmd), universal_newlines=True, shell=False, stdout=subprocess.PIPE)\n while True:\n out = p.stdout.readline()\n if not out and p.poll() is not None:\n break\n if self.verbose and out:\n print(out.splitlines()[0])\n \n def _run(self,cmd):\n # call external program on `filename`\n fout = open(\"stdout_{}.txt\".format(self.pdbfile[:-4]),\"w\")\n subprocess.check_call(shlex.split(cmd),stdout=fout)\n fout.close()\n\n\n def repair_pdb(self, pdbfile):\n \"\"\"Run FoldX ``RepairPDB`` \"\"\"\n\n pdb = Path(pdbfile).absolute()\n self.pdbfile = pdb.name\n command = (\"foldx --command=RepairPDB --pdb={}\".format(self.pdbfile))\n self._run(command)\n\n def analyse_complex(self, pdb_file, partners):\n \"\"\"Run FoldX ``AnalyseComplex``.\"\"\"\n\n pdb = Path(pdb_file).absolute()\n pdb_name = pdb.name[:-4]\n partner1 = partners.split('_')[0]\n partner2 = partners.split('_')[1]\n\n command = (\"foldx --command=AnalyseComplex --pdb={} \".format(pdb.name) +\n \"--analyseComplexChains={},{} \".format(partner1, partner2))\n\n self._run(command)\n output_file = pdb.parent.joinpath('Interaction_%s_AC.fxout' % pdb_name)\n\n result = read_analyse_complex(output_file)\n\n return result\n\n def point_mutations(self, pdb_file, partners, to_mutate, mutations):\n \"\"\"Run FoldX ``Pssm``.\n\n Parameters\n ----------\n to_mutate:\n Mutation specified in the following format:\n {mutation.residue_wt}{chain_id}{residue_id}\n mutations:\n Mutant residues\n \"\"\"\n pdb = Path(pdb_file).absolute()\n pdb_mutation = pdb.name[:-4]+'_'+to_mutate+mutations\n partner1 = partners.split('_')[0]\n partner2 = partners.split('_')[1]\n\n command = (\"foldx --command=Pssm --pdb={} \".format(pdb.name) +\n \"--analyseComplexChains={},{} \".format(partner1, partner2) +\n \"--positions={}a \".format(to_mutate) + \"--aminoacids={} \".format(mutations) +\n '--output-file={}'.format(pdb_mutation))\n\n self._run(command)\n\n # Copy foldX result to mantain local copy\n wt_result = Path('WT_{}_1.pdb'.format(pdb.name[:-4]))\n mut_result = Path('{}_1.pdb'.format(pdb.name[:-4]))\n wt_rename = Path('{}-{}-wt.pdb'.format(pdb.name[:-4], to_mutate+mutations))\n mut_rename = Path('{}-{}-mut.pdb'.format(pdb.name[:-4], to_mutate+mutations))\n\n shutil.copy(wt_result, wt_rename)\n shutil.copy(mut_result, mut_rename)\n\n def build_model(self, pdb_file, foldx_mutation):\n\n pdb = Path(pdb_file).absolute()\n\n mutation_file = self._get_mutation_file(pdb_file, foldx_mutation)\n command = (\"foldx --command=BuildModel --pdb='{}' \".format(pdb.name) +\n \"--mutant-file='{}'\".format(mutation_file))\n\n self._run(command)\n\n # Copy foldX result to mantain local copy\n wt_result = Path('WT_{}_1.pdb'.format(pdb.name[:-4]))\n mut_result = Path('{}_1.pdb'.format(pdb.name[:-4]))\n wt_rename = Path('{}-{}-wt.pdb'.format(pdb.name[:-4], foldx_mutation))\n mut_rename = Path('{}-{}-mut.pdb'.format(pdb.name[:-4], foldx_mutation))\n\n shutil.copy(wt_result, wt_rename)\n shutil.copy(mut_result, mut_rename)\n\n def _get_mutation_file(self, pdb_file, foldx_mutation):\n \"\"\"\n Parameters\n ----------\n foldx_mutation:\n Mutation specified in the following format:\n {mutation.residue_wt}{chain_id}{residue_id}{mutation.residue_mut}\n \"\"\"\n pdb = Path(pdb_file).absolute()\n\n mutation_file = Path('individual_list_{}_{}.txt'.format(pdb.name[:-4], foldx_mutation))\n mutation_file.write_text('{};\\n'.format(foldx_mutation))\n\n return 
mutation_file\n\n", "_____no_output_____" ] ], [ [ "The main difference is that, while popen is a non-blocking function (meaning you can continue the execution of the program without waiting the call to finish), both call and check_output are blocking.\n\nThe other difference is in what they return:\n\npopen returns a Popen object.\ncall returns the returncode attribute.\ncheck_output returns the output of the command execution.\nThe methods call and check_output are, in fact, blocking wrappers of popen, using a Popen object. For example, you can get the returncode attribute by calling Popen.returncode().", "_____no_output_____" ] ], [ [ "PDBS_DIR = Path(\"out_models/\")\npdbs_paths = list(PDBS_DIR.glob(\"*mut.pdb\"))", "_____no_output_____" ], [ "subprocess.DEVNULL?", "_____no_output_____" ], [ "### form 1\nprocs = []\nfor p in range(2):\n pdb = Path(\"VRC01.pdb\").absolute()\n\n command = (\"foldx --command=RepairPDB --pdb={}\".format(pdb.name))\n fout = open(\"stdout_%d.txt\" % p,'w')\n p = subprocess.Popen(shlex.split(command), stdout=fout)\n fout.close()\n procs.append(p)\n\nfor p in procs:\n p.wait()", "_____no_output_____" ], [ "f.name", "_____no_output_____" ], [ "# form 2\nimport os\nimport concurrent.futures\n\ndef run(command):\n ... # call external program on `filename`\n command = shlex.split(command)\n fout = open(\"stdout_{}.txt\".format(f.name),\"w\")\n subprocess.check_call(command,stdout=fout)\n fout.close()\n\n\ndef repair_pdb(pdbfile):\n pdb = Path(pdbfile).absolute()\n command = (\"foldx --command=RepairPDB --pdb={}\".format(pdb.name))\n run(command)\n\n\n# populate files\npdbs_paths = list(PDBS_DIR.glob(\"*mut.pdb\"))[:10]\n\nCWD = os.getcwd()\ntry:\n os.chdir(PDBS_DIR)\n # start threads\n with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:\n future_to_file = dict((executor.submit(repair_pdb, f), f) for f in pdbs_paths)\n\n for future in concurrent.futures.as_completed(future_to_file):\n f = future_to_file[future]\n if future.exception() is not None:\n print('%r generated an exception: %s' % (f, future.exception()))\n # run() doesn't return anything so `future.result()` is always `None`\nfinally:\n os.chdir(CWD)", "_____no_output_____" ], [ "foldx.", "_____no_output_____" ], [ "# form 2 with foldx class\npdbs_paths = list(PDBS_DIR.glob(\"*mut.pdb\"))[:10]\n\nCWD = os.getcwd()\ntry:\n os.chdir(PDBS_DIR)\n # start threads\n foldx = FoldX()\n with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:\n future_to_file = dict((executor.submit(foldx.repair_pdb, f), f) for f in pdbs_paths)\n\n for future in concurrent.futures.as_completed(future_to_file):\n f = future_to_file[future]\n if future.exception() is not None:\n print('%r generated an exception: %s' % (f, future.exception()))\n # run() doesn't return anything so `future.result()` is always `None`\nfinally:\n os.chdir(CWD)", "foldx bin folder already in PATH\n" ], [ "concurrent.futures.as_completed?", "_____no_output_____" ] ], [ [ "# Testing foldx class", "_____no_output_____" ] ], [ [ "foldx = FoldX(verbose=True)\n", "foldx path exported\n" ], [ "foldx.repair_pdb(pdb_file=\"VRC01.pdb\")\n", "_____no_output_____" ], [ "pdbs_to_repair = PDBS_DIR.glob(\"*.pdb\")\ntry:\n os.chdir(PDBS_DIR)\n #create symlink to rotabase.txt\n rotabase_symlink(ROTABASE)\n (PDBS_DIR.glob(\"*.pdb\"))\n for pdb in pdbs_to_repair:\n options = {\"command\":\"RepairPDB\",\"repair_Interface\":\"ONLY\",\"pdb\":str(pdb.name)}\n \n FoldX(exe=\"foldx\",verbose=True,**options).run()\nfinally:\n os.chdir(CWD)", 
"_____no_output_____" ], [ "subprocess.Popen?", "_____no_output_____" ] ] ]
[ "code", "raw", "code", "markdown", "code" ]
[ [ "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a41c66afaf722bd96bf83e4d960e3fdb0c75c02
10,938
ipynb
Jupyter Notebook
index.ipynb
Kilroy-Was-Here/QuantumKatas
1e86663dd2657f6bf7ce67cef01c6da5ff035774
[ "MIT" ]
null
null
null
index.ipynb
Kilroy-Was-Here/QuantumKatas
1e86663dd2657f6bf7ce67cef01c6da5ff035774
[ "MIT" ]
null
null
null
index.ipynb
Kilroy-Was-Here/QuantumKatas
1e86663dd2657f6bf7ce67cef01c6da5ff035774
[ "MIT" ]
null
null
null
63.225434
327
0.694825
[ [ [ "# Quantum Katas and Tutorials as Jupyter Notebooks\n\nTo run the katas and tutorials online, make sure you're viewing this file on Binder (if not, use [this link](https://mybinder.org/v2/gh/Microsoft/QuantumKatas/main?urlpath=/notebooks/index.ipynb)).\n\nTo run the katas and tutorials locally, follow [these installation instructions](https://github.com/microsoft/QuantumKatas/blob/main/README.md#kata-locally).\n\n> While running the Katas online is the easiest option to get started, if you want to save your progress and enjoy better performance, we recommend you to choose the local option.", "_____no_output_____" ], [ "## Learning path\n\nHere is the learning path we suggest you to follow if you are starting to learn quantum computing and quantum programming. Once you're comfortable with the basics, you're welcome to jump ahead to the topics that pique your interest!\n\n#### Quantum Computing Concepts: Qubits and Gates\n\n* **[Complex arithmetic (tutorial)](./tutorials/ComplexArithmetic/ComplexArithmetic.ipynb)**.\n Learn about complex numbers and the mathematics required to work with quantum computing.\n* **[Linear algebra (tutorial)](./tutorials/LinearAlgebra/LinearAlgebra.ipynb)**.\n Learn about vectors and matrices used to represent quantum states and quantum operations.\n* **[The qubit (tutorial)](./tutorials/Qubit/Qubit.ipynb)**.\n Learn what a qubit is.\n* **[Single-qubit gates (tutorial)](./tutorials/SingleQubitGates/SingleQubitGates.ipynb)**.\n Learn what a quantum gate is and about the most common single-qubit gates.\n* **[Basic quantum computing gates](./BasicGates/BasicGates.ipynb)**.\n Learn to apply the most common gates used in quantum computing.\n* **[Multi-qubit systems (tutorial)](./tutorials/MultiQubitSystems/MultiQubitSystems.ipynb)**.\n Learn to represent multi-qubit systems.\n* **[Multi-qubit gates (tutorial)](./tutorials/MultiQubitGates/MultiQubitGates.ipynb)**.\n Learn about the most common multi-qubit gates.\n* **[Superposition](./Superposition/Superposition.ipynb)**.\n Learn to prepare superposition states.\n\n#### Quantum Computing Concepts: Measurements\n\n* **[Single-qubit measurements (tutorial)](./tutorials/SingleQubitSystemMeasurements/SingleQubitSystemMeasurements.ipynb)**.\n Learn what quantum measurement is and how to use it for single-qubit systems.\n* **[Multi-qubit measurements (tutorial)](./tutorials/MultiQubitSystemMeasurements/MultiQubitSystemMeasurements.ipynb)**.\n Learn to use measurements for multi-qubit systems.\n* **[Measurements](./Measurements/Measurements.ipynb)**.\n Learn to distinguish quantum states using measurements.\n* **[Distinguish unitaries](./DistinguishUnitaries/DistinguishUnitaries.ipynb)**\\*.\n Learn to distinguish unitaries by designing and performing experiments with them.\n* **[Joint measurements](./JointMeasurements/JointMeasurements.ipynb)**\\*.\n Learn about using joint (parity) measurements to distinguish quantum states and to perform state transformations.\n\n#### Q\\# and Microsoft Quantum Development Kit Tools\n\n* **[Visualization tools (tutorial)](./tutorials/VisualizationTools/VisualizationTools.ipynb)**.\n Learn to use the various tools for visualizing elements of Q\\# programs.\n\n#### Simple Algorithms\n\n* **[Random number generation (tutorial)](./tutorials/RandomNumberGeneration/RandomNumberGenerationTutorial.ipynb)**.\n Learn to generate random numbers using the principles of quantum computing.\n* **[Teleportation](./Teleportation/Teleportation.ipynb)**. 
\n Implement standard teleportation protocol and its variations.\n* **[Superdense coding](./SuperdenseCoding/SuperdenseCoding.ipynb)**. \n Implement the superdense coding protocol.\n\n#### Quantum Oracles and Simple Oracle Algorithms\n\n* **[Quantum oracles (tutorial)](./tutorials/Oracles/Oracles.ipynb)**.\n Learn to implement classical functions as equivalent quantum oracles. \n* **[Exploring Deutsch and Deutsch–Jozsa algorithms (tutorial)](./tutorials/ExploringDeutschJozsaAlgorithm/DeutschJozsaAlgorithmTutorial_P1.ipynb)**.\n Learn to implement classical functions and equivalent quantum oracles, \n and compare the quantum solution to the Deutsch–Jozsa problem to a classical one.\n* **[Deutsch–Jozsa algorithm](./DeutschJozsaAlgorithm/DeutschJozsaAlgorithm.ipynb)**.\n Learn about quantum oracles which implement classical functions, and implement Bernstein–Vazirani and Deutsch–Jozsa algorithms.\n\n#### Grover's search algorithm\n\n* **[Implementing Grover's algorithm](./GroversAlgorithm/GroversAlgorithm.ipynb)**. \n Learn about Grover's search algorithm and how to write quantum oracles to use with it.\n* **[Exploring Grover's search algorithm (tutorial)](./tutorials/ExploringGroversAlgorithm/ExploringGroversAlgorithmTutorial.ipynb)**.\n Learn more about Grover's search algorithm, picking up where the [Grover's algorithm kata](./GroversAlgorithm/GroversAlgorithm.ipynb) left off.\n* **[Solving SAT problems using Grover's algorithm](./SolveSATWithGrover/SolveSATWithGrover.ipynb)**. \n Explore Grover's search algorithm, using SAT problems as an example. \n Learn to implement quantum oracles based on the problem description instead of a hard-coded answer. \n Use Grover's algorithm to solve problems with an unknown number of solutions.\n* **[Solving graph coloring problems using Grover's algorithm](./GraphColoring/GraphColoring.ipynb)**.\n Continue the exploration of Grover's search algorithm, using graph coloring problems as an example.\n* **[Solving bounded knapsack problem using Grover's algorithm](./BoundedKnapsack/BoundedKnapsack.ipynb)**.\n Learn how solve the variants of knapsack problem with Grover's search.\n\n#### Tools and libraries/Building up to Shor's algorithm\n\n* **[Quantum Fourier transform](./QFT/QFT.ipynb)**.\n Learn to implement quantum Fourier transform and to use it to perform simple state transformations.\n* **[Phase estimation](./PhaseEstimation/PhaseEstimation.ipynb)**.\n Learn about phase estimation algorithms.\n\n#### Entanglement games\n\n* **[CHSH game](./CHSHGame/CHSHGame.ipynb)**.\n* **[GHZ game](./GHZGame/GHZGame.ipynb)**.\n* **[Mermin-Peres magic square game](./MagicSquareGame/MagicSquareGame.ipynb)**.\n\n#### Reversible computing\n\n* **[Truth tables](./TruthTables/TruthTables.ipynb)**.\n Learn to represent and manipulate Boolean functions as truth tables and to implement them as quantum operations.\n* **[Ripple-carry adder](./RippleCarryAdder/RippleCarryAdder.ipynb)**.\n Build a ripple-carry adder on a quantum computer.\n\n#### Miscellaneous\n\n* **[BB84 protocol](./KeyDistribution_BB84/KeyDistribution_BB84.ipynb)**.\n Implement the BB84 key distribution algorithm.\n* **[Bit-flip error correcting code](./QEC_BitFlipCode/QEC_BitFlipCode.ipynb)**.\n Learn about a 3-qubit error correcting code for protecting against bit-flip errors.\n* **[Unitary patterns](./UnitaryPatterns/UnitaryPatterns.ipynb)**.\n Learn to implement unitaries with matrices that follow certain patterns of zero and non-zero elements.\n* **[Quantum classification 
(tutorial)](./tutorials/QuantumClassification/ExploringQuantumClassificationLibrary.ipynb)**.\n Learn about circuit-centric classifiers and the quantum machine learning library included in the QDK.\n\nFor a full list of Quantum Katas available as Q# projects instead of Jupyter Notebooks, see the [QuantumKatas repository](https://github.com/Microsoft/QuantumKatas#learning-path).", "_____no_output_____" ], [ "## Getting Started with Kata Notebooks and Tutorials\n\nEach kata notebook presents the tasks of the respective kata (Q# project) in Jupyter Notebook format. This makes getting started with the katas a lot easier - you don't need to install anything locally to try them out!\n\nNotebook tutorials are designed with Notebook format in mind - in addition to programming exercises they include a lot of theoretical explanations and code samples for you to learn from.\n\nMake sure you're viewing this file when running Jupyter notebooks on your machine or on Binder (for running on Binder, use [this link](https://mybinder.org/v2/gh/Microsoft/QuantumKatas/main?urlpath=/notebooks/index.ipynb)). From here you can navigate to the individual kata or tutorial notebooks using the links above.\n\n* Each tutorial or kata notebook contains a sequence of tasks on the topic, progressing from trivial to challenging.\n* Each task is defined in a separate code cell, preceded by the description of the task in a Markdown cell.\n Your goal is to fill in the blanks in the code (marked with `// ...` comments) with some Q# code that solves the task. \n* To verify your solution, run the code cell using Ctrl + Enter (or ⌘ + Enter on macOS). This will invoke the test covering the task and let you know whether it passes or fails, and if it fails, what the error is.\n* You can find pointers to reference materials you might need to solve the tasks, both on quantum computing and on Q#, either in the beginning of the tutorial or the kata or next to the task to which they are relevant.\n* You can find reference solutions in `ReferenceImplementation.qs` files of the corresponding katas or tutorials.\n* A lot of katas and tutorials have *workbooks* - detailed explanations of task solutions. Feel free to look them up if you're stuck!", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
4a41c767e0ea2a1fb2ed568b6f4e6d2da661d25e
111,596
ipynb
Jupyter Notebook
ngram_inference.ipynb
zzbn12345/WHOSe_Heritage
ed42d2775d301d18d1e1ceea2608266f69e16235
[ "MIT" ]
5
2021-04-12T15:04:12.000Z
2021-05-26T05:16:39.000Z
ngram_inference.ipynb
zzbn12345/WHOSe_Heritage
ed42d2775d301d18d1e1ceea2608266f69e16235
[ "MIT" ]
null
null
null
ngram_inference.ipynb
zzbn12345/WHOSe_Heritage
ed42d2775d301d18d1e1ceea2608266f69e16235
[ "MIT" ]
null
null
null
34.592684
260
0.482096
[ [ [ "# Classifying OUV using NGram features and MLP", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "import sys\nsys.executable", "_____no_output_____" ], [ "from argparse import Namespace\nfrom collections import Counter\nimport json\nimport os\nimport re\nimport string\nimport random\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm.notebook import tqdm\n\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import confusion_matrix\n\nfrom scipy.special import softmax\n\nimport pickle\nimport matplotlib.pyplot as plt\n\nimport torch.autograd.profiler as profiler\n\nimport torchtext\nfrom torchtext.data import get_tokenizer\ntokenizer = get_tokenizer('spacy')", "_____no_output_____" ], [ "print(\"PyTorch version {}\".format(torch.__version__))\nprint(\"GPU-enabled installation? {}\".format(torch.cuda.is_available()))", "PyTorch version 1.7.0\nGPU-enabled installation? False\n" ], [ "device = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(device)", "cpu\n" ] ], [ [ "## Data Vectorization Classes", "_____no_output_____" ], [ "### The Vocabulary", "_____no_output_____" ] ], [ [ "class Vocabulary(object):\n \"\"\"Class to process text and extract vocabulary for mapping\"\"\"\n\n def __init__(self, token_to_idx=None, add_unk=True, unk_token=\"<UNK>\"):\n \"\"\"\n Args:\n token_to_idx (dict): a pre-existing map of tokens to indices\n add_unk (bool): a flag that indicates whether to add the UNK token\n unk_token (str): the UNK token to add into the Vocabulary\n \"\"\"\n\n if token_to_idx is None:\n token_to_idx = {}\n self._token_to_idx = token_to_idx\n\n self._idx_to_token = {idx: token \n for token, idx in self._token_to_idx.items()}\n \n self._add_unk = add_unk\n self._unk_token = unk_token\n \n self.unk_index = -1\n if add_unk:\n self.unk_index = self.add_token(unk_token) \n \n \n def to_serializable(self):\n \"\"\" returns a dictionary that can be serialized \"\"\"\n return {'token_to_idx': self._token_to_idx, \n 'add_unk': self._add_unk, \n 'unk_token': self._unk_token}\n\n @classmethod\n def from_serializable(cls, contents):\n \"\"\" instantiates the Vocabulary from a serialized dictionary \"\"\"\n return cls(**contents)\n\n def add_token(self, token):\n \"\"\"Update mapping dicts based on the token.\n\n Args:\n token (str): the item to add into the Vocabulary\n Returns:\n index (int): the integer corresponding to the token\n \"\"\"\n if token in self._token_to_idx:\n index = self._token_to_idx[token]\n else:\n index = len(self._token_to_idx)\n self._token_to_idx[token] = index\n self._idx_to_token[index] = token\n return index\n \n def add_many(self, tokens):\n \"\"\"Add a list of tokens into the Vocabulary\n \n Args:\n tokens (list): a list of string tokens\n Returns:\n indices (list): a list of indices corresponding to the tokens\n \"\"\"\n return [self.add_token(token) for token in tokens]\n\n def lookup_token(self, token):\n \"\"\"Retrieve the index associated with the token \n or the UNK index if token isn't present.\n \n Args:\n token (str): the token to look up \n Returns:\n index (int): the index corresponding to the token\n Notes:\n `unk_index` needs to be >=0 (having been 
added into the Vocabulary) \n for the UNK functionality \n \"\"\"\n if self.unk_index >= 0:\n return self._token_to_idx.get(token, self.unk_index)\n else:\n return self._token_to_idx[token]\n\n def lookup_index(self, index):\n \"\"\"Return the token associated with the index\n \n Args: \n index (int): the index to look up\n Returns:\n token (str): the token corresponding to the index\n Raises:\n KeyError: if the index is not in the Vocabulary\n \"\"\"\n if index not in self._idx_to_token:\n raise KeyError(\"the index (%d) is not in the Vocabulary\" % index)\n return self._idx_to_token[index]\n\n def __str__(self):\n return \"<Vocabulary(size=%d)>\" % len(self)\n\n def __len__(self):\n return len(self._token_to_idx)\n\n\"\"\"### The Vectorizer\"\"\"\ndef sparse_to_tensor(M):\n \"\"\"\n input: M is Scipy sparse matrix\n output: pytorch sparse tensor in GPU \n \"\"\"\n M = M.tocoo().astype(np.float32)\n indices = torch.from_numpy(np.vstack((M.row, M.col))).long()\n values = torch.from_numpy(M.data)\n shape = torch.Size(M.shape)\n Ms = torch.sparse.FloatTensor(indices, values, shape)\n return Ms.to_dense().to(args.device)", "_____no_output_____" ] ], [ [ "### The Vectorizer", "_____no_output_____" ] ], [ [ "def ngrams_iterator(token_list, ngrams):\n \"\"\"Return an iterator that yields the given tokens and their ngrams.\n\n Arguments:\n token_list: A list of tokens\n ngrams: the number of ngrams.\n\n Examples:\n >>> token_list = ['here', 'we', 'are']\n >>> list(ngrams_iterator(token_list, 2))\n >>> ['here', 'here we', 'we', 'we are', 'are']\n \"\"\"\n\n def _get_ngrams(n):\n return zip(*[token_list[i:] for i in range(n)])\n\n for x in token_list:\n yield x\n for n in range(2, ngrams + 1):\n for x in _get_ngrams(n):\n yield ' '.join(x)", "_____no_output_____" ], [ "# Vectorization parameters\n# Range (inclusive) of n-gram sizes for tokenizing text.\nNGRAM_RANGE = (1, 2)\n\n# Limit on the number of features. 
We use the top 20K features.\nTOP_K = 20000\n\n# Whether text should be split into word or character n-grams.\n# One of 'word', 'char'.\nTOKEN_MODE = 'word'\n\n# Minimum document/corpus frequency below which a token will be discarded.\nMIN_DOCUMENT_FREQUENCY = 2\n\ndef sparse_to_tensor(M):\n \"\"\"\n input: M is Scipy sparse matrix\n output: pytorch sparse tensor in GPU \n \"\"\"\n M = M.tocoo().astype(np.float32)\n indices = torch.from_numpy(np.vstack((M.row, M.col))).long()\n values = torch.from_numpy(M.data)\n shape = torch.Size(M.shape)\n Ms = torch.sparse.FloatTensor(indices, values, shape)\n return Ms.to_dense().to(args.device)\n\nclass OuvVectorizer(object):\n \"\"\" The Vectorizer which coordinates the Vocabularies and puts them to use\"\"\"\n def __init__(self, ouv_vocab, ngrams, vectorizer):\n \"\"\"\n Args:\n review_vocab (Vocabulary): maps words to integers\n \"\"\"\n self.ouv_vocab = ouv_vocab\n self.ngrams = ngrams\n self.vectorizer = vectorizer\n \n def vectorize(self, data):\n \"\"\"Create a tf_idf vector for the ouv data\n \n Args:\n data (str): the ouv description data\n ngrams (int): the maximum ngram value\n Returns:\n tf_idf (np.ndarray): the tf-idf encoding \n \"\"\"\n data = [data]\n tf_idf = self.vectorizer.transform(data)\n \n return sparse_to_tensor(tf_idf)[0]\n\n @classmethod\n def from_dataframe(cls, ouv_df, ngrams, cutoff=5):\n \"\"\"Instantiate the vectorizer from the dataset dataframe\n \n Args:\n ouv_df (pandas.DataFrame): the ouv dataset\n cutoff (int): the parameter for frequency-based filtering\n ngrams (int): the maximum ngram value\n Returns:\n an instance of the OuvVectorizer\n \"\"\"\n ouv_vocab = Vocabulary(add_unk=True)\n corpus=[]\n \n # Add top words if count > provided count\n word_counts = Counter()\n for data in ouv_df.data:\n corpus.append(data)\n for word in ngrams_iterator(data.split(' '),ngrams=ngrams):\n if word not in string.punctuation:\n word_counts[word] += 1\n \n for word, count in word_counts.items():\n if count > cutoff:\n ouv_vocab.add_token(word)\n \n # Create keyword arguments to pass to the 'tf-idf' vectorizer.\n kwargs = {\n 'ngram_range': NGRAM_RANGE, # Use 1-grams + 2-grams.\n 'dtype': 'int32',\n 'strip_accents': 'unicode',\n 'decode_error': 'replace',\n 'analyzer': TOKEN_MODE, # Split text into word tokens.\n 'min_df': MIN_DOCUMENT_FREQUENCY,\n }\n vectorizer = TfidfVectorizer(**kwargs)\n\n # Learn vocabulary from training texts and vectorize training texts.\n vectorizer.fit_transform(corpus).astype('float32')\n\n return cls(ouv_vocab, ngrams, vectorizer)\n\n @classmethod\n def from_serializable(cls, contents, ngrams, vectorizer):\n \"\"\"Instantiate a OuvVectorizer from a serializable dictionary\n \n Args:\n contents (dict): the serializable dictionary\n Returns:\n an instance of the OuvVectorizer class\n \"\"\"\n ouv_vocab = Vocabulary.from_serializable(contents['ouv_vocab'])\n \n return cls(ouv_vocab=ouv_vocab, ngrams=ngrams, vectorizer = vectorizer)\n\n def to_serializable(self):\n \"\"\"Create the serializable dictionary for caching\n \n Returns:\n contents (dict): the serializable dictionary\n \"\"\"\n return {'ouv_vocab': self.ouv_vocab.to_serializable()}", "_____no_output_____" ] ], [ [ "### The Dataset", "_____no_output_____" ] ], [ [ "class OuvDataset(Dataset):\n def __init__(self, ouv_df, vectorizer):\n \"\"\"\n Args:\n ouv_df (pandas.DataFrame): the dataset\n vectorizer (ReviewVectorizer): vectorizer instantiated from dataset\n \"\"\"\n self.ouv_df = ouv_df\n self._vectorizer = vectorizer\n\n self.train_df = 
self.ouv_df[self.ouv_df.split=='train']\n self.train_size = len(self.train_df)\n\n self.val_df = self.ouv_df[self.ouv_df.split=='dev']\n self.validation_size = len(self.val_df)\n\n self.test_df = self.ouv_df[self.ouv_df.split=='test']\n self.test_size = len(self.test_df)\n\n self._lookup_dict = {'train': (self.train_df, self.train_size),\n 'val': (self.val_df, self.validation_size),\n 'test': (self.test_df, self.test_size)}\n\n self.set_split('train')\n\n @classmethod\n def load_dataset_and_make_vectorizer(cls, ouv_csv, ngrams, cutoff):\n \"\"\"Load dataset and make a new vectorizer from scratch\n \n Args:\n ouv_csv (str): location of the dataset\n Returns:\n an instance of OuvDataset\n \"\"\"\n ouv_df = pd.read_csv(ouv_csv)\n train_ouv_df = ouv_df[ouv_df.split=='train']\n return cls(ouv_df, OuvVectorizer.from_dataframe(train_ouv_df,ngrams=ngrams, cutoff=cutoff))\n \n @classmethod\n def load_dataset_and_load_vectorizer(cls, ouv_csv, vectorizer_filepath, ngrams, vectorizer):\n \"\"\"Load dataset and the corresponding vectorizer. \n Used in the case in the vectorizer has been cached for re-use\n \n Args:\n ouv_csv (str): location of the dataset\n vectorizer_filepath (str): location of the saved vectorizer\n Returns:\n an instance of OuvDataset\n \"\"\"\n ouv_df = pd.read_csv(ouv_csv)\n vectorizer = cls.load_vectorizer_only(vectorizer_filepath, ngrams=ngrams, vectorizer=vectorizer)\n return cls(ouv_df, vectorizer)\n\n @staticmethod\n def load_vectorizer_only(vectorizer_filepath,ngrams, vectorizer):\n \"\"\"a static method for loading the vectorizer from file\n \n Args:\n vectorizer_filepath (str): the location of the serialized vectorizer\n Returns:\n an instance of ReviewVectorizer\n \"\"\"\n with open(vectorizer_filepath) as fp:\n return OuvVectorizer.from_serializable(json.load(fp),ngrams=ngrams, vectorizer=vectorizer)\n\n def save_vectorizer(self, vectorizer_filepath):\n \"\"\"saves the vectorizer to disk using json\n \n Args:\n vectorizer_filepath (str): the location to save the vectorizer\n \"\"\"\n with open(vectorizer_filepath, \"w\") as fp:\n json.dump(self._vectorizer.to_serializable(), fp)\n\n def get_vectorizer(self):\n \"\"\" returns the vectorizer \"\"\"\n return self._vectorizer\n\n def set_split(self, split=\"train\"):\n \"\"\" selects the splits in the dataset using a column in the dataframe \n \n Args:\n split (str): one of \"train\", \"val\", or \"test\"\n \"\"\"\n self._target_split = split\n self._target_df, self._target_size = self._lookup_dict[split]\n\n def __len__(self):\n return self._target_size\n\n def __getitem__(self, index):\n \"\"\"the primary entry point method for PyTorch datasets\n \n Args:\n index (int): the index to the data point \n Returns:\n a dictionary holding the data point's features (x_data) and component for labels (y_target and y_fuzzy)\n \"\"\"\n row = self._target_df.iloc[index]\n\n ouv_vector = \\\n self._vectorizer.vectorize(row.data)\n\n true_label = \\\n np.fromstring(row.true[1:-1],dtype=float, sep=' ')\n if len(true_label)==10:\n true_label = np.append(true_label,0.0)\n \n fuzzy_label = \\\n np.fromstring(row.fuzzy[1:-1],dtype=float, sep=' ')\n\n return {'x_data': ouv_vector,\n 'y_target': true_label,\n 'y_fuzzy': fuzzy_label\n }\n\n def get_num_batches(self, batch_size):\n \"\"\"Given a batch size, return the number of batches in the dataset\n \n Args:\n batch_size (int)\n Returns:\n number of batches in the dataset\n \"\"\"\n return len(self) // batch_size \n \ndef generate_batches(dataset, batch_size, shuffle=True,\n drop_last=True, 
device=\"cpu\"):\n \"\"\"\n A generator function which wraps the PyTorch DataLoader. It will \n ensure each tensor is on the write device location.\n \"\"\"\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size,\n shuffle=shuffle, drop_last=drop_last)\n\n for data_dict in dataloader:\n out_data_dict = {}\n for name, tensor in data_dict.items():\n out_data_dict[name] = data_dict[name].to(device)\n yield out_data_dict", "_____no_output_____" ] ], [ [ "## The Model: Naive_Bayers_Classifier", "_____no_output_____" ] ], [ [ "class MLPClassifier(nn.Module):\n \n def __init__(self, embedding_size, hidden_dim, num_classes, dropout_p, \n pretrained_embeddings=None, padding_idx=0):\n \"\"\"\n Args:\n embedding_size (int): size of the embedding vectors\n num_embeddings (int): number of embedding vectors\n hidden_dim (int): the size of the hidden dimension\n num_classes (int): the number of classes in classification\n dropout_p (float): a dropout parameter \n pretrained_embeddings (numpy.array): previously trained word embeddings\n default is None. If provided, \n padding_idx (int): an index representing a null position\n \"\"\"\n super(MLPClassifier, self).__init__()\n\n self._dropout_p = dropout_p\n self.dropout = nn.Dropout(dropout_p)\n self.fc1 = nn.Linear(embedding_size, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, num_classes)\n\n def forward(self, x_in, apply_softmax=False):\n \"\"\"The forward pass of the classifier\n \n Args:\n x_in (torch.Tensor): an input data tensor. \n x_in.shape should be (batch, dataset._max_seq_length)\n apply_softmax (bool): a flag for the softmax activation\n should be false if used with the Cross Entropy losses\n Returns:\n the resulting tensor. tensor.shape should be (batch, num_classes)\n \"\"\"\n \n intermediate_vector = F.relu(self.dropout(self.fc1(x_in)))\n prediction_vector = self.fc2(intermediate_vector)\n \n if apply_softmax:\n prediction_vector = F.softmax(prediction_vector, dim=1)\n\n return prediction_vector", "_____no_output_____" ] ], [ [ "## Training Routine", "_____no_output_____" ], [ "### Helper Functions", "_____no_output_____" ] ], [ [ "def make_train_state(args):\n return {'stop_early': False,\n 'early_stopping_step': 0,\n 'early_stopping_best_k_acc_val': 0,\n 'learning_rate': args.learning_rate,\n 'epoch_index': 0,\n 'train_loss': [],\n 'train_1_acc': [],\n 'train_k_acc': [],\n 'train_k_jac': [],\n 'val_loss': [],\n 'val_1_acc': [],\n 'val_k_acc': [],\n 'val_k_jac': [],\n 'test_loss': -1,\n 'test_1_acc': -1,\n 'test_k_acc':-1,\n 'test_k_jac':-1,\n 'model_filename': args.model_state_file}\n\ndef update_train_state(args, model, train_state):\n \"\"\"Handle the training state updates.\n\n Components:\n - Early Stopping: Prevent overfitting.\n - Model Checkpoint: Model is saved if the model is better\n\n :param args: main arguments\n :param model: model to train\n :param train_state: a dictionary representing the training state values\n :returns:\n a new train_state\n \"\"\"\n\n # Save one model at least\n if train_state['epoch_index'] == 0:\n torch.save(model.state_dict(), train_state['model_filename'])\n train_state['stop_early'] = False\n\n # Save model if performance improved\n elif train_state['epoch_index'] >= 1:\n acc_tm1, acc_t = train_state['val_k_acc'][-2:]\n\n # If accuracy worsened\n if acc_t <= train_state['early_stopping_best_k_acc_val']:\n # Update step\n train_state['early_stopping_step'] += 1\n # Loss decreased\n else:\n # Save the best model from sklearn\n if acc_t > train_state['early_stopping_best_k_acc_val']:\n 
train_state['early_stopping_best_k_acc_val'] = acc_t\n torch.save(model.state_dict(), train_state['model_filename'])\n \n # Reset early stopping step\n train_state['early_stopping_step'] = 0\n\n # Stop early ?\n train_state['stop_early'] = \\\n train_state['early_stopping_step'] >= args.early_stopping_criteria\n\n return train_state", "_____no_output_____" ] ], [ [ "### Evaluation Metrics", "_____no_output_____" ] ], [ [ "def compute_cross_entropy(y_pred, y_target):\n y_target = y_target.cpu().float()\n y_pred = y_pred.cpu().float()\n criterion = nn.BCEWithLogitsLoss()\n return criterion(y_target, y_pred)\n\ndef compute_1_accuracy(y_pred, y_target):\n y_target_indices = y_target.max(dim=1)[1]\n y_pred_indices = y_pred.max(dim=1)[1]\n n_correct = torch.eq(y_pred_indices, y_target_indices).sum().item()\n return n_correct / len(y_pred_indices) * 100\n\ndef compute_k_accuracy(y_pred, y_target, k=3):\n y_pred_indices = y_pred.topk(k, dim=1)[1]\n y_target_indices = y_target.max(dim=1)[1]\n n_correct = torch.tensor([y_pred_indices[i] in y_target_indices[i] for i in range(len(y_pred))]).sum().item()\n return n_correct / len(y_pred_indices) * 100\n\ndef compute_k_jaccard_index(y_pred, y_target, k=3):\n y_target_indices = y_target.topk(k, dim=1)[1]\n y_pred_indices = y_pred.max(dim=1)[1]\n jaccard = torch.tensor([len(np.intersect1d(y_target_indices[i], y_pred_indices[i]))/\n len(np.union1d(y_target_indices[i], y_pred_indices[i]))\n for i in range(len(y_pred))]).sum().item()\n return jaccard / len(y_pred_indices)\n\ndef compute_jaccard_index(y_pred, y_target, k=3, multilabel=False):\n \n threshold = 1.0/(k+1)\n threshold_2 = 0.5\n \n if multilabel:\n y_pred_indices = y_pred.gt(threshold_2)\n else:\n y_pred_indices = y_pred.gt(threshold)\n \n y_target_indices = y_target.gt(threshold)\n \n jaccard = ((y_target_indices*y_pred_indices).sum(axis=1)/((y_target_indices+y_pred_indices).sum(axis=1)+1e-8)).sum().item()\n return jaccard / len(y_pred_indices)", "_____no_output_____" ], [ "def softmax_sensitive(T):\n T = np.exp(T) - np.exp(0) + 1e-9\n if len(T.shape)==1:\n return T/T.sum()\n return T/(T.sum(axis=1).unsqueeze(1))", "_____no_output_____" ], [ "def cross_entropy(pred, soft_targets):\n logsoftmax = nn.LogSoftmax(dim=1)\n return torch.mean(torch.sum(- soft_targets * logsoftmax(pred), 1))", "_____no_output_____" ], [ "# convert a df to tensor to be used in pytorch\ndef df_to_tensor(df):\n device = args.device\n return torch.from_numpy(df.values).float().to(device)", "_____no_output_____" ], [ "def get_prior():\n prior = pd.read_csv(args.prior_csv,sep=';',names=classes[:-1], skiprows=1)\n prior['Others'] = 1\n prior = prior.T\n prior['Others'] = 1\n prior = df_to_tensor(prior)\n return prior", "_____no_output_____" ], [ "def compute_fuzzy_label(y_target, y_fuzzy, fuzzy=False, how='uni', lbd=0):\n '''\n Using two sets of prediction labels and fuzziness parameters to compute the fuzzy label in the form as \n a distribution over classes\n \n Args:\n y_target (torch.Tensor) of shape (n_batch, n_classes): the true label of the ouv description\n y_fuzzy (torch.Tensor) of shape (n_batch, n_classes): the fuzzy label of the ouv description\n fuzzy (bool): whether or not to turn on the fuzziness option\n how (string): the way fuzziness weights are used, one of the options in {'uni', 'prior'}\n lbd (float): the scaler applied to the fuzziness of the label\n \n Returns:\n A pytorch Tensor of shape (n_batch, n_classes): The processed label in the form of distribution that add to 1\n '''\n assert y_target.shape == 
y_fuzzy.shape, 'target labels must have the same size'\n assert how in {'uni', 'prior', 'origin'}, '''how must be one of the two options in {'uni', 'prior', 'origin'}'''\n \n if not fuzzy:\n return softmax_sensitive(y_target)\n \n if how == 'uni':\n y_label = y_target + lbd * y_fuzzy\n return softmax_sensitive(y_label)\n \n ### TO DO ###\n elif how == 'prior':\n prior = get_prior()\n y_inter = torch.matmul(y_target.float(),prior)\n y_inter = y_inter/(y_inter.max(dim=1, keepdim=True)[0])\n y_label = y_target + lbd * y_fuzzy * y_inter\n return softmax_sensitive(y_label)\n \n else:\n y_label = y_target + lbd\n return softmax_sensitive(y_label)", "_____no_output_____" ], [ "def sparse_to_tensor(M):\n \"\"\"\n input: M is Scipy sparse matrix\n output: pytorch sparse tensor in GPU \n \"\"\"\n M = M.tocoo().astype(np.float32)\n indices = torch.from_numpy(np.vstack((M.row, M.col))).long()\n values = torch.from_numpy(M.data)\n shape = torch.Size(M.shape)\n Ms = torch.sparse.FloatTensor(indices, values, shape, device=args.device)\n return Ms.to_dense()", "_____no_output_____" ] ], [ [ "### General Utilities", "_____no_output_____" ] ], [ [ "def set_seed_everywhere(seed, cuda):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n if cuda:\n torch.cuda.manual_seed_all(seed)\n\ndef handle_dirs(dirpath):\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)", "_____no_output_____" ] ], [ [ "### Settings and Some Prep Work", "_____no_output_____" ] ], [ [ "args = Namespace(\n # Data and Path information\n frequency_cutoff=1,\n model_state_file='model.pth',\n ouv_csv='Data/ouv_with_splits_full.csv',\n #ouv_csv='Data/all_with_splits_full.csv',\n prior_csv = 'Data/Coappearance_matrix.csv',\n save_dir='model_storage/ngram/',\n vectorizer_file='vectorizer.json',\n # Model hyper parameters\n ngrams=2,\n hidden_dim=200, \n # Training hyper parameters\n batch_size=128,\n early_stopping_criteria=5,\n learning_rate=0.0002,\n l2 = 1e-5,\n dropout_p=0.5,\n k = 3,\n fuzzy = True,\n fuzzy_how = 'uni',\n fuzzy_lambda = 0.1,\n num_epochs=100,\n seed=1337,\n # Runtime options\n catch_keyboard_interrupt=True,\n cuda=True,\n expand_filepaths_to_save_dir=True,\n reload_from_files=False,\n)\n\nclasses = ['Criteria i', 'Criteria ii', 'Criteria iii', 'Criteria iv', 'Criteria v', 'Criteria vi', \n 'Criteria vii', 'Criteria viii', 'Criteria ix', 'Criteria x', 'Others']\n\nif args.expand_filepaths_to_save_dir:\n args.vectorizer_file = os.path.join(args.save_dir,\n args.vectorizer_file)\n\n args.model_state_file = os.path.join(args.save_dir,\n args.model_state_file)\n \n print(\"Expanded filepaths: \")\n print(\"\\t{}\".format(args.vectorizer_file))\n print(\"\\t{}\".format(args.model_state_file))\n \n# Check CUDA\nif not torch.cuda.is_available():\n args.cuda = False\n\nprint(\"Using CUDA: {}\".format(args.cuda))\n\nargs.device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n# Set seed for reproducibility\nset_seed_everywhere(args.seed, args.cuda)\n\n# handle dirs\nhandle_dirs(args.save_dir)", "Expanded filepaths: \n\tmodel_storage/ngram/vectorizer.json\n\tmodel_storage/ngram/model.pth\nUsing CUDA: False\n" ] ], [ [ "## Initialization", "_____no_output_____" ] ], [ [ "set_seed_everywhere(args.seed, args.cuda)\nif args.reload_from_files:\n # training from a checkpoint\n dataset = OuvDataset.load_dataset_and_load_vectorizer(args.ouv_csv, args.vectorizer_file)\n\nelse:\n # create dataset and vectorizer\n dataset = OuvDataset.load_dataset_and_make_vectorizer(args.ouv_csv, \n cutoff=args.frequency_cutoff, 
ngrams=args.ngrams)\n dataset.save_vectorizer(args.vectorizer_file) \n\nvectorizer = dataset.get_vectorizer()\n\nembedding_size = len(vectorizer.vectorizer.vocabulary_)\n\nclassifier = MLPClassifier(embedding_size=embedding_size, \n hidden_dim=args.hidden_dim, \n num_classes=len(classes), \n dropout_p=args.dropout_p)", "/opt/miniconda3/lib/python3.7/site-packages/sklearn/feature_extraction/text.py:1799: UserWarning: Only (<class 'numpy.float64'>, <class 'numpy.float32'>, <class 'numpy.float16'>) 'dtype' should be used. int32 'dtype' will be converted to np.float64.\n UserWarning)\n" ], [ "embedding_size", "_____no_output_____" ] ], [ [ "### Training Loop", "_____no_output_____" ] ], [ [ "with profiler.profile(record_shapes=True) as prof:\n with profiler.record_function(\"model_inference\"):\n classifier(X)", "_____no_output_____" ], [ "print(prof.key_averages().table(sort_by=\"cpu_time_total\", row_limit=10))", "----------------------- ------------ ------------ ------------ ------------ ------------ ------------ \n Name Self CPU % Self CPU CPU total % CPU total CPU time avg # of Calls \n----------------------- ------------ ------------ ------------ ------------ ------------ ------------ \n model_inference 6.54% 224.467us 99.33% 3.408ms 3.408ms 1 \n aten::addmm 81.37% 2.792ms 83.10% 2.852ms 1.426ms 2 \n aten::dropout 0.48% 16.322us 7.10% 243.634us 243.634us 1 \n aten::bernoulli_ 3.59% 123.054us 3.63% 124.419us 124.419us 1 \n aten::t 1.16% 39.824us 1.70% 58.427us 29.214us 2 \n aten::div_ 0.80% 27.575us 1.33% 45.740us 45.740us 1 \n aten::mul 1.13% 38.764us 1.19% 40.791us 40.791us 1 \n aten::copy_ 1.08% 36.901us 1.15% 39.359us 13.120us 3 \n aten::empty 1.00% 34.303us 1.00% 34.303us 4.288us 8 \n aten::relu 0.43% 14.880us 0.78% 26.640us 26.640us 1 \n----------------------- ------------ ------------ ------------ ------------ ------------ ------------ \nSelf CPU time total: 3.431ms\n\n" ] ], [ [ "## Loading Trained Models", "_____no_output_____" ], [ "### Option 1 LS Model", "_____no_output_____" ] ], [ [ "with open(args.save_dir+'hyperdict_fuzzy.p', 'rb') as fp:\n hyperdict_fuzzy = pickle.load(fp)\n train_state = hyperdict_fuzzy[('uni',0.1)]", "_____no_output_____" ], [ "classifier.load_state_dict(torch.load(args.save_dir+'1337/model.pth',map_location=torch.device('cpu')))\nclassifier.eval()", "_____no_output_____" ] ], [ [ "### Option 2 Baseline w/o LS", "_____no_output_____" ] ], [ [ "with open(args.save_dir+'hyperdict_fuzzy.p', 'rb') as fp:\n hyperdict_fuzzy = pickle.load(fp)\n train_state = hyperdict_fuzzy[('uni',0)]", "_____no_output_____" ], [ "classifier.load_state_dict(torch.load(args.save_dir+'baseline/model.pth',map_location=torch.device('cpu')))\nclassifier.eval()", "_____no_output_____" ], [ "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\ncount_parameters(classifier)", "_____no_output_____" ], [ "# compute the loss & accuracy on the test set using the best available model\nloss_func = cross_entropy\nset_seed_everywhere(args.seed, args.cuda)\n\ndataset.set_split('test')\nbatch_generator = generate_batches(dataset, \n batch_size=args.batch_size, \n device=args.device)\nrunning_loss = 0.\nrunning_1_acc = 0.\nrunning_k_acc = 0.\nrunning_k_jac = 0.\nclassifier.eval()\n\nfor batch_index, batch_dict in enumerate(batch_generator):\n \n # get the data compute fuzzy labels\n X = batch_dict['x_data']\n\n y_target = batch_dict['y_target']\n y_fuzzy = batch_dict['y_fuzzy']\n\n Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy, \n 
how=args.fuzzy_how, lbd = args.fuzzy_lambda)\n\n # compute the output\n with torch.no_grad():\n y_pred = classifier(X)\n\n # compute the loss\n loss = loss_func(y_pred, Y)\n loss_t = loss.item()\n running_loss += (loss_t - running_loss) / (batch_index + 1)\n\n # compute the accuracy\n acc_1_t = compute_1_accuracy(y_pred, y_target)\n acc_k_t = compute_k_accuracy(y_pred, y_target, args.k)\n jac_k_t = compute_jaccard_index(y_pred, y_target, args.k)\n\n running_1_acc += (acc_1_t - running_1_acc) / (batch_index + 1)\n running_k_acc += (acc_k_t - running_k_acc) / (batch_index + 1)\n running_k_jac += (jac_k_t - running_k_jac) / (batch_index + 1)\n\ntrain_state['test_loss'] = running_loss\ntrain_state['test_1_acc'] = running_1_acc\ntrain_state['test_k_acc'] = running_k_acc\ntrain_state['test_k_jac'] = running_k_jac", "_____no_output_____" ], [ "# Result of LS Model\ntrain_state", "_____no_output_____" ], [ "# Result of Baseline\ntrain_state", "_____no_output_____" ] ], [ [ "## Inference", "_____no_output_____" ] ], [ [ "def preprocess_text(text):\n text = text.lower()\n text = re.sub(r\"([.,!?])\", r\" \\1 \", text)\n text = re.sub(r\"[^a-zA-Z.,!?]+\", r\" \", text)\n return text", "_____no_output_____" ], [ "def predict_rating(text, classifier, vectorizer, classes, k=1):\n \"\"\"Predict the rating of a review\n \n Args:\n text (str): the text of the description\n classifier (ReviewClassifier): the trained model\n vectorizer (ReviewVectorizer): the corresponding vectorizer\n classes (list of str): The name of the ouv classes\n k (int): show the largest k prediction, default to 1\n \"\"\"\n classifier.eval()\n ouv = preprocess_text(text)\n vectorized_ouv = vectorizer.vectorize(ouv)\n X = vectorized_ouv.view(1,-1)\n with torch.no_grad():\n result = classifier(vectorized_ouv.unsqueeze(0), apply_softmax=True)\n \n if k==1:\n pred_id = result.argmax().item()\n return (classes[pred_id], result[0][pred_id])\n else:\n pred_indices = [i.item() for i in result.topk(k)[1][0]]\n output = []\n for pred_id in pred_indices:\n output.append((classes[pred_id],result[0][pred_id].item()))\n return output", "_____no_output_____" ], [ "test_ouv = 'this is a very old building dating back to 13th century'\n\nprediction = predict_rating(test_ouv,classifier,vectorizer,classes)\nprint('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "this is a very old building dating back to 13th century -> Criteria iv with a probability of 0.31\n" ], [ "test_ouv = 'this is a very old building dating back to 13th century'\nk=3\npredictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)\n\nprint(\"Top {} predictions:\".format(k))\nprint(\"===================\")\nfor prediction in predictions:\n print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "Top 3 predictions:\n===================\nthis is a very old building dating back to 13th century -> Criteria iv with a probability of 0.31\nthis is a very old building dating back to 13th century -> Criteria iii with a probability of 0.26\nthis is a very old building dating back to 13th century -> Criteria i with a probability of 0.09\n" ], [ "test_ouv = 'The particular layout of the complex is unique to this site'\nk=3\npredictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)\n\nprint(\"Top {} predictions:\".format(k))\nprint(\"===================\")\nfor prediction in predictions:\n print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "Top 3 
predictions:\n===================\nThe particular layout of the complex is unique to this site -> Criteria iv with a probability of 0.27\nThe particular layout of the complex is unique to this site -> Criteria iii with a probability of 0.24\nThe particular layout of the complex is unique to this site -> Criteria ii with a probability of 0.16\n" ], [ "test_ouv = '''the lagoon of venice also has one of the highest concentrations of masterpieces in the world from \ntorcellos cathedral to the church of santa maria della salute . the years of the republics extraordinary golden \nage are represented by monuments of incomparable beauty'''\nk=3\npredictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)\n\nprint(\"Top {} predictions:\".format(k))\nprint(\"===================\")\nfor prediction in predictions:\n print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "Top 3 predictions:\n===================\nthe lagoon of venice also has one of the highest concentrations of masterpieces in the world from \ntorcellos cathedral to the church of santa maria della salute . the years of the republics extraordinary golden \nage are represented by monuments of incomparable beauty -> Criteria i with a probability of 0.32\nthe lagoon of venice also has one of the highest concentrations of masterpieces in the world from \ntorcellos cathedral to the church of santa maria della salute . the years of the republics extraordinary golden \nage are represented by monuments of incomparable beauty -> Criteria iv with a probability of 0.17\nthe lagoon of venice also has one of the highest concentrations of masterpieces in the world from \ntorcellos cathedral to the church of santa maria della salute . the years of the republics extraordinary golden \nage are represented by monuments of incomparable beauty -> Criteria iii with a probability of 0.12\n" ], [ "test_ouv = '''the lagoon of venice also has one of the highest concentrations of masterpieces in the world'''\nk=3\npredictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)\n\nprint(\"Top {} predictions:\".format(k))\nprint(\"===================\")\nfor prediction in predictions:\n print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "Top 3 predictions:\n===================\nthe lagoon of venice also has one of the highest concentrations of masterpieces in the world -> Criteria i with a probability of 0.26\nthe lagoon of venice also has one of the highest concentrations of masterpieces in the world -> Criteria x with a probability of 0.13\nthe lagoon of venice also has one of the highest concentrations of masterpieces in the world -> Criteria vii with a probability of 0.13\n" ], [ "test_ouv = '''from torcellos cathedral to the church of santa maria della salute'''\nk=3\npredictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)\n\nprint(\"Top {} predictions:\".format(k))\nprint(\"===================\")\nfor prediction in predictions:\n print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "Top 3 predictions:\n===================\nfrom torcellos cathedral to the church of santa maria della salute -> Criteria iv with a probability of 0.28\nfrom torcellos cathedral to the church of santa maria della salute -> Criteria vi with a probability of 0.16\nfrom torcellos cathedral to the church of santa maria della salute -> Criteria ii with a probability of 0.15\n" ], [ "test_ouv = '''the years of the republics 
extraordinary golden age are represented by monuments of incomparable beauty'''\nk=3\npredictions = predict_rating(test_ouv,classifier,vectorizer,classes,k)\n\nprint(\"Top {} predictions:\".format(k))\nprint(\"===================\")\nfor prediction in predictions:\n print('{} -> {} with a probability of {:0.2f}'.format(test_ouv, prediction[0],prediction[1]))", "Top 3 predictions:\n===================\nthe years of the republics extraordinary golden age are represented by monuments of incomparable beauty -> Criteria iii with a probability of 0.20\nthe years of the republics extraordinary golden age are represented by monuments of incomparable beauty -> Criteria iv with a probability of 0.20\nthe years of the republics extraordinary golden age are represented by monuments of incomparable beauty -> Criteria i with a probability of 0.16\n" ], [ "import time\nclass Timer(object):\n def __init__(self, name=None):\n self.name = name\n\n def __enter__(self):\n self.tstart = time.time()\n\n def __exit__(self, type, value, traceback):\n if self.name:\n print('[%s]' % self.name,)\n print('Elapsed: %s' % (time.time() - self.tstart))\n \nset_seed_everywhere(args.seed, args.cuda) \ntest_ouv = 'The particular layout of the complex is unique to this site'\nk=3\nwith Timer():\n predictions = predict_rating(test_ouv,classifier,vectorizer,classes,k=k)", "Elapsed: 0.0027370452880859375\n" ] ], [ [ "## Interpretability", "_____no_output_____" ] ], [ [ "def infer_tokens_importance(vocab, classifier, vectorizer, classes, k=50):\n \"\"\"Predict the rating of a review\n \n Args:\n vocab (list of str): the whole vocabulary\n classifier (ReviewClassifier): the trained model\n vectorizer (ReviewVectorizer): the corresponding vectorizer\n classes (list of str): The name of the ouv classes\n k (int): show the largest k prediction, default to 1\n \"\"\"\n classifier.eval()\n X = sparse_to_tensor(vectorizer.vectorizer.transform(list(vocab.keys())))\n \n with torch.no_grad():\n result = classifier(X, apply_softmax=True)\n \n vocab_id = result[1:].topk(k, dim=0)[1]\n vocab_weight = result[1:].topk(k, dim=0)[0]\n return vocab_id, vocab_weight", "_____no_output_____" ], [ "vocab = vectorizer.vectorizer.vocabulary_\nlen(vocab)", "_____no_output_____" ], [ "all_k = infer_tokens_importance(vocab, classifier, vectorizer, classes, k=50)[0]", "_____no_output_____" ], [ "all_k.shape", "_____no_output_____" ], [ "id_vocab = {vocab[token]:token for token in vocab.keys()}", "_____no_output_____" ], [ "def make_top_k_DataFrame(vocab, classifier, vectorizer, classes, k=10):\n \n vocab_id = infer_tokens_importance(vocab, classifier, vectorizer, classes, k)[0]\n df = pd.DataFrame(columns = classes)\n for i in range(len(classes)):\n \n indices = vocab_id[:,i].tolist()\n words = pd.Series([id_vocab[j] for j in indices])\n df[classes[i]] = words\n return df", "_____no_output_____" ], [ "make_top_k_DataFrame(vocab, classifier, vectorizer, classes, k=20)", "_____no_output_____" ], [ "make_top_k_DataFrame(vocab, classifier, vectorizer, classes, k=50).to_csv(args.save_dir+'top_words.csv')", "_____no_output_____" ] ], [ [ "## Confusion Matrix", "_____no_output_____" ] ], [ [ "dataset.set_split('test')\nset_seed_everywhere(args.seed, args.cuda)\nbatch_generator = generate_batches(dataset, \n batch_size=args.batch_size, \n device=args.device)\nconf_mat_test = np.zeros((len(classes)-1,len(classes)-1))", "_____no_output_____" ], [ "for batch_index, batch_dict in enumerate(batch_generator):\n \n # get the data compute fuzzy labels\n X = 
batch_dict['x_data']\n\n y_target = batch_dict['y_target']\n y_fuzzy = batch_dict['y_fuzzy']\n\n Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy, \n how=args.fuzzy_how, lbd = args.fuzzy_lambda)\n\n # compute the output\n y_pred = classifier(X)\n \n conf_mat_test = np.add(conf_mat_test,confusion_matrix(y_target.argmax(axis=1), y_pred.argmax(axis=1),\n labels=range(len(classes)-1)))", "_____no_output_____" ], [ "conf_mat_test", "_____no_output_____" ], [ "dataset.set_split('val')\nset_seed_everywhere(args.seed, args.cuda)\nbatch_generator = generate_batches(dataset, \n batch_size=args.batch_size, \n device=args.device)\nconf_mat_val = np.zeros((len(classes)-1,len(classes)-1))", "_____no_output_____" ], [ "for batch_index, batch_dict in enumerate(batch_generator):\n \n # get the data compute fuzzy labels\n X = batch_dict['x_data']\n\n y_target = batch_dict['y_target']\n y_fuzzy = batch_dict['y_fuzzy']\n\n Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy, \n how=args.fuzzy_how, lbd = args.fuzzy_lambda)\n\n # compute the output\n y_pred = classifier(X)\n \n conf_mat_val = np.add(conf_mat_val,confusion_matrix(y_target.argmax(axis=1), y_pred.argmax(axis=1),labels=range(len(classes)-1)))", "_____no_output_____" ], [ "conf_mat_val", "_____no_output_____" ], [ "dataset.set_split('train')\nset_seed_everywhere(args.seed, args.cuda)\nbatch_generator = generate_batches(dataset, \n batch_size=args.batch_size, \n device=args.device)\nconf_mat_train = np.zeros((len(classes)-1,len(classes)-1))", "_____no_output_____" ], [ "for batch_index, batch_dict in enumerate(batch_generator):\n \n # get the data compute fuzzy labels\n X = batch_dict['x_data']\n\n y_target = batch_dict['y_target']\n y_fuzzy = batch_dict['y_fuzzy']\n\n Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy, \n how=args.fuzzy_how, lbd = args.fuzzy_lambda)\n\n # compute the output\n y_pred = classifier(X)\n \n conf_mat_train = np.add(conf_mat_train,confusion_matrix(y_target.argmax(axis=1), y_pred.argmax(axis=1),labels=range(len(classes)-1)))", "_____no_output_____" ], [ "conf_mat_train", "_____no_output_____" ], [ "pd.concat([pd.DataFrame(conf_mat_test),pd.DataFrame(conf_mat_val),pd.DataFrame(conf_mat_train)],axis=1).to_csv(args.save_dir+'confusion_matrix.csv')", "_____no_output_____" ], [ "pd.concat([pd.DataFrame(conf_mat_test),pd.DataFrame(conf_mat_val),pd.DataFrame(conf_mat_train)],axis=1).to_csv(args.save_dir+'baseline_confusion_matrix.csv')", "_____no_output_____" ], [ "def per_class_metrics(confusion_matrix, classes):\n '''\n Compute the per class precision, recall, and F1 for all the classes\n \n Args:\n confusion_matrix (np.ndarry) with shape of (n_classes,n_classes): a confusion matrix of interest\n classes (list of str) with shape (n_classes,): The names of classes\n \n Returns:\n metrics_dict (dictionary): a dictionary that records the per class metrics\n '''\n num_class = confusion_matrix.shape[0]\n metrics_dict = {}\n for i in range(num_class):\n key = classes[i]\n temp_dict = {}\n row = confusion_matrix[i,:]\n col = confusion_matrix[:,i]\n val = confusion_matrix[i,i]\n precision = val/row.sum()\n recall = val/col.sum()\n F1 = 2*(precision*recall)/(precision+recall)\n temp_dict['precision'] = precision\n temp_dict['recall'] = recall\n temp_dict['F1'] = F1\n metrics_dict[key] = temp_dict\n \n return metrics_dict", "_____no_output_____" ], [ "metrics_dict = {}\nmetrics_dict['test'] = per_class_metrics(conf_mat_test, classes[:-1])\nmetrics_dict['val'] = per_class_metrics(conf_mat_val, 
classes[:-1])\nmetrics_dict['train'] = per_class_metrics(conf_mat_train, classes[:-1])", "_____no_output_____" ], [ "metrics_df = pd.DataFrame.from_dict({(i,j): metrics_dict[i][j] \n for i in metrics_dict.keys() \n for j in metrics_dict[i].keys()},\n orient='index')", "_____no_output_____" ], [ "metrics_df.to_csv(args.save_dir+'per_class_metrics.csv')", "_____no_output_____" ], [ "metrics_df.to_csv(args.save_dir+'baseline_per_class_metrics.csv')", "_____no_output_____" ] ], [ [ "## Try on totally Unseen Data", "_____no_output_____" ] ], [ [ "#ouv_csv='Data/ouv_with_splits_full.csv',\nnew_ouv_csv='Data/sd_full.csv'", "_____no_output_____" ], [ "def compute_jac_k_accuracy(y_pred, y_target, k=3, multilabel=False):\n \n y_pred_indices = y_pred.topk(k, dim=1)[1]\n y_target_indices = y_target.topk(k, dim=1)[1]\n \n n_correct = torch.tensor([torch.tensor([y_pred_indices[j][i] in y_target_indices[j] for i in range(k)]).sum()>0 \n for j in range(len(y_pred))]).sum().item()\n return n_correct / len(y_pred_indices) * 100\n\ndef compute_jac_1_accuracy(y_pred, y_target, k=3, multilabel=False):\n \n y_pred_indices = y_pred.topk(1, dim=1)[1]\n y_target_indices = y_target.topk(k, dim=1)[1]\n \n n_correct = torch.tensor([torch.tensor([y_pred_indices[j] in y_target_indices[j] for i in range(k)]).sum()>0 \n for j in range(len(y_pred))]).sum().item()\n return n_correct / len(y_pred_indices) * 100", "_____no_output_____" ], [ "with Timer():\n loss_func = cross_entropy\n set_seed_everywhere(args.seed, args.cuda)\n train_state = make_train_state(args)\n dataset = OuvDataset.load_dataset_and_load_vectorizer(new_ouv_csv, args.vectorizer_file, \n ngrams=args.ngrams, vectorizer=vectorizer.vectorizer)\n\n dataset.set_split('val')\n verbose=False\n try:\n # Iterate over training dataset\n\n # setup: batch generator, set loss and acc to 0, set train mode on\n dataset.set_split('val')\n batch_generator = generate_batches(dataset, \n batch_size=args.batch_size, \n device=args.device)\n running_loss = 0.0\n running_1_acc = 0.0\n running_k_acc = 0.0\n running_k_jac = 0.0\n classifier.eval()\n\n for batch_index, batch_dict in enumerate(batch_generator):\n\n # step 2. get the data compute fuzzy labels\n X = batch_dict['x_data']\n\n y_target = batch_dict['y_target']\n y_fuzzy = batch_dict['y_fuzzy']\n\n Y = compute_fuzzy_label(y_target, y_fuzzy, fuzzy= args.fuzzy, \n how=args.fuzzy_how, lbd = args.fuzzy_lambda)\n\n # step 3. compute the output\n with torch.no_grad():\n y_pred = classifier(X)\n\n # step 4. 
compute the loss\n loss = loss_func(y_pred, Y)\n loss_t = loss.item()\n running_loss += (loss_t - running_loss) / (batch_index + 1)\n\n # -----------------------------------------\n # compute the accuracies\n acc_1_t = compute_jac_1_accuracy(y_pred, y_target)\n acc_k_t = compute_jac_k_accuracy(y_pred, y_target, args.k)\n jac_k_t = compute_jaccard_index(y_pred, y_target, len(classes))\n\n running_1_acc += (acc_1_t - running_1_acc) / (batch_index + 1)\n running_k_acc += (acc_k_t - running_k_acc) / (batch_index + 1)\n running_k_jac += (jac_k_t - running_k_jac) / (batch_index + 1)\n\n # update bar\n if verbose:\n val_bar.set_postfix(loss=running_loss, \n acc_1=running_1_acc,\n acc_k=running_k_acc,\n jac_k=running_k_jac,\n epoch=epoch_index)\n val_bar.update()\n\n train_state['val_loss'].append(running_loss)\n train_state['val_1_acc'].append(running_1_acc)\n train_state['val_k_acc'].append(running_k_acc)\n train_state['val_k_jac'].append(running_k_jac)\n\n except KeyboardInterrupt:\n print(\"Exiting loop\")\n pass", "Elapsed: 6.545941114425659\n" ], [ "# LS Model\ntrain_state", "_____no_output_____" ], [ "# Baseline\ntrain_state", "_____no_output_____" ] ], [ [ "## END", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a41ea00d42472cd96eef4cba7fe92558a58b30d
180,112
ipynb
Jupyter Notebook
Churn A B tests and power of ChiSq.ipynb
ianozsvald/churn_a_b_tests_statistical_power
7f427c80ce0f93d344c249589c1edd53052dfb6a
[ "MIT" ]
null
null
null
Churn A B tests and power of ChiSq.ipynb
ianozsvald/churn_a_b_tests_statistical_power
7f427c80ce0f93d344c249589c1edd53052dfb6a
[ "MIT" ]
null
null
null
Churn A B tests and power of ChiSq.ipynb
ianozsvald/churn_a_b_tests_statistical_power
7f427c80ce0f93d344c249589c1edd53052dfb6a
[ "MIT" ]
null
null
null
339.833962
117,752
0.919406
[ [ [ "# A/B and A/A tests and the power to detect a difference on a binary task (e.g. churn or propensity to buy)\n\nA/B tests are used to detect a difference in two populations. Here we look at churn on 2 cohorts who have a low churn rate (5%), we'd like to determine how many people we need to sample to reliably detect an improvement in churn by 5% (i.e. 5%->4.75% churn improvement). The necessary sample size is probably larger than we'd desire!\n\nThe two papers below discuss ways to reduce variance and thereby increase the statistic power of a test (here we're using a ChiSq test on counts of a simulated binary outcome). \n\nDetecting small changes requires a huge population. Detecting large changes (which are more likely to occur if you break something that was good, than if you improve something that was already good) is easier.\n\nIf you don't control for statistical power you're likely to have test groups that are too small, so you rarely know that you've seen a real change, leading to rejections of small but useful improvements.\n\n_Sidenote_ - fixing churn is hard anyhow as folk who will churn will do so for lots of reasons (e.g. dissatisfaction, price, low use of product) so it is more likely that this is a product design test than a machine-learning led intervention.\n \nTwo good papers:\n * \"Trustworthy Online Controlled Experiments: Five Puzzling Outcomes Explained, 2012 (KDD), Kohavi, Deng, Frasca, Longbotham, Walker, Xu\" (https://dl.acm.org/doi/10.1145/2339530.2339653)\n * \"Practical Guide to Controlled Experiments on the Web, 2007 (KDD), Kohavi, Henne, Sommerfield\" (https://courses.cs.washington.edu/courses/cse454/15au/papers/p959-kohavi.pdf) ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ks_2samp\nimport tqdm\nimport sys\n\nfrom scipy.stats import chi2_contingency\n\ndef set_common_mpl_styles(\n ax, legend=True, grid_axis=\"y\", ylabel=None, xlabel=None, title=None, ymin=None,\n xmin=None\n):\n ax.grid(axis=grid_axis)\n if legend == False:\n ax.legend_.remove()\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if title is not None:\n ax.set_title(title)\n if ymin is not None:\n ax.set_ylim(ymin=ymin)\n if xmin is not None:\n ax.set_xlim(xmin=xmin)", "_____no_output_____" ], [ "# Check on usage of chi2, given a contingency table we get a pvalue and expectation\n# If the contingency table significantly diverges from the expected values then we\n# get a small pvalue, if the tables are roughly the same then the pvalue is close to 1.0\nobs = np.array([[50, 50], [28, 71]])\nimport pprint\npprint.pprint(obs)\nchi2, pvalue, dof, expected = chi2_contingency(obs)\n# a pvalue < 0.05 means we can rejected the NH (NH: distributions are the same)\n# and accept the AH of different distributions, with an expected error rate\n# that we incorrectly reject the NH 5% of the time when there's no actual difference\nprint(f'pvalue for this result {pvalue:0.2f}') \nprint('Expected frequencies:')\nprint(expected)", "array([[50, 50],\n [28, 71]])\npvalue for this result 0.00\nExpected frequencies:\n[[39.1959799 60.8040201]\n [38.8040201 60.1959799]]\n" ] ], [ [ "```\nEach experiment is repeated 10k times to get the general behaviour.\n\nThe NH is that the distributions are the same (no difference between the A and B groups). 
On an A A test we would hope not to see any rejections (but they'll occur due to random variation). \n\nThe AH is that the groups come from different distributions.\n\nFirst we'll perform an A A test on 5k people (10k total). 5% of group A and B churn (i.e. they're the same - no intervention). On 4% of runs the NH is rejected. 96% of the time the NH is accepted. Even in this case we incorrectly see \"a difference\" on 4% of the experiments.\n\nWhilst 4% or so might be rejected, we'd expect a 5% rejection if we did _lots_ more experiments (we're testing at the 5% level so we expect 5% false NH rejections).\n\nNext we run an A B test on the same groups (10k people in total). 5% of group A churn, 4.75% of group B churn i.e. group B is 5% less likely to churn - we'd like to detect this difference by rejecting the NH. This time we reject the NH 8% of the time (i.e. we detect this small difference but not much more frequently than for the A A test), so 92% of the time we believe there's no difference between these groups (which is wrong). Subtle differences on small groups are not reliably detectable.\n\nNext we run an A B test on the same groups, this time group B is 20% less likely to churn (group A churns at 5%, group B churns at 4%). With this larger difference we reject the NH 65% of the time (so 2/3 of the time we correctly identify that the difference is significant). 1/3 of the time we still believe there's no difference. \n\nIf each marketing experiment is run only once, using a larger cohort than is currently used (i.e. the 10k proposed above), we'd still see high variance in the results.\n\nTo reliably detect a relatively large 20% change in churn we'd need a population of 20k people under study (two groups of 10k each), this lets us identify the difference on 90% of the runs (i.e. we'd still see the wrong answer 10% of the time). Thus the power of this configuration is 90% (which is a commonly accepted level).\n\nTo detect a smaller improvement (which is more likely to be the case) we start to need significantly larger test groups.\n\nTo reliably detect a smaller 5% change in churn (assuming churn is 5% at the start) we'd need a population of 320,000 people in total, to get the correct answer 90% of the time. The research paper noted below has an estimator formula, it suggests we'd need circa 480k people to achieve this accuracy. This formula seems to overestimate but roughly gives the same answer as the simulation, so it is a useful and quick guide.\n\nWith 2,000,000 people in total (1M per group), in an A/A test, we reject the NH 5% of the time. With an A/B test with a very tiny variation (10^-7) we detect this change barely above the 5% level i.e. we can't detect very tiny changes even with a huge sample (we'd need an even-huger sample for this).\n```", "_____no_output_____" ] ], [ [ "REPEATS = 10_000\n\nPEOPLE_A = 10_000\nPEOPLE_B = 10_000\n\nCUTOFF_A = 0.05 # 5% churn\n#B_MODIFIER = 1.0 # 1.0 means no change\n#B_MODIFIER = 0.95 # 0.95 means go down by 5% (i.e. improve churn by 5%)\nB_MODIFIER = 0.8 # 0.8 means go down by 20% (i.e. 
improve churn by 20%)\nCUTOFF_B = CUTOFF_A * B_MODIFIER \n\nprint(f'{PEOPLE_A:,} in A, {PEOPLE_B:,} in B')\nprint(f'P(churn|A) == {CUTOFF_A*100:0.2f}%, P(churn|B) == {CUTOFF_B*100:0.2f}%')", "10,000 in A, 10,000 in B\nP(churn|A) == 5.00%, P(churn|B) == 4.00%\n" ] ], [ [ "### Estimate using \"Practical Guide to Controlled Experiments on the Web\" (paper) formula\n\nUsing \"Practical Guide to Controlled Experiments on the Web, 2007 (KDD), Kohavi, Henne, Sommerfield\" (https://courses.cs.washington.edu/courses/cse454/15au/papers/p959-kohavi.pdf) we can estimate how many participants we need to track if we're going to do a >90% power 2 category ChiSq test.\n\n**NOTE** that this estimation method _overestimates_ the number of samples required, but is correct (given my experiments) within a factor of 2.", "_____no_output_____" ] ], [ [ "# \"Practical Guide to Controlled Experiments on the Web, 2007 (KDD), Kohavi, Henne, Sommerfield\"\nNBR_VARIANTS = 2\nDESIRED_LEVEL_OF_CHANGE_TO_DETECT = max(1-B_MODIFIER, 0.0000000001) # avoid 0 else this estimator fails\nSTD_DEV_OEC = np.sqrt(CUTOFF_A * (1-CUTOFF_A)) # std dev of Bernoulli trial on an X% event e.g. 5% churn rate\nDELTA = CUTOFF_A * DESIRED_LEVEL_OF_CHANGE_TO_DETECT\nnbr_trials_we_need = int((4 * NBR_VARIANTS * STD_DEV_OEC / (DELTA)) ** 2)\nprint(f\"According to the _estimate_ formula we'll need {nbr_trials_we_need:,} participants in total\")\nprint('Note that this formula is quick and it over-estimates the population size')", "According to the _estimate_ formula we'll need 30,400 participants in total\nNote that this formula is quick and it over-estimates the population size\n" ], [ "NEW_B_MODIFIERS = [0.95, 0.9, 0.85, 0.8, 0.75] # reduce churn by a set of \n#NEW_B_MODIFIERS = np.arange(0.99, 0.85, -0.01)\ntotal_estimated_participants = []\nresults_to_plot = []\nfor new_b_modifier in NEW_B_MODIFIERS:\n NBR_VARIANTS = 2\n DESIRED_LEVEL_OF_CHANGE_TO_DETECT = max(1-new_b_modifier, 0.0000000001) # avoid 0 else this estimator fails\n STD_DEV_OEC = np.sqrt(CUTOFF_A * (1-CUTOFF_A)) # std dev of Bernoulli trial on an X% event e.g. 5% churn rate\n DELTA = CUTOFF_A * DESIRED_LEVEL_OF_CHANGE_TO_DETECT\n nbr_trials_we_need = int((4 * NBR_VARIANTS * STD_DEV_OEC / (DELTA)) ** 2)\n total_estimated_participants.append(nbr_trials_we_need)\n results_to_plot.append({'change_to_detect': (1-new_b_modifier)*100, 'nbr_trials': nbr_trials_we_need})\n\ndef set_human_format(ax, on_x_axis=False, on_y_axis=False, **kwargs):\n '''Add commas e.g. 
1_000_000 -> \"1,000,000\"'''\n # note hardcoded in this case to the one plot I'm using below\n if on_x_axis == False and on_y_axis == False:\n raise ValueError(\"An axis must be chosen!\")\n if on_x_axis:\n axis = ax.get_xaxis()\n axis.set_major_formatter(\n mpl.ticker.FuncFormatter(lambda x, p: f\"{results_to_plot_df.index[x]:0.0f}%\")\n )\n if on_y_axis:\n axis = ax.get_yaxis()\n axis.set_major_formatter(\n mpl.ticker.FuncFormatter(lambda x, p: f\"{int(x):,}\")\n )", "_____no_output_____" ], [ "fig, axs = plt.subplots(figsize=(6, 6), constrained_layout=True)\nax = axs\nresults_to_plot_df = pd.DataFrame(results_to_plot).set_index('change_to_detect').sort_index(ascending=False)\nresults_to_plot_df.plot(kind='bar', ax=ax, )\n\ntitle = 'With larger expected changes we need significantly fewer samples\\nfor small changes we need disproportionately more samples\\nto reliably detect a true difference 90% of the time'\nset_common_mpl_styles(ax, title=title, ylabel='Number of trials required',\n xlabel = 'Level of change we wish to detect', ymin=0, legend=False)\n#results_to_plot.style.format({'nbr_trials':\"0.0f%\"})\nset_human_format(ax, on_y_axis=True, on_x_axis=True)", "_____no_output_____" ] ], [ [ "### Monte Carlo simulation result", "_____no_output_____" ] ], [ [ "pvalues = []\nprint(f'{PEOPLE_A:,} in A, {PEOPLE_B:,} in B')\nprint(f'P(churn|A) == {CUTOFF_A*100:0.2f}%, P(churn|B) == {CUTOFF_B*100:0.2f}%')\nsys.stdout.flush() # ignore print conflict with tqdm\nnbr_times_mean_a_gt_mean_b = []\nmean_a = []\nmean_b = []\n\nfor rep in tqdm.tqdm(range(REPEATS)):\n # real values in the continuous uniform range [0, 1)\n # then cast to ints (0 or 1) for chi sq frequency table\n arr_a = (np.random.random_sample(size=PEOPLE_A) < CUTOFF_A).astype('int')\n arr_b = (np.random.random_sample(size=PEOPLE_B) < CUTOFF_B).astype('int')\n # create a chi sq frequency table (as above)\n obs = [[arr_a.sum(), arr_a.shape[0]-arr_a.sum()], [arr_b.sum(), arr_b.shape[0]-arr_b.sum()]]\n chi2, pvalue, dof, expected = chi2_contingency(obs)\n pvalues.append(pvalue)\n \n mean_a.append(arr_a.mean())\n mean_b.append(arr_b.mean())\n nbr_times_mean_a_gt_mean_b.append(arr_a.mean() > arr_b.mean())\n\npvalues = np.array(pvalues) # turn list to array\nsys.stderr.flush()\nprint('Calculate nbr of times we reject NH of no-difference:')\nprint(f'{sum(pvalues < 0.05) / REPEATS * 100:0.1f}% Null Hyp rejects (NH: No difference in distributions)')\n# Calculate how often mean_a > mean_b, if we had a significant result we would then follow\n# this guidance which for an A A test would mean a random choice (as mean_a==mean_b==50%)\nfraction_time_mean_a_gt_mean_b = sum(nbr_times_mean_a_gt_mean_b) / len(nbr_times_mean_a_gt_mean_b)\nprint(f'Percentage of time mean-A is greater than mean-B: {fraction_time_mean_a_gt_mean_b*100:0.2f}%')", "10,000 in A, 10,000 in B\nP(churn|A) == 5.00%, P(churn|B) == 4.00%\n" ], [ "df = pd.DataFrame({'mean_a': mean_a, 'mean_b': mean_b})\nfig, axs = plt.subplots(figsize = (6, 6), ncols=1, sharey=True)\nax = axs\nborder_color = ['r' if v==True else 'b' for v in pvalues < 0.05] # r if reject NH\ndf.plot(kind='scatter', x='mean_a', y='mean_b', ax=ax, alpha=1, edgecolors=border_color)\ntitle = f'Means of A and B on {len(mean_a):,} repeated experiments'\ntitle += \"\\nStraight line shows expectation if there's no relationship\"\ntitle += \"\\nbut ignores where on the line we'd be\"\ntitle += \"\\nred border == pvalue < 0.5 else blue border\"\nset_common_mpl_styles(ax, ylabel='mean_b (note false 0)', xlabel='mean_a (note 
false 0)', title=title)\nmin_val = min(ax.get_xlim()[0], ax.get_ylim()[0])\nmax_val = max(ax.get_xlim()[1], ax.get_ylim()[1])\nax.set_xlim(xmin=min_val, xmax=max_val)\nax.set_ylim(ymin=min_val, ymax=max_val);\nax.plot([min_val, max_val], [min_val, max_val], color='blue');", "_____no_output_____" ] ], [ [ "## Plot distribution of pvalues from this test \n\nThe left-most bin counts the number of times we've seen a pvalue < 0.05. We can visually see if the pvalues are evenly distributed (which suggests an A A test) or if they're biased one way, if biased to the left then it suggests for a large enough set of repeats that we're seeing a difference in the population.", "_____no_output_____" ] ], [ [ "fig, axs = plt.subplots(figsize = (8, 6), ncols=1, sharey=True, constrained_layout=True)\nbins = np.linspace(0, 1, 21)\nax = axs\nser = pd.cut(pd.Series(pvalues), bins, right=False).value_counts().sort_index()\nser.plot(kind='bar')\nset_common_mpl_styles(ax, ymin=-1, ylabel='Frequency', xlabel='pvalue',\n title=r\"Distribution of $\\chi^2$ pvalues for NH on boolean Churn results\")\n\nax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))", "_____no_output_____" ], [ "df = pd.DataFrame({'A': arr_a, 'B': arr_b})\nprint('A and B groups with no-churn (0) and churn (1)')\nlabels = {0:'no churn', 1: 'churn'}\npd.crosstab(df['A'], df['B'], margins=True, normalize=True). \\\nrename(columns=labels, index=labels).style.format('{:.2%}')", "A and B groups with no-churn (0) and churn (1)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a41ec21f12b82b8f15b04dcf87efa88a4ad7993
52,926
ipynb
Jupyter Notebook
DocumentedExamples/transports.ipynb
ruth-moorman/cosima-recipes
3cc38cd3d50fc6d51492607b3866e9fdfb278228
[ "Apache-2.0" ]
null
null
null
DocumentedExamples/transports.ipynb
ruth-moorman/cosima-recipes
3cc38cd3d50fc6d51492607b3866e9fdfb278228
[ "Apache-2.0" ]
null
null
null
DocumentedExamples/transports.ipynb
ruth-moorman/cosima-recipes
3cc38cd3d50fc6d51492607b3866e9fdfb278228
[ "Apache-2.0" ]
null
null
null
156.123894
43,920
0.890262
[ [ [ "# Mass Transports\n\nTransport diagnostics for flow through major straits.\n\n## Theory\n\nFormally, mass transports are given by\n$$T_x = \\rho u $$\n$$T_y = \\rho v $$\n\nMass transports are diagnostics that are calculated online by the model:\n\n|--|\n|variable|long name|units|dimensions|\n|--|\n|tx_trans|T-cell i-mass transport|Sv|(time,st_ocean,xu_ocean,yt_ocean)|\n|ty_trans|T-cell j-mass transport|Sv|(time,st_ocean,xt_ocean,yu_ocean)|\n\n\nThese variables are saved in `ocean.nc` files.\n\n", "_____no_output_____" ], [ "## Calculation", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport cosima_cookbook as cc\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "Use default database for this calculation", "_____no_output_____" ] ], [ [ "session = cc.database.create_session()", "_____no_output_____" ] ], [ [ "This dictionary defines a few key choke points that you might be interested in. For the purposes of demonstration we are just using Drake Passage in this example.", "_____no_output_____" ] ], [ [ "straights = { 'DrakePassage': [-69.9, -69.9, -71.6, -51.0],\n 'Lombok': [-244.6+360, -243.9+360, -8.6, -8.6],\n 'Ombai' : [-235.0+360, -235.0+360, -9.2, -8.1],\n 'Timor' : [-235.9+360, -235.9+360, -11.9, -9.9],\n 'Bering' : [-172, -167, 65.8, 65.8],\n 'Denmark' : [-42, -22, 65.8, 65.8],\n }", "_____no_output_____" ], [ "def calc_transport(expt, straight, n=24):\n \"\"\"\n Calculate barotropic transport across a given \n line of latitude or longitude.\n \n Designed for flow through straits.\n \"\"\"\n \n print('Calculating {}:{} transport'.format(expt, straight))\n \n xmin, xmax, ymin, ymax = straights[straight]\n \n if xmin == xmax: \n tx_trans = cc.querying.getvar(expt,'tx_trans',session)\n \n transport = tx_trans.sel(xu_ocean=xmin, method='nearest')\\\n .sel(yt_ocean=slice(ymin, ymax))\\\n .sum('st_ocean').sum('yt_ocean')/1e6/1036\n\n elif ymin == ymax:\n ty_trans = cc.querying.getvar(expt,'tx_trans',session)\n transport = ty_trans.sel(yu_ocean=ymin, method='nearest')\\\n .sel(xt_ocean=slice(xmin, xmax))\\\n .sum('st_ocean').sum('xt_ocean')/1e6/1036\n else:\n raise ValueError('Transports are computed only along lines of either constant latitude or longitude')\n \n transport = transport.compute()\n \n return transport", "_____no_output_____" ], [ "%%time\n#expt = '025deg_jra55v13_ryf8485_gmredi6'\nexpt = '01deg_jra55v13_ryf8485_spinup6_000-413'\ntransport = calc_transport(expt, 'DrakePassage')", "Calculating 01deg_jra55v13_ryf8485_spinup6_000-413:DrakePassage transport\n" ], [ "transport.plot(linestyle='-')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a41f77f8e9a576d3675cec041ffa57ec675839a
831
ipynb
Jupyter Notebook
services/dy-jupyter-extensions/neuron/notebooks/examples/netpyne/HHTut/HHTut.ipynb
ignapas/osparc-services
76715bd6c8a788b2bee975b6a493ed8accf0ec10
[ "MIT" ]
2
2019-08-16T16:54:48.000Z
2020-06-10T05:50:35.000Z
services/dy-jupyter-extensions/neuron/notebooks/examples/netpyne/HHTut/HHTut.ipynb
mguidon/osparc-services
1cff293fee5e61a6708f1148077ca6a33880c7f4
[ "MIT" ]
63
2019-07-04T07:03:42.000Z
2022-02-09T18:03:55.000Z
services/dy-jupyter-extensions/neuron/notebooks/examples/netpyne/HHTut/HHTut.ipynb
mguidon/osparc-services
1cff293fee5e61a6708f1148077ca6a33880c7f4
[ "MIT" ]
9
2019-04-17T07:11:10.000Z
2020-06-03T13:42:58.000Z
21.307692
120
0.578821
[ [ [ "%matplotlib notebook\nimport HHTut # import parameters file \nfrom netpyne import sim # import netpyne sim module\n\nsim.createSimulateAnalyze(netParams = HHTut.netParams, simConfig = HHTut.simConfig) # create and simulate network", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
4a42021139829217aaa11907af58705082f9ff54
317,266
ipynb
Jupyter Notebook
docs/notebooks/thresholding/Thresholding.ipynb
gitter-badger/model-validation-toolkit
5859b19cb216f9dc87fc600adc7d74700787cbfa
[ "Apache-2.0" ]
null
null
null
docs/notebooks/thresholding/Thresholding.ipynb
gitter-badger/model-validation-toolkit
5859b19cb216f9dc87fc600adc7d74700787cbfa
[ "Apache-2.0" ]
null
null
null
docs/notebooks/thresholding/Thresholding.ipynb
gitter-badger/model-validation-toolkit
5859b19cb216f9dc87fc600adc7d74700787cbfa
[ "Apache-2.0" ]
null
null
null
437.00551
51,456
0.938509
[ [ [ "# Introduction to Adaptive Thresholding\nThis tutorial will go over some basic concepts you may wish to consider when setting thresholds for production models or otherwise.", "_____no_output_____" ], [ "## Make Some Data\nThis tutorial doesn't actually require real data--nor even a model! We'll make some fake data to get the idea. Don't worry too much about this step. Let's just assume we have a series of scores. These could represent model performance, divergences, or model scores themselves. Throughout this tutorial, we'll assume that increasing values of this score will be increasingly likely to represent a good alert. Then we are left to determine an appropriate threshold to balance true/false positive/negatives. This is balancing time wasted on bad alerts with the utility gained from finding a good alert that resulted from a lower score.", "_____no_output_____" ] ], [ [ "import numpy\nimport pandas\n\nnumpy.random.seed(0)\npandas.options.display.max_rows=5 # restrict to 5 rows on display\n\nn_positive = 600\npositives = numpy.random.beta(5, 1, size=n_positive)\nn_negative = 5 * n_positive\nnegatives = numpy.random.beta(2, 3, size=n_negative)\ndata = pandas.DataFrame(numpy.asarray((numpy.concatenate((numpy.ones(n_positive),\n numpy.zeros(n_negative))),\n numpy.concatenate((positives,\n negatives)))).T,\n columns=['Ground Truth', 'Model Score'])\ndata = data.sample(frac=1, random_state=0).reset_index(drop=True)\ndata", "_____no_output_____" ] ], [ [ "## Positive and Negative Distributions\n\nWe want to determine the trade off between catching more true positives and getting more false negatives. Let's see what the distribution of scores associated with good (positive) and bad (negative) alerts looks like.", "_____no_output_____" ] ], [ [ "import seaborn as sns; sns.set()\nimport matplotlib\nimport matplotlib.pylab as plt\nimport numpy\n\nplt.figure(figsize=(10, 2))\n\nplt.title('Positive Scores')\nplt.xlabel('Model Score')\nplt.yticks([])\npositives = data[data['Ground Truth'] == 1]['Model Score'].sample(100, random_state=0).values\nsns.rugplot(positives,\n height=1.0,\n color='green',\n label='Positive Samples')\nplt.show()", "_____no_output_____" ], [ "import seaborn as sns; sns.set()\nimport matplotlib\nimport matplotlib.pylab as plt\nimport numpy\n\nplt.figure(figsize=(10, 2))\n\nplt.title('Negative Scores')\nplt.xlabel('Model Score')\nplt.yticks([])\nnegatives = data[data['Ground Truth'] == 0]['Model Score'].sample(100, random_state=0).values\nsns.rugplot(negatives,\n height=1.0,\n color='red',\n label='Negative Samples')\nplt.show()", "_____no_output_____" ] ], [ [ "We can also plot an approximation of the probability distribution of positive and negative scores given our sample data.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 10))\n\nsns.set()\nplt.title('Positive Score Distribution')\nplt.xlabel('Model Score')\nplt.ylabel('Probability Density')\npositives = data[data['Ground Truth'] == 1]['Model Score'].values\nsns.kdeplot(positives,\n color='green',\n label='Positive Score Distribution')\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(10, 10))\n\nsns.set()\nplt.title('Negative Score Distribution')\nplt.xlabel('Model Score')\nplt.ylabel('Probability Density')\nnegatives = data[data['Ground Truth'] == 0]['Model Score'].values\nsns.kdeplot(negatives,\n color='red',\n label='Negative Score Distribution')\nplt.show()", "_____no_output_____" ] ], [ [ "It will also be important to keep in mind the distribution of model scores.", 
"_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 10))\n\nsns.set()\nplt.title('Score Distribution')\nplt.xlabel('Model Score')\nplt.ylabel('Probability Density')\nscores = data['Model Score'].values\nsns.kdeplot(scores,\n color='blue',\n label='Score Distribution')\nplt.show()", "_____no_output_____" ] ], [ [ "## Computing Optimal Thresholds\nWhen scoring a model after choosing a threshold, each model score can be associated with one of four possible outcomes:\n\n1. Positive instance is scored above the threshold.\n2. Negative instance is scored above the threshold.\n3. Negative instance is scored at or below the threshold.\n4. Positive instance is scored at or below the threshold.\n\nLet's say each of these outcomes has an associated probability $p$ and an associated utility $u$ as determined by the business:\n\n1. $p_\\mathrm{tp}$, $u_\\mathrm{tp}$\n2. $p_\\mathrm{fp}$, $u_\\mathrm{fp}$\n3. $p_\\mathrm{tn}$, $u_\\mathrm{tn}$\n4. $p_\\mathrm{fn}$, $u_\\mathrm{fn}$\n\nThen the expected utility of a scored sample with unknown ground truth is\n\n$u = p_\\mathrm{tp}u_\\mathrm{tp} +\np_\\mathrm{fp}u_\\mathrm{fp} +\np_\\mathrm{tn}u_\\mathrm{tn} +\np_\\mathrm{fn}u_\\mathrm{fn}$\n\nFor the purposes of this experiment, let's say the business would be 10 times as disappointed to learn of a false negative than they would be to have to pay for analysts wasting their time on a false positive.", "_____no_output_____" ] ], [ [ "def utility(tp, fp, tn, fn):\n return -10 * fn - fp", "_____no_output_____" ] ], [ [ "## Visualizing Utility of Each Threshold\nLet's plot expected utility against some candidate thresholds. The algorithm below will generate a plot with error bars over the expected value. If you need to interpret them, you can say _there's a 50% chance the true utility of each threshold falls within the shaded region_.", "_____no_output_____" ] ], [ [ "import numpy\nimport matplotlib.pylab as plt\nimport seaborn as sns, numpy\n\nfrom mvtk import thresholding\n\nplt.figure(figsize=(10, 10))\nplt.title('Expected Utility vs Threshold')\nplt.xlabel('Threshold')\nplt.ylabel('Expected Utility')\n\nscores, utility_mean, utility_quantiles = thresholding.expected_utility(\n utility, data[['Ground Truth', 'Model Score']].values)\nthresholding.plot_err(scores,\n utility_mean,\n utility_quantiles,\n label='Expected Utility')\nleg = plt.legend()\nfor lh in leg.legendHandles:\n lh.set_alpha(1)\nplt.show()", "_____no_output_____" ] ], [ [ "## Finding the Optimal Threshold\n\nThis is the threshold that corresponds to the peak of the utility function plotted above.\n\nYou don't need to worry about the mechanics of this function, you can just copy and paste it.", "_____no_output_____" ] ], [ [ "thresholding.optimal_threshold(utility, data)", "_____no_output_____" ] ], [ [ "## Optimal Threshold Distribution\nIf we know our sample of positives and negatives is unbiased (e.g. 
the analysts were equally likely to label any instance of their data), we can generally express our uncertainty in the location of the optimal threshold (which stems from our uncertainty in the utility function) to compute a distribution over what our optimum threshold might be given the data we have so far.\n\nYou don't need to worry about the mechanics of this function, you can just copy and paste it.", "_____no_output_____" ] ], [ [ "sns.set(); numpy.random.seed(0)\nplt.figure(figsize=(10, 10))\nplt.xlim([0, 1])\nplt.title('Threshold Distribution')\nplt.xlabel('Threshold')\nplt.ylabel('Probability Density')\nsns.kdeplot(thresholding.thompson_sample(utility, data),\n color='blue',\n label='Likelihood Threshold is Optimal')\nplt.show()", "_____no_output_____" ] ], [ [ "## Adaptive Thresholding\nWithout getting into the mechanics, we can dynamically choose between _exploration mode_, during which it will set the threshold to `0`, and _exploitation mode_, during which it will attempt to pick the optimal choice (or, in practice, something likely to be optimal).", "_____no_output_____" ], [ "## Online Learning\nHere we will give an example of how to apply adaptive thresholding to an online learning problem. \n\nIn this example, we will iterate over the data we have in chronological order (since it's fake data, let's just assume it was already ordered chronologically) and simulate a system that applies the above adaptive thresholding algorithm to the problem of identifying a new optimal threshold each time a new label arrives (e.g. from someone checking in on an alert and determining if it's a good one).", "_____no_output_____" ] ], [ [ "thresholder = thresholding.AdaptiveThreshold(utility)\nthresholds = []\nfor ground_truth, score in data[['Ground Truth', 'Model Score']].values:\n thresholds.append(thresholder(ground_truth, score))\nthresholds = numpy.asarray(thresholds)", "_____no_output_____" ] ], [ [ "What percent of the time did we end up setting the threshold to `0`? You'll notice we start out setting the threshold to `0` about 45% of the time to gather data, but that quickly drops to about 5% once we have a good understanding of the system.", "_____no_output_____" ] ], [ [ "import matplotlib.pylab as plt\n\nplt.figure(figsize=(9, 9))\nplt.ylabel('Exploration Percent (Moving Average)')\nplt.xlabel('Epoch')\nplt.plot(thresholding.exploration_proportion(thresholds, 100))\nplt.show()", "_____no_output_____" ] ], [ [ "About 10% of the alerts triggered were just to get unbiased data.", "_____no_output_____" ] ], [ [ "(thresholds == 0).sum()", "_____no_output_____" ], [ "(thresholds == 0).mean()", "_____no_output_____" ] ], [ [ "## Examining Results\nTo get a feel for what the algorithm is doing, let's reconstruct the utility function plot as before, but with the 64 most recent thresholds. 
As you can see, the thresholds are landing pretty close to the optimal value, while we were typically only taking unbiased data 2 to 3% of the time.", "_____no_output_____" ] ], [ [ "import matplotlib.pylab as plt\nimport seaborn as sns, numpy\n\nplt.figure(figsize=(10, 10))\nplt.title('Expected Utility vs Threshold')\nplt.xlabel('Threshold')\nplt.ylabel('Expected Utility')\n\nscores, utility_mean, utility_quantiles = thresholding.expected_utility(\n utility, data[['Ground Truth', 'Model Score']].values)\n\n# candidate thresholds are existing model scores\nthresholding.plot_err(scores,\n utility_mean,\n utility_quantiles,\n label='Expected Utility')\nax = sns.rugplot(thresholds[-64:], # most recent \n color='green',\n label='64 Most Recent Thresholds')\nleg = plt.legend()\nfor lh in leg.legendHandles:\n lh.set_alpha(1)\nplt.show()", "_____no_output_____" ] ], [ [ "We can watch the distribution of (nonzero) thresholds chosen evolve over time and approach the ideal one (computed using _all_ the data in our data set).", "_____no_output_____" ] ], [ [ "%%capture\n\nimport matplotlib.pylab as plt\nimport seaborn as sns, numpy\nimport os\nimport shutil\n\nfrom pathlib import Path\n\nframe_dir = 'frames'\nif os.path.exists(frame_dir):\n shutil.rmtree(frame_dir)\nPath(frame_dir).mkdir(parents=True, exist_ok=True)\n\n\ndef mkplot(thresholds, large_sample_ideal_thresholds):\n sns.set(); numpy.random.seed(0)\n plt.figure(figsize=(10, 10))\n plt.xlim([0, 1])\n plt.title(f'Threshold Distribution (epoch {i})')\n plt.xlabel('Threshold')\n plt.ylabel('Probability Density')\n ax = sns.distplot(thresholds[thresholds > 0],\n rug=True,\n hist=False,\n kde=True,\n color='green',\n label='Thresholds Chosen Using Unbiased Samples')\n ax = sns.distplot(large_sample_ideal_thresholds, # most recent \n rug=False,\n hist=False,\n kde=True,\n color='blue',\n label='Ideal Threshold Distribution')\n leg = plt.legend(loc='upper right')\n for lh in leg.legendHandles:\n lh.set_alpha(1)\n plt.savefig(os.path.join(frame_dir, f'im_{i}.png'))\n \nlarge_sample_ideal_thresholds = thresholding.thompson_sample(utility, data)\nN = 64\ndn = len(thresholds) // N\nj = 0\nfor num_frames, i in enumerate(range(dn, len(thresholds) + dn, dn)):\n mkplot(thresholds[j:i], large_sample_ideal_thresholds)\n j = i\nmkplot(thresholds[j:], large_sample_ideal_thresholds)", "_____no_output_____" ], [ "import os\nimport imageio\n\nimages = []\nfor filename in sorted(os.listdir(frame_dir), key=lambda x: int(x[3:-4])):\n images.append(imageio.imread(os.path.join(frame_dir, filename)))\nimageio.mimsave('threshold_distribution_evolution.gif', images, duration=30 / (num_frames + 1))", "_____no_output_____" ] ], [ [ "![threshold_distribution_evolution](threshold_distribution_evolution.gif)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a4205aedcd9dedf677d195a626dbf4d557fa100
761,516
ipynb
Jupyter Notebook
paper/figures/fig12_overlap/OverlapMatrices/Playing with matrix plots.ipynb
livecomsjournal/alchemical-best-practices
3c5284c0f09ac71887317dedec15b13212e3dadd
[ "CC-BY-4.0" ]
21
2018-04-07T14:55:28.000Z
2020-10-16T07:08:04.000Z
paper/figures/fig12_overlap/OverlapMatrices/Playing with matrix plots.ipynb
livecomsjournal/alchemical-best-practices
3c5284c0f09ac71887317dedec15b13212e3dadd
[ "CC-BY-4.0" ]
80
2018-04-04T19:50:09.000Z
2020-08-24T19:55:49.000Z
paper/figures/fig12_overlap/OverlapMatrices/Playing with matrix plots.ipynb
livecomsjournal/alchemical-best-practices
3c5284c0f09ac71887317dedec15b13212e3dadd
[ "CC-BY-4.0" ]
8
2018-03-28T23:59:08.000Z
2020-08-30T03:42:40.000Z
494.490909
169,612
0.93036
[ [ [ "# Overlap matrices\nThis notebook will look at different ways of plotting overlap matrices and making them visually appealing. \nOne way to guarantee right color choices for color blind poeple is using this tool: https://davidmathlogic.com/colorblind", "_____no_output_____" ] ], [ [ "%pylab inline\nimport pandas as pd", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "import seaborn as sbn\nsbn.set_style(\"ticks\")\nsbn.set_context(\"notebook\", font_scale = 1.5)", "_____no_output_____" ], [ "data = np.loadtxt('raw_matrices_review.dat')\ngood = (data[:9][:])\nbad = data[-9:][:]\nugly = data[9:18][:]", "_____no_output_____" ], [ "# Your Standard plot\nfig =figsize(8,8)\nax = sbn.heatmap(bad,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True,cmap=sbn.light_palette((210, 90, 60), input=\"husl\") )\nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\n", "_____no_output_____" ], [ "# Changing the colour map\n\nfrom matplotlib import colors\nfrom matplotlib.colors import LogNorm\n#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\ncmap = colors.ListedColormap(['#117733','#88CCEE', '#FBE8EB'])\nbounds=[0.0, 0.025, 0.1, 0.8]\nnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\ncbar_kws=dict(ticks=[0.2, 0.4, 0.6, 0.8 ,1.0])\n#ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )\n\n\nax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=0, linecolor='white', annot_kws={\"size\": 14},square=True,robust=True,cmap='bone_r', vmin=0, vmax=1 )\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\nfor _, spine in ax.spines.items():\n spine.set_visible(True)\nshow_annot_array = ugly >= 0.0001\nfor text, show_annot in zip(ax.texts, (element for row in show_annot_array for element in row)):\n text.set_visible(show_annot)\n ", "_____no_output_____" ], [ "# Changing the colour map\n\nfrom matplotlib import colors\nfrom matplotlib.colors import LogNorm\n#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\ncmap = colors.ListedColormap(['#117733','#88CCEE', '#FBE8EB'])\nbounds=[0.0, 0.025, 0.1, 0.8]\nnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\ncbar_kws=dict(ticks=[0.2, 0.4, 0.6, 0.8 ,1.0])\n#ax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )\n\n\nax = sbn.heatmap(good,annot=True, fmt='.2f', linewidths=0, linecolor='black', annot_kws={\"size\": 14},square=True,robust=True,cmap='bone_r',vmin=0, vmax=1 )\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\nfor _, spine in ax.spines.items():\n spine.set_visible(True)\nshow_annot_array = good >= 0.001\nfor text, show_annot in zip(ax.texts, (element for row in show_annot_array for element in row)):\n text.set_visible(show_annot)", "_____no_output_____" ], [ "# Changing the colour map\n\nfrom matplotlib import colors\nfrom matplotlib.colors import LogNorm\n#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\ncmap = colors.ListedColormap(['#117733','#88CCEE', '#FBE8EB'])\nbounds=[0.0, 0.025, 0.1, 0.8]\nnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\ncbar_kws=dict(ticks=[0.2, 0.4, 0.6, 0.8 ,1.0])\n#ax = sbn.heatmap(ugly,annot=True, fmt='.2f', 
linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )\n\n\nax = sbn.heatmap(bad,annot=True, fmt='.2f', linewidths=0, linecolor='black', annot_kws={\"size\": 14},square=True,robust=True,cmap='bone_r',vmin=0, vmax=1 )\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\nfor _, spine in ax.spines.items():\n spine.set_visible(True)\nshow_annot_array = bad >= 0.01\nfor text, show_annot in zip(ax.texts, (element for row in show_annot_array for element in row)):\n text.set_visible(show_annot)", "_____no_output_____" ], [ "# Changing the colour map\n\nfrom matplotlib import colors\n#cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\ncmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\nbounds=[0.0, 0.025, 0.1, 0.3,0.8]\nnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\ncbar_kws=dict(ticks=[.025, .1, .3,0.8])\nax = sbn.heatmap(ugly,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True,cmap=cmap, norm=norm,cbar_kws=cbar_kws )\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\n", "_____no_output_____" ], [ "cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\nbounds=[0.0, 0.025, 0.1, 0.3,0.8]\nnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\ncbar_kws=dict(ticks=[.025, .1, .3,0.8])\nax = sbn.heatmap(bad,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True,cmap=cmap, norm=norm, cbar_kws=cbar_kws )\nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')", "_____no_output_____" ], [ "cmap = colors.ListedColormap(['#FBE8EB','#88CCEE','#78C592', '#117733'])\nbounds=[0.0, 0.025, 0.1, 0.3,0.8]\nnorm = colors.BoundaryNorm(bounds, cmap.N, clip=False)\ncbar_kws=dict(ticks=[.025, .1, .3,0.8])\nax = sbn.heatmap(good,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},square=True,robust=True, cmap=cmap, norm=norm,vmin=0,vmax=1,cbar_kws=cbar_kws )\nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')", "_____no_output_____" ], [ "cbar_kws={'ticks': '[0.0, 0.2, 0.4, 0.6, 0.8, 1.0]'}", "_____no_output_____" ], [ "# Playing with pandas and getting more exotic\ndf = pd.DataFrame(bad, columns=[\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"])", "_____no_output_____" ], [ "#https://towardsdatascience.com/better-heatmaps-and-correlation-matrix-plots-in-python-41445d0f2bec\n\n\ndef heatmap(x, y, x1,y1, **kwargs):\n if 'color' in kwargs:\n color = kwargs['color']\n else:\n color = [1]*len(x)\n\n if 'palette' in kwargs:\n palette = kwargs['palette']\n n_colors = len(palette)\n else:\n n_colors = 256 # Use 256 colors for the diverging color palette\n palette = sbn.color_palette(\"Blues\", n_colors) \n\n if 'color_range' in kwargs:\n color_min, color_max = kwargs['color_range']\n else:\n color_min, color_max = min(color), max(color) # Range of values that will be mapped to the palette, i.e. 
min and max possible correlation\n\n def value_to_color(val):\n if color_min == color_max:\n return palette[-1]\n else:\n val_position = float((val - color_min)) / (color_max - color_min) # position of value in the input range, relative to the length of the input range\n val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1\n ind = int(val_position * (n_colors - 1)) # target index in the color palette\n return palette[ind]\n\n if 'size' in kwargs:\n size = kwargs['size']\n else:\n size = [1]*len(x)\n\n if 'size_range' in kwargs:\n size_min, size_max = kwargs['size_range'][0], kwargs['size_range'][1]\n else:\n size_min, size_max = min(size), max(size)\n\n size_scale = kwargs.get('size_scale', 500)\n\n def value_to_size(val):\n if size_min == size_max:\n return 1 * size_scale\n else:\n val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01 # position of value in the input range, relative to the length of the input range\n val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1\n return val_position * size_scale\n if 'x_order' in kwargs: \n x_names = [t for t in kwargs['x_order']]\n else:\n x_names = [t for t in sorted(set([v for v in x]))]\n x_to_num = {p[1]:p[0] for p in enumerate(x_names)}\n\n if 'y_order' in kwargs: \n y_names = [t for t in kwargs['y_order']]\n else:\n y_names = [t for t in sorted(set([v for v in y]))]\n y_to_num = {p[1]:p[0] for p in enumerate(y_names)}\n\n plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1) # Setup a 1x10 grid\n ax = plt.subplot(plot_grid[:,:-1]) # Use the left 14/15ths of the grid for the main plot\n\n marker = kwargs.get('marker', 's')\n\n kwargs_pass_on = {k:v for k,v in kwargs.items() if k not in [\n 'color', 'palette', 'color_range', 'size', 'size_range', 'size_scale', 'marker', 'x_order', 'y_order'\n ]}\n print(x_names)\n print(y_names)\n print('here------------')\n ax.scatter(\n x=x1,\n y=y1,\n marker=marker,\n s=[value_to_size(v) for v in size], \n c=[value_to_color(v) for v in color],\n **kwargs_pass_on\n )\n ax.set_xticks([v for k,v in x_to_num.items()])\n ax.set_xticklabels([k for k in x_to_num], rotation=45, horizontalalignment='right')\n ax.set_yticks([v for k,v in y_to_num.items()])\n ax.set_yticklabels([k for k in y_to_num])\n\n ax.grid(False, 'major')\n ax.grid(True, 'minor')\n ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\n ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\n\n ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])\n ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])\n ax.set_facecolor('#F1F1F1')\n\n # Add color legend on the right side of the plot\n if color_min < color_max:\n ax = plt.subplot(plot_grid[:,-1]) # Use the rightmost column of the plot\n\n col_x = [0]*len(palette) # Fixed x coordinate for the bars\n bar_y=np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars\n\n bar_height = bar_y[1] - bar_y[0]\n ax.barh(\n y=bar_y,\n width=[5]*len(palette), # Make bars 5 units wide\n left=col_x, # Make bars start at 0\n height=bar_height,\n color=palette,\n linewidth=0\n )\n ax.set_xlim(1, 2) # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle\n ax.grid(False) # Hide grid\n ax.set_facecolor('white') # Make background white\n ax.set_xticks([]) # Remove horizontal ticks\n ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max\n ax.yaxis.tick_right() # Show vertical ticks on the right \n\n\ndef 
corrplot(data, size_scale=500, marker='s'):\n corr = pd.melt(data.reset_index(), id_vars='index')\n print(corr)\n corr.columns = ['index', 'variable', 'value']\n x_names = [t for t in sorted(set([v for v in corr['index']]))]\n x_to_num = {p[1]:p[0] for p in enumerate(x_names)}\n x=[x_to_num[v] for v in corr['index']]\n y_names = [t for t in sorted(set([v for v in corr['index']]))]\n y_to_num = {p[1]:p[0] for p in enumerate(y_names)}\n y=[y_to_num[v] for v in corr['index']]\n heatmap(\n corr['index'], corr['value'],x1,y1,\n color=corr['value'], color_range=[0, 1],\n palette=sbn.diverging_palette(20, 220, n=256),\n size=corr['value'].abs(), size_range=[0,1],\n marker=marker,\n x_order=data.columns,\n y_order=data.columns[::-1],\n size_scale=size_scale\n )\n", "_____no_output_____" ], [ "corrplot(df)", " index variable value\n0 0 1 0.6487\n1 1 1 0.2912\n2 2 1 0.0557\n3 3 1 0.0043\n4 4 1 0.0001\n.. ... ... ...\n76 4 9 0.0000\n77 5 9 0.0000\n78 6 9 0.0000\n79 7 9 0.0311\n80 8 9 0.9688\n\n[81 rows x 3 columns]\n" ], [ "corr = pd.melt(df.reset_index(), id_vars='index')\nprint(corr)", " index variable value\n0 0 1 0.6487\n1 1 1 0.2912\n2 2 1 0.0557\n3 3 1 0.0043\n4 4 1 0.0001\n.. ... ... ...\n76 4 9 0.0000\n77 5 9 0.0000\n78 6 9 0.0000\n79 7 9 0.0311\n80 8 9 0.9688\n\n[81 rows x 3 columns]\n" ], [ "x_names = [t for t in sorted(set([v for v in corr['index']]))]\nx_to_num = {p[1]:p[0] for p in enumerate(x_names)}\nx1=[x_to_num[v] for v in corr['index']]", "_____no_output_____" ], [ "y_names = [t for t in sorted(set([v for v in corr['variable']]))]\ny_to_num = {p[1]:p[0] for p in enumerate(y_names)}\ny1=[y_to_num[v] for v in corr['variable']]", "_____no_output_____" ], [ "def value_to_size(val):\n if size_min == size_max:\n return 1 * size_scale\n else:\n val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01 # position of value in the input range, relative to the length of the input range\n val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1\n return val_position * size_scale\nvalue_names = [t for t in sorted(set([v for v in corr['value']]))]\nvalue = []\nfor v in corr['value']:\n value.append(v)\n", "_____no_output_____" ], [ "for v in corr['value']:\n print (v)", "0.6487\n0.2912\n0.0557\n0.0043\n0.0001\n0.0\n0.0\n0.0\n0.0\n0.2912\n0.4188\n0.2395\n0.0473\n0.0031\n0.0001\n0.0\n0.0\n0.0\n0.0557\n0.2395\n0.4293\n0.2424\n0.0321\n0.001\n0.0\n0.0\n0.0\n0.0043\n0.0473\n0.2424\n0.5047\n0.1884\n0.0127\n0.0002\n0.0\n0.0\n0.0001\n0.0031\n0.0321\n0.1884\n0.5974\n0.1735\n0.0053\n0.0\n0.0\n0.0\n0.0001\n0.001\n0.0127\n0.1735\n0.6866\n0.1254\n0.0007\n0.0\n0.0\n0.0\n0.0\n0.0002\n0.0053\n0.1254\n0.8035\n0.0656\n0.0\n0.0\n0.0\n0.0\n0.0\n0.0\n0.0007\n0.0656\n0.9026\n0.0311\n0.0\n0.0\n0.0\n0.0\n0.0\n0.0\n0.0\n0.0311\n0.9688\n" ], [ "n_colors = 256 # Use 256 colors for the diverging color palette\npalette = sbn.cubehelix_palette(n_colors)", "_____no_output_____" ], [ "mapping = linspace(0,1,256)", "_____no_output_____" ], [ "c_index = np.digitize(value, mapping)", "_____no_output_____" ], [ "plot_colors =[]\nfor i in c_index:\n plot_colors.append(palette[i])", "_____no_output_____" ], [ "s =np.array(value)*4000", "_____no_output_____" ], [ "fig = figsize(10,10)\nplot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1) # Setup a 1x10 grid\n\nax = plt.subplot(plot_grid[:,:-1]) # Use the left 14/15ths of the grid for the main plot\nax.scatter(x1,y1,marker='s',s=s,c=plot_colors)\nsbn.despine()\nax.grid(False, 'major')\nax.grid(True, 'minor', color='white')\nax.set_xticks([t + 0.5 
for t in ax.get_xticks()], minor=True)\nax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\n\nax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])\nax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])\nax.set_facecolor((0,0,0))\nplt.gca().invert_yaxis()\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') \nxlabel(r'$\\lambda$ index')\nylabel(r'$\\lambda$ index')", "_____no_output_____" ], [ "def value_to_size(val, vlaue):\n size_scale = 500\n size = [1]*len(value)\n size_min, size_max = min(size), max(size)\n if size_min == size_max:\n return 1 * size_scale\n else:\n val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01 # position of value in the input range, relative to the length of the input range\n val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1\n return val_position * size_scale", "_____no_output_____" ], [ "heatmap2", "_____no_output_____" ], [ "value_to_size(value[5], value)", "_____no_output_____" ], [ "from biokit.viz import corrplot\nc = corrplot.Corrplot(df)\nc.plot()", "Computing correlation\n" ], [ "def plot(index, columns):\n values = \"bad_status\"\n vmax = 0.10\n cellsize_vmax = 10000\n g_ratio = df.pivot_table(index=index, columns=columns, values=values, aggfunc=\"mean\")\n g_size = df.pivot_table(index=index, columns=columns, values=values, aggfunc=\"size\")\n annot = np.vectorize(lambda x: \"\" if np.isnan(x) else \"{:.1f}%\".format(x * 100))(g_ratio)\n \n # adjust visual balance\n figsize = (g_ratio.shape[1] * 0.8, g_ratio.shape[0] * 0.8)\n cbar_width = 0.05 * 6.0 / figsize[0] \n \n f, ax = plt.subplots(1, 1, figsize=figsize)\n cbar_ax = f.add_axes([.91, 0.1, cbar_width, 0.8])\n heatmap2(g_ratio, ax=ax, cbar_ax=cbar_ax, \n vmax=vmax, cmap=\"PuRd\", annot=annot, fmt=\"s\", annot_kws={\"fontsize\":\"small\"},\n cellsize=g_size, cellsize_vmax=cellsize_vmax,\n square=True, ax_kws={\"title\": \"{} x {}\".format(index, columns)})\n plt.show()", "_____no_output_____" ], [ "\n\"\"\"\nThis script is created by modifying seaborn matrix.py\nin https://github.com/mwaskom/seaborn, by Michael L. 
Waskom\n\"\"\"\n\nfrom __future__ import division\nimport itertools\n\nimport matplotlib as mpl\nfrom matplotlib.collections import LineCollection\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport matplotlib.patheffects as patheffects\nimport numpy as np\nimport pandas as pd\nfrom scipy.cluster import hierarchy\n\nimport seaborn as sns\nfrom seaborn import cm\nfrom seaborn.axisgrid import Grid\nfrom seaborn.utils import (despine, axis_ticklabels_overlap, relative_luminance, to_utf8)\nfrom seaborn.external.six import string_types\n\ndef _index_to_label(index):\n \"\"\"Convert a pandas index or multiindex to an axis label.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return \"-\".join(map(to_utf8, index.names))\n else:\n return index.name\n\ndef _index_to_ticklabels(index):\n \"\"\"Convert a pandas index or multiindex into ticklabels.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return [\"-\".join(map(to_utf8, i)) for i in index.values]\n else:\n return index.values\n\ndef _matrix_mask(data, mask):\n \"\"\"Ensure that data and mask are compatabile and add missing values.\n\n Values will be plotted for cells where ``mask`` is ``False``.\n\n ``data`` is expected to be a DataFrame; ``mask`` can be an array or\n a DataFrame.\n\n \"\"\"\n if mask is None:\n mask = np.zeros(data.shape, np.bool)\n\n if isinstance(mask, np.ndarray):\n # For array masks, ensure that shape matches data then convert\n if mask.shape != data.shape:\n raise ValueError(\"Mask must have the same shape as data.\")\n\n mask = pd.DataFrame(mask,\n index=data.index,\n columns=data.columns,\n dtype=np.bool)\n\n elif isinstance(mask, pd.DataFrame):\n # For DataFrame masks, ensure that semantic labels match data\n if not mask.index.equals(data.index) \\\n and mask.columns.equals(data.columns):\n err = \"Mask must have the same index and columns as data.\"\n raise ValueError(err)\n\n # Add any cells with missing data to the mask\n # This works around an issue where `plt.pcolormesh` doesn't represent\n # missing data properly\n mask = mask | pd.isnull(data)\n\n return mask\n\n\nclass _HeatMapper2(object):\n \"\"\"Draw a heatmap plot of a matrix with nice labels and colormaps.\"\"\"\n\n def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,\n annot_kws, cellsize, cellsize_vmax,\n cbar, cbar_kws,\n xticklabels=True, yticklabels=True, mask=None, ax_kws=None, rect_kws=None):\n \"\"\"Initialize the plotting object.\"\"\"\n # We always want to have a DataFrame with semantic information\n # and an ndarray to pass to matplotlib\n if isinstance(data, pd.DataFrame):\n plot_data = data.values\n else:\n plot_data = np.asarray(data)\n data = pd.DataFrame(plot_data)\n\n # Validate the mask and convet to DataFrame\n mask = _matrix_mask(data, mask)\n\n plot_data = np.ma.masked_where(np.asarray(mask), plot_data)\n\n # Get good names for the rows and columns\n xtickevery = 1\n if isinstance(xticklabels, int):\n xtickevery = xticklabels\n xticklabels = _index_to_ticklabels(data.columns)\n elif xticklabels is True:\n xticklabels = _index_to_ticklabels(data.columns)\n elif xticklabels is False:\n xticklabels = []\n\n ytickevery = 1\n if isinstance(yticklabels, int):\n ytickevery = yticklabels\n yticklabels = _index_to_ticklabels(data.index)\n elif yticklabels is True:\n yticklabels = _index_to_ticklabels(data.index)\n elif yticklabels is False:\n yticklabels = []\n\n # Get the positions and used label for the ticks\n nx, ny = data.T.shape\n\n if not len(xticklabels):\n self.xticks = []\n self.xticklabels = 
[]\n elif isinstance(xticklabels, string_types) and xticklabels == \"auto\":\n self.xticks = \"auto\"\n self.xticklabels = _index_to_ticklabels(data.columns)\n else:\n self.xticks, self.xticklabels = self._skip_ticks(xticklabels,\n xtickevery)\n\n if not len(yticklabels):\n self.yticks = []\n self.yticklabels = []\n elif isinstance(yticklabels, string_types) and yticklabels == \"auto\":\n self.yticks = \"auto\"\n self.yticklabels = _index_to_ticklabels(data.index)\n else:\n self.yticks, self.yticklabels = self._skip_ticks(yticklabels,\n ytickevery)\n\n # Get good names for the axis labels\n xlabel = _index_to_label(data.columns)\n ylabel = _index_to_label(data.index)\n self.xlabel = xlabel if xlabel is not None else \"\"\n self.ylabel = ylabel if ylabel is not None else \"\"\n\n # Determine good default values for the colormapping\n self._determine_cmap_params(plot_data, vmin, vmax,\n cmap, center, robust)\n\n # Determine good default values for cell size\n self._determine_cellsize_params(plot_data, cellsize, cellsize_vmax)\n\n # Sort out the annotations\n if annot is None:\n annot = False\n annot_data = None\n elif isinstance(annot, bool):\n if annot:\n annot_data = plot_data\n else:\n annot_data = None\n else:\n try:\n annot_data = annot.values\n except AttributeError:\n annot_data = annot\n if annot.shape != plot_data.shape:\n raise ValueError('Data supplied to \"annot\" must be the same '\n 'shape as the data to plot.')\n annot = True\n\n # Save other attributes to the object\n self.data = data\n self.plot_data = plot_data\n\n self.annot = annot\n self.annot_data = annot_data\n\n self.fmt = fmt\n self.annot_kws = {} if annot_kws is None else annot_kws\n #self.annot_kws.setdefault('color', \"black\")\n self.annot_kws.setdefault('ha', \"center\")\n self.annot_kws.setdefault('va', \"center\")\n self.cbar = cbar\n self.cbar_kws = {} if cbar_kws is None else cbar_kws\n self.cbar_kws.setdefault('ticks', mpl.ticker.MaxNLocator(6))\n self.ax_kws = {} if ax_kws is None else ax_kws\n self.rect_kws = {} if rect_kws is None else rect_kws\n # self.rect_kws.setdefault('edgecolor', \"black\")\n\n def _determine_cmap_params(self, plot_data, vmin, vmax,\n cmap, center, robust):\n \"\"\"Use some heuristics to set good defaults for colorbar and range.\"\"\"\n calc_data = plot_data.data[~np.isnan(plot_data.data)]\n if vmin is None:\n vmin = np.percentile(calc_data, 2) if robust else calc_data.min()\n if vmax is None:\n vmax = np.percentile(calc_data, 98) if robust else calc_data.max()\n self.vmin, self.vmax = vmin, vmax\n\n # Choose default colormaps if not provided\n if cmap is None:\n if center is None:\n self.cmap = cm.rocket\n else:\n self.cmap = cm.icefire\n elif isinstance(cmap, string_types):\n self.cmap = mpl.cm.get_cmap(cmap)\n elif isinstance(cmap, list):\n self.cmap = mpl.colors.ListedColormap(cmap)\n else:\n self.cmap = cmap\n\n # Recenter a divergent colormap\n if center is not None:\n vrange = max(vmax - center, center - vmin)\n normlize = mpl.colors.Normalize(center - vrange, center + vrange)\n cmin, cmax = normlize([vmin, vmax])\n cc = np.linspace(cmin, cmax, 256)\n self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n\n def _determine_cellsize_params(self, plot_data, cellsize, cellsize_vmax):\n\n if cellsize is None:\n self.cellsize = np.ones(plot_data.shape)\n self.cellsize_vmax = 1.0\n else:\n if isinstance(cellsize, pd.DataFrame):\n cellsize = cellsize.values\n self.cellsize = cellsize\n if cellsize_vmax is None:\n cellsize_vmax = cellsize.max()\n self.cellsize_vmax = 
cellsize_vmax\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n\n # Remove all the Axes spines\n #despine(ax=ax, left=True, bottom=True)\n\n # Draw the heatmap and annotate\n height, width = self.plot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)\n\n data = self.plot_data.data\n cellsize = self.cellsize\n\n mask = self.plot_data.mask\n if not isinstance(mask, np.ndarray) and not mask:\n mask = np.zeros(self.plot_data.shape, np.bool)\n\n annot_data = self.annot_data\n if not self.annot:\n annot_data = np.zeros(self.plot_data.shape)\n\n # Draw rectangles instead of using pcolormesh\n # Might be slower than original heatmap\n for x, y, m, val, s, an_val in zip(xpos.flat, ypos.flat, mask.flat, data.flat, cellsize.flat, annot_data.flat):\n if not m:\n vv = (val - self.vmin) / (self.vmax - self.vmin)\n size = np.clip(s / self.cellsize_vmax, 0.1, 1.0)\n color = self.cmap(vv)\n rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, **self.rect_kws)\n ax.add_patch(rect)\n\n if self.annot:\n annotation = (\"{:\" + self.fmt + \"}\").format(an_val)\n text = ax.text(x, y, annotation, **self.annot_kws)\n print(text)\n # add edge to text\n text_luminance = relative_luminance(text.get_color())\n text_edge_color = \".15\" if text_luminance > .408 else \"w\"\n text.set_path_effects([mpl.patheffects.withStroke(linewidth=1, foreground=text_edge_color)])\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Set other attributes\n ax.set(**self.ax_kws)\n\n if self.cbar:\n norm = mpl.colors.Normalize(vmin=self.vmin, vmax=self.vmax)\n scalar_mappable = mpl.cm.ScalarMappable(cmap=self.cmap, norm=norm)\n scalar_mappable.set_array(self.plot_data.data)\n cb = ax.figure.colorbar(scalar_mappable, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # if kws.get('rasterized', False):\n # cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, string_types) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, string_types) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n\n # 
Possibly rotate them if they overlap\n ax.figure.draw(ax.figure.canvas.get_renderer())\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n\ndef heatmap2(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,\n annot=None, fmt=\".2g\", annot_kws=None,\n cellsize=None, cellsize_vmax=None,\n cbar=True, cbar_kws=None, cbar_ax=None,\n square=False, xticklabels=\"auto\", yticklabels=\"auto\",\n mask=None, ax=None, ax_kws=None, rect_kws=None):\n\n # Initialize the plotter object\n plotter = _HeatMapper2(data, vmin, vmax, cmap, center, robust,\n annot, fmt, annot_kws,\n cellsize, cellsize_vmax,\n cbar, cbar_kws, xticklabels,\n yticklabels, mask, ax_kws, rect_kws)\n\n # Draw the plot and return the Axes\n if ax is None:\n ax = plt.gca()\n if square:\n ax.set_aspect(\"equal\")\n\n # delete grid\n ax.grid(False)\n\n plotter.plot(ax, cbar_ax)\n return ax", "_____no_output_____" ], [ "fig =figsize(10,10)\nax = heatmap2(good,annot=True, fmt='.2f',cellsize=np.array(value),cellsize_vmax=1, annot_kws={\"size\": 13},square=True,robust=True,cmap='PiYG' ) \nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\n\n\nax.grid(False, 'major')\nax.grid(True, 'minor', color='black', alpha=0.3)\nax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\nax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top') ", "Text(0.5, 0.5, '0.77')\nText(1.5, 0.5, '0.21')\nText(2.5, 0.5, '0.02')\nText(3.5, 0.5, '0.00')\nText(4.5, 0.5, '0.00')\nText(5.5, 0.5, '0.00')\nText(6.5, 0.5, '0.00')\nText(7.5, 0.5, '0.00')\nText(8.5, 0.5, '0.00')\nText(0.5, 1.5, '0.21')\nText(1.5, 1.5, '0.53')\nText(2.5, 1.5, '0.23')\nText(3.5, 1.5, '0.03')\nText(4.5, 1.5, '0.00')\nText(5.5, 1.5, '0.00')\nText(6.5, 1.5, '0.00')\nText(7.5, 1.5, '0.00')\nText(8.5, 1.5, '0.00')\nText(0.5, 2.5, '0.02')\nText(1.5, 2.5, '0.23')\nText(2.5, 2.5, '0.50')\nText(3.5, 2.5, '0.23')\nText(4.5, 2.5, '0.02')\nText(5.5, 2.5, '0.00')\nText(6.5, 2.5, '0.00')\nText(7.5, 2.5, '0.00')\nText(8.5, 2.5, '0.00')\nText(0.5, 3.5, '0.00')\nText(1.5, 3.5, '0.03')\nText(2.5, 3.5, '0.23')\nText(3.5, 3.5, '0.49')\nText(4.5, 3.5, '0.22')\nText(5.5, 3.5, '0.02')\nText(6.5, 3.5, '0.00')\nText(7.5, 3.5, '0.00')\nText(8.5, 3.5, '0.00')\nText(0.5, 4.5, '0.00')\nText(1.5, 4.5, '0.00')\nText(2.5, 4.5, '0.02')\nText(3.5, 4.5, '0.22')\nText(4.5, 4.5, '0.50')\nText(5.5, 4.5, '0.23')\nText(6.5, 4.5, '0.02')\nText(7.5, 4.5, '0.00')\nText(8.5, 4.5, '0.00')\nText(0.5, 5.5, '0.00')\nText(1.5, 5.5, '0.00')\nText(2.5, 5.5, '0.00')\nText(3.5, 5.5, '0.02')\nText(4.5, 5.5, '0.23')\nText(5.5, 5.5, '0.52')\nText(6.5, 5.5, '0.22')\nText(7.5, 5.5, '0.01')\nText(8.5, 5.5, '0.00')\nText(0.5, 6.5, '0.00')\nText(1.5, 6.5, '0.00')\nText(2.5, 6.5, '0.00')\nText(3.5, 6.5, '0.00')\nText(4.5, 6.5, '0.02')\nText(5.5, 6.5, '0.22')\nText(6.5, 6.5, '0.57')\nText(7.5, 6.5, '0.19')\nText(8.5, 6.5, '0.01')\nText(0.5, 7.5, '0.00')\nText(1.5, 7.5, '0.00')\nText(2.5, 7.5, '0.00')\nText(3.5, 7.5, '0.00')\nText(4.5, 7.5, '0.00')\nText(5.5, 7.5, '0.01')\nText(6.5, 7.5, '0.19')\nText(7.5, 7.5, '0.61')\nText(8.5, 7.5, '0.19')\nText(0.5, 8.5, '0.00')\nText(1.5, 8.5, '0.00')\nText(2.5, 8.5, '0.00')\nText(3.5, 8.5, '0.00')\nText(4.5, 8.5, '0.00')\nText(5.5, 8.5, '0.00')\nText(6.5, 8.5, 
'0.01')\nText(7.5, 8.5, '0.19')\nText(8.5, 8.5, '0.81')\n" ], [ "fig =figsize(8,8)\nax = sbn.heatmap(good,annot=True, fmt='.2f', linewidths=.3, annot_kws={\"size\": 14},cmap=sbn.light_palette((210, 90, 60), input=\"husl\") )\nax.set_xlabel(r'$\\lambda$ index')\nax.set_ylabel(r'$\\lambda$ index')\n\nsbn.despine()\nax.grid(False, 'major')\nax.grid(True, 'minor', color='white')\nax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)\nax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)", "_____no_output_____" ], [ "text = ax.text(x, y, annotation, **self.annot_kws)\n# add edge to text\ntext_luminance = relative_luminance(text.get_color())\ntext_edge_color = \".15\" if text_luminance > .408 else \"w\"\ntext.set_path_effects([mpl.patheffects.withStroke(linewidth=1, foreground=text_edge_color)])", "_____no_output_____" ], [ "ax.text()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
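A minimal usage sketch for the heatmap2 helper defined in the record above. The good and value matrices referenced by its demo cells are not part of the record, so random stand-ins are assumed here, and plt.figure(figsize=...) replaces the pylab-style fig =figsize(10,10) call, which is not valid plain matplotlib.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical stand-ins for the `good` (cell colour) and `value` (cell size) matrices.
rng = np.random.default_rng(0)
good = pd.DataFrame(rng.random((9, 9)))
value = rng.random((9, 9))

fig = plt.figure(figsize=(10, 10))            # plain matplotlib, no pylab helpers
ax = heatmap2(good, annot=True, fmt='.2f',    # heatmap2 as defined in the record above
              cellsize=np.array(value), cellsize_vmax=1,
              square=True, robust=True, cmap='PiYG')
ax.set_xlabel(r'$\lambda$ index')
ax.set_ylabel(r'$\lambda$ index')
plt.show()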
4a420a621125e9609273c77202ee5265c7064be3
214,087
ipynb
Jupyter Notebook
Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/jn/EDA_006_credit_card.ipynb
hehuanlin123/DeepLearning
6b7feabbbde9ac9489f76da4c06eeb6703fb165a
[ "MIT" ]
1
2020-02-28T12:03:39.000Z
2020-02-28T12:03:39.000Z
Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/jn/EDA_006_credit_card.ipynb
hehuanlin123/DeepLearning
6b7feabbbde9ac9489f76da4c06eeb6703fb165a
[ "MIT" ]
null
null
null
Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/jn/EDA_006_credit_card.ipynb
hehuanlin123/DeepLearning
6b7feabbbde9ac9489f76da4c06eeb6703fb165a
[ "MIT" ]
null
null
null
42.783173
134
0.282581
[ [ [ "import pandas as pd\nimport numpy as np\nfrom glob import glob\nfrom IPython.display import display\npd.set_option('display.max_columns', 99)\npd.set_option('display.max_rows', 99)\nimport EDA\nimport utils\n#sorted(glob('../data/*'))", "_____no_output_____" ], [ "df = utils.read_pickles('../data/credit_card_balance').sort_values(['SK_ID_CURR', 'MONTHS_BALANCE'], ascending=[True, False])\ndf.head()", "100%|██████████| 20/20 [00:06<00:00, 2.91it/s]\n" ], [ "EDA.df_info(df)", "Shape: (3840312, 23)\n" ], [ "df.columns", "_____no_output_____" ], [ "ids = df.SK_ID_CURR.unique()", "_____no_output_____" ], [ "tmp = df[df.SK_ID_CURR==np.random.choice(ids)]\ntmp", "_____no_output_____" ], [ "tmp = df[df.SK_ID_CURR.isin(np.random.choice(ids, size=9))]\ntmp", "_____no_output_____" ], [ "tmp.to_csv('sample_cre.csv', index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
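The utils.read_pickles and EDA.df_info helpers used in the credit-card record above are project-local modules that are not shown; a rough pandas-only sketch of the same sampling step, with a seeded generator so the exported sample is reproducible (the parquet path is an assumption):

import numpy as np
import pandas as pd

# Assumed input location; the original loads the same table from a folder of pickles.
df = pd.read_parquet('../data/credit_card_balance.parquet')
df = df.sort_values(['SK_ID_CURR', 'MONTHS_BALANCE'], ascending=[True, False])

rng = np.random.default_rng(42)                      # fixed seed -> reproducible sample
ids = df['SK_ID_CURR'].unique()
sample_ids = rng.choice(ids, size=9, replace=False)  # 9 distinct customers

sample = df[df['SK_ID_CURR'].isin(sample_ids)]
sample.to_csv('sample_cre.csv', index=False)

Sampling without replacement avoids the duplicate IDs that np.random.choice can return with its default replace=True.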
4a4216727ffed0ecde9634aad2f8099e4d4a879c
39,116
ipynb
Jupyter Notebook
code/model_zoo/tensorflow_ipynb/autoencoder-deconv.ipynb
wpsliu123/Sebastian_Raschka-Deep-Learning-Book
fc57a58b46921f057248bd8fd0f258e952a3cddb
[ "MIT" ]
3
2019-02-19T16:42:28.000Z
2020-10-11T05:16:12.000Z
code/model_zoo/tensorflow_ipynb/autoencoder-deconv.ipynb
bharat3012/deep-learning-book
839e076c5098084512c947a38878a9a545d9a87d
[ "MIT" ]
null
null
null
code/model_zoo/tensorflow_ipynb/autoencoder-deconv.ipynb
bharat3012/deep-learning-book
839e076c5098084512c947a38878a9a545d9a87d
[ "MIT" ]
2
2020-09-07T12:43:33.000Z
2021-06-11T12:10:09.000Z
129.953488
29,372
0.833623
[ [ [ "*Accompanying code examples of the book \"Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python\" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).*\n \nOther code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning).", "_____no_output_____" ] ], [ [ "%load_ext watermark\n%watermark -a 'Sebastian Raschka' -v -p tensorflow", "Sebastian Raschka \n\nCPython 3.6.1\nIPython 6.0.0\n\ntensorflow 1.2.0\n" ] ], [ [ "# Model Zoo -- Convolutional Autoencoder with Deconvolutions", "_____no_output_____" ], [ "A convolutional autoencoder using deconvolutional layers that compresses 768-pixel MNIST images down to a 7x7x4 (196 pixel) representation.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\n##########################\n### DATASET\n##########################\n\nmnist = input_data.read_data_sets(\"./\", validation_size=0)\n\n\n##########################\n### SETTINGS\n##########################\n\n# Hyperparameters\nlearning_rate = 0.001\ntraining_epochs = 5\nbatch_size = 128\n\n# Architecture\nhidden_size = 16\ninput_size = 784\nimage_width = 28\n\n# Other\nprint_interval = 200\nrandom_seed = 123\n\n\n##########################\n### GRAPH DEFINITION\n##########################\n\ng = tf.Graph()\nwith g.as_default():\n \n tf.set_random_seed(random_seed)\n\n # Input data\n tf_x = tf.placeholder(tf.float32, [None, input_size], name='inputs')\n input_layer = tf.reshape(tf_x, shape=[-1, image_width, image_width, 1])\n\n ###########\n # Encoder\n ###########\n \n # 28x28x1 => 28x28x8\n conv1 = tf.layers.conv2d(input_layer, filters=8, kernel_size=(3, 3),\n strides=(1, 1), padding='same', \n activation=tf.nn.relu)\n # 28x28x8 => 14x14x8\n maxpool1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), \n strides=(2, 2), padding='same')\n \n # 14x14x8 => 14x14x4\n conv2 = tf.layers.conv2d(maxpool1, filters=4, kernel_size=(3, 3), \n strides=(1, 1), padding='same', \n activation=tf.nn.relu)\n \n # 14x14x4 => 7x7x4\n encode = tf.layers.max_pooling2d(conv2, pool_size=(2, 2), \n strides=(2, 2), padding='same', \n name='encoding')\n\n ###########\n # Decoder\n ###########\n \n # 7x7x4 => 14x14x8\n deconv1 = tf.layers.conv2d_transpose(encode, filters=8, \n kernel_size=(3, 3), strides=(2, 2), \n padding='same',\n activation=tf.nn.relu)\n \n \n # 14x14x8 => 28x28x8\n deconv2 = tf.layers.conv2d_transpose(deconv1, filters=8, \n kernel_size=(3, 3), strides=(2, 2), \n padding='same',\n activation=tf.nn.relu)\n \n # 28x28x8 => 28x28x1\n logits = tf.layers.conv2d(deconv2, filters=1, kernel_size=(3,3), \n strides=(1, 1), padding='same', \n activation=None)\n \n decode = tf.nn.sigmoid(logits, name='decoding')\n\n ##################\n # Loss & Optimizer\n ##################\n \n loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=input_layer,\n logits=logits)\n cost = tf.reduce_mean(loss, name='cost')\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train = optimizer.minimize(cost, name='train') \n\n # Saver to save session for reuse\n saver = tf.train.Saver()", "Extracting 
./train-images-idx3-ubyte.gz\nExtracting ./train-labels-idx1-ubyte.gz\nExtracting ./t10k-images-idx3-ubyte.gz\nExtracting ./t10k-labels-idx1-ubyte.gz\n" ], [ "import numpy as np\n\n##########################\n### TRAINING & EVALUATION\n##########################\n \nwith tf.Session(graph=g) as sess:\n sess.run(tf.global_variables_initializer())\n\n np.random.seed(random_seed) # random seed for mnist iterator\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = mnist.train.num_examples // batch_size\n\n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n _, c = sess.run(['train', 'cost:0'], feed_dict={'inputs:0': batch_x})\n\n avg_cost += c\n\n if not i % print_interval:\n print(\"Minibatch: %03d | Cost: %.3f\" % (i + 1, c))\n\n print(\"Epoch: %03d | AvgCost: %.3f\" % (epoch + 1, avg_cost / (i + 1)))\n \n saver.save(sess, save_path='./autoencoder.ckpt')", "Minibatch: 001 | Cost: 0.693\nMinibatch: 201 | Cost: 0.153\nMinibatch: 401 | Cost: 0.104\nEpoch: 001 | AvgCost: 0.232\nMinibatch: 001 | Cost: 0.098\nMinibatch: 201 | Cost: 0.096\nMinibatch: 401 | Cost: 0.093\nEpoch: 002 | AvgCost: 0.093\nMinibatch: 001 | Cost: 0.090\nMinibatch: 201 | Cost: 0.086\nMinibatch: 401 | Cost: 0.089\nEpoch: 003 | AvgCost: 0.088\nMinibatch: 001 | Cost: 0.086\nMinibatch: 201 | Cost: 0.089\nMinibatch: 401 | Cost: 0.085\nEpoch: 004 | AvgCost: 0.086\nMinibatch: 001 | Cost: 0.090\nMinibatch: 201 | Cost: 0.083\nMinibatch: 401 | Cost: 0.087\nEpoch: 005 | AvgCost: 0.084\n" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\n##########################\n### VISUALIZATION\n##########################\n\nn_images = 15\n\nfig, axes = plt.subplots(nrows=2, ncols=n_images, sharex=True, \n sharey=True, figsize=(20, 2.5))\ntest_images = mnist.test.images[:n_images]\n\nwith tf.Session(graph=g) as sess:\n saver.restore(sess, save_path='./autoencoder.ckpt')\n decoded = sess.run('decoding:0', feed_dict={'inputs:0': test_images})\n\nfor i in range(n_images):\n for ax, img in zip(axes, [test_images, decoded]):\n ax[i].imshow(img[i].reshape((image_width, image_width)), cmap='binary')", "INFO:tensorflow:Restoring parameters from ./autoencoder.ckpt\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
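The autoencoder record above names its 7x7x4 bottleneck layer 'encoding' and saves './autoencoder.ckpt', so the compressed codes for the 784-pixel test images can presumably be pulled out much like the decoding step shown there; a TensorFlow 1.x style sketch that reuses the g, saver, encode and mnist objects from the record:

import tensorflow as tf

with tf.Session(graph=g) as sess:
    saver.restore(sess, save_path='./autoencoder.ckpt')
    test_images = mnist.test.images[:15]
    # `encode` is the second max-pool defined in the graph; the codes come out as (15, 7, 7, 4)
    codes = sess.run(encode, feed_dict={'inputs:0': test_images})
    print(codes.shape)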
4a421cbb4c66a02337ca9b458a61124df98e5f16
47,143
ipynb
Jupyter Notebook
1 channel.ipynb
suyash091/EEG-MULTIPLE-CHANNEL
8b3685f839ac4c1dc56427ea603bd097053d29e0
[ "Apache-2.0" ]
null
null
null
1 channel.ipynb
suyash091/EEG-MULTIPLE-CHANNEL
8b3685f839ac4c1dc56427ea603bd097053d29e0
[ "Apache-2.0" ]
null
null
null
1 channel.ipynb
suyash091/EEG-MULTIPLE-CHANNEL
8b3685f839ac4c1dc56427ea603bd097053d29e0
[ "Apache-2.0" ]
null
null
null
58.345297
1,678
0.57315
[ [ [ "<a href=\"https://colab.research.google.com/github/suyash091/EEG-MULTIPLE-CHANNEL/blob/master/1%20channel.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "\n\n```\n# Mindwave | 1 channel | 512 sampling rate\n```\n\n", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "from os.path import isfile\nimport zipfile\nimport numpy as np\nimport random\n\ndataset_path = './data/MW.zip'\n\ndef get_dataset_file():\n if not isfile(dataset_path):\n import urllib\n origin = (\n 'http://www.mindbigdata.com/opendb/MindBigData-MW-v1.0.zip'\n )\n print('Downloading data from %s' % origin)\n urlretrieve(origin, dataset_path)\n return open(dataset_path, 'rb')\n\ndef get_datasets():\n f = get_dataset_file()\n zf = zipfile.ZipFile(f)\n \n data = [ line for line in zf.open('MW.txt')]\n entire_dataset = []\n current_event = np.zeros(1024 * 1 + 2)\n \n print('Reading data file')\n i = 0\n\n for l in data:\n #print(str(l))\n ids, event, device, channel, code, size, data = l.decode(\"utf-8\").split('\\t')\n\n signals = np.array([float(val) for val in data.split(',')])\n \n current_event[1+ i*1024:1+ i*1024 + min(len(signals), 1024)] = signals[:1024]\n i += 1\n\n if i == 1: # we assume all channels from an event are in sequence\n current_event[-1] = int(code)\n current_event[0] = min(len(signals), 1024)\n\n entire_dataset.append(current_event)\n current_event = np.zeros(1024 * 1 + 2)\n i = 0\n\n random.seed(111) # deterministic\n random.shuffle(entire_dataset)\n\n entire_dataset = np.array(entire_dataset)\n return entire_dataset[:60000], entire_dataset[60000:]\n\ndef split_into_subsequences(data, n_sequences, length):\n output = np.zeros((data.shape[0]*n_sequences, length*14+1))\n for i in range(data.shape[0]):\n if n_sequences == 1:\n steps = 0\n else:\n steps = (data[i, 0] - length) / (n_sequences - 1)\n\n for j in range(n_sequences):\n output[i*n_sequences+j,:length] = data[i, j*steps: j*steps+length]\n output[i*n_sequences+j,length:length*2] = data[i, j*steps+512: j*steps+512+length]\n output[i*n_sequences+j,length*2:length*3] = data[i, j*steps+512*2: j*steps+512*2+length]\n output[i*n_sequences+j,length*3:length*4] = data[i, j*steps+512*3: j*steps+512*3+length]\n output[i*n_sequences+j,-1] = data[i, -1]\n\n return output", "_____no_output_____" ], [ "\nimport numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix\nfrom urllib.request import urlretrieve\n#from sklearn.gaussian_process import GaussianProcess", "_____no_output_____" ], [ "(25/2050)*3586", "_____no_output_____" ], [ "train, test = get_datasets()\nprint(train.shape)\nprint(test.shape)", "Downloading data from http://www.mindbigdata.com/opendb/MindBigData-MW-v1.0.zip\nReading data 
file\n(60000, 1026)\n(7635, 1026)\n" ], [ "train[0].shape", "_____no_output_____" ], [ "len(train[-1])", "_____no_output_____" ], [ "#train = train[:int(len(train)/3),:]\n#test = test[:int(len(test)/3), :]\n\n#train[train[:,-1] >= 0,-1] = 0\n#test[test[:,-1] >= 0,-1] = 0\n\n#train[:,-1] = train[:,-1] + 1\n#test[:,-1] = test[:,-1] + 1\n\ntrain_target = train[:,-1]\ntest_target = test[:,-1]\n\nDECISIONBOUNDARY = 0", "_____no_output_____" ], [ "#FastFourierTransformation apply to the sets using the 4 sensors\n\ntrain_f = np.zeros(train.shape)\n\nfor i in range(len(train_f)):\n length = train[i][0]\n train_f[i][0] = train[i][0]\n train_f[i][-1] = train[i][-1]\n \n for j in range(1):\n train_f[int(i)][int(1+1024*j):int(1+1024*j+length)] = np.abs(np.fft.fft(train[int(i)][int(1+1024*j):int(1+1024*j+length)]))\n \ntest_f = np.zeros(test.shape)\n\nfor i in range(len(test_f)):\n length = test[i,0]\n test_f[i,0] = test[i,0]\n test_f[i,-1] = test[i,-1]\n \n for j in range(1):\n test_f[i][int(1+1024*j):int(1+1024*j+length)] = np.abs(np.fft.fft(test[i][int(1+1024*j):int(1+1024*j+length)]))", "_____no_output_____" ], [ "#PCA with 25 components\npca = PCA(n_components = 100)\ntrain_principal = pca.fit_transform(train_f)\ntest_principal = pca.transform(test_f)", "_____no_output_____" ], [ "test_principal.shape", "_____no_output_____" ], [ "#KNN\nerreur = []\nx = np.arange(1,20,1)\nfor n in (x):\n # train set\n neigh = KNeighborsClassifier(n_neighbors=n)\n neigh.fit(train_principal, train[:,-1])\n\n #test set\n total_correct = 0\n for i in range(test_principal.shape[0]):\n if neigh.predict(test_principal[i].reshape(1, -1))[0] == test[i,-1]:\n total_correct += 1\n\n erreur.append(float(total_correct) / test_principal.shape[0] * 100)\n print('Percentage correct %d:' % n, float(total_correct) / test_principal.shape[0] * 100)", "Pourcentage correct 1: 27.557301899148655\nPourcentage correct 2: 27.2822527832351\nPourcentage correct 3: 27.622789783889978\nPourcentage correct 4: 27.570399476096924\nPourcentage correct 5: 27.753765553372627\nPourcentage correct 6: 27.295350360183367\nPourcentage correct 7: 27.518009168303863\n" ], [ "#Import models from scikit learn module:\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import KFold #For K-fold cross validation\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn import metrics\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc, precision_score, confusion_matrix, explained_variance_score, max_error, mean_absolute_error, mean_squared_error\nfrom sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso", "_____no_output_____" ], [ "\n#model = RandomForestClassifier(n_estimators=100)\n#model.fit(train_principal,train[:,-1])\n\n\npredictions = model.predict(test_principal)\nprc=precision_score(predictions,test[:,-1], average=None)\ncfm=confusion_matrix(predictions,test[:,-1])\naccuracy = metrics.accuracy_score(predictions,test[:,-1])\nprint(prc,cfm,accuracy)\n#print(explained_variance_score(predictions,test))\n#print(max_error(predictions,test))\n#print(mean_absolute_error(predictions, test, multioutput='raw_values'))\n#print(mean_squared_error(predictions, test, multioutput='raw_values'))", "[0.99799062 0.12055641 0.09937888 0.10651828 0.08085809 0.08417508\n 0.11383538 0.09105691 0.09166667 0.07565789 0.06210191] [[1490 8 12 14 7 11 6 11 13 4 12]\n [ 1 78 81 89 73 65 
70 68 68 84 81]\n [ 1 59 64 56 80 70 63 59 61 73 54]\n [ 0 72 86 67 74 67 69 78 68 77 78]\n [ 0 63 78 56 49 55 70 56 62 56 62]\n [ 1 70 56 52 52 50 49 67 37 51 48]\n [ 0 71 50 72 72 58 65 72 78 66 71]\n [ 0 65 58 57 46 58 51 56 59 54 66]\n [ 0 57 56 54 68 66 43 58 55 46 56]\n [ 0 53 67 52 41 53 46 44 50 46 61]\n [ 0 51 36 60 44 41 39 46 49 51 39]] 0.2696791093647675\n" ], [ "model = LogisticRegression()\nmodel.fit(train_principal,train[:,-1])\n\n\npredictions = model.predict(test_principal)\nprc=precision_score(predictions,test[:,-1], average=None)\ncfm=confusion_matrix(predictions,test[:,-1])\naccuracy = metrics.accuracy_score(predictions,test[:,-1])\nprint(prc,cfm,accuracy)", "/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n" ], [ "model = LinearRegression()\n#model.fit(train_principal,train[:,-1])\n\n\n#predictions = model.predict(test_principal)\n#prc=precision_score(predictions,test[:,-1], average=None)\nprint(explained_variance_score(predictions,test[:,-1]))\nprint(max_error(predictions,test[:,-1]))\nprint(mean_absolute_error(predictions, test[:,-1], multioutput='raw_values'))\nprint(mean_squared_error(predictions, test[:,-1], multioutput='raw_values'))", "0.046288236316122555\n9.0\n[2.55769483]\n[12.22946955]\n" ], [ "model = RandomForestRegressor()\nmodel.fit(train_principal,train[:,-1])\n\n\npredictions = model.predict(test_principal)\nprint(explained_variance_score(predictions,test[:,-1]))\nprint(max_error(predictions,test[:,-1]))\nprint(mean_absolute_error(predictions, test[:,-1], multioutput='raw_values'))\nprint(mean_squared_error(predictions, test[:,-1], multioutput='raw_values'))", "/usr/local/lib/python3.6/dist-packages/sklearn/ensemble/forest.py:245: FutureWarning: The default value of n_estimators will change from 10 in version 0.20 to 100 in 0.22.\n \"10 in version 0.20 to 100 in 0.22.\", FutureWarning)\n" ], [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nimport numpy as np\n\n!pip install scipy\nfrom scipy import signal", "_____no_output_____" ], [ "model = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.Dense(1024, input_dim=100, activation='relu'))\nmodel.add(tf.keras.layers.Dense(4096, activation='relu'))\nmodel.add(tf.keras.layers.Dropout(0.25))\nmodel.add(tf.keras.layers.Dense(4096, activation='relu'))\nmodel.add(tf.keras.layers.Dropout(0.25))\nmodel.add(tf.keras.layers.Dense(4096, activation='relu'))\nmodel.add(tf.keras.layers.Dense(11, activation='softmax'))\nalpha = 2e-3\nbatch_size = 256\nnum_epochs = 3000\nmodel.compile(\n optimizer=tf.train.GradientDescentOptimizer(learning_rate=alpha, ),\n loss=tf.keras.losses.sparse_categorical_crossentropy,\n metrics=['sparse_categorical_accuracy', 'accuracy']\n )\n\nHistory = model.fit(\n x = train_principal,\n y = train[:,-1],\n batch_size = batch_size,\n epochs=num_epochs,\n #validation_data = (x_valid, y_valid),\n #callbacks = [checkPointer,tensorBoard]\n )\npredictions = model.predict(test_principal)", "Train on 60000 samples\nEpoch 1/3000\n60000/60000 [==============================] - 8s 139us/sample - loss: nan - sparse_categorical_accuracy: 0.0813 - 
acc: 0.0813\nEpoch 2/3000\n60000/60000 [==============================] - 8s 136us/sample - loss: nan - sparse_categorical_accuracy: 0.0814 - acc: 0.0814\nEpoch 3/3000\n60000/60000 [==============================] - 8s 136us/sample - loss: nan - sparse_categorical_accuracy: 0.0814 - acc: 0.0814\nEpoch 4/3000\n60000/60000 [==============================] - 8s 137us/sample - loss: nan - sparse_categorical_accuracy: 0.0814 - acc: 0.0814\nEpoch 5/3000\n60000/60000 [==============================] - 8s 137us/sample - loss: nan - sparse_categorical_accuracy: 0.0814 - acc: 0.0814\nEpoch 6/3000\n16640/60000 [=======>......................] - ETA: 5s - loss: nan - sparse_categorical_accuracy: 0.0802 - acc: 0.0802" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
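Two small points the EEG record above hints at but does not show: the manual per-sample KNN accuracy loop can be collapsed into score(), and standardising the PCA projections is one commonly tried remedy for the nan losses seen in the Keras run. A sketch reusing the train/test and train_principal/test_principal arrays built in the record:

from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

# Standardise the PCA projections (fit on the training split only).
scaler = StandardScaler()
train_scaled = scaler.fit_transform(train_principal)
test_scaled = scaler.transform(test_principal)

for n in range(1, 20):
    knn = KNeighborsClassifier(n_neighbors=n)
    knn.fit(train_scaled, train[:, -1])
    # score() returns mean accuracy, replacing the row-by-row predict loop.
    print('k=%d accuracy: %.3f' % (n, knn.score(test_scaled, test[:, -1])))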
4a423b759a9eaceb0e3d8e6ff6e332caabcd58d7
4,464
ipynb
Jupyter Notebook
notebooks/parameter_tuning_ex_03.ipynb
Imarcos/scikit-learn-mooc
69a7a7e891c5a4a9bce8983d7c92326674fda071
[ "CC-BY-4.0" ]
null
null
null
notebooks/parameter_tuning_ex_03.ipynb
Imarcos/scikit-learn-mooc
69a7a7e891c5a4a9bce8983d7c92326674fda071
[ "CC-BY-4.0" ]
null
null
null
notebooks/parameter_tuning_ex_03.ipynb
Imarcos/scikit-learn-mooc
69a7a7e891c5a4a9bce8983d7c92326674fda071
[ "CC-BY-4.0" ]
null
null
null
27.386503
85
0.604839
[ [ [ "# 📝 Exercise M3.02\n\nThe goal is to find the best set of hyperparameters which maximize the\ngeneralization performance on a training set.\n\nHere again with limit the size of the training set to make computation\nrun faster. Feel free to increase the `train_size` value if your computer\nis powerful enough.", "_____no_output_____" ] ], [ [ "\nimport numpy as np\nimport pandas as pd\n\nadult_census = pd.read_csv(\"../datasets/adult-census.csv\")\n\ntarget_name = \"class\"\ntarget = adult_census[target_name]\ndata = adult_census.drop(columns=[target_name, \"education-num\"])\nfrom sklearn.model_selection import train_test_split\n\ndata_train, data_test, target_train, target_test = train_test_split(\n data, target, train_size=0.2, random_state=42)", "_____no_output_____" ] ], [ [ "In this exercise, we will progressively define the classification pipeline\nand later tune its hyperparameters.\n\nOur pipeline should:\n* preprocess the categorical columns using a `OneHotEncoder` and use a\n `StandardScaler` to normalize the numerical data.\n* use a `LogisticRegression` as a predictive model.\n\nStart by defining the columns and the preprocessing pipelines to be applied\non each group of columns.", "_____no_output_____" ] ], [ [ "from sklearn.compose import make_column_selector as selector\n\n# Write your code here.", "_____no_output_____" ], [ "from sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\n# Write your code here.", "_____no_output_____" ] ], [ [ "Subsequently, create a `ColumnTransformer` to redirect the specific columns\na preprocessing pipeline.", "_____no_output_____" ] ], [ [ "from sklearn.compose import ColumnTransformer\n\n# Write your code here.", "_____no_output_____" ] ], [ [ "Assemble the final pipeline by combining the above preprocessor\nwith a logistic regression classifier. Force the maximum number of\niterations to `10_000` to ensure that the model will converge.", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\n\n# Write your code here.", "_____no_output_____" ] ], [ [ "Use `RandomizedSearchCV` with `n_iter=20` to find the best set of\nhyperparameters by tuning the following parameters of the `model`:\n\n- the parameter `C` of the `LogisticRegression` with values ranging from\n 0.001 to 10. You can use a log-uniform distribution\n (i.e. `scipy.stats.loguniform`);\n- the parameter `with_mean` of the `StandardScaler` with possible values\n `True` or `False`;\n- the parameter `with_std` of the `StandardScaler` with possible values\n `True` or `False`.\n\nOnce the computation has completed, print the best combination of parameters\nstored in the `best_params_` attribute.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import loguniform\n\n# Write your code here.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
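One way the '# Write your code here.' stubs in the exercise record above could be filled in, following the steps its own markdown lays out; this is a sketch of a possible solution, not the course's official one, and it assumes the data, data_train and target_train variables created in the record's first code cell:

from sklearn.compose import ColumnTransformer, make_column_selector as selector
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import loguniform

# Split column names by dtype: object columns are treated as categorical.
categorical_columns = selector(dtype_include=object)(data)
numerical_columns = selector(dtype_exclude=object)(data)

preprocessor = ColumnTransformer([
    ("cat", OneHotEncoder(handle_unknown="ignore"), categorical_columns),
    ("num", StandardScaler(), numerical_columns),
])
model = make_pipeline(preprocessor, LogisticRegression(max_iter=10_000))

# Parameter names follow the pipeline step names created by make_pipeline.
param_distributions = {
    "logisticregression__C": loguniform(0.001, 10),
    "columntransformer__num__with_mean": [True, False],
    "columntransformer__num__with_std": [True, False],
}
search = RandomizedSearchCV(model, param_distributions=param_distributions,
                            n_iter=20, random_state=42, n_jobs=2)
search.fit(data_train, target_train)
print(search.best_params_)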
4a4245555c5f97b89824ef1ddb0afadb2ee7bf13
575,784
ipynb
Jupyter Notebook
During_experiment/20220303-check_drift_merfish.ipynb
shiwei23/Chromatin_Analysis_Scripts
909b9b81de8fcf04dd4c39ac21a84864ce2003ff
[ "MIT" ]
null
null
null
During_experiment/20220303-check_drift_merfish.ipynb
shiwei23/Chromatin_Analysis_Scripts
909b9b81de8fcf04dd4c39ac21a84864ce2003ff
[ "MIT" ]
null
null
null
During_experiment/20220303-check_drift_merfish.ipynb
shiwei23/Chromatin_Analysis_Scripts
909b9b81de8fcf04dd4c39ac21a84864ce2003ff
[ "MIT" ]
null
null
null
141.43552
173,386
0.813057
[ [ [ "%run \"..\\Startup_py3.py\"\nsys.path.append(r\"..\\..\\..\\Documents\")\n\nimport ImageAnalysis3 as ia\n%matplotlib notebook\n\nfrom ImageAnalysis3 import *\nprint(os.getpid())\n\nimport re", "49100\n" ], [ "data_folder = r'I:\\MERFISH_Data\\20220303-P_brain_M1_nonclear_adaptors'\ndax_files =[os.path.join(data_folder, _fl) for _fl in os.listdir(data_folder) \n if _fl.split(os.extsep)[-1] == 'dax' ]\n\nsel_dax_files = []\nfor _fl in dax_files:\n _fov_id, _hyb = re.findall(r\".*_([0-9]+)_([0-9]+).dax\", _fl)[0]\n if _fov_id == '1':\n sel_dax_files.append(_fl)\n ", "_____no_output_____" ], [ "sel_dax_files", "_____no_output_____" ], [ "tar_im = ia.visual_tools.DaxReader(sel_dax_files[0]).loadAll()\nref_im = ia.visual_tools.DaxReader(sel_dax_files[-1]).loadAll()", "_____no_output_____" ], [ "bit3_im = tar_im[0::3]", "_____no_output_____" ], [ "ia.visual_tools.imshow_mark_3d_v2([bit1_im, im_0225_bit3,im_0208_bit3])", "_____no_output_____" ], [ "im_0225 = ia.visual_tools.DaxReader(r\"\\\\10.245.74.158\\Chromatin_NAS_0\\20220225-P_brain_M1_nonclear\\H1M2\\Conv_zscan_001.dax\").loadAll()\nim_0225_bit3 = im_0225[np.array([0,2,4,6,8,10,12,14,18,20,22,24,26])]", "_____no_output_____" ], [ "im_0208 = ia.visual_tools.DaxReader(r\"\\\\10.245.74.158\\Chromatin_NAS_0\\20220208-P_brain_M1_nonclear\\H1M2\\Conv_zscan_001.dax\").loadAll()\nim_0208_bit3 = im_0208[0::3]", "_____no_output_____" ], [ "ia.visual_tools.imshow_mark_3d_v2([ref_im[np.arange(2,250,20)], ref_im[np.arange(4,250,20)]])", "_____no_output_____" ], [ "from skimage.registration import phase_cross_correlation", "_____no_output_____" ], [ "phase_cross_correlation(tar_bead_im[8], ref_bead_im[32])", "_____no_output_____" ], [ "# check decoding\nimport h5py\nfeature_file = r'I:\\MERFISH_Analysis\\Cellpose\\20220303-P_brain_M1_nonclear_adaptors\\CellPoseSegment\\features\\feature_data_0.hdf5'\nwith h5py.File(feature_file, 'r') as _f:\n print(_f['labeldata'].keys())\n _labels = _f['labeldata']['label3D'][:]", "<KeysViewHDF5 ['label3D']>\n" ], [ "ia.visual_tools.imshow_mark_3d_v2([_labels])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
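phase_cross_correlation is only applied to single 2-D frames in the MERFISH record above; a sketch of estimating one sub-pixel 3-D drift between the two bead stacks instead (tar_bead_im and ref_bead_im are the stacks the record compares frame-by-frame, and upsample_factor=10 is an arbitrary choice):

import numpy as np
from skimage.registration import phase_cross_correlation

# Register the full 3-D bead stacks in one call rather than frame pairs.
shift, error, _ = phase_cross_correlation(np.asarray(ref_bead_im, dtype=np.float32),
                                          np.asarray(tar_bead_im, dtype=np.float32),
                                          upsample_factor=10)
print("estimated drift (z, y, x):", shift, "registration error:", error)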
4a4246be2bcdcce1f0c2e4b6ece12a504dfd085e
18,330
ipynb
Jupyter Notebook
02 -data pre-processing.ipynb
agonfo/u03_capstone_starbucks
482d57aed19aeed13c8c158f5c50345ea8f56df0
[ "FTL", "CNRI-Python" ]
null
null
null
02 -data pre-processing.ipynb
agonfo/u03_capstone_starbucks
482d57aed19aeed13c8c158f5c50345ea8f56df0
[ "FTL", "CNRI-Python" ]
null
null
null
02 -data pre-processing.ipynb
agonfo/u03_capstone_starbucks
482d57aed19aeed13c8c158f5c50345ea8f56df0
[ "FTL", "CNRI-Python" ]
null
null
null
37.030303
163
0.540971
[ [ [ "import pandas as pd\nimport numpy as np\nimport math\nimport json\n\n%matplotlib inline\n\n# read in the json files\nportfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)\nprofile = pd.read_json('data/profile.json', orient='records', lines=True)\ntranscript = pd.read_json('data/transcript.json', orient='records', lines=True)", "_____no_output_____" ], [ "def id_mapper(df , column):\n '''\n Map a column in a DataFrame and create a dict to change its value to a sequence (1,2,3...) for easier use. \n \n INPUT:\n df - (DataFrame) \n column - (str) name of the column to create dictionary\n OUTPUT:\n coded_dict - (dict) A dictionary with the given column values as key and the 'new' encoded sequence as value\n ''' \n coded_dict = dict()\n cter = 1\n \n for x in df[column]:\n if x not in coded_dict:\n coded_dict[x] = cter\n cter+=1\n \n return coded_dict", "_____no_output_____" ], [ "def offer_type_mapper(df=portfolio):\n '''\n Create a data frame to relate each offer with offer type\n \n IMPUT: df - (DataFrame) - portfolio as default dataframe \n OUTPUT: \n offer_type - (DataFrame) - relation between offer id and type of offer \n coded_dict - (dict) - relation between type offer sequence and real type offer (see id_mapper)\n \n ''' \n # get sequence to name type of offers\n coded_dict = id_mapper(df, 'offer_type')\n \n coded_df = df.replace({\"offer_type\":coded_dict})\n offer_type = coded_df[['id' , 'offer_type' , 'duration']]\n \n return offer_type , coded_dict", "_____no_output_____" ], [ "def arrange_events(df , offer_df):\n '''\n Rearange the dataframe -transcript- by merging each offer into one row, creating columns for each event and time as values.\n \n IMPUT: df (DataFrame) - default Dataframe is transcript\n OUTPUT: df (DataFrame) - modified dataFrame\n '''\n \n # read dictionary from 'value' feature and create columns \n df = pd.concat([df.drop(['value'], axis=1), df['value'].apply(pd.Series)], axis=1)\n \n # merge offer id and offer_id columns\n df['offer id'] = df['offer id'].combine_first(df['offer_id'])\n df = df.drop(columns = ['offer_id'])\n \n # split into three dataFrames and then merge rows with transaction and offer completed in the same time\n df1 = df[df['event'] == 'offer completed'][['person' , 'event' , 'time' , 'offer id' , 'reward']]\n df2 = df[df['event'] == 'transaction'][['person' , 'time' , 'amount']]\n df3 = df[df['event'] != 'offer completed']\n df3 = df3[df3['event'] != 'transaction'][['person' , 'event' , 'time' , 'offer id']] \n # merge the two dataFrames on time\n df_trans_completed = pd.merge(df1, df2, how='outer', on=['person', 'time'])\n # merge with main dataFrames\n df = pd.merge(df3, df_trans_completed, how='outer', on=['person', 'time' , 'event' , 'offer id'])\n \n # create columns of type of event with the value of time\n df = pd.concat([df, df.pivot_table(values='time', index=df.index, columns='event', aggfunc='first')], axis=1, sort=False)\n \n # fill NaN values in the offer id feature as 'no offer' to keep track of the transactions without an offer\n df['offer id'] = df['offer id'].fillna(value = 'no offer')\n df = df.rename(columns={'person':'user id'})\n\n # merge with offer_type dataframe\n df = pd.merge(df, offer_df, how='outer', on=['offer id'])\n df.rename(columns={'offer_type': 'offer type'} , inplace = True) \n \n return df", "_____no_output_____" ], [ "def fill_amount(df):\n\n dict_values = {}\n index_lst = []\n\n a = df.loc[(df['event'] == 'offer received') & (df['offer type'] == 2)]\n\n for i in 
range(a.shape[0]): \n b = df.loc[(df['time'] >= a['time'].values[i]) & (df['time'] <= (a['time'].values[i] + a['duration'].values[i])) & (df['offer id'] == 'no offer')]\n if (b.shape[0] != 0):\n index_lst.append(b.index[0])\n c = b['amount'].to_list()[0]\n dict_values.update({a.index.to_list()[i]: c})\n else:\n dict_values.update({a.index.to_list()[i]: np.nan})\n\n df[\"amount\"].fillna(dict_values, inplace=True)\n df.drop(index=index_lst, axis=0 , inplace=True)\n\n return df", "_____no_output_____" ], [ "def offer_merge(df):\n \n '''\n For each user, map and select rows of a singular offer and merge them into one.\n \n IMPUT: df - (DataFrame) - modified transcription as default dataframe.\n OUTPUT: df - (DataFrame) - rearange user data where each offer is in one row. \n ''' \n offers_received_lst = df['offer id'].unique().tolist()\n total_offers_received = df['offer received'].count()\n temp_df = df.head(0)\n user_id = df['user id'].unique()[0]\n \n \n for offer in offers_received_lst:\n \n #create data frame of an offer\n offer_df = df[df['offer id'] == offer].copy() \n # check if the same offer has been receved more than one time if so, create flags to treat each offer independently.\n if offer_df['offer received'].count() > 1:\n cter = 0\n flag = []\n \n #create list to flag each offer \n for index, row in offer_df.iterrows():\n if not np.isnan(row['offer received']):\n cter+=1\n flag.append(cter) \n else:\n flag.append(cter) \n offer_df['flag'] = flag \n offer_df = offer_df.groupby(['flag' , 'offer id']).mean().reset_index().drop(columns='flag')\n\n else:\n offer_df = offer_df.groupby('offer id').mean().reset_index()\n \n temp_df = temp_df.append(offer_df , sort=False)\n \n temp_df = temp_df.reset_index()\n temp_df = temp_df.drop(columns=['index'])\n \n df = temp_df\n \n return df , user_id", "_____no_output_____" ], [ "def check_completed_offers(df , user_id):\n '''\n For a given user, checks and drop transactions that were not influenced by an offer\n \n IMPUT: df - (DataFrame)\n OUTPUT: df - (DataFrame) - rearange data \n '''\n \n # fill NaN values with 0 for offers that were not completed\n df[['reward' , 'amount']] = df[['reward' , 'amount']].fillna(value = 0)\n\n # add column with the type of offer \n # df = pd.merge(df, map_offer_type, how='left', left_on=['offer id'] , right_on=['id'])\n \n # fill with offer type 4, for transactions that are not related with an offer\n df['offer type'] = df['offer type'].fillna(value = 4)\n df['user id'] = df['user id'].fillna(value = user_id)\n \n # check if an offer was completed before it was viewed or if it was not viewed, if so, drop it (the offer did not influenciate the transaction)\n for row in range(len(df)):\n if df.loc[row]['offer viewed'] > df.loc[row]['offer completed']:\n df = df.drop([row])\n elif np.isnan(df.loc[row]['offer viewed']) and not np.isnan(df.loc[row]['offer completed']):\n df = df.drop([row])\n else:\n pass\n \n return df", "_____no_output_____" ], [ "def get_events(df):\n '''\n for each user rearange transactions influenced by an offer\n and for each type of offer get:\n transaction amount, number of offers recived, number of offers viewed and number of offers completed\n note: it takes some time to process\n \n IMPUT: df - (dtaFrame)\n OUTPUT:\n amount_lst (lst) - list of dictionaries that contains amount spend and type of offer for each user\n offers_lst (lst) - list of dictionaries that contains number of offers recived for each type\n offers_view_lst (lst) - list of dictionaries that contains number of offers 
viewed for each type\n offers_completed_lst (lst) - list of dictionaries that contains number of offers completed for each type\n \n '''\n \n user_id_lst = profile['id'].tolist()\n amount_lst = []\n offers_lst = []\n offers_view_lst = []\n offers_completed_lst = []\n \n for user in user_id_lst:\n \n user_events = df[df['user id'] == user]\n user_fill_amount = fill_amount(user_events)\n user_events, user_id = offer_merge(user_fill_amount)\n user_events = check_completed_offers (user_events , user_id)\n\n amount = {'user id' : user}\n offers = {'user id' : user}\n offers_view = {'user id' : user}\n offers_completed = {'user id' : user}\n\n amount.update(user_events.groupby('offer type').mean()['amount'].to_dict())\n offers.update(user_events.groupby('offer type').count()['offer id'].to_dict())\n offers_view.update(user_events.groupby('offer type').count()['offer viewed'].to_dict())\n offers_completed.update(user_events.groupby('offer type').count()['offer completed'].to_dict())\n\n amount_lst.append(amount)\n offers_lst.append(offers)\n offers_view_lst.append(offers_view)\n offers_completed_lst.append(offers_completed)\n \n return amount_lst , offers_lst , offers_view_lst , offers_completed_lst ", "_____no_output_____" ], [ "def df_from_lst (lst):\n '''\n create dataframe from a list of dictionaries\n IMPUT: lst (list)\n OUTPUT: DF (dataFrame)\n '''\n \n df = pd.DataFrame(lst).drop(columns=4)\n df.fillna(value = 0 , inplace = True)\n \n return df ", "_____no_output_____" ] ], [ [ "# main", "_____no_output_____" ] ], [ [ "# split gender into dummies columns\nprofile_mod = pd.concat([profile , pd.get_dummies(profile['gender'])],axis=1)\nprofile_mod.drop(['gender' , 'became_member_on'],axis=1, inplace=True)", "_____no_output_____" ], [ "#map offer type \nmap_offer_type, dict_offer_type = offer_type_mapper(portfolio)\n#transform days\nmap_offer_type['duration'] = map_offer_type['duration'] * 24\nmap_offer_type.rename(columns={'id': 'offer id'} , inplace = True)\n\nmap_offer_type", "_____no_output_____" ], [ "# arrange transcript df\narrange_transcript = arrange_events(transcript , map_offer_type)", "_____no_output_____" ], [ "#note: this will take some time to execute, you can grab a coffee ;)\namount_lst , offers_lst , offers_view_lst , offers_completed_lst = get_events(arrange_transcript)", "_____no_output_____" ], [ "# Amount data\namount_type = pd.DataFrame(amount_lst)\namount_type.rename(columns={1: 'type 1', 2: 'type 2', 3: 'type 3' , 4: 'type 4'} , inplace = True)", "_____no_output_____" ], [ "user_offers = df_from_lst (offers_lst)\noffers_viewed = df_from_lst (offers_view_lst)\noffers_completed = df_from_lst (offers_completed_lst)\n\nuser_offers.rename(columns={1: 'offers type 1', 2: 'offers type 2', 3: 'offers type 3'} , inplace = True)\noffers_viewed.rename(columns={1: 'viewed type 1', 2: 'viewed type 2', 3: 'viewed type 3'} , inplace = True)\noffers_completed.rename(columns={1: 'completed type 1', 2: 'completed type 2', 3: 'completed type 3'} , inplace = True)", "_____no_output_____" ], [ "# merge data frames\namount_offer = pd.merge(amount_type, user_offers, how='inner' , on=\"user id\")\namount_offer = pd.merge(amount_offer, offers_viewed, how='inner' , on=\"user id\")\namount_offer = pd.merge(amount_offer, offers_completed, how='inner' , on=\"user id\")", "_____no_output_____" ], [ "amount_offer", "_____no_output_____" ], [ "#split into 3 datasets\namount_type_1 = amount_offer[['user id' , 'type 1' , 'offers type 1' , 'viewed type 1' , 'completed type 1']].copy() # 'dif 
1'\namount_type_2 = amount_offer[['user id' , 'type 2' , 'offers type 2' , 'viewed type 2' , 'completed type 2']].copy()\namount_type_3 = amount_offer[['user id' , 'type 3' , 'offers type 3' , 'viewed type 3' , 'completed type 3']].copy()\namount_type_4 = amount_offer[['user id' , 'type 4']].copy()", "_____no_output_____" ], [ "amount_type_1 = pd.merge(profile_mod, amount_type_1, how='inner' , left_on=\"id\" , right_on=\"user id\")\namount_type_1.drop(['id'],axis=1, inplace=True)\n\namount_type_2 = pd.merge(profile_mod, amount_type_2, how='inner' , left_on=\"id\" , right_on=\"user id\")\namount_type_2.drop(['id'],axis=1, inplace=True)\n\namount_type_3 = pd.merge(profile_mod, amount_type_3, how='inner' , left_on=\"id\" , right_on=\"user id\")\namount_type_3.drop(['id'],axis=1, inplace=True)\n\namount_type_4 = pd.merge(profile_mod, amount_type_4, how='inner' , left_on=\"id\" , right_on=\"user id\")\namount_type_4.drop(['id'],axis=1, inplace=True)", "_____no_output_____" ], [ "# clear NaN for each data Set, users that did not recive that type of offer\namount_type_1.dropna(axis=0 , inplace=True)\namount_type_1.drop(amount_type_1[amount_type_1['viewed type 1'] == 0].index , inplace=True)\n\namount_type_2.dropna(axis=0 , inplace=True)\namount_type_2.drop(amount_type_2[amount_type_2['viewed type 2'] == 0].index , inplace=True)\n\namount_type_3.dropna(axis=0 , inplace=True)\namount_type_3.drop(amount_type_3[amount_type_3['viewed type 3'] == 0].index , inplace=True)\n\namount_type_4.dropna(axis=0 , inplace=True)", "_____no_output_____" ], [ "# save dataFrames to CSV files\namount_type_1.to_csv('data/amount_type_1.csv' , index=False)\namount_type_2.to_csv('data/amount_type_2.csv' , index=False)\namount_type_3.to_csv('data/amount_type_3.csv' , index=False)\namount_type_4.to_csv('data/amount_type_4.csv' , index=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
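For reference, the id_mapper and offer_type_mapper helpers defined in the Starbucks record above can be exercised on their own; a short sketch using the portfolio frame loaded in the first cell (the exact integer codes depend on the row order of portfolio.json):

# Map offer_type strings to small integer codes, as done throughout the record.
offer_type_codes = id_mapper(portfolio, 'offer_type')
print(offer_type_codes)        # e.g. {'bogo': 1, 'informational': 2, 'discount': 3}

# offer_type_mapper wraps the same mapping and keeps the id / offer_type / duration columns.
map_offer_type, dict_offer_type = offer_type_mapper(portfolio)
print(map_offer_type.head())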
4a424c93bfb3a8da851bd044893e9ea689236343
22,367
ipynb
Jupyter Notebook
02_usecases/archive/02_Object_Detection.ipynb
MarcusFra/workshop
83f16d41f5e10f9c23242066f77a14bb61ac78d7
[ "Apache-2.0" ]
2,327
2020-03-01T09:47:34.000Z
2021-11-25T12:38:42.000Z
02_usecases/archive/02_Object_Detection.ipynb
MarcusFra/workshop
83f16d41f5e10f9c23242066f77a14bb61ac78d7
[ "Apache-2.0" ]
209
2020-03-01T17:14:12.000Z
2021-11-08T20:35:42.000Z
02_usecases/archive/02_Object_Detection.ipynb
MarcusFra/workshop
83f16d41f5e10f9c23242066f77a14bb61ac78d7
[ "Apache-2.0" ]
686
2020-03-03T17:24:51.000Z
2021-11-25T23:39:12.000Z
29.782956
262
0.542809
[ [ [ "# Object and Scene Detection using Amazon Rekognition", "_____no_output_____" ], [ "This notebook provides a walkthrough of [object detection API](https://docs.aws.amazon.com/rekognition/latest/dg/labels.html) in Amazon Rekognition to identify objects.", "_____no_output_____" ] ], [ [ "import boto3\nfrom IPython.display import HTML, display, Image as IImage\nfrom PIL import Image, ImageDraw, ImageFont\nimport time\nimport os", "_____no_output_____" ], [ "import sagemaker\nimport boto3\n\nsagemaker_session = sagemaker.Session()\nrole = sagemaker.get_execution_role()\nbucket = sagemaker_session.default_bucket()\nregion = boto3.Session().region_name", "_____no_output_____" ], [ "rekognition = boto3.client('rekognition')\ns3 = boto3.client('s3')", "_____no_output_____" ], [ "!mkdir -p ./tmp\ntemp_folder = 'tmp/'", "_____no_output_____" ] ], [ [ "# Detect Objects in Image", "_____no_output_____" ] ], [ [ "imageName = 'content-moderation/media/cars.png'", "_____no_output_____" ], [ "display(IImage(url=s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': imageName})))", "_____no_output_____" ] ], [ [ "# Call Rekognition to Detect Objects in the Image\nhttps://docs.aws.amazon.com/rekognition/latest/dg/API_DetectLabels.html", "_____no_output_____" ] ], [ [ "detectLabelsResponse = rekognition.detect_labels(\n Image={\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': imageName,\n }\n }\n)", "_____no_output_____" ] ], [ [ "# Review the Raw JSON Response from Rekognition\nShow JSON response returned by Rekognition Labels API (Object Detection).\n\nIn the JSON response below, you will see Label, detected instances, confidence score and additional information.", "_____no_output_____" ] ], [ [ "display(detectLabelsResponse)", "_____no_output_____" ] ], [ [ "# Show Bounding Boxes Around Recognized Objects\n", "_____no_output_____" ] ], [ [ " def drawBoundingBoxes (sourceImage, boxes):\n # blue, green, red, grey\n colors = ((255,255,255),(255,255,255),(76,182,252),(52,194,123))\n \n # Download image locally\n imageLocation = temp_folder + os.path.basename(sourceImage)\n s3.download_file(bucket, sourceImage, imageLocation)\n\n # Draws BB on Image\n bbImage = Image.open(imageLocation)\n draw = ImageDraw.Draw(bbImage)\n width, height = bbImage.size\n col = 0\n maxcol = len(colors)\n line= 3\n for box in boxes:\n x1 = int(box[1]['Left'] * width)\n y1 = int(box[1]['Top'] * height)\n x2 = int(box[1]['Left'] * width + box[1]['Width'] * width)\n y2 = int(box[1]['Top'] * height + box[1]['Height'] * height)\n \n draw.text((x1,y1),box[0],colors[col])\n for l in range(line):\n draw.rectangle((x1-l,y1-l,x2+l,y2+l),outline=colors[col])\n col = (col+1)%maxcol\n \n imageFormat = \"PNG\"\n ext = sourceImage.lower()\n if(ext.endswith('jpg') or ext.endswith('jpeg')):\n imageFormat = 'JPEG'\n\n bbImage.save(imageLocation,format=imageFormat)\n\n display(bbImage)", "_____no_output_____" ], [ "boxes = []\nobjects = detectLabelsResponse['Labels']\nfor obj in objects:\n for einstance in obj[\"Instances\"]:\n boxes.append ((obj['Name'], einstance['BoundingBox']))\n \ndrawBoundingBoxes(imageName, boxes)\n", "_____no_output_____" ] ], [ [ "# Display List of Detected Objects", "_____no_output_____" ] ], [ [ "flaggedObjects = [\"Car\"]\n\nfor label in detectLabelsResponse[\"Labels\"]:\n if(label[\"Name\"] in flaggedObjects):\n print(\"Detected object:\")\n print(\"- {} (Confidence: {})\".format(label[\"Name\"], label[\"Confidence\"]))\n print(\" - Parents: {}\".format(label[\"Parents\"]))", "_____no_output_____" ] 
], [ [ "# Recognize Objects in Video\n Object recognition in video is an async operation. \nhttps://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html. \n\n- First we start a label detection job which returns a Job Id.\n- We can then call `get_label_detection` to get the job status and after job is complete, we can get object metadata.\n- In production use cases, you would usually use StepFunction or SNS topic to get notified when job is complete.", "_____no_output_____" ] ], [ [ "videoName = 'content-moderation/media/GrandTour720.mp4'\n\nstrDetail = 'Objects detected in video<br>=======================================<br>'\nstrOverall = 'Objects in the overall video:<br>=======================================<br>'", "_____no_output_____" ], [ "# Show video in a player\n\ns3VideoUrl = s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': videoName})\n\nvideoTag = \"<video controls='controls' autoplay width='640' height='360' name='Video' src='{0}'></video>\".format(s3VideoUrl)\n\nvideoui = \"<table><tr><td style='vertical-align: top'>{}</td></tr></table>\".format(videoTag)\n\ndisplay(HTML(videoui))", "_____no_output_____" ] ], [ [ "# Call Rekognition to Start a Job for Object Detection\n\n### Additional (Optional) Request Attributes\n\nClientRequestToken:\nhttps://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-ClientRequestToken\n\nJobTag:\nhttps://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-JobTag\n\nMinConfidence:\nhttps://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-MinConfidence\n\nNotificationChannel:\nhttps://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html#rekognition-StartLabelDetection-request-NotificationChannel\n", "_____no_output_____" ] ], [ [ "# Start video label recognition job\nstartLabelDetection = rekognition.start_label_detection(\n Video={\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': videoName,\n }\n },\n)\n\nlabelsJobId = startLabelDetection['JobId']\ndisplay(\"Job Id: {0}\".format(labelsJobId))", "_____no_output_____" ] ], [ [ "# Wait for Object Detection Job to Complete", "_____no_output_____" ] ], [ [ "# Wait for object detection job to complete\n# In production use cases, you would usually use StepFunction or SNS topic to get notified when job is complete.\ngetObjectDetection = rekognition.get_label_detection(\n JobId=labelsJobId,\n SortBy='TIMESTAMP'\n)\n\nwhile(getObjectDetection['JobStatus'] == 'IN_PROGRESS'):\n time.sleep(5)\n print('.', end='')\n \n getObjectDetection = rekognition.get_label_detection(\n JobId=labelsJobId,\n SortBy='TIMESTAMP')\n \ndisplay(getObjectDetection['JobStatus'])", "_____no_output_____" ] ], [ [ "# Review Raw JSON Response from Rekognition\n* Show JSON response returned by Rekognition Object Detection API.\n* In the JSON response below, you will see list of detected objects and activities.\n* For each detected object, you will see the `Timestamp` of the frame within the video.\n", "_____no_output_____" ] ], [ [ "display(getObjectDetection)", "_____no_output_____" ] ], [ [ "# Display Recognized Objects in the Video\nDisplay timestamps and objects detected at that time.", "_____no_output_____" ] ], [ [ "flaggedObjectsInVideo = [\"Car\"]\n\ntheObjects = {}\n\n# Objects detected in each frame\nfor obj in getObjectDetection['Labels']:\n ts = obj [\"Timestamp\"]\n cconfidence = 
obj['Label'][\"Confidence\"]\n oname = obj['Label'][\"Name\"]\n \n if(oname in flaggedObjectsInVideo):\n print(\"Found flagged object at {} ms: {} (Confidence: {})\".format(ts, oname, round(cconfidence,2)))\n \n strDetail = strDetail + \"At {} ms: {} (Confidence: {})<br>\".format(ts, oname, round(cconfidence,2))\n if oname in theObjects:\n cojb = theObjects[oname]\n theObjects[oname] = {\"Name\" : oname, \"Count\": 1+cojb[\"Count\"]}\n else:\n theObjects[oname] = {\"Name\" : oname, \"Count\": 1}\n\n# Unique objects detected in video\nfor theObject in theObjects:\n strOverall = strOverall + \"Name: {}, Count: {}<br>\".format(theObject, theObjects[theObject][\"Count\"])\n\n# Display results\ndisplay(HTML(strOverall))", "_____no_output_____" ], [ "listui = \"<table><tr><td style='vertical-align: top'>{}</td></tr></table>\".format(strDetail)\ndisplay(HTML(listui))", "_____no_output_____" ] ], [ [ "# Worker Safety with Amazon Rekognition\nYou can use Amazon Rekognition to detect if certain objects are not present in the image or video. For example you can perform worker safety audit by revieweing images/video of a construction site and detecting if there are any workers without safety hat.", "_____no_output_____" ] ], [ [ "imageName = \"content-moderation/media/hat-detection.png\"", "_____no_output_____" ], [ "display(IImage(url=s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': imageName})))", "_____no_output_____" ] ], [ [ "# Call Amazon Rekognition to Detect Objects in the Image", "_____no_output_____" ] ], [ [ "detectLabelsResponse = rekognition.detect_labels(\n Image={\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': imageName,\n }\n }\n)", "_____no_output_____" ] ], [ [ "# Display Rekognition Response", "_____no_output_____" ] ], [ [ "display(detectLabelsResponse)", "_____no_output_____" ] ], [ [ "# Show Bounding Boxes Around Recognized Objects\n", "_____no_output_____" ] ], [ [ " def drawBoundingBoxes (sourceImage, boxes):\n # blue, green, red, grey\n colors = ((255,255,255),(255,255,255),(76,182,252),(52,194,123))\n \n # Download image locally\n imageLocation = temp_folder + os.path.basename(sourceImage)\n s3.download_file(bucket, sourceImage, imageLocation)\n\n # Draws BB on Image\n bbImage = Image.open(imageLocation)\n draw = ImageDraw.Draw(bbImage)\n width, height = bbImage.size\n col = 0\n maxcol = len(colors)\n line= 3\n for box in boxes:\n x1 = int(box[1]['Left'] * width)\n y1 = int(box[1]['Top'] * height)\n x2 = int(box[1]['Left'] * width + box[1]['Width'] * width)\n y2 = int(box[1]['Top'] * height + box[1]['Height'] * height)\n \n draw.text((x1,y1),box[0],colors[col])\n for l in range(line):\n draw.rectangle((x1-l,y1-l,x2+l,y2+l),outline=colors[col])\n col = (col+1)%maxcol\n \n imageFormat = \"PNG\"\n ext = sourceImage.lower()\n if(ext.endswith('jpg') or ext.endswith('jpeg')):\n imageFormat = 'JPEG'\n\n bbImage.save(imageLocation,format=imageFormat)\n\n display(bbImage)", "_____no_output_____" ], [ "boxes = []\nobjects = detectLabelsResponse['Labels']\nfor obj in objects:\n for einstance in obj[\"Instances\"]:\n boxes.append ((obj['Name'], einstance['BoundingBox']))\n \ndrawBoundingBoxes(imageName, boxes)\n", "_____no_output_____" ], [ "def matchPersonsAndHats(personsList, hardhatsList):\n\n persons = []\n hardhats = []\n personsWithHats = []\n\n for person in personsList:\n persons.append(person)\n for hardhat in hardhatsList:\n hardhats.append(hardhat)\n\n h = 0\n matched = 0\n totalHats = len(hardhats)\n while(h < totalHats):\n hardhat = 
hardhats[h-matched]\n totalPersons = len(persons)\n p = 0\n while(p < totalPersons):\n person = persons[p]\n if(not (hardhat['BoundingBoxCoordinates']['x2'] < person['BoundingBoxCoordinates']['x1']\n or hardhat['BoundingBoxCoordinates']['x1'] > person['BoundingBoxCoordinates']['x2']\n or hardhat['BoundingBoxCoordinates']['y4'] < person['BoundingBoxCoordinates']['y1']\n or hardhat['BoundingBoxCoordinates']['y1'] > person['BoundingBoxCoordinates']['y4']\n )):\n\n personsWithHats.append({'Person' : person, 'Hardhat' : hardhat})\n\n del persons[p]\n del hardhats[h - matched]\n\n matched = matched + 1\n\n break\n p = p + 1\n h = h + 1\n\n return (personsWithHats, persons, hardhats)\n\ndef getBoundingBoxCoordinates(boundingBox, imageWidth, imageHeight):\n x1 = 0\n y1 = 0\n x2 = 0\n y2 = 0\n x3 = 0\n y3 = 0\n x4 = 0\n y4 = 0\n\n boxWidth = boundingBox['Width']*imageWidth\n boxHeight = boundingBox['Height']*imageHeight\n\n x1 = boundingBox['Left']*imageWidth\n y1 = boundingBox['Top']*imageWidth\n\n x2 = x1 + boxWidth\n y2 = y1\n\n x3 = x2\n y3 = y1 + boxHeight\n\n x4 = x1\n y4 = y3\n\n return({'x1': x1, 'y1' : y1, 'x2' : x2, 'y2' : y2, 'x3' : x3, 'y3' : y3, 'x4' : x4, 'y4' : y4})\n\ndef getPersonsAndHardhats(labelsResponse, imageWidth, imageHeight):\n\n persons = []\n hardhats = []\n\n for label in labelsResponse['Labels']:\n if label['Name'] == 'Person' and 'Instances' in label:\n for person in label['Instances']:\n persons.append({'BoundingBox' : person['BoundingBox'], 'BoundingBoxCoordinates' : getBoundingBoxCoordinates(person['BoundingBox'], imageWidth, imageHeight), 'Confidence' : person['Confidence']})\n elif ((label['Name'] == 'Hardhat' or label['Name'] == 'Helmet') and 'Instances' in label):\n for hardhat in label['Instances']:\n hardhats.append({'BoundingBox' : hardhat['BoundingBox'], 'BoundingBoxCoordinates' : getBoundingBoxCoordinates(hardhat['BoundingBox'], imageWidth, imageHeight), 'Confidence' : hardhat['Confidence']})\n\n return (persons, hardhats)", "_____no_output_____" ], [ "s3Resource = boto3.resource('s3')\nbucket = s3Resource.Bucket(bucket)\niojb = bucket.Object(imageName)\nresponse = iojb.get()\nfile_stream = response['Body']\nim = Image.open(file_stream)\nimageWidth, imageHeight = im.size", "_____no_output_____" ], [ "persons, hardhats = getPersonsAndHardhats(detectLabelsResponse, imageWidth, imageHeight)\n\npersonsWithHats, personsWithoutHats, hatsWihoutPerson = matchPersonsAndHats(persons, hardhats)\n\npersonsWithHatsCount = len(personsWithHats)\npersonsWithoutHatsCount = len(personsWithoutHats)\nhatsWihoutPersonCount = len(hatsWihoutPerson)\n\noutputMessage = \"Person(s): {}\".format(personsWithHatsCount+personsWithoutHatsCount)\noutputMessage = outputMessage + \"\\nPerson(s) With Safety Hat: {}\\nPerson(s) Without Safety Hat: {}\".format(personsWithHatsCount, personsWithoutHatsCount)\nprint(outputMessage)", "_____no_output_____" ] ], [ [ "# Congratulations!\nYou have successfully used Amazon Rekognition to identify specific objects in images and videos.", "_____no_output_____" ], [ "# References\n- https://docs.aws.amazon.com/rekognition/latest/dg/API_DetectLabels.html\n- https://docs.aws.amazon.com/rekognition/latest/dg/API_StartLabelDetection.html\n- https://docs.aws.amazon.com/rekognition/latest/dg/API_GetLabelDetection.html", "_____no_output_____" ], [ "# Release Resources", "_____no_output_____" ] ], [ [ "%%html\n\n<p><b>Shutting down your kernel for this notebook to release resources.</b></p>\n<button class=\"sm-command-button\" 
data-commandlinker-command=\"kernelmenu:shutdown\" style=\"display:none;\">Shutdown Kernel</button>\n \n<script>\ntry {\n els = document.getElementsByClassName(\"sm-command-button\");\n els[0].click();\n}\ncatch(err) {\n // NoOp\n} \n</script>", "_____no_output_____" ], [ "%%javascript\n\ntry {\n Jupyter.notebook.save_checkpoint();\n Jupyter.notebook.session.delete();\n}\ncatch(err) {\n // NoOp\n}", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
4a425dbda9c0abbef7fab868432078191a576517
435,158
ipynb
Jupyter Notebook
Week5/Practical5.ipynb
MARCOOTTO-GIS/egm722
825f5925a452d645795f51830cc60bd0ba864cc4
[ "CC-BY-4.0" ]
null
null
null
Week5/Practical5.ipynb
MARCOOTTO-GIS/egm722
825f5925a452d645795f51830cc60bd0ba864cc4
[ "CC-BY-4.0" ]
null
null
null
Week5/Practical5.ipynb
MARCOOTTO-GIS/egm722
825f5925a452d645795f51830cc60bd0ba864cc4
[ "CC-BY-4.0" ]
null
null
null
90.771381
74,303
0.731741
[ [ [ "# EGM722 - Week 5 Practical: Vector and raster operations using python\n\n## Overview\n\nUp to now, we have worked with either vector data or raster data, but we haven't really used them together. In this week's practical, we'll learn how we can combine these two data types, and see some examples of different analyses, such as zonal statistics or sampling raster data, that we can automate using python.\n\n## Objectives\n- learn how to use `rasterstats` to perform zonal statistics\n- learn how to handle exceptions using try...except\n- rasterize polygon data using `rasterio`\n- learn how to mask and select (index) rasters using vector data\n- see additional plotting examples using matplotlib\n\n## Data provided\n\nIn the data\\_files folder, you should have the following:\n- LCM2015_Aggregate_100m.tif\n- NI_DEM.tif\n\n\n## 1. Getting started\n\nIn this practical, we'll look at a number of different GIS tasks related to working with both raster and vector data in python, as well as a few different python and programming concepts. To get started, run the cell below.", "_____no_output_____" ] ], [ [ "%matplotlib notebook\n\nimport numpy as np\nimport rasterio as rio\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nfrom rasterstats import zonal_stats\n\nplt.rcParams.update({'font.size': 22}) # update the font size for our plots to be size 22", "_____no_output_____" ] ], [ [ "## 2. Zonal statistics\nIn GIS, [_zonal statistics_](https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-analyst/how-zonal-statistics-works.htm) is a process whereby you calculate statistics for the pixels of a raster in different groups, or zones, defined by properties in another dataset. In this example, we're going to use the Northern Ireland County border dataset from Week 2, along with a re-classified version of the Northern Ireland [Land Cover Map](https://catalogue.ceh.ac.uk/documents/47f053a0-e34f-4534-a843-76f0a0998a2f) 2015[<sup id=\"fn1-back\">1</sup>](#fn1 \"footnote 1\").\n\nThe Land Cover Map tells, for each pixel, what type of land cover is associated with a location - that is, whether it's woodland (and what kind of woodland), grassland, urban or built-up areas, and so on. For our re-classified version of the dataset, we're working with the aggregate class data, re-sampled to 100m resolution from the original 25m resolution.\n\nThe raster data type is _unsigned integer_ with a _bitdepth_ of 8 bits - that is, it has a range of possible values from 0 to 255. Even though it has this range of possible values, we only use 10 (11) of them:\n\n| Raster value | Aggregate class name |\n| :------------|:---------------------------|\n| 0 | No Data |\n| 1 | Broadleaf woodland |\n| 2 | Coniferous woodland |\n| 3 | Arable |\n| 4 | Improved grassland |\n| 5 | Semi-natural grassland |\n| 6 | Mountain, heath, bog |\n| 7 | Saltwater |\n| 8 | Freshwater |\n| 9 | Coastal |\n| 10 | Built-up areas and gardens |\n\nIn this part of the practical, we'll try to work out the percentage of the entire country that is covered by each of these different landcovers, as well as each of the different counties. 
To start, we'll load the `LCM2015_Aggregate_100m.tif` raster, as well as the counties shapefile from Week 2:", "_____no_output_____" ] ], [ [ "# open the land cover raster and read the data\nwith rio.open('data_files/LCM2015_Aggregate_100m.tif') as dataset:\n xmin, ymin, xmax, ymax = dataset.bounds \n crs = dataset.crs\n landcover = dataset.read(1)\n affine_tfm = dataset.transform\n\n# now, load the county dataset from the week 2 folder\ncounties = gpd.read_file('../Week2/data_files/Counties.shp').to_crs(crs)", "_____no_output_____" ] ], [ [ "Next, we'll define a function that takes an array, and returns a __dict__ object containing the count (number of pixels) for each of the unique values in the array:\n\n```python\ndef count_unique(array, nodata=0):\n '''\n Count the unique elements of an array.\n\n :param array: Input array\n :param nodata: nodata value to ignore in the counting\n \n :returns count_dict: a dictionary of unique values and counts\n '''\n count_dict = {}\n for val in np.unique(array):\n if val == nodata:\n continue\n count_dict[str(val)] = np.count_nonzero(array == val)\n return count_dict\n```\n\nHere, we have two input parameters: the first, `array`, is our array (or raster data). The next, `nodata`, is the value of the array that we should ignore. We then define an empty __dict__ (`count_dict = {}`). \n\nWith [`numpy.unique()`](https://numpy.org/doc/stable/reference/generated/numpy.unique.html), we get an array containing the unique values of the input array. Note that this works for data like this raster, where we have a limited number of pre-defined values. For something like a digital elevation model, which represents continuous floating-point values, we wouldn't want to use this approach to bin the data.\n\nNext, for each of the different unique values `val`, we find all of the locations in `array` that have that value (`array == val`). Note that this is actually a boolean array, with values of either `True` where `array == val`, and `False` where `array != val`. [`numpy.count_nonzero()`](https://numpy.org/doc/stable/reference/generated/numpy.count_nonzero.html) the counts the number of non-zero (in this case, `True`) values in the array - that is, this:\n\n```python\nnp.count_nonzero(array == val)\n```\n\ntells us the number of pixels in `array` that are equal to `val`. 
We then assign this to our dictionary with a key that is a __str__ representation of the value, before returning our `count_dict` variable at the end of the function.\n\nRun the cell below to define the function and run it on our `landcover` raster.", "_____no_output_____" ] ], [ [ "def count_unique(array, nodata=0):\n '''\n Count the unique elements of an array.\n\n :param array: Input array\n :param nodata: nodata value to ignore in the counting\n \n :returns count_dict: a dictionary of unique values and counts\n '''\n count_dict = {}\n for val in np.unique(array):\n if val == nodata:\n continue\n count_dict[str(val)] = np.count_nonzero(array == val)\n return count_dict\n\nunique_landcover = count_unique(landcover)\nprint(unique_landcover)", "{'1': 40311, '2': 73386, '3': 90736, '4': 834512, '5': 75980, '6': 179942, '7': 16991, '8': 60877, '9': 14085, '10': 56598}\n" ] ], [ [ "So this provides us with a __dict__ object with keys corresponding to each of the unique values (1-10).\n\n<span style=\"color:#009fdf;font-size:1.1em;font-weight:bold\">Can you work out the percentage area of Northern Ireland that is covered by each of the 10 landcover classes?</span>", "_____no_output_____" ], [ "In the following cell, we use [`rasterstats.zonal_stats()`](https://pythonhosted.org/rasterstats/manual.html#zonal-statistics) with our `counties` and `landcover` datasets to do the same exercise as above (counting unique pixel values). Rather than counting the pixels in the entire raster, however, we want to count the number of pixels with each land cover value that fall within a specific area defined by the features in the `counties` dataset:", "_____no_output_____" ] ], [ [ "county_stats = zonal_stats(counties, landcover, affine=affine_tfm, categorical=True, nodata=0)\n\nprint(county_stats[0])", "{1.0: 7221, 2.0: 20069, 3.0: 6178, 4.0: 204078, 5.0: 17176, 6.0: 54540, 8.0: 11464, 9.0: 1, 10.0: 5649}\n" ] ], [ [ "## 3. The zip built-in\n\nThis isn't a very readable result, though. If we want to interpret the results for each county, we have to know what land cover name corresponds to each of the values in the raster. One way that we could do this is by writing a function that re-names each of the keys in the __dict__. This example shows one way we could do this: the function takes the original __dict__ object (_dict_in_), as well as a list of the 'old' keys (_old_names_), and the corresponding 'new' keys (_new_names_).", "_____no_output_____" ] ], [ [ "def rename_dict(dict_in, old_names, new_names):\n '''\n Rename the keys of a dictionary, given a list of old and new keynames\n\n :param dict_in: the dictionary to rename\n :param old_names: a list of old keys\n :param new_names: a list of new key names\n \n :returns dict_out: a dictionary with the keys re-named\n '''\n dict_out = {}\n for new, old in zip(new_names, old_names):\n dict_out[new] = dict_in[old]\n\n return dict_out", "_____no_output_____" ] ], [ [ "For this function, we're also making use of the built-in function `zip()` ([documentation](https://docs.python.org/3.8/library/functions.html#zip)). In Python 3, `zip()` returns an __iterator__ object that combines elements from each of the iterable objects passed as arguments. As an example:", "_____no_output_____" ] ], [ [ "x = [1, 2, 3, 4]\ny = ['a', 'b', 'c', 'd']\n\nlist(zip(x, y))", "_____no_output_____" ] ], [ [ "So, with `zip(x, y)`, each of the elements of `x` is paired with the corresponding element from `y`. 
If `x` and `y` are different lengths, `zip(x, y)` will only use up to the shorter of the two:", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\n\nlist(zip(x, y))", "_____no_output_____" ] ], [ [ "Let's see what happens when we run our function `rename_dict()` using the stats for our first county (County Tyrone - remember that the output from zonal_stats will have correspond to the rows of our input vector data):", "_____no_output_____" ] ], [ [ "old_names = [float(i) for i in range(1, 11)]\nnew_names = ['Broadleaf woodland', 'Coniferous woodland', 'Arable', 'Improved grassland',\n 'Semi-natural grassland', 'Mountain, heath, bog', 'Saltwater', 'Freshwater',\n 'Coastal', 'Built-up areas and gardens']\n\nrename_dict(county_stats[0], old_names, new_names)", "_____no_output_____" ] ], [ [ "Have a look at the _keys_ for `county_stats` - you should notice that there are no pixels corresponding to landcover class 7 (Saltwater), which makes sense given that County Tyrone is an inland county:", "_____no_output_____" ] ], [ [ "print(county_stats[0].keys())\nprint(county_stats[0])", "dict_keys([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 9.0, 10.0])\n{1.0: 7221, 2.0: 20069, 3.0: 6178, 4.0: 204078, 5.0: 17176, 6.0: 54540, 8.0: 11464, 9.0: 1, 10.0: 5649}\n" ] ], [ [ "To run this for each of our counties, we could run some checks to make sure that we only try to access keys that exist in `dict_in`. For example, we could add an `if` statement to the function:\n\n```python\ndef rename_dict(dict_in, old_names, new_names):\n dict_out = {}\n for new, old in zip(new_names, old_names)\n if old in dict_in.keys():\n dict_out[new] = dict_in[old]\n else:\n continue\n return dict_out\n```\n\nBut, this is also an example of an exception that isn't necessarily something that requires us to stop executing our program. We don't expect each landcover type to be present in each county, so we don't want our program to stop as soon as it finds out that one of the counties doesn't have a particular landcover type.\n\n## 4. Handling Exceptions with try ... except\nPython provides a way to handle these kind of exceptions: the [try...except](https://realpython.com/python-exceptions/#the-try-and-except-block-handling-exceptions) block:\n```python\n\ntry:\n # run some code\nexcept:\n # run this if the try block causes an exception\n```\n\nIn general, it's [not recommended](https://www.python.org/dev/peps/pep-0008/#programming-recommendations) to just have a bare `except:` clause, as this will make it harder to interrupt a program. In our specific case, we only want the interpreter to ignore `KeyError` exceptions - if there are other problems, we still need to know about those:", "_____no_output_____" ] ], [ [ "def rename_dict(dict_in, old_names, new_names):\n '''\n Rename the keys of a dictionary, given a list of old and new keynames\n\n :param dict_in: the dictionary to rename\n :param old_names: a list of old keys\n :param new_names: a list of new key names\n \n :returns dict_out: a dictionary with the keys re-named\n '''\n dict_out = {}\n for new, old in zip(new_names, old_names):\n try:\n dict_out[new] = dict_in[old]\n except KeyError:\n continue\n return dict_out", "_____no_output_____" ] ], [ [ "Notice how for each pair of names, we try to assign the value corresponding to `old` in `dict_in`. If `old` is not a valid key for `dict_in`, we just move onto the next one. 
Now, let's run this new function on `county_stats[0]` again:", "_____no_output_____" ] ], [ [ "rename_dict(county_stats[0], old_names, new_names)", "_____no_output_____" ] ], [ [ "We'll do one last thing before moving on here. Just like with the __dict__ outputs of `zonal_stats()`, the __list__ of __dict__ objects isn't very readable. Let's create a new __dict__ object that takes the county names as keys, and returns the re-named __dict__ objects for each:", "_____no_output_____" ] ], [ [ "renamed_list = [rename_dict(d, old_names, new_names) for d in county_stats] # create a list of renamed dict objects\nnice_names = [n.title() for n in counties.CountyName]\nstats_dict = dict(zip(nice_names, renamed_list))\n\nprint(stats_dict['Tyrone'])\nprint(stats_dict['Antrim'])", "{'Broadleaf woodland': 7221, 'Coniferous woodland': 20069, 'Arable': 6178, 'Improved grassland': 204078, 'Semi-natural grassland': 17176, 'Mountain, heath, bog': 54540, 'Freshwater': 11464, 'Coastal': 1, 'Built-up areas and gardens': 5649}\n{'Broadleaf woodland': 6874, 'Coniferous woodland': 12401, 'Arable': 12766, 'Improved grassland': 173631, 'Semi-natural grassland': 15442, 'Mountain, heath, bog': 47318, 'Saltwater': 168, 'Freshwater': 20710, 'Coastal': 1603, 'Built-up areas and gardens': 18744}\n" ] ], [ [ "Depending on how we're using the data, it might be easier to keep the output of `zonal_stats()` as-is, rather than using these long, complicated keys. For visualization and readability purposes, though, it helps to be able to easily and quickly understand what the outputs actually represent.\n\n<span style=\"color:#009fdf;font-size:1.1em;font-weight:bold\">What is the total area (in km<sup>2</sup>) covered by \"Mountain, heath, bog\" in County Down?</span>", "_____no_output_____" ], [ "## 5. Rasterizing vector data using rasterio\n`rasterstats` provides a nice tool for quickly and easily extracting zonal statistics from a raster using vector data. Sometimes, though, we might want to _rasterize_ our vector data - for example, in order to mask our raster data, or to be able to select pixels. To do this, we can use the [`rasterio.features`](https://rasterio.readthedocs.io/en/latest/api/rasterio.features.html) module:", "_____no_output_____" ] ], [ [ "import rasterio.features # we have imported rasterio as rio, so this will be rio.features (and rasterio.features)", "_____no_output_____" ] ], [ [ "`rasterio.features`has a number of different methods, but the one we are interested in here is `rasterize()`:\n\n```\nrio.features.rasterize(\n shapes,\n out_shape=None,\n fill=0,\n out=None,\n transform=Affine(1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0),\n all_touched=False,\n merge_alg=<MergeAlg.replace: 'REPLACE'>,\n default_value=1,\n dtype=None,\n)\nDocstring:\nReturn an image array with input geometries burned in.\n\nWarnings will be raised for any invalid or empty geometries, and\nan exception will be raised if there are no valid shapes\nto rasterize.\n\nParameters\n----------\nshapes : iterable of (`geometry`, `value`) pairs or iterable over\n geometries. The `geometry` can either be an object that\n implements the geo interface or GeoJSON-like object. If no\n `value` is provided the `default_value` will be used. If `value`\n is `None` the `fill` value will be used.\nout_shape : tuple or list with 2 integers\n Shape of output numpy ndarray.\nfill : int or float, optional\n Used as fill value for all areas not covered by input\n geometries.\n...\n```\n\nHere, we pass an __iterable__ (__list__, __tuple__, __array__, etc.) 
that contains (__geometry__, __value__) pairs. __value__ determines the pixel values in the output raster that the __geometry__ overlaps. If we don't provide a __value__, it takes the `default_value` or the `fill` value.\n\nSo, to create a rasterized version of our county outlines, we could do the following:\n\n```python\nshapes = list(zip(counties['geometry'], counties['COUNTY_ID']))\n\ncounty_mask = rio.features.rasterize(shapes=shapes, fill=0, \n out_shape=landcover.shape, transform=affine_tfm)\n```\n\nThe first line uses `zip()` and `list()` to create a list of (__geometry__, __value__) pairs, and the second line actually creates the rasterized array, `county_mask`. Note that in the call to `rasterio.features.rasterize()`, we have to set the output shape (`out_shape`) of the raster, as well as the `transform` - that is, how we go from pixel coordinates in the array to real-world coordinates. Since we want to use this rasterized output with our `landcover`, we use the `shape` of the `landcover` raster, as well as its `transform` (`affine_tfm`) - that way, the outputs will line up as we expect. Run the cell below to see what the output looks like:", "_____no_output_____" ] ], [ [ "shapes = list(zip(counties['geometry'], counties['COUNTY_ID']))\n\ncounty_mask = rio.features.rasterize(shapes=shapes, fill=0,\n out_shape=landcover.shape, transform=affine_tfm)\nplt.figure()\nplt.imshow(county_mask) # visualize the rasterized output", "_____no_output_____" ] ], [ [ "As you can see, this provides us with an __array__ whose values correspond to the `COUNTY_ID` of the county feature at that location (check the `counties` __GeoDataFrame__ again to see which county corresponds to which ID). In the next section, we'll see how we can use arrays like this to investigate our data further.\n\n## 6. Masking and indexing rasters\nSo far, we've seen how we can index an array (or a list, a tuple, ...) using simple indexing (e.g., `myList[0]`) or _slicing_ (e.g., `myList[2:4]`). `numpy` arrays, however, can [actually be indexed](https://numpy.org/doc/stable/reference/arrays.indexing.html) using other arrays of type `bool` (the elements of the array are boolean (`True`/`False`) values). In this section, we'll see how we can use this, along with our rasterized vectors, to select and investigate values from a raster using boolean indexing.\n\nTo start, we'll open our dem raster - note that this raster has the same georeferencing information as our landcover raster, so we don't have to load all of that information, just the raster band:", "_____no_output_____" ] ], [ [ "with rio.open('data_files/NI_DEM.tif') as dataset:\n dem = dataset.read(1)", "_____no_output_____" ] ], [ [ "From the previous section, we have an array with values corresponding each of the counties of Northern Ireland. Using `numpy`, we can use this array to select elements of other rasters by creating a _mask_, or a boolean array - that is, an array with values of `True` and `False`. For example, we can create a mask corresponding to County Antrim (`COUNTY_ID=1`) like this:\n\n```python\ncounty_antrim = county_mask == 1\n```\nLet's see what this mask looks like:", "_____no_output_____" ] ], [ [ "county_antrim = county_mask == 1\n\nplt.figure()\nplt.imshow(county_antrim)", "_____no_output_____" ] ], [ [ "We can also combine expressions using functions like [`np.logical_and()`](https://numpy.org/doc/stable/reference/generated/numpy.logical_and.html) or [`np.logical_or()`](https://numpy.org/doc/stable/reference/generated/numpy.logical_or.html). 
If we wanted to create a mask corresponding to both County Antrim and County Down, we could do the following:", "_____no_output_____" ] ], [ [ "antrim_and_down = np.logical_or(county_mask == 3, county_mask == 1)\n\nplt.figure()\nplt.imshow(antrim_and_down)", "_____no_output_____" ] ], [ [ "We could then find the mean elevation of these two counties by indexing, or selecting, pixels from `dem` using our mask:", "_____no_output_____" ] ], [ [ "ad_elevation = dem[antrim_and_down]\nprint('Mean elevation: {:.2f} m'.format(ad_elevation.mean()))", "Mean elevation: 112.61 m\n" ] ], [ [ "Now let's say we wanted to investigate the two types of woodland we have, broadleaf and conifer. One thing we might want to look at is the area-elevation distribution of each type. To do this, we first have to select the pixels from the DEM that correspond to the broadleaf woodlands, and all of the pixels corresponding to conifer woodlands:", "_____no_output_____" ] ], [ [ "broad_els = dem[landcover == 1] # get all dem values where landcover = 1\nconif_els = dem[landcover == 2] # get all dem values where landcover = 2", "_____no_output_____" ] ], [ [ "Now, we have two different arrays, `broad_els` and `conif_els`, each corresponding to the DEM pixel values of each landcover type. We can plot a histogram of these arrays using [`plt.hist()`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.hist.html), but this will only tell us the number of pixels - for the area-elevation distribution, we have to convert the pixel counts into areas by multiplying with the pixel area (100 m x 100 m).\n\nFirst, though, we can use `numpy.histogram()`, along with an array representing our elevation bins, to produce a count of the number of pixels with an elevation that falls within each bin. Let's try elevations ranging from 0 to 600 meters, with a spacing of 5 meters:", "_____no_output_____" ] ], [ [ "el_bins = np.arange(0, 600, 5) # create an array of values ranging from 0 to 600, spaced by 5.\n\nbroad_count, _ = np.histogram(broad_els, el_bins) # bin the broadleaf elevations using the elevation bins\nconif_count, _ = np.histogram(conif_els, el_bins) # bin the conifer elevations using the elevation bins\n\nbroad_area = broad_count * 100 * 100 # convert the pixel counts to an area by multipling by the pixel size in x, y\nconif_area = conif_count * 100 * 100", "_____no_output_____" ] ], [ [ "Finally, we can plot the area-elevation distribution for each land cover type using [`matplotlib.pyplot.bar()`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.bar.html):", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 1, figsize=(8, 8)) # create a new figure and axes object\n\n# plot the area-elevation distributions using matplotlib.pyplot.bar(), converting from sq m to sq km:\n_ = ax.bar(el_bins[:-1], broad_area / 1e6, align='edge', width=5, alpha=0.8, label='Broadleaf Woodland')\n_ = ax.bar(el_bins[:-1], conif_area / 1e6, align='edge', width=5, alpha=0.8, label='Conifer Woodland')\n\nax.set_xlim(0, 550) # set the x limits of the plot\nax.set_ylim(0, 30) # set the y limits of the plot\n\nax.set_xlabel('Elevation (m)') # add an x label\nax.set_ylabel('Area (km$^2$)') # add a y label\nax.legend() # add a legend", "_____no_output_____" ] ], [ [ "From this, we can clearly see that Conifer woodlands tend to be found at much higher elevations than Broadleaf woodlands, and at a much larger range of elevations (0-500 m, compared to 0-250 m or so). 
With these samples (`broad_els`, `conif_els`), we can also calculate statistics for each of these samples using `numpy` functions such as `np.mean()`, `np.median()`, `np.std()`, and so on.\n\n<span style=\"color:#009fdf;font-size:1.1em;font-weight:bold\">Of the 10 different landcover types shown here, which one has the highest mean elevation? What about the largest spread in elevation values?</span>", "_____no_output_____" ], [ "## Next steps\n\nThat's all for this practical. In lieu of an an additional exercise this week, spend some time working on your project - are there concepts or examples from this practical that you can incorporate into your project?\n\n### Footnotes\n[<sup id=\"fn1\">1</sup>](#fn1-back)Rowland, C.S.; Morton, R.D.; Carrasco, L.; McShane, G.; O'Neil, A.W.; Wood, C.M. (2017). Land Cover Map 2015 (25m raster, N. Ireland). NERC Environmental Information Data Centre. [doi:10.5285/47f053a0-e34f-4534-a843-76f0a0998a2f](https://doi.org/10.5285/47f053a0-e34f-4534-a843-76f0a0998a2f)</span>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a4263a26f582d43a5a30c8f002dbb0ecf453935
894,126
ipynb
Jupyter Notebook
Notebooks/5 - Statistics for Mathematicians.ipynb
Roshanmahes/Python-for-Teachers
bc82948cb02305fc944caea0ece0941643d344a6
[ "MIT" ]
12
2020-08-15T12:45:39.000Z
2022-01-29T13:46:15.000Z
Notebooks/5 - Statistics for Mathematicians.ipynb
Roshanmahes/Python-for-Teachers
bc82948cb02305fc944caea0ece0941643d344a6
[ "MIT" ]
null
null
null
Notebooks/5 - Statistics for Mathematicians.ipynb
Roshanmahes/Python-for-Teachers
bc82948cb02305fc944caea0ece0941643d344a6
[ "MIT" ]
3
2020-10-02T22:20:42.000Z
2021-06-03T10:30:32.000Z
67.880808
141,256
0.602213
[ [ [ "# 5. Statistical Packages in Python for Mathematicians", "_____no_output_____" ], [ "Statisticians use the following packages in Python:\n\n- Data creation: `random`\n- Data analysis/manipulation: `pandas`, `scikit-learn`\n- Statistical functions: `scipy.stats`\n- Statistical data visualization: `matplotlib`, `seaborn`\n- Statistical data exploration: `statsmodels`\n", "_____no_output_____" ], [ "## Table of Contents\n\n- Random\n- Scipy Statistics\n- Seaborn\n- Statistical Models\n- Python vs. R\n\nNext week? Choose among:\n\n- Machine Learning 2/Deep Learning: `scikit-learn`, `keras`, `tensorflow`\n- SAGE\n- Other: ___________?\n\n", "_____no_output_____" ], [ "## 5.1 Random", "_____no_output_____" ], [ "The `random` package implements pseudo-random number generators for various distributions.", "_____no_output_____" ] ], [ [ "import random", "_____no_output_____" ] ], [ [ "The documentation is available here: https://docs.python.org/3/library/random.html.", "_____no_output_____" ] ], [ [ "help(random)", "Help on module random:\n\nNAME\n random - Random variable generators.\n\nMODULE REFERENCE\n https://docs.python.org/3.7/library/random\n \n The following documentation is automatically generated from the Python\n source files. It may be incomplete, incorrect or include features that\n are considered implementation detail and may vary between Python\n implementations. When in doubt, consult the module reference at the\n location listed above.\n\nDESCRIPTION\n integers\n --------\n uniform within range\n \n sequences\n ---------\n pick random element\n pick random sample\n pick weighted random sample\n generate random permutation\n \n distributions on the real line:\n ------------------------------\n uniform\n triangular\n normal (Gaussian)\n lognormal\n negative exponential\n gamma\n beta\n pareto\n Weibull\n \n distributions on the circle (angles 0 to 2pi)\n ---------------------------------------------\n circular uniform\n von Mises\n \n General notes on the underlying Mersenne Twister core generator:\n \n * The period is 2**19937-1.\n * It is one of the most extensively tested generators in existence.\n * The random() method is implemented in C, executes in a single Python step,\n and is, therefore, threadsafe.\n\nCLASSES\n _random.Random(builtins.object)\n Random\n SystemRandom\n \n class Random(_random.Random)\n | Random(x=None)\n | \n | Random number generator base class used by bound module functions.\n | \n | Used to instantiate instances of Random to get generators that don't\n | share state.\n | \n | Class Random can also be subclassed if you want to use a different basic\n | generator of your own devising: in that case, override the following\n | methods: random(), seed(), getstate(), and setstate().\n | Optionally, implement a getrandbits() method so that randrange()\n | can cover arbitrarily large ranges.\n | \n | Method resolution order:\n | Random\n | _random.Random\n | builtins.object\n | \n | Methods defined here:\n | \n | __getstate__(self)\n | # Issue 17489: Since __reduce__ was defined to fix #759889 this is no\n | # longer called; we leave it here because it has been here since random was\n | # rewritten back in 2001 and why risk breaking something.\n | \n | __init__(self, x=None)\n | Initialize an instance.\n | \n | Optional argument x controls seeding, as for Random.seed().\n | \n | __reduce__(self)\n | Helper for pickle.\n | \n | __setstate__(self, state)\n | \n | betavariate(self, alpha, beta)\n | Beta distribution.\n | \n | Conditions on the parameters 
are alpha > 0 and beta > 0.\n | Returned values range between 0 and 1.\n | \n | choice(self, seq)\n | Choose a random element from a non-empty sequence.\n | \n | choices(self, population, weights=None, *, cum_weights=None, k=1)\n | Return a k sized list of population elements chosen with replacement.\n | \n | If the relative weights or cumulative weights are not specified,\n | the selections are made with equal probability.\n | \n | expovariate(self, lambd)\n | Exponential distribution.\n | \n | lambd is 1.0 divided by the desired mean. It should be\n | nonzero. (The parameter would be called \"lambda\", but that is\n | a reserved word in Python.) Returned values range from 0 to\n | positive infinity if lambd is positive, and from negative\n | infinity to 0 if lambd is negative.\n | \n | gammavariate(self, alpha, beta)\n | Gamma distribution. Not the gamma function!\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | \n | The probability distribution function is:\n | \n | x ** (alpha - 1) * math.exp(-x / beta)\n | pdf(x) = --------------------------------------\n | math.gamma(alpha) * beta ** alpha\n | \n | gauss(self, mu, sigma)\n | Gaussian distribution.\n | \n | mu is the mean, and sigma is the standard deviation. This is\n | slightly faster than the normalvariate() function.\n | \n | Not thread-safe without a lock around calls.\n | \n | getstate(self)\n | Return internal state; can be passed to setstate() later.\n | \n | lognormvariate(self, mu, sigma)\n | Log normal distribution.\n | \n | If you take the natural logarithm of this distribution, you'll get a\n | normal distribution with mean mu and standard deviation sigma.\n | mu can have any value, and sigma must be greater than zero.\n | \n | normalvariate(self, mu, sigma)\n | Normal distribution.\n | \n | mu is the mean, and sigma is the standard deviation.\n | \n | paretovariate(self, alpha)\n | Pareto distribution. alpha is the shape parameter.\n | \n | randint(self, a, b)\n | Return random integer in range [a, b], including both end points.\n | \n | randrange(self, start, stop=None, step=1, _int=<class 'int'>)\n | Choose a random item from range(start, stop[, step]).\n | \n | This fixes the problem with randint() which includes the\n | endpoint; in Python this is usually not what you want.\n | \n | sample(self, population, k)\n | Chooses k unique random elements from a population sequence or set.\n | \n | Returns a new list containing elements from the population while\n | leaving the original population unchanged. The resulting list is\n | in selection order so that all sub-slices will also be valid random\n | samples. This allows raffle winners (the sample) to be partitioned\n | into grand prize and second place winners (the subslices).\n | \n | Members of the population need not be hashable or unique. If the\n | population contains repeats, then each occurrence is a possible\n | selection in the sample.\n | \n | To choose a sample in a range of integers, use range as an argument.\n | This is especially fast and space efficient for sampling from a\n | large population: sample(range(10000000), 60)\n | \n | seed(self, a=None, version=2)\n | Initialize internal state from hashable object.\n | \n | None or no argument seeds from current time or from an operating\n | system specific randomness source if available.\n | \n | If *a* is an int, all bits are used.\n | \n | For version 2 (the default), all of the bits are used if *a* is a str,\n | bytes, or bytearray. 
For version 1 (provided for reproducing random\n | sequences from older versions of Python), the algorithm for str and\n | bytes generates a narrower range of seeds.\n | \n | setstate(self, state)\n | Restore internal state from object returned by getstate().\n | \n | shuffle(self, x, random=None)\n | Shuffle list x in place, and return None.\n | \n | Optional argument random is a 0-argument function returning a\n | random float in [0.0, 1.0); if it is the default None, the\n | standard random.random will be used.\n | \n | triangular(self, low=0.0, high=1.0, mode=None)\n | Triangular distribution.\n | \n | Continuous distribution bounded by given lower and upper limits,\n | and having a given mode value in-between.\n | \n | http://en.wikipedia.org/wiki/Triangular_distribution\n | \n | uniform(self, a, b)\n | Get a random number in the range [a, b) or [a, b] depending on rounding.\n | \n | vonmisesvariate(self, mu, kappa)\n | Circular data distribution.\n | \n | mu is the mean angle, expressed in radians between 0 and 2*pi, and\n | kappa is the concentration parameter, which must be greater than or\n | equal to zero. If kappa is equal to zero, this distribution reduces\n | to a uniform random angle over the range 0 to 2*pi.\n | \n | weibullvariate(self, alpha, beta)\n | Weibull distribution.\n | \n | alpha is the scale parameter and beta is the shape parameter.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | ----------------------------------------------------------------------\n | Data and other attributes defined here:\n | \n | VERSION = 3\n | \n | ----------------------------------------------------------------------\n | Methods inherited from _random.Random:\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | getrandbits(...)\n | getrandbits(k) -> x. Generates an int with k random bits.\n | \n | random(...)\n | random() -> x in the interval [0, 1).\n | \n | ----------------------------------------------------------------------\n | Static methods inherited from _random.Random:\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. See help(type) for accurate signature.\n \n class SystemRandom(Random)\n | SystemRandom(x=None)\n | \n | Alternate random number generator using sources provided\n | by the operating system (such as /dev/urandom on Unix or\n | CryptGenRandom on Windows).\n | \n | Not available on all systems (see os.urandom() for details).\n | \n | Method resolution order:\n | SystemRandom\n | Random\n | _random.Random\n | builtins.object\n | \n | Methods defined here:\n | \n | getrandbits(self, k)\n | getrandbits(k) -> x. Generates an int with k random bits.\n | \n | getstate = _notimplemented(self, *args, **kwds)\n | \n | random(self)\n | Get the next random number in the range [0.0, 1.0).\n | \n | seed(self, *args, **kwds)\n | Stub method. 
Not used for a system random number generator.\n | \n | setstate = _notimplemented(self, *args, **kwds)\n | \n | ----------------------------------------------------------------------\n | Methods inherited from Random:\n | \n | __getstate__(self)\n | # Issue 17489: Since __reduce__ was defined to fix #759889 this is no\n | # longer called; we leave it here because it has been here since random was\n | # rewritten back in 2001 and why risk breaking something.\n | \n | __init__(self, x=None)\n | Initialize an instance.\n | \n | Optional argument x controls seeding, as for Random.seed().\n | \n | __reduce__(self)\n | Helper for pickle.\n | \n | __setstate__(self, state)\n | \n | betavariate(self, alpha, beta)\n | Beta distribution.\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | Returned values range between 0 and 1.\n | \n | choice(self, seq)\n | Choose a random element from a non-empty sequence.\n | \n | choices(self, population, weights=None, *, cum_weights=None, k=1)\n | Return a k sized list of population elements chosen with replacement.\n | \n | If the relative weights or cumulative weights are not specified,\n | the selections are made with equal probability.\n | \n | expovariate(self, lambd)\n | Exponential distribution.\n | \n | lambd is 1.0 divided by the desired mean. It should be\n | nonzero. (The parameter would be called \"lambda\", but that is\n | a reserved word in Python.) Returned values range from 0 to\n | positive infinity if lambd is positive, and from negative\n | infinity to 0 if lambd is negative.\n | \n | gammavariate(self, alpha, beta)\n | Gamma distribution. Not the gamma function!\n | \n | Conditions on the parameters are alpha > 0 and beta > 0.\n | \n | The probability distribution function is:\n | \n | x ** (alpha - 1) * math.exp(-x / beta)\n | pdf(x) = --------------------------------------\n | math.gamma(alpha) * beta ** alpha\n | \n | gauss(self, mu, sigma)\n | Gaussian distribution.\n | \n | mu is the mean, and sigma is the standard deviation. This is\n | slightly faster than the normalvariate() function.\n | \n | Not thread-safe without a lock around calls.\n | \n | lognormvariate(self, mu, sigma)\n | Log normal distribution.\n | \n | If you take the natural logarithm of this distribution, you'll get a\n | normal distribution with mean mu and standard deviation sigma.\n | mu can have any value, and sigma must be greater than zero.\n | \n | normalvariate(self, mu, sigma)\n | Normal distribution.\n | \n | mu is the mean, and sigma is the standard deviation.\n | \n | paretovariate(self, alpha)\n | Pareto distribution. alpha is the shape parameter.\n | \n | randint(self, a, b)\n | Return random integer in range [a, b], including both end points.\n | \n | randrange(self, start, stop=None, step=1, _int=<class 'int'>)\n | Choose a random item from range(start, stop[, step]).\n | \n | This fixes the problem with randint() which includes the\n | endpoint; in Python this is usually not what you want.\n | \n | sample(self, population, k)\n | Chooses k unique random elements from a population sequence or set.\n | \n | Returns a new list containing elements from the population while\n | leaving the original population unchanged. The resulting list is\n | in selection order so that all sub-slices will also be valid random\n | samples. This allows raffle winners (the sample) to be partitioned\n | into grand prize and second place winners (the subslices).\n | \n | Members of the population need not be hashable or unique. 
If the\n | population contains repeats, then each occurrence is a possible\n | selection in the sample.\n | \n | To choose a sample in a range of integers, use range as an argument.\n | This is especially fast and space efficient for sampling from a\n | large population: sample(range(10000000), 60)\n | \n | shuffle(self, x, random=None)\n | Shuffle list x in place, and return None.\n | \n | Optional argument random is a 0-argument function returning a\n | random float in [0.0, 1.0); if it is the default None, the\n | standard random.random will be used.\n | \n | triangular(self, low=0.0, high=1.0, mode=None)\n | Triangular distribution.\n | \n | Continuous distribution bounded by given lower and upper limits,\n | and having a given mode value in-between.\n | \n | http://en.wikipedia.org/wiki/Triangular_distribution\n | \n | uniform(self, a, b)\n | Get a random number in the range [a, b) or [a, b] depending on rounding.\n | \n | vonmisesvariate(self, mu, kappa)\n | Circular data distribution.\n | \n | mu is the mean angle, expressed in radians between 0 and 2*pi, and\n | kappa is the concentration parameter, which must be greater than or\n | equal to zero. If kappa is equal to zero, this distribution reduces\n | to a uniform random angle over the range 0 to 2*pi.\n | \n | weibullvariate(self, alpha, beta)\n | Weibull distribution.\n | \n | alpha is the scale parameter and beta is the shape parameter.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from Random:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | ----------------------------------------------------------------------\n | Data and other attributes inherited from Random:\n | \n | VERSION = 3\n | \n | ----------------------------------------------------------------------\n | Methods inherited from _random.Random:\n | \n | __getattribute__(self, name, /)\n | Return getattr(self, name).\n | \n | ----------------------------------------------------------------------\n | Static methods inherited from _random.Random:\n | \n | __new__(*args, **kwargs) from builtins.type\n | Create and return a new object. See help(type) for accurate signature.\n\nFUNCTIONS\n betavariate(alpha, beta) method of Random instance\n Beta distribution.\n \n Conditions on the parameters are alpha > 0 and beta > 0.\n Returned values range between 0 and 1.\n \n choice(seq) method of Random instance\n Choose a random element from a non-empty sequence.\n \n choices(population, weights=None, *, cum_weights=None, k=1) method of Random instance\n Return a k sized list of population elements chosen with replacement.\n \n If the relative weights or cumulative weights are not specified,\n the selections are made with equal probability.\n \n expovariate(lambd) method of Random instance\n Exponential distribution.\n \n lambd is 1.0 divided by the desired mean. It should be\n nonzero. (The parameter would be called \"lambda\", but that is\n a reserved word in Python.) Returned values range from 0 to\n positive infinity if lambd is positive, and from negative\n infinity to 0 if lambd is negative.\n \n gammavariate(alpha, beta) method of Random instance\n Gamma distribution. 
Not the gamma function!\n \n Conditions on the parameters are alpha > 0 and beta > 0.\n \n The probability distribution function is:\n \n x ** (alpha - 1) * math.exp(-x / beta)\n pdf(x) = --------------------------------------\n math.gamma(alpha) * beta ** alpha\n \n gauss(mu, sigma) method of Random instance\n Gaussian distribution.\n \n mu is the mean, and sigma is the standard deviation. This is\n slightly faster than the normalvariate() function.\n \n Not thread-safe without a lock around calls.\n \n getrandbits(...) method of Random instance\n getrandbits(k) -> x. Generates an int with k random bits.\n \n getstate() method of Random instance\n Return internal state; can be passed to setstate() later.\n \n lognormvariate(mu, sigma) method of Random instance\n Log normal distribution.\n \n If you take the natural logarithm of this distribution, you'll get a\n normal distribution with mean mu and standard deviation sigma.\n mu can have any value, and sigma must be greater than zero.\n \n normalvariate(mu, sigma) method of Random instance\n Normal distribution.\n \n mu is the mean, and sigma is the standard deviation.\n \n paretovariate(alpha) method of Random instance\n Pareto distribution. alpha is the shape parameter.\n \n randint(a, b) method of Random instance\n Return random integer in range [a, b], including both end points.\n \n random(...) method of Random instance\n random() -> x in the interval [0, 1).\n \n randrange(start, stop=None, step=1, _int=<class 'int'>) method of Random instance\n Choose a random item from range(start, stop[, step]).\n \n This fixes the problem with randint() which includes the\n endpoint; in Python this is usually not what you want.\n \n sample(population, k) method of Random instance\n Chooses k unique random elements from a population sequence or set.\n \n Returns a new list containing elements from the population while\n leaving the original population unchanged. The resulting list is\n in selection order so that all sub-slices will also be valid random\n samples. This allows raffle winners (the sample) to be partitioned\n into grand prize and second place winners (the subslices).\n \n Members of the population need not be hashable or unique. If the\n population contains repeats, then each occurrence is a possible\n selection in the sample.\n \n To choose a sample in a range of integers, use range as an argument.\n This is especially fast and space efficient for sampling from a\n large population: sample(range(10000000), 60)\n \n seed(a=None, version=2) method of Random instance\n Initialize internal state from hashable object.\n \n None or no argument seeds from current time or from an operating\n system specific randomness source if available.\n \n If *a* is an int, all bits are used.\n \n For version 2 (the default), all of the bits are used if *a* is a str,\n bytes, or bytearray. 
For version 1 (provided for reproducing random\n sequences from older versions of Python), the algorithm for str and\n bytes generates a narrower range of seeds.\n \n setstate(state) method of Random instance\n Restore internal state from object returned by getstate().\n \n shuffle(x, random=None) method of Random instance\n Shuffle list x in place, and return None.\n \n Optional argument random is a 0-argument function returning a\n random float in [0.0, 1.0); if it is the default None, the\n standard random.random will be used.\n \n triangular(low=0.0, high=1.0, mode=None) method of Random instance\n Triangular distribution.\n \n Continuous distribution bounded by given lower and upper limits,\n and having a given mode value in-between.\n \n http://en.wikipedia.org/wiki/Triangular_distribution\n \n uniform(a, b) method of Random instance\n Get a random number in the range [a, b) or [a, b] depending on rounding.\n \n vonmisesvariate(mu, kappa) method of Random instance\n Circular data distribution.\n \n mu is the mean angle, expressed in radians between 0 and 2*pi, and\n kappa is the concentration parameter, which must be greater than or\n equal to zero. If kappa is equal to zero, this distribution reduces\n to a uniform random angle over the range 0 to 2*pi.\n \n weibullvariate(alpha, beta) method of Random instance\n Weibull distribution.\n \n alpha is the scale parameter and beta is the shape parameter.\n\nDATA\n __all__ = ['Random', 'seed', 'random', 'uniform', 'randint', 'choice',...\n\nFILE\n c:\\programdata\\anaconda3\\lib\\random.py\n\n\n" ] ], [ [ "Almost all module functions depend on the basic function `random()`, which generates a random float uniformly in the semi-open range `[0.0, 1.0)`. Python uses the Mersenne Twister as the core generator. It produces 53-bit precision floats and has a period of `2**19937-1`. The underlying implementation in C is both fast and threadsafe. The Mersenne Twister is one of the most extensively tested random number generators in existence. However, being completely deterministic, it is not suitable for all purposes, and is completely unsuitable for cryptographic purposes.", "_____no_output_____" ] ], [ [ "random.uniform(0,1)", "_____no_output_____" ] ], [ [ "For integers, there is uniform selection from a range. For sequences, there is uniform selection of a random element. 
Let's play a simple game.", "_____no_output_____" ] ], [ [ "number = random.choice(range(1,11))\nchoice = 0\n\nwhile number != choice:\n choice = int(input('Choose a number between 1 and 10 (inclusive): '))\n\nprint('Congratulations, you have guessed the right number!')", "Choose a number between 1 and 10 (inclusive): 3\nCongratulations, you have guessed the right number!\n" ] ], [ [ "If we used the following line, the number above would be equal to `3`:", "_____no_output_____" ] ], [ [ "random.seed(2) # initialize the random number generator", "_____no_output_____" ] ], [ [ "We can also use NumPy's random sampling package `numpy.random` (https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html):", "_____no_output_____" ] ], [ [ "import numpy as np\n\nnp.random.uniform(0,1)", "_____no_output_____" ], [ "# dir(np.random)", "_____no_output_____" ] ], [ [ "With this package, we could immediately create samples drawn from a specific distribution:", "_____no_output_____" ] ], [ [ "sample = np.random.normal(0,1,100000)\n# sample", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.hist(sample, bins=50, density=True)\nplt.show()", "_____no_output_____" ] ], [ [ "## 5.2 Scipy Statistics", "_____no_output_____" ], [ "This module contains a large number of probability distributions.", "_____no_output_____" ] ], [ [ "import scipy.stats", "_____no_output_____" ], [ "help(scipy.stats)", "Help on package scipy.stats in scipy:\n\nNAME\n scipy.stats - .. _statsrefmanual:\n\nDESCRIPTION\n ==========================================\n Statistical functions (:mod:`scipy.stats`)\n ==========================================\n \n .. currentmodule:: scipy.stats\n \n This module contains a large number of probability distributions as\n well as a growing library of statistical functions.\n \n Each univariate distribution is an instance of a subclass of `rv_continuous`\n (`rv_discrete` for discrete distributions):\n \n .. autosummary::\n :toctree: generated/\n \n rv_continuous\n rv_discrete\n rv_histogram\n \n Continuous distributions\n ========================\n \n .. autosummary::\n :toctree: generated/\n \n alpha -- Alpha\n anglit -- Anglit\n arcsine -- Arcsine\n argus -- Argus\n beta -- Beta\n betaprime -- Beta Prime\n bradford -- Bradford\n burr -- Burr (Type III)\n burr12 -- Burr (Type XII)\n cauchy -- Cauchy\n chi -- Chi\n chi2 -- Chi-squared\n cosine -- Cosine\n crystalball -- Crystalball\n dgamma -- Double Gamma\n dweibull -- Double Weibull\n erlang -- Erlang\n expon -- Exponential\n exponnorm -- Exponentially Modified Normal\n exponweib -- Exponentiated Weibull\n exponpow -- Exponential Power\n f -- F (Snecdor F)\n fatiguelife -- Fatigue Life (Birnbaum-Saunders)\n fisk -- Fisk\n foldcauchy -- Folded Cauchy\n foldnorm -- Folded Normal\n frechet_r -- Deprecated. Alias for weibull_min\n frechet_l -- Deprecated. 
Alias for weibull_max
    [... remainder of the help(scipy.stats) output truncated for brevity. The full listing continues with the rest of the continuous distributions, the multivariate and discrete distributions, summary and frequency statistics, correlation functions, statistical tests, transformations, statistical distances, circular statistics, kernel density estimation (gaussian_kde), and the rv_continuous / rv_discrete / rv_histogram base classes. See the scipy.stats reference documentation for the complete text. ...]
versionadded:: 0.19.0\n | \n | Examples\n | --------\n | \n | Create a scipy.stats distribution from a numpy histogram\n | \n | >>> import scipy.stats\n | >>> import numpy as np\n | >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)\n | >>> hist = np.histogram(data, bins=100)\n | >>> hist_dist = scipy.stats.rv_histogram(hist)\n | \n | Behaves like an ordinary scipy rv_continuous distribution\n | \n | >>> hist_dist.pdf(1.0)\n | 0.20538577847618705\n | >>> hist_dist.cdf(2.0)\n | 0.90818568543056499\n | \n | PDF is zero above (below) the highest (lowest) bin of the histogram,\n | defined by the max (min) of the original dataset\n | \n | >>> hist_dist.pdf(np.max(data))\n | 0.0\n | >>> hist_dist.cdf(np.max(data))\n | 1.0\n | >>> hist_dist.pdf(np.min(data))\n | 7.7591907244498314e-05\n | >>> hist_dist.cdf(np.min(data))\n | 0.0\n | \n | PDF and CDF follow the histogram\n | \n | >>> import matplotlib.pyplot as plt\n | >>> X = np.linspace(-5.0, 5.0, 100)\n | >>> plt.title(\"PDF from Template\")\n | >>> plt.hist(data, density=True, bins=100)\n | >>> plt.plot(X, hist_dist.pdf(X), label='PDF')\n | >>> plt.plot(X, hist_dist.cdf(X), label='CDF')\n | >>> plt.show()\n | \n | Method resolution order:\n | rv_histogram\n | scipy.stats._distn_infrastructure.rv_continuous\n | scipy.stats._distn_infrastructure.rv_generic\n | builtins.object\n | \n | Methods defined here:\n | \n | __init__(self, histogram, *args, **kwargs)\n | Create a new distribution using the given histogram\n | \n | Parameters\n | ----------\n | histogram : tuple of array_like\n | Tuple containing two array_like objects\n | The first containing the content of n bins\n | The second containing the (n+1) bin boundaries\n | In particular the return value np.histogram is accepted\n | \n | ----------------------------------------------------------------------\n | Methods inherited from scipy.stats._distn_infrastructure.rv_continuous:\n | \n | cdf(self, x, *args, **kwds)\n | Cumulative distribution function of the given RV.\n | \n | Parameters\n | ----------\n | x : array_like\n | quantiles\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | cdf : ndarray\n | Cumulative distribution function evaluated at `x`\n | \n | expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)\n | Calculate expected value of a function with respect to the\n | distribution by numerical integration.\n | \n | The expected value of a function ``f(x)`` with respect to a\n | distribution ``dist`` is defined as::\n | \n | ub\n | E[f(x)] = Integral(f(x) * dist.pdf(x)),\n | lb\n | \n | where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``\n | distribution. If the bounds ``lb`` and ``ub`` correspond to the\n | support of the distribution, e.g. ``[-inf, inf]`` in the default\n | case, then the integral is the unrestricted expectation of ``f(x)``.\n | Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``\n | outside a finite interval in which case the expectation is\n | calculated within the finite range ``[lb, ub]``.\n | \n | Parameters\n | ----------\n | func : callable, optional\n | Function for which integral is calculated. 
Takes only one argument.\n | The default is the identity mapping f(x) = x.\n | args : tuple, optional\n | Shape parameters of the distribution.\n | loc : float, optional\n | Location parameter (default=0).\n | scale : float, optional\n | Scale parameter (default=1).\n | lb, ub : scalar, optional\n | Lower and upper bound for integration. Default is set to the\n | support of the distribution.\n | conditional : bool, optional\n | If True, the integral is corrected by the conditional probability\n | of the integration interval. The return value is the expectation\n | of the function, conditional on being in the given interval.\n | Default is False.\n | \n | Additional keyword arguments are passed to the integration routine.\n | \n | Returns\n | -------\n | expect : float\n | The calculated expected value.\n | \n | Notes\n | -----\n | The integration behavior of this function is inherited from\n | `scipy.integrate.quad`. Neither this function nor\n | `scipy.integrate.quad` can verify whether the integral exists or is\n | finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and\n | ``cauchy(0).expect()`` returns ``0.0``.\n | \n | Examples\n | --------\n | \n | To understand the effect of the bounds of integration consider\n | \n | >>> from scipy.stats import expon\n | >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)\n | 0.6321205588285578\n | \n | This is close to\n | \n | >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)\n | 0.6321205588285577\n | \n | If ``conditional=True``\n | \n | >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)\n | 1.0000000000000002\n | \n | The slight deviation from 1 is due to numerical integration.\n | \n | fit(self, data, *args, **kwds)\n | Return MLEs for shape (if applicable), location, and scale\n | parameters from data.\n | \n | MLE stands for Maximum Likelihood Estimate. Starting estimates for\n | the fit are given by input arguments; for any arguments not provided\n | with starting estimates, ``self._fitstart(data)`` is called to generate\n | such.\n | \n | One can hold some parameters fixed to specific values by passing in\n | keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)\n | and ``floc`` and ``fscale`` (for location and scale parameters,\n | respectively).\n | \n | Parameters\n | ----------\n | data : array_like\n | Data to use in calculating the MLEs.\n | args : floats, optional\n | Starting value(s) for any shape-characterizing arguments (those not\n | provided will be determined by a call to ``_fitstart(data)``).\n | No default value.\n | kwds : floats, optional\n | Starting values for the location and scale parameters; no default.\n | Special keyword arguments are recognized as holding certain\n | parameters fixed:\n | \n | - f0...fn : hold respective shape parameters fixed.\n | Alternatively, shape parameters to fix can be specified by name.\n | For example, if ``self.shapes == \"a, b\"``, ``fa``and ``fix_a``\n | are equivalent to ``f0``, and ``fb`` and ``fix_b`` are\n | equivalent to ``f1``.\n | \n | - floc : hold location parameter fixed to specified value.\n | \n | - fscale : hold scale parameter fixed to specified value.\n | \n | - optimizer : The optimizer to use. 
The optimizer must take ``func``,\n | and starting position as the first two arguments,\n | plus ``args`` (for extra arguments to pass to the\n | function to be optimized) and ``disp=0`` to suppress\n | output as keyword arguments.\n | \n | Returns\n | -------\n | mle_tuple : tuple of floats\n | MLEs for any shape parameters (if applicable), followed by those\n | for location and scale. For most random variables, shape statistics\n | will be returned, but there are exceptions (e.g. ``norm``).\n | \n | Notes\n | -----\n | This fit is computed by maximizing a log-likelihood function, with\n | penalty applied for samples outside of range of the distribution. The\n | returned answer is not guaranteed to be the globally optimal MLE, it\n | may only be locally optimal, or the optimization may fail altogether.\n | If the data contain any of np.nan, np.inf, or -np.inf, the fit routine\n | will throw a RuntimeError.\n | \n | Examples\n | --------\n | \n | Generate some data to fit: draw random variates from the `beta`\n | distribution\n | \n | >>> from scipy.stats import beta\n | >>> a, b = 1., 2.\n | >>> x = beta.rvs(a, b, size=1000)\n | \n | Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):\n | \n | >>> a1, b1, loc1, scale1 = beta.fit(x)\n | \n | We can also use some prior knowledge about the dataset: let's keep\n | ``loc`` and ``scale`` fixed:\n | \n | >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)\n | >>> loc1, scale1\n | (0, 1)\n | \n | We can also keep shape parameters fixed by using ``f``-keywords. To\n | keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,\n | equivalently, ``fa=1``:\n | \n | >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)\n | >>> a1\n | 1\n | \n | Not all distributions return estimates for the shape parameters.\n | ``norm`` for example just returns estimates for location and scale:\n | \n | >>> from scipy.stats import norm\n | >>> x = norm.rvs(a, b, size=1000, random_state=123)\n | >>> loc1, scale1 = norm.fit(x)\n | >>> loc1, scale1\n | (0.92087172783841631, 2.0015750750324668)\n | \n | fit_loc_scale(self, data, *args)\n | Estimate loc and scale parameters from data using 1st and 2nd moments.\n | \n | Parameters\n | ----------\n | data : array_like\n | Data to fit.\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information).\n | \n | Returns\n | -------\n | Lhat : float\n | Estimated location parameter for the data.\n | Shat : float\n | Estimated scale parameter for the data.\n | \n | isf(self, q, *args, **kwds)\n | Inverse survival function (inverse of `sf`) at q of the given RV.\n | \n | Parameters\n | ----------\n | q : array_like\n | upper tail probability\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | x : ndarray or scalar\n | Quantile corresponding to the upper tail probability q.\n | \n | logcdf(self, x, *args, **kwds)\n | Log of the cumulative distribution function at x of the given RV.\n | \n | Parameters\n | ----------\n | x : array_like\n | quantiles\n | arg1, arg2, arg3,... 
: array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | logcdf : array_like\n | Log of the cumulative distribution function evaluated at x\n | \n | logpdf(self, x, *args, **kwds)\n | Log of the probability density function at x of the given RV.\n | \n | This uses a more numerically accurate calculation if available.\n | \n | Parameters\n | ----------\n | x : array_like\n | quantiles\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | logpdf : array_like\n | Log of the probability density function evaluated at x\n | \n | logsf(self, x, *args, **kwds)\n | Log of the survival function of the given RV.\n | \n | Returns the log of the \"survival function,\" defined as (1 - `cdf`),\n | evaluated at `x`.\n | \n | Parameters\n | ----------\n | x : array_like\n | quantiles\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | logsf : ndarray\n | Log of the survival function evaluated at `x`.\n | \n | nnlf(self, theta, x)\n | Return negative loglikelihood function.\n | \n | Notes\n | -----\n | This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the\n | parameters (including loc and scale).\n | \n | pdf(self, x, *args, **kwds)\n | Probability density function at x of the given RV.\n | \n | Parameters\n | ----------\n | x : array_like\n | quantiles\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | pdf : ndarray\n | Probability density function evaluated at x\n | \n | ppf(self, q, *args, **kwds)\n | Percent point function (inverse of `cdf`) at q of the given RV.\n | \n | Parameters\n | ----------\n | q : array_like\n | lower tail probability\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | x : array_like\n | quantile corresponding to the lower tail probability q.\n | \n | sf(self, x, *args, **kwds)\n | Survival function (1 - `cdf`) at x of the given RV.\n | \n | Parameters\n | ----------\n | x : array_like\n | quantiles\n | arg1, arg2, arg3,... 
: array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | sf : array_like\n | Survival function evaluated at x\n | \n | ----------------------------------------------------------------------\n | Methods inherited from scipy.stats._distn_infrastructure.rv_generic:\n | \n | __call__(self, *args, **kwds)\n | Freeze the distribution for the given arguments.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution. Should include all\n | the non-optional arguments, may include ``loc`` and ``scale``.\n | \n | Returns\n | -------\n | rv_frozen : rv_frozen instance\n | The frozen distribution.\n | \n | __getstate__(self)\n | \n | __setstate__(self, state)\n | \n | entropy(self, *args, **kwds)\n | Differential entropy of the RV.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information).\n | loc : array_like, optional\n | Location parameter (default=0).\n | scale : array_like, optional (continuous distributions only).\n | Scale parameter (default=1).\n | \n | Notes\n | -----\n | Entropy is defined base `e`:\n | \n | >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))\n | >>> np.allclose(drv.entropy(), np.log(2.0))\n | True\n | \n | freeze(self, *args, **kwds)\n | Freeze the distribution for the given arguments.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution. Should include all\n | the non-optional arguments, may include ``loc`` and ``scale``.\n | \n | Returns\n | -------\n | rv_frozen : rv_frozen instance\n | The frozen distribution.\n | \n | interval(self, alpha, *args, **kwds)\n | Confidence interval with equal areas around the median.\n | \n | Parameters\n | ----------\n | alpha : array_like of float\n | Probability that an rv will be drawn from the returned range.\n | Each value should be in the range [0, 1].\n | arg1, arg2, ... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information).\n | loc : array_like, optional\n | location parameter, Default is 0.\n | scale : array_like, optional\n | scale parameter, Default is 1.\n | \n | Returns\n | -------\n | a, b : ndarray of float\n | end-points of range that contain ``100 * alpha %`` of the rv's\n | possible values.\n | \n | mean(self, *args, **kwds)\n | Mean of the distribution.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | mean : float\n | the mean of the distribution\n | \n | median(self, *args, **kwds)\n | Median of the distribution.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... 
: array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | Location parameter, Default is 0.\n | scale : array_like, optional\n | Scale parameter, Default is 1.\n | \n | Returns\n | -------\n | median : float\n | The median of the distribution.\n | \n | See Also\n | --------\n | rv_discrete.ppf\n | Inverse of the CDF\n | \n | moment(self, n, *args, **kwds)\n | n-th order non-central moment of distribution.\n | \n | Parameters\n | ----------\n | n : int, n >= 1\n | Order of moment.\n | arg1, arg2, arg3,... : float\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information).\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | rvs(self, *args, **kwds)\n | Random variates of given type.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information).\n | loc : array_like, optional\n | Location parameter (default=0).\n | scale : array_like, optional\n | Scale parameter (default=1).\n | size : int or tuple of ints, optional\n | Defining number of random variates (default is 1).\n | random_state : None or int or ``np.random.RandomState`` instance, optional\n | If int or RandomState, use it for drawing the random variates.\n | If None, rely on ``self.random_state``.\n | Default is None.\n | \n | Returns\n | -------\n | rvs : ndarray or scalar\n | Random variates of given `size`.\n | \n | stats(self, *args, **kwds)\n | Some statistics of the given RV.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional (continuous RVs only)\n | scale parameter (default=1)\n | moments : str, optional\n | composed of letters ['mvsk'] defining which moments to compute:\n | 'm' = mean,\n | 'v' = variance,\n | 's' = (Fisher's) skew,\n | 'k' = (Fisher's) kurtosis.\n | (default is 'mv')\n | \n | Returns\n | -------\n | stats : sequence\n | of requested moments.\n | \n | std(self, *args, **kwds)\n | Standard deviation of the distribution.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | std : float\n | standard deviation of the distribution\n | \n | support(self, *args, **kwargs)\n | Return the support of the distribution.\n | \n | Parameters\n | ----------\n | arg1, arg2, ... : array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information).\n | loc : array_like, optional\n | location parameter, Default is 0.\n | scale : array_like, optional\n | scale parameter, Default is 1.\n | Returns\n | -------\n | a, b : float\n | end-points of the distribution's support.\n | \n | var(self, *args, **kwds)\n | Variance of the distribution.\n | \n | Parameters\n | ----------\n | arg1, arg2, arg3,... 
: array_like\n | The shape parameter(s) for the distribution (see docstring of the\n | instance object for more information)\n | loc : array_like, optional\n | location parameter (default=0)\n | scale : array_like, optional\n | scale parameter (default=1)\n | \n | Returns\n | -------\n | var : float\n | the variance of the distribution\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from scipy.stats._distn_infrastructure.rv_generic:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | random_state\n | Get or set the RandomState object for generating random variates.\n | \n | This can be either None or an existing RandomState object.\n | \n | If None (or np.random), use the RandomState singleton used by np.random.\n | If already a RandomState instance, use it.\n | If an int, use a new RandomState instance seeded with seed.\n\nFUNCTIONS\n anderson(x, dist='norm')\n Anderson-Darling test for data coming from a particular distribution.\n \n The Anderson-Darling tests the null hypothesis that a sample is\n drawn from a population that follows a particular distribution.\n For the Anderson-Darling test, the critical values depend on\n which distribution is being tested against. This function works\n for normal, exponential, logistic, or Gumbel (Extreme Value\n Type I) distributions.\n \n Parameters\n ----------\n x : array_like\n Array of sample data.\n dist : {'norm','expon','logistic','gumbel','gumbel_l', gumbel_r',\n 'extreme1'}, optional\n the type of distribution to test against. The default is 'norm'\n and 'extreme1', 'gumbel_l' and 'gumbel' are synonyms.\n \n Returns\n -------\n statistic : float\n The Anderson-Darling test statistic.\n critical_values : list\n The critical values for this distribution.\n significance_level : list\n The significance levels for the corresponding critical values\n in percents. The function returns critical values for a\n differing set of significance levels depending on the\n distribution that is being tested against.\n \n See Also\n --------\n kstest : The Kolmogorov-Smirnov test for goodness-of-fit.\n \n Notes\n -----\n Critical values provided are for the following significance levels:\n \n normal/exponenential\n 15%, 10%, 5%, 2.5%, 1%\n logistic\n 25%, 10%, 5%, 2.5%, 1%, 0.5%\n Gumbel\n 25%, 10%, 5%, 2.5%, 1%\n \n If the returned statistic is larger than these critical values then\n for the corresponding significance level, the null hypothesis that\n the data come from the chosen distribution can be rejected.\n The returned statistic is referred to as 'A2' in the references.\n \n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm\n .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and\n Some Comparisons, Journal of the American Statistical Association,\n Vol. 69, pp. 730-737.\n .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit\n Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,\n pp. 357-369.\n .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value\n Distribution, Biometrika, Vol. 64, pp. 583-588.\n .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference\n to Tests for Exponentiality , Technical Report No. 262,\n Department of Statistics, Stanford University, Stanford, CA.\n .. [6] Stephens, M. A. (1979). 
Tests of Fit for the Logistic Distribution\n Based on the Empirical Distribution Function, Biometrika, Vol. 66,\n pp. 591-595.\n \n anderson_ksamp(samples, midrank=True)\n The Anderson-Darling test for k-samples.\n \n The k-sample Anderson-Darling test is a modification of the\n one-sample Anderson-Darling test. It tests the null hypothesis\n that k-samples are drawn from the same population without having\n to specify the distribution function of that population. The\n critical values depend on the number of samples.\n \n Parameters\n ----------\n samples : sequence of 1-D array_like\n Array of sample data in arrays.\n midrank : bool, optional\n Type of Anderson-Darling test which is computed. Default\n (True) is the midrank test applicable to continuous and\n discrete populations. If False, the right side empirical\n distribution is used.\n \n Returns\n -------\n statistic : float\n Normalized k-sample Anderson-Darling test statistic.\n critical_values : array\n The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%,\n 0.5%, 0.1%.\n significance_level : float\n An approximate significance level at which the null hypothesis for the\n provided samples can be rejected. The value is floored / capped at\n 0.1% / 25%.\n \n Raises\n ------\n ValueError\n If less than 2 samples are provided, a sample is empty, or no\n distinct observations are in the samples.\n \n See Also\n --------\n ks_2samp : 2 sample Kolmogorov-Smirnov test\n anderson : 1 sample Anderson-Darling test\n \n Notes\n -----\n [1]_ defines three versions of the k-sample Anderson-Darling test:\n one for continuous distributions and two for discrete\n distributions, in which ties between samples may occur. The\n default of this routine is to compute the version based on the\n midrank empirical distribution function. This test is applicable\n to continuous and discrete data. If midrank is set to False, the\n right side empirical distribution is used for a test for discrete\n data. According to [1]_, the two discrete test statistics differ\n only slightly if a few collisions due to round-off errors occur in\n the test not adjusted for ties between samples.\n \n The critical values corresponding to the significance levels from 0.01\n to 0.25 are taken from [1]_. p-values are floored / capped\n at 0.1% / 25%. Since the range of critical values might be extended in\n future releases, it is recommended not to test ``p == 0.25``, but rather\n ``p >= 0.25`` (analogously for the lower bound).\n \n .. versionadded:: 0.14.0\n \n References\n ----------\n .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample\n Anderson-Darling Tests, Journal of the American Statistical\n Association, Vol. 82, pp. 918-924.\n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(314159)\n \n The null hypothesis that the two random samples come from the same\n distribution can be rejected at the 5% level because the returned\n test value is greater than the critical value for 5% (1.961) but\n not at the 2.5% level. The interpolation gives an approximate\n significance level of 3.2%:\n \n >>> stats.anderson_ksamp([np.random.normal(size=50),\n ... np.random.normal(loc=0.5, size=30)])\n (2.4615796189876105,\n array([ 0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]),\n 0.03176687568842282)\n \n \n The null hypothesis cannot be rejected for three samples from an\n identical distribution. 
The reported p-value (25%) has been capped and\n may not be very accurate (since it corresponds to the value 0.449\n whereas the statistic is -0.731):\n \n >>> stats.anderson_ksamp([np.random.normal(size=50),\n ... np.random.normal(size=30), np.random.normal(size=20)])\n (-0.73091722665244196,\n array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856,\n 4.07210043, 5.56419101]),\n 0.25)\n \n ansari(x, y)\n Perform the Ansari-Bradley test for equal scale parameters.\n \n The Ansari-Bradley test is a non-parametric test for the equality\n of the scale parameter of the distributions from which two\n samples were drawn.\n \n Parameters\n ----------\n x, y : array_like\n Arrays of sample data.\n \n Returns\n -------\n statistic : float\n The Ansari-Bradley test statistic.\n pvalue : float\n The p-value of the hypothesis test.\n \n See Also\n --------\n fligner : A non-parametric test for the equality of k variances\n mood : A non-parametric test for the equality of two scale parameters\n \n Notes\n -----\n The p-value given is exact when the sample sizes are both less than\n 55 and there are no ties, otherwise a normal approximation for the\n p-value is used.\n \n References\n ----------\n .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical\n methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.\n \n bartlett(*args)\n Perform Bartlett's test for equal variances.\n \n Bartlett's test tests the null hypothesis that all input samples\n are from populations with equal variances. For samples\n from significantly non-normal populations, Levene's test\n `levene` is more robust.\n \n Parameters\n ----------\n sample1, sample2,... : array_like\n arrays of sample data. Only 1d arrays are accepted, they may have\n different lengths.\n \n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The p-value of the test.\n \n See Also\n --------\n fligner : A non-parametric test for the equality of k variances\n levene : A robust parametric test for equality of k variances\n \n Notes\n -----\n Conover et al. (1981) examine many of the existing parametric and\n nonparametric tests by extensive simulations and they conclude that the\n tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be\n superior in terms of robustness of departures from normality and power\n ([3]_).\n \n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n \n .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical\n Methods, Eighth Edition, Iowa State University Press.\n \n .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and\n Hypothesis Testing based on Quadratic Inference Function. Technical\n Report #99-03, Center for Likelihood Studies, Pennsylvania State\n University.\n \n .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical\n Tests. Proceedings of the Royal Society of London. Series A,\n Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.\n \n bayes_mvs(data, alpha=0.9)\n Bayesian confidence intervals for the mean, var, and std.\n \n Parameters\n ----------\n data : array_like\n Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.\n Requires 2 or more data points.\n alpha : float, optional\n Probability that the returned confidence interval contains\n the true parameter.\n \n Returns\n -------\n mean_cntr, var_cntr, std_cntr : tuple\n The three results are for the mean, variance and standard deviation,\n respectively. 
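The hypothesis tests documented above (anderson, anderson_ksamp, ansari, bartlett) all follow the same statistic-plus-critical-values or statistic-plus-p-value pattern; a small sketch on synthetic samples, where the data and seed are arbitrary and not part of the original output.

import numpy as np
from scipy import stats

rng = np.random.RandomState(12345)
x = rng.normal(loc=0.0, scale=1.0, size=100)
y = rng.normal(loc=0.0, scale=2.0, size=100)

res = stats.anderson(x, dist='norm')        # one-sample test against normality
print(res.statistic, res.critical_values)

stat, p = stats.ansari(x, y)                # equality of scale parameters
print('Ansari-Bradley p =', p)

stat, p = stats.bartlett(x, y)              # equality of variances
print('Bartlett p =', p)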
Each result is a tuple of the form::\n \n (center, (lower, upper))\n \n with `center` the mean of the conditional pdf of the value given the\n data, and `(lower, upper)` a confidence interval, centered on the\n median, containing the estimate to a probability ``alpha``.\n \n See Also\n --------\n mvsdist\n \n Notes\n -----\n Each tuple of mean, variance, and standard deviation estimates represent\n the (center, (lower, upper)) with center the mean of the conditional pdf\n of the value given the data and (lower, upper) is a confidence interval\n centered on the median, containing the estimate to a probability\n ``alpha``.\n \n Converts data to 1-D and assumes all data has the same mean and variance.\n Uses Jeffrey's prior for variance and std.\n \n Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``\n \n References\n ----------\n T.E. Oliphant, \"A Bayesian perspective on estimating mean, variance, and\n standard-deviation from data\", https://scholarsarchive.byu.edu/facpub/278,\n 2006.\n \n Examples\n --------\n First a basic example to demonstrate the outputs:\n \n >>> from scipy import stats\n >>> data = [6, 9, 12, 7, 8, 8, 13]\n >>> mean, var, std = stats.bayes_mvs(data)\n >>> mean\n Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))\n >>> var\n Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))\n >>> std\n Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))\n \n Now we generate some normally distributed random data, and get estimates of\n mean and standard deviation with 95% confidence intervals for those\n estimates:\n \n >>> n_samples = 100000\n >>> data = stats.norm.rvs(size=n_samples)\n >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)\n \n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.hist(data, bins=100, density=True, label='Histogram of data')\n >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')\n >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',\n ... alpha=0.2, label=r'Estimated mean (95% limits)')\n >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')\n >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,\n ... label=r'Estimated scale (95% limits)')\n \n >>> ax.legend(fontsize=10)\n >>> ax.set_xlim([-4, 4])\n >>> ax.set_ylim([0, 0.5])\n >>> plt.show()\n \n binned_statistic(x, values, statistic='mean', bins=10, range=None)\n Compute a binned statistic for one or more sets of data.\n \n This is a generalization of a histogram function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n \n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a set of sequences - each the same shape as\n `x`. 
If `values` is a set of sequences, the statistic will be computed\n on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n \n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'std' : compute the standard deviation within each bin. This\n is implicitly calculated with ddof=0.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * 'min' : compute the minimum of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'max' : compute the maximum of values for point within each bin.\n Empty bins will be represented by NaN.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n \n bins : int or sequence of scalars, optional\n If `bins` is an int, it defines the number of equal-width bins in the\n given range (10 by default). If `bins` is a sequence, it defines the\n bin edges, including the rightmost edge, allowing for non-uniform bin\n widths. Values in `x` that are smaller than lowest bin edge are\n assigned to bin number 0, values beyond the highest bin are assigned to\n ``bins[-1]``. If the bin edges are specified, the number of bins will\n be, (nx = len(bins)-1).\n range : (float, float) or [(float, float)], optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(x.min(), x.max())``. Values outside the range are\n ignored.\n \n Returns\n -------\n statistic : array\n The values of the selected statistic in each bin.\n bin_edges : array of dtype float\n Return the bin edges ``(length(statistic)+1)``.\n binnumber: 1-D ndarray of ints\n Indices of the bins (corresponding to `bin_edges`) in which each value\n of `x` belongs. Same length as `values`. A binnumber of `i` means the\n corresponding value is between (bin_edges[i-1], bin_edges[i]).\n \n See Also\n --------\n numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd\n \n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n \n .. versionadded:: 0.11.0\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n First some basic examples:\n \n Create two evenly spaced bins in the range of the given sample, and sum the\n corresponding values in each of those bins:\n \n >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]\n >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)\n BinnedStatisticResult(statistic=array([4. , 4.5]), bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))\n \n Multiple arrays of values can also be passed. 
The statistic is calculated\n on each set independently:\n \n >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]\n >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)\n BinnedStatisticResult(statistic=array([[4. , 4.5],\n [8. , 9. ]]), bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))\n \n >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',\n ... bins=3)\n BinnedStatisticResult(statistic=array([1., 2., 4.]), bin_edges=array([1., 2., 3., 4.]), binnumber=array([1, 2, 1, 2, 3]))\n \n As a second example, we now generate some random data of sailing boat speed\n as a function of wind speed, and then determine how fast our boat is for\n certain wind speeds:\n \n >>> windspeed = 8 * np.random.rand(500)\n >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)\n >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,\n ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])\n >>> plt.figure()\n >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')\n >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,\n ... label='binned statistic of data')\n >>> plt.legend()\n \n Now we can use ``binnumber`` to select all datapoints with a windspeed\n below 1:\n \n >>> low_boatspeed = boatspeed[binnumber == 0]\n \n As a final example, we will use ``bin_edges`` and ``binnumber`` to make a\n plot of a distribution that shows the mean and distribution around that\n mean per bin, on top of a regular histogram and the probability\n distribution function:\n \n >>> x = np.linspace(0, 5, num=500)\n >>> x_pdf = stats.maxwell.pdf(x)\n >>> samples = stats.maxwell.rvs(size=10000)\n \n >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,\n ... statistic='mean', bins=25)\n >>> bin_width = (bin_edges[1] - bin_edges[0])\n >>> bin_centers = bin_edges[1:] - bin_width/2\n \n >>> plt.figure()\n >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',\n ... alpha=0.2, label='histogram of data')\n >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')\n >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,\n ... label='binned statistic of data')\n >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)\n >>> plt.legend(fontsize=10)\n >>> plt.show()\n \n binned_statistic_2d(x, y, values, statistic='mean', bins=10, range=None, expand_binnumbers=False)\n Compute a bidimensional binned statistic for one or more sets of data.\n \n This is a generalization of a histogram2d function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values (or set of values) within each bin.\n \n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned along the first dimension.\n y : (N,) array_like\n A sequence of values to be binned along the second dimension.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `x`, or a list of sequences - each with the same\n shape as `x`. 
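A short sketch of binned_statistic as documented above, computing per-bin means of a noisy signal; the data are synthetic and purely illustrative.

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
x = np.linspace(0.0, 10.0, 200)
y = np.sin(x) + 0.1 * rng.randn(200)

# Mean of y inside 5 equal-width bins spanning the range of x.
means, edges, binnumber = stats.binned_statistic(x, y, statistic='mean', bins=5)
print(means)           # one mean per bin (NaN for empty bins)
print(edges)           # 6 edges delimit the 5 bins
print(binnumber[:10])  # bin index assigned to each x value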
If `values` is such a list, the statistic will be\n computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n \n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'std' : compute the standard deviation within each bin. This\n is implicitly calculated with ddof=0.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * 'min' : compute the minimum of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'max' : compute the maximum of values for point within each bin.\n Empty bins will be represented by NaN.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n \n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n \n * the number of bins for the two dimensions (nx = ny = bins),\n * the number of bins in each dimension (nx, ny = bins),\n * the bin edges for the two dimensions (x_edge = y_edge = bins),\n * the bin edges in each dimension (x_edge, y_edge = bins).\n \n If the bin edges are specified, the number of bins will be,\n (nx = len(x_edge)-1, ny = len(y_edge)-1).\n \n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be\n considered outliers and not tallied in the histogram.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (2,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section.\n \n .. versionadded:: 0.17.0\n \n Returns\n -------\n statistic : (nx, ny) ndarray\n The values of the selected statistic in each two-dimensional bin.\n x_edge : (nx + 1) ndarray\n The bin edges along the first dimension.\n y_edge : (ny + 1) ndarray\n The bin edges along the second dimension.\n binnumber : (N,) array of ints or (2,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n \n \n See Also\n --------\n numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd\n \n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,\n but excluding 2) and the second ``[2, 3)``. The last bin, however, is\n ``[3, 4]``, which *includes* 4.\n \n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. 
If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (2,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.\n \n .. versionadded:: 0.11.0\n \n Examples\n --------\n >>> from scipy import stats\n \n Calculate the counts with explicit bin-edges:\n \n >>> x = [0.1, 0.1, 0.1, 0.6]\n >>> y = [2.1, 2.6, 2.1, 2.1]\n >>> binx = [0.0, 0.5, 1.0]\n >>> biny = [2.0, 2.5, 3.0]\n >>> ret = stats.binned_statistic_2d(x, y, x, 'count', bins=[binx,biny])\n >>> ret.statistic\n array([[2., 1.],\n [1., 0.]])\n \n The bin in which each sample is placed is given by the `binnumber`\n returned parameter. By default, these are the linearized bin indices:\n \n >>> ret.binnumber\n array([5, 6, 5, 9])\n \n The bin indices can also be expanded into separate entries for each\n dimension using the `expand_binnumbers` parameter:\n \n >>> ret = stats.binned_statistic_2d(x, y, x, 'count', bins=[binx,biny],\n ... expand_binnumbers=True)\n >>> ret.binnumber\n array([[1, 1, 1, 2],\n [1, 2, 1, 1]])\n \n Which shows that the first three elements belong in the xbin 1, and the\n fourth into xbin 2; and so on for y.\n \n binned_statistic_dd(sample, values, statistic='mean', bins=10, range=None, expand_binnumbers=False, binned_statistic_result=None)\n Compute a multidimensional binned statistic for a set of data.\n \n This is a generalization of a histogramdd function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values within each bin.\n \n Parameters\n ----------\n sample : array_like\n Data to histogram passed as a sequence of N arrays of length D, or\n as an (N,D) array.\n values : (N,) array_like or list of (N,) array_like\n The data on which the statistic will be computed. This must be\n the same shape as `sample`, or a list of sequences - each with the\n same shape as `sample`. If `values` is such a list, the statistic\n will be computed on each independently.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n \n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * 'std' : compute the standard deviation within each bin. This\n is implicitly calculated with ddof=0.\n * 'min' : compute the minimum of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'max' : compute the maximum of values for point within each bin.\n Empty bins will be represented by NaN.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. 
Empty bins will be\n represented by function([]), or NaN if this returns an error.\n \n bins : sequence or positive int, optional\n The bin specification must be in one of the following forms:\n \n * A sequence of arrays describing the bin edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... = bins).\n * The number of bins for all dimensions (nx = ny = ... = bins).\n range : sequence, optional\n A sequence of lower and upper bin edges to be used if the edges are\n not given explicitly in `bins`. Defaults to the minimum and maximum\n values along each dimension.\n expand_binnumbers : bool, optional\n 'False' (default): the returned `binnumber` is a shape (N,) array of\n linearized bin indices.\n 'True': the returned `binnumber` is 'unraveled' into a shape (D,N)\n ndarray, where each row gives the bin numbers in the corresponding\n dimension.\n See the `binnumber` returned value, and the `Examples` section of\n `binned_statistic_2d`.\n binned_statistic_result : binnedStatisticddResult\n Result of a previous call to the function in order to reuse bin edges\n and bin numbers with new values and/or a different statistic.\n To reuse bin numbers, `expand_binnumbers` must have been set to False\n (the default)\n \n .. versionadded:: 0.17.0\n \n Returns\n -------\n statistic : ndarray, shape(nx1, nx2, nx3,...)\n The values of the selected statistic in each two-dimensional bin.\n bin_edges : list of ndarrays\n A list of D arrays describing the (nxi + 1) bin edges for each\n dimension.\n binnumber : (N,) array of ints or (D,N) ndarray of ints\n This assigns to each element of `sample` an integer that represents the\n bin in which this observation falls. The representation depends on the\n `expand_binnumbers` argument. See `Notes` for details.\n \n \n See Also\n --------\n numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d\n \n Notes\n -----\n Binedges:\n All but the last (righthand-most) bin is half-open in each dimension. In\n other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is\n ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The\n last bin, however, is ``[3, 4]``, which *includes* 4.\n \n `binnumber`:\n This returned argument assigns to each element of `sample` an integer that\n represents the bin in which it belongs. The representation depends on the\n `expand_binnumbers` argument. If 'False' (default): The returned\n `binnumber` is a shape (N,) array of linearized indices mapping each\n element of `sample` to its corresponding bin (using row-major ordering).\n If 'True': The returned `binnumber` is a shape (D,N) ndarray where\n each row indicates bin placements for each dimension respectively. In each\n dimension, a binnumber of `i` means the corresponding value is between\n (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.\n \n .. versionadded:: 0.11.0\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> from mpl_toolkits.mplot3d import Axes3D\n \n Take an array of 600 (x, y) coordinates as an example.\n `binned_statistic_dd` can handle arrays of higher dimension `D`. 
But a plot\n of dimension `D+1` is required.\n \n >>> mu = np.array([0., 1.])\n >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])\n >>> multinormal = stats.multivariate_normal(mu, sigma)\n >>> data = multinormal.rvs(size=600, random_state=235412)\n >>> data.shape\n (600, 2)\n \n Create bins and count how many arrays fall in each bin:\n \n >>> N = 60\n >>> x = np.linspace(-3, 3, N)\n >>> y = np.linspace(-3, 4, N)\n >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],\n ... statistic='count')\n >>> bincounts = ret.statistic\n \n Set the volume and the location of bars:\n \n >>> dx = x[1] - x[0]\n >>> dy = y[1] - y[0]\n >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)\n >>> z = 0\n \n >>> bincounts = bincounts.ravel()\n >>> x = x.ravel()\n >>> y = y.ravel()\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111, projection='3d')\n >>> with np.errstate(divide='ignore'): # silence random axes3d warning\n ... ax.bar3d(x, y, z, dx, dy, bincounts)\n \n Reuse bin numbers and bin edges with new values:\n \n >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),\n ... binned_statistic_result=ret,\n ... statistic='mean')\n \n binom_test(x, n=None, p=0.5, alternative='two-sided')\n Perform a test that the probability of success is p.\n \n This is an exact, two-sided test of the null hypothesis\n that the probability of success in a Bernoulli experiment\n is `p`.\n \n Parameters\n ----------\n x : int or array_like\n The number of successes, or if x has length 2, it is the\n number of successes and the number of failures.\n n : int\n The number of trials. This is ignored if x gives both the\n number of successes and failures.\n p : float, optional\n The hypothesized probability of success. ``0 <= p <= 1``. The\n default value is ``p = 0.5``.\n alternative : {'two-sided', 'greater', 'less'}, optional\n Indicates the alternative hypothesis. The default value is\n 'two-sided'.\n \n Returns\n -------\n p-value : float\n The p-value of the hypothesis test.\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Binomial_test\n \n Examples\n --------\n >>> from scipy import stats\n \n A car manufacturer claims that no more than 10% of their cars are unsafe.\n 15 cars are inspected for safety, 3 were found to be unsafe. Test the\n manufacturer's claim:\n \n >>> stats.binom_test(3, n=15, p=0.1, alternative='greater')\n 0.18406106910639114\n \n The null hypothesis cannot be rejected at the 5% level of significance\n because the returned p-value is greater than the critical value of 5%.\n \n boxcox(x, lmbda=None, alpha=None)\n Return a dataset transformed by a Box-Cox power transformation.\n \n Parameters\n ----------\n x : ndarray\n Input array. Must be positive 1-dimensional. 
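A sketch of binom_test mirroring the car-safety example in its docstring above: 3 unsafe cars out of 15 inspected, tested against the claimed 10% rate.

from scipy import stats

p_greater = stats.binom_test(3, n=15, p=0.1, alternative='greater')
p_two_sided = stats.binom_test(3, n=15, p=0.1)  # default two-sided alternative
print(p_greater)    # about 0.184, so the 10% claim is not rejected at the 5% level
print(p_two_sided)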
Must not be constant.\n lmbda : {None, scalar}, optional\n If `lmbda` is not None, do the transformation for that value.\n \n If `lmbda` is None, find the lambda that maximizes the log-likelihood\n function and return it as the second output argument.\n alpha : {None, float}, optional\n If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence\n interval for `lmbda` as the third output argument.\n Must be between 0.0 and 1.0.\n \n Returns\n -------\n boxcox : ndarray\n Box-Cox power transformed array.\n maxlog : float, optional\n If the `lmbda` parameter is None, the second returned argument is\n the lambda that maximizes the log-likelihood function.\n (min_ci, max_ci) : tuple of float, optional\n If `lmbda` parameter is None and ``alpha`` is not None, this returned\n tuple of floats represents the minimum and maximum confidence limits\n given ``alpha``.\n \n See Also\n --------\n probplot, boxcox_normplot, boxcox_normmax, boxcox_llf\n \n Notes\n -----\n The Box-Cox transform is given by::\n \n y = (x**lmbda - 1) / lmbda, for lmbda > 0\n log(x), for lmbda = 0\n \n `boxcox` requires the input data to be positive. Sometimes a Box-Cox\n transformation provides a shift parameter to achieve this; `boxcox` does\n not. Such a shift parameter is equivalent to adding a positive constant to\n `x` before calling `boxcox`.\n \n The confidence limits returned when ``alpha`` is provided give the interval\n where:\n \n .. math::\n \n llf(\\hat{\\lambda}) - llf(\\lambda) < \\frac{1}{2}\\chi^2(1 - \\alpha, 1),\n \n with ``llf`` the log-likelihood function and :math:`\\chi^2` the chi-squared\n function.\n \n References\n ----------\n G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal of the\n Royal Statistical Society B, 26, 211-252 (1964).\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n We generate some random variates from a non-normal distribution and make a\n probability plot for it, to show it is non-normal in the tails:\n \n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(211)\n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)\n >>> ax1.set_xlabel('')\n >>> ax1.set_title('Probplot against normal distribution')\n \n We now use `boxcox` to transform the data so it's closest to normal:\n \n >>> ax2 = fig.add_subplot(212)\n >>> xt, _ = stats.boxcox(x)\n >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)\n >>> ax2.set_title('Probplot after Box-Cox transformation')\n \n >>> plt.show()\n \n boxcox_llf(lmb, data)\n The boxcox log-likelihood function.\n \n Parameters\n ----------\n lmb : scalar\n Parameter for Box-Cox transformation. See `boxcox` for details.\n data : array_like\n Data to calculate Box-Cox log-likelihood for. If `data` is\n multi-dimensional, the log-likelihood is calculated along the first\n axis.\n \n Returns\n -------\n llf : float or ndarray\n Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,\n an array otherwise.\n \n See Also\n --------\n boxcox, probplot, boxcox_normplot, boxcox_normmax\n \n Notes\n -----\n The Box-Cox log-likelihood function is defined here as\n \n .. 
math::\n \n llf = (\\lambda - 1) \\sum_i(\\log(x_i)) -\n N/2 \\log(\\sum_i (y_i - \\bar{y})^2 / N),\n \n where ``y`` is the Box-Cox transformed input data ``x``.\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n >>> np.random.seed(1245)\n \n Generate some random variates and calculate Box-Cox log-likelihood values\n for them for a range of ``lmbda`` values:\n \n >>> x = stats.loggamma.rvs(5, loc=10, size=1000)\n >>> lmbdas = np.linspace(-2, 10)\n >>> llf = np.zeros(lmbdas.shape, dtype=float)\n >>> for ii, lmbda in enumerate(lmbdas):\n ... llf[ii] = stats.boxcox_llf(lmbda, x)\n \n Also find the optimal lmbda value with `boxcox`:\n \n >>> x_most_normal, lmbda_optimal = stats.boxcox(x)\n \n Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a\n horizontal line to check that that's really the optimum:\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(lmbdas, llf, 'b.-')\n >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')\n >>> ax.set_xlabel('lmbda parameter')\n >>> ax.set_ylabel('Box-Cox log-likelihood')\n \n Now add some probability plots to show that where the log-likelihood is\n maximized the data transformed with `boxcox` looks closest to normal:\n \n >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'\n >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):\n ... xt = stats.boxcox(x, lmbda=lmbda)\n ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)\n ... ax_inset = inset_axes(ax, width=\"20%\", height=\"20%\", loc=loc)\n ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')\n ... ax_inset.set_xticklabels([])\n ... ax_inset.set_yticklabels([])\n ... ax_inset.set_title(r'$\\lambda=%1.2f$' % lmbda)\n \n >>> plt.show()\n \n boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr')\n Compute optimal Box-Cox transform parameter for input data.\n \n Parameters\n ----------\n x : array_like\n Input array.\n brack : 2-tuple, optional\n The starting interval for a downhill bracket search with\n `optimize.brent`. Note that this is in most cases not critical; the\n final result is allowed to be outside this bracket.\n method : str, optional\n The method to determine the optimal transform parameter (`boxcox`\n ``lmbda`` parameter). Options are:\n \n 'pearsonr' (default)\n Maximizes the Pearson correlation coefficient between\n ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be\n normally-distributed.\n \n 'mle'\n Minimizes the log-likelihood `boxcox_llf`. This is the method used\n in `boxcox`.\n \n 'all'\n Use all optimization methods available, and return all results.\n Useful to compare different methods.\n \n Returns\n -------\n maxlog : float or ndarray\n The optimal transform parameter found. 
An array instead of a scalar\n for ``method='all'``.\n \n See Also\n --------\n boxcox, boxcox_llf, boxcox_normplot\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(1234) # make this example reproducible\n \n Generate some data and determine optimal ``lmbda`` in various ways:\n \n >>> x = stats.loggamma.rvs(5, size=30) + 5\n >>> y, lmax_mle = stats.boxcox(x)\n >>> lmax_pearsonr = stats.boxcox_normmax(x)\n \n >>> lmax_mle\n 7.177...\n >>> lmax_pearsonr\n 7.916...\n >>> stats.boxcox_normmax(x, method='all')\n array([ 7.91667384, 7.17718692])\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)\n >>> ax.axvline(lmax_mle, color='r')\n >>> ax.axvline(lmax_pearsonr, color='g', ls='--')\n \n >>> plt.show()\n \n boxcox_normplot(x, la, lb, plot=None, N=80)\n Compute parameters for a Box-Cox normality plot, optionally show it.\n \n A Box-Cox normality plot shows graphically what the best transformation\n parameter is to use in `boxcox` to obtain a distribution that is close\n to normal.\n \n Parameters\n ----------\n x : array_like\n Input array.\n la, lb : scalar\n The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`\n for Box-Cox transformations. These are also the limits of the\n horizontal axis of the plot if that is generated.\n plot : object, optional\n If given, plots the quantiles and least squares fit.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n N : int, optional\n Number of points on the horizontal axis (equally distributed from\n `la` to `lb`).\n \n Returns\n -------\n lmbdas : ndarray\n The ``lmbda`` values for which a Box-Cox transform was done.\n ppcc : ndarray\n Probability Plot Correlelation Coefficient, as obtained from `probplot`\n when fitting the Box-Cox transformed input `x` against a normal\n distribution.\n \n See Also\n --------\n probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max\n \n Notes\n -----\n Even if `plot` is given, the figure is not shown or saved by\n `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``\n should be used after calling `probplot`.\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n Generate some non-normally distributed data, and create a Box-Cox plot:\n \n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)\n \n Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in\n the same plot:\n \n >>> _, maxlog = stats.boxcox(x)\n >>> ax.axvline(maxlog, color='r')\n \n >>> plt.show()\n \n brunnermunzel(x, y, alternative='two-sided', distribution='t', nan_policy='propagate')\n Compute the Brunner-Munzel test on samples x and y.\n \n The Brunner-Munzel test is a nonparametric test of the null hypothesis that\n when values are taken one by one from each group, the probabilities of\n getting large values in both groups are equal.\n Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the\n assumption of equivariance of two groups. Note that this does not assume\n the distributions are same. 
This test works on two independent samples,\n which may have different sizes.\n \n Parameters\n ----------\n x, y : array_like\n Array of samples, should be one-dimensional.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is 'two-sided'):\n \n * 'two-sided'\n * 'less': one-sided\n * 'greater': one-sided\n distribution : {'t', 'normal'}, optional\n Defines how to get the p-value.\n The following options are available (default is 't'):\n \n * 't': get the p-value by t-distribution\n * 'normal': get the p-value by standard normal distribution.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float\n The Brunner-Munzer W statistic.\n pvalue : float\n p-value assuming an t distribution. One-sided or\n two-sided, depending on the choice of `alternative` and `distribution`.\n \n See Also\n --------\n mannwhitneyu : Mann-Whitney rank test on two samples.\n \n Notes\n -----\n Brunner and Munzel recommended to estimate the p-value by t-distribution\n when the size of data is 50 or less. If the size is lower than 10, it would\n be better to use permuted Brunner Munzel test (see [2]_).\n \n References\n ----------\n .. [1] Brunner, E. and Munzel, U. \"The nonparametric Benhrens-Fisher\n problem: Asymptotic theory and a small-sample approximation\".\n Biometrical Journal. Vol. 42(2000): 17-25.\n .. [2] Neubert, K. and Brunner, E. \"A studentized permutation test for the\n non-parametric Behrens-Fisher problem\". Computational Statistics and\n Data Analysis. Vol. 51(2007): 5192-5204.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]\n >>> x2 = [3,3,4,3,1,2,3,1,1,5,4]\n >>> w, p_value = stats.brunnermunzel(x1, x2)\n >>> w\n 3.1374674823029505\n >>> p_value\n 0.0057862086661515377\n \n chi2_contingency(observed, correction=True, lambda_=None)\n Chi-square test of independence of variables in a contingency table.\n \n This function computes the chi-square statistic and p-value for the\n hypothesis test of independence of the observed frequencies in the\n contingency table [1]_ `observed`. The expected frequencies are computed\n based on the marginal sums under the assumption of independence; see\n `scipy.stats.contingency.expected_freq`. The number of degrees of\n freedom is (expressed using numpy functions and attributes)::\n \n dof = observed.size - sum(observed.shape) + observed.ndim - 1\n \n \n Parameters\n ----------\n observed : array_like\n The contingency table. The table contains the observed frequencies\n (i.e. number of occurrences) in each category. In the two-dimensional\n case, the table is often described as an \"R x C table\".\n correction : bool, optional\n If True, *and* the degrees of freedom is 1, apply Yates' correction\n for continuity. The effect of the correction is to adjust each\n observed value by 0.5 towards the corresponding expected value.\n lambda_ : float or str, optional.\n By default, the statistic computed in this test is Pearson's\n chi-squared statistic [2]_. `lambda_` allows a statistic from the\n Cressie-Read power divergence family [3]_ to be used instead. 
See\n `power_divergence` for details.\n \n Returns\n -------\n chi2 : float\n The test statistic.\n p : float\n The p-value of the test\n dof : int\n Degrees of freedom\n expected : ndarray, same shape as `observed`\n The expected frequencies, based on the marginal sums of the table.\n \n See Also\n --------\n contingency.expected_freq\n fisher_exact\n chisquare\n power_divergence\n \n Notes\n -----\n An often quoted guideline for the validity of this calculation is that\n the test should be used only if the observed and expected frequencies\n in each cell are at least 5.\n \n This is a test for the independence of different categories of a\n population. The test is only meaningful when the dimension of\n `observed` is two or more. Applying the test to a one-dimensional\n table will always result in `expected` equal to `observed` and a\n chi-square statistic equal to 0.\n \n This function does not handle masked arrays, because the calculation\n does not make sense with missing values.\n \n Like stats.chisquare, this function computes a chi-square statistic;\n the convenience this function provides is to figure out the expected\n frequencies and degrees of freedom from the given contingency table.\n If these were already known, and if the Yates' correction was not\n required, one could use stats.chisquare. That is, if one calls::\n \n chi2, p, dof, ex = chi2_contingency(obs, correction=False)\n \n then the following is true::\n \n (chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(),\n ddof=obs.size - 1 - dof)\n \n The `lambda_` argument was added in version 0.13.0 of scipy.\n \n References\n ----------\n .. [1] \"Contingency table\",\n https://en.wikipedia.org/wiki/Contingency_table\n .. [2] \"Pearson's chi-squared test\",\n https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test\n .. [3] Cressie, N. and Read, T. R. C., \"Multinomial Goodness-of-Fit\n Tests\", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),\n pp. 440-464.\n \n Examples\n --------\n A two-way example (2 x 3):\n \n >>> from scipy.stats import chi2_contingency\n >>> obs = np.array([[10, 10, 20], [20, 20, 20]])\n >>> chi2_contingency(obs)\n (2.7777777777777777,\n 0.24935220877729619,\n 2,\n array([[ 12., 12., 16.],\n [ 18., 18., 24.]]))\n \n Perform the test using the log-likelihood ratio (i.e. the \"G-test\")\n instead of Pearson's chi-squared statistic.\n \n >>> g, p, dof, expctd = chi2_contingency(obs, lambda_=\"log-likelihood\")\n >>> g, p\n (2.7688587616781319, 0.25046668010954165)\n \n A four-way example (2 x 2 x 2 x 2):\n \n >>> obs = np.array(\n ... [[[[12, 17],\n ... [11, 16]],\n ... [[11, 12],\n ... [15, 16]]],\n ... [[[23, 15],\n ... [30, 22]],\n ... [[14, 17],\n ... [15, 16]]]])\n >>> chi2_contingency(obs)\n (8.7584514426741897,\n 0.64417725029295503,\n 11,\n array([[[[ 14.15462386, 14.15462386],\n [ 16.49423111, 16.49423111]],\n [[ 11.2461395 , 11.2461395 ],\n [ 13.10500554, 13.10500554]]],\n [[[ 19.5591166 , 19.5591166 ],\n [ 22.79202844, 22.79202844]],\n [[ 15.54012004, 15.54012004],\n [ 18.10873492, 18.10873492]]]]))\n \n chisquare(f_obs, f_exp=None, ddof=0, axis=0)\n Calculate a one-way chi-square test.\n \n The chi-square test tests the null hypothesis that the categorical data\n has the given frequencies.\n \n Parameters\n ----------\n f_obs : array_like\n Observed frequencies in each category.\n f_exp : array_like, optional\n Expected frequencies in each category. 
By default the categories are\n assumed to be equally likely.\n ddof : int, optional\n \"Delta degrees of freedom\": adjustment to the degrees of freedom\n for the p-value. The p-value is computed using a chi-squared\n distribution with ``k - 1 - ddof`` degrees of freedom, where `k`\n is the number of observed frequencies. The default value of `ddof`\n is 0.\n axis : int or None, optional\n The axis of the broadcast result of `f_obs` and `f_exp` along which to\n apply the test. If axis is None, all values in `f_obs` are treated\n as a single data set. Default is 0.\n \n Returns\n -------\n chisq : float or ndarray\n The chi-squared test statistic. The value is a float if `axis` is\n None or `f_obs` and `f_exp` are 1-D.\n p : float or ndarray\n The p-value of the test. The value is a float if `ddof` and the\n return value `chisq` are scalars.\n \n See Also\n --------\n scipy.stats.power_divergence\n \n Notes\n -----\n This test is invalid when the observed or expected frequencies in each\n category are too small. A typical rule is that all of the observed\n and expected frequencies should be at least 5.\n \n The default degrees of freedom, k-1, are for the case when no parameters\n of the distribution are estimated. If p parameters are estimated by\n efficient maximum likelihood then the correct degrees of freedom are\n k-1-p. If the parameters are estimated in a different way, then the\n dof can be between k-1-p and k-1. However, it is also possible that\n the asymptotic distribution is not chi-square, in which case this test\n is not appropriate.\n \n References\n ----------\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 8.\n https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html\n .. [2] \"Chi-squared test\", https://en.wikipedia.org/wiki/Chi-squared_test\n \n Examples\n --------\n When just `f_obs` is given, it is assumed that the expected frequencies\n are uniform and given by the mean of the observed frequencies.\n \n >>> from scipy.stats import chisquare\n >>> chisquare([16, 18, 16, 14, 12, 12])\n (2.0, 0.84914503608460956)\n \n With `f_exp` the expected frequencies can be given.\n \n >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])\n (3.5, 0.62338762774958223)\n \n When `f_obs` is 2-D, by default the test is applied to each column.\n \n >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T\n >>> obs.shape\n (6, 2)\n >>> chisquare(obs)\n (array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))\n \n By setting ``axis=None``, the test is applied to all data in the array,\n which is equivalent to applying the test to the flattened array.\n \n >>> chisquare(obs, axis=None)\n (23.31034482758621, 0.015975692534127565)\n >>> chisquare(obs.ravel())\n (23.31034482758621, 0.015975692534127565)\n \n `ddof` is the change to make to the default degrees of freedom.\n \n >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)\n (2.0, 0.73575888234288467)\n \n The calculation of the p-values is done by broadcasting the\n chi-squared statistic with `ddof`.\n \n >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])\n (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))\n \n `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has\n shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting\n `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared\n statistics, we use ``axis=1``:\n \n >>> chisquare([16, 18, 16, 14, 12, 12],\n ... 
f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],\n ... axis=1)\n (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))\n \n circmean(samples, high=6.283185307179586, low=0, axis=None, nan_policy='propagate')\n Compute the circular mean for samples in a range.\n \n Parameters\n ----------\n samples : array_like\n Input array.\n high : float or int, optional\n High boundary for circular mean range. Default is ``2*pi``.\n low : float or int, optional\n Low boundary for circular mean range. Default is 0.\n axis : int, optional\n Axis along which means are computed. The default is to compute\n the mean of the flattened array.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n \n Returns\n -------\n circmean : float\n Circular mean.\n \n Examples\n --------\n >>> from scipy.stats import circmean\n >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3])\n 0.2\n \n >>> from scipy.stats import circmean\n >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0)\n 0.4\n \n circstd(samples, high=6.283185307179586, low=0, axis=None, nan_policy='propagate')\n Compute the circular standard deviation for samples assumed to be in the\n range [low to high].\n \n Parameters\n ----------\n samples : array_like\n Input array.\n high : float or int, optional\n High boundary for circular standard deviation range.\n Default is ``2*pi``.\n low : float or int, optional\n Low boundary for circular standard deviation range. Default is 0.\n axis : int, optional\n Axis along which standard deviations are computed. The default is\n to compute the standard deviation of the flattened array.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n \n Returns\n -------\n circstd : float\n Circular standard deviation.\n \n Notes\n -----\n This uses a definition of circular standard deviation that in the limit of\n small angles returns a number close to the 'linear' standard deviation.\n \n Examples\n --------\n >>> from scipy.stats import circstd\n >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2])\n 0.063564063306\n \n circvar(samples, high=6.283185307179586, low=0, axis=None, nan_policy='propagate')\n Compute the circular variance for samples assumed to be in a range.\n \n Parameters\n ----------\n samples : array_like\n Input array.\n high : float or int, optional\n High boundary for circular variance range. Default is ``2*pi``.\n low : float or int, optional\n Low boundary for circular variance range. Default is 0.\n axis : int, optional\n Axis along which variances are computed. The default is to compute\n the variance of the flattened array.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. 
Default is 'propagate'.\n \n Returns\n -------\n circvar : float\n Circular variance.\n \n Notes\n -----\n This uses a definition of circular variance that in the limit of small\n angles returns a number close to the 'linear' variance.\n \n Examples\n --------\n >>> from scipy.stats import circvar\n >>> circvar([0, 2*np.pi/3, 5*np.pi/3])\n 2.19722457734\n \n combine_pvalues(pvalues, method='fisher', weights=None)\n Combine p-values from independent tests bearing upon the same hypothesis.\n \n Parameters\n ----------\n pvalues : array_like, 1-D\n Array of p-values assumed to come from independent tests.\n method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}, optional\n Name of method to use to combine p-values.\n The following methods are available (default is 'fisher'):\n \n * 'fisher': Fisher's method (Fisher's combined probability test), the\n sum of the logarithm of the p-values\n * 'pearson': Pearson's method (similar to Fisher's but uses sum of the\n complement of the p-values inside the logarithms)\n * 'tippett': Tippett's method (minimum of p-values)\n * 'stouffer': Stouffer's Z-score method\n * 'mudholkar_george': the difference of Fisher's and Pearson's methods\n divided by 2\n weights : array_like, 1-D, optional\n Optional array of weights used only for Stouffer's Z-score method.\n \n Returns\n -------\n statistic: float\n The statistic calculated by the specified method.\n pval: float\n The combined p-value.\n \n Notes\n -----\n Fisher's method (also known as Fisher's combined probability test) [1]_ uses\n a chi-squared statistic to compute a combined p-value. The closely related\n Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The\n advantage of Stouffer's method is that it is straightforward to introduce\n weights, which can make Stouffer's method more powerful than Fisher's\n method when the p-values are from studies of different size [6]_ [7]_.\n The Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's\n method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's method, the\n sum of the logarithms is multiplied by -2 in the implementation. This\n quantity has a chi-square distribution that determines the p-value. The\n `mudholkar_george` method is the difference of the Fisher's and Pearson's\n test statistics, each of which include the -2 factor [4]_. However, the\n `mudholkar_george` method does not include these -2 factors. The test\n statistic of `mudholkar_george` is the sum of logisitic random variables and\n equation 3.6 in [3]_ is used to approximate the p-value based on Student's\n t-distribution.\n \n Fisher's method may be extended to combine p-values from dependent tests\n [5]_. Extensions such as Brown's method and Kost's method are not currently\n implemented.\n \n .. versionadded:: 0.15.0\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method\n .. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method\n .. [3] George, E. O., and G. S. Mudholkar. \"On the convolution of logistic\n random variables.\" Metrika 30.1 (1983): 1-13.\n .. [4] Heard, N. and Rubin-Delanchey, P. \"Choosing between methods of\n combining p-values.\" Biometrika 105.1 (2018): 239-246.\n .. [5] Whitlock, M. C. \"Combining probability from independent tests: the\n weighted Z-method is superior to Fisher's approach.\" Journal of\n Evolutionary Biology 18, no. 5 (2005): 1368-1373.\n .. [6] Zaykin, Dmitri V. 
\"Optimally weighted Z-test is a powerful method\n for combining probabilities in meta-analysis.\" Journal of\n Evolutionary Biology 24, no. 8 (2011): 1836-1841.\n .. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method\n \n cumfreq(a, numbins=10, defaultreallimits=None, weights=None)\n Return a cumulative frequency histogram, using the histogram function.\n \n A cumulative histogram is a mapping that counts the cumulative number of\n observations in all of the bins up to the specified bin.\n \n Parameters\n ----------\n a : array_like\n Input array.\n numbins : int, optional\n The number of bins to use for the histogram. Default is 10.\n defaultreallimits : tuple (lower, upper), optional\n The lower and upper values for the range of the histogram.\n If no value is given, a range slightly larger than the range of the\n values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,\n where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.\n weights : array_like, optional\n The weights for each value in `a`. Default is None, which gives each\n value a weight of 1.0\n \n Returns\n -------\n cumcount : ndarray\n Binned values of cumulative frequency.\n lowerlimit : float\n Lower real limit\n binsize : float\n Width of each bin.\n extrapoints : int\n Extra points.\n \n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy import stats\n >>> x = [1, 4, 2, 1, 3, 1]\n >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))\n >>> res.cumcount\n array([ 1., 2., 3., 3.])\n >>> res.extrapoints\n 3\n \n Create a normal distribution with 1000 random values\n \n >>> rng = np.random.RandomState(seed=12345)\n >>> samples = stats.norm.rvs(size=1000, random_state=rng)\n \n Calculate cumulative frequencies\n \n >>> res = stats.cumfreq(samples, numbins=25)\n \n Calculate space of values for x\n \n >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,\n ... res.cumcount.size)\n \n Plot histogram and cumulative histogram\n \n >>> fig = plt.figure(figsize=(10, 4))\n >>> ax1 = fig.add_subplot(1, 2, 1)\n >>> ax2 = fig.add_subplot(1, 2, 2)\n >>> ax1.hist(samples, bins=25)\n >>> ax1.set_title('Histogram')\n >>> ax2.bar(x, res.cumcount, width=res.binsize)\n >>> ax2.set_title('Cumulative histogram')\n >>> ax2.set_xlim([x.min(), x.max()])\n \n >>> plt.show()\n \n describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate')\n Compute several descriptive statistics of the passed array.\n \n Parameters\n ----------\n a : array_like\n Input data.\n axis : int or None, optional\n Axis along which statistics are calculated. Default is 0.\n If None, compute over the whole array `a`.\n ddof : int, optional\n Delta degrees of freedom (only for variance). 
Default is 1.\n bias : bool, optional\n If False, then the skewness and kurtosis calculations are corrected for\n statistical bias.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n nobs : int or ndarray of ints\n Number of observations (length of data along `axis`).\n When 'omit' is chosen as nan_policy, each column is counted separately.\n minmax: tuple of ndarrays or floats\n Minimum and maximum value of data array.\n mean : ndarray or float\n Arithmetic mean of data along axis.\n variance : ndarray or float\n Unbiased variance of the data along axis, denominator is number of\n observations minus one.\n skewness : ndarray or float\n Skewness, based on moment calculations with denominator equal to\n the number of observations, i.e. no degrees of freedom correction.\n kurtosis : ndarray or float\n Kurtosis (Fisher). The kurtosis is normalized so that it is\n zero for the normal distribution. No degrees of freedom are used.\n \n See Also\n --------\n skew, kurtosis\n \n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(10)\n >>> stats.describe(a)\n DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666,\n skewness=0.0, kurtosis=-1.2242424242424244)\n >>> b = [[1, 2], [3, 4]]\n >>> stats.describe(b)\n DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),\n mean=array([2., 3.]), variance=array([2., 2.]),\n skewness=array([0., 0.]), kurtosis=array([-2., -2.]))\n \n energy_distance(u_values, v_values, u_weights=None, v_weights=None)\n Compute the energy distance between two 1D distributions.\n \n .. versionadded:: 1.0.0\n \n Parameters\n ----------\n u_values, v_values : array_like\n Values observed in the (empirical) distribution.\n u_weights, v_weights : array_like, optional\n Weight for each value. If unspecified, each value is assigned the same\n weight.\n `u_weights` (resp. `v_weights`) must have the same length as\n `u_values` (resp. `v_values`). If the weight sum differs from 1, it\n must still be positive and finite so that the weights can be normalized\n to sum to 1.\n \n Returns\n -------\n distance : float\n The computed distance between the distributions.\n \n Notes\n -----\n The energy distance between two distributions :math:`u` and :math:`v`, whose\n respective CDFs are :math:`U` and :math:`V`, equals to:\n \n .. math::\n \n D(u, v) = \\left( 2\\mathbb E|X - Y| - \\mathbb E|X - X'| -\n \\mathbb E|Y - Y'| \\right)^{1/2}\n \n where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are\n independent random variables whose probability distribution is :math:`u`\n (resp. :math:`v`).\n \n As shown in [2]_, for one-dimensional real-valued variables, the energy\n distance is linked to the non-distribution-free version of the Cramer-von\n Mises distance:\n \n .. math::\n \n D(u, v) = \\sqrt{2} l_2(u, v) = \\left( 2 \\int_{-\\infty}^{+\\infty} (U-V)^2\n \\right)^{1/2}\n \n Note that the common Cramer-von Mises criterion uses the distribution-free\n version of the distance. 
See [2]_ (section 2), for more details about both\n versions of the distance.\n \n The input distributions can be empirical, therefore coming from samples\n whose values are effectively inputs of the function, or they can be seen as\n generalized functions, in which case they are weighted sums of Dirac delta\n functions located at the specified values.\n \n References\n ----------\n .. [1] \"Energy distance\", https://en.wikipedia.org/wiki/Energy_distance\n .. [2] Szekely \"E-statistics: The energy of statistical samples.\" Bowling\n Green State University, Department of Mathematics and Statistics,\n Technical Report 02-16 (2002).\n .. [3] Rizzo, Szekely \"Energy distance.\" Wiley Interdisciplinary Reviews:\n Computational Statistics, 8(1):27-38 (2015).\n .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,\n Munos \"The Cramer Distance as a Solution to Biased Wasserstein\n Gradients\" (2017). :arXiv:`1705.10743`.\n \n Examples\n --------\n >>> from scipy.stats import energy_distance\n >>> energy_distance([0], [2])\n 2.0000000000000004\n >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])\n 1.0000000000000002\n >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],\n ... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])\n 0.88003340976158217\n \n entropy(pk, qk=None, base=None, axis=0)\n Calculate the entropy of a distribution for given probability values.\n \n If only probabilities `pk` are given, the entropy is calculated as\n ``S = -sum(pk * log(pk), axis=axis)``.\n \n If `qk` is not None, then compute the Kullback-Leibler divergence\n ``S = sum(pk * log(pk / qk), axis=axis)``.\n \n This routine will normalize `pk` and `qk` if they don't sum to 1.\n \n Parameters\n ----------\n pk : sequence\n Defines the (discrete) distribution. ``pk[i]`` is the (possibly\n unnormalized) probability of event ``i``.\n qk : sequence, optional\n Sequence against which the relative entropy is computed. Should be in\n the same format as `pk`.\n base : float, optional\n The logarithmic base to use, defaults to ``e`` (natural logarithm).\n axis: int, optional\n The axis along which the entropy is calculated. Default is 0.\n \n Returns\n -------\n S : float\n The calculated entropy.\n \n Examples\n --------\n \n >>> from scipy.stats import entropy\n \n Bernoulli trial with different p.\n The outcome of a fair coin is the most uncertain:\n \n >>> entropy([1/2, 1/2], base=2)\n 1.0\n \n The outcome of a biased coin is less uncertain:\n \n >>> entropy([9/10, 1/10], base=2)\n 0.46899559358928117\n \n Relative entropy:\n \n >>> entropy([1/2, 1/2], qk=[9/10, 1/10])\n 0.5108256237659907\n \n epps_singleton_2samp(x, y, t=(0.4, 0.8))\n Compute the Epps-Singleton (ES) test statistic.\n \n Test the null hypothesis that two samples have the same underlying\n probability distribution.\n \n Parameters\n ----------\n x, y : array-like\n The two samples of observations to be tested. Input must not have more\n than one dimension. Samples can have different lengths.\n t : array-like, optional\n The points (t1, ..., tn) where the empirical characteristic function is\n to be evaluated. It should be positive distinct numbers. The default\n value (0.4, 0.8) is proposed in [1]_. 
Input must not have more than\n one dimension.\n \n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The associated p-value based on the asymptotic chi2-distribution.\n \n See Also\n --------\n ks_2samp, anderson_ksamp\n \n Notes\n -----\n Testing whether two samples are generated by the same underlying\n distribution is a classical question in statistics. A widely used test is\n the Kolmogorov-Smirnov (KS) test which relies on the empirical\n distribution function. Epps and Singleton introduce a test based on the\n empirical characteristic function in [1]_.\n \n One advantage of the ES test compared to the KS test is that is does\n not assume a continuous distribution. In [1]_, the authors conclude\n that the test also has a higher power than the KS test in many\n examples. They recommend the use of the ES test for discrete samples as\n well as continuous samples with at least 25 observations each, whereas\n `anderson_ksamp` is recommended for smaller sample sizes in the\n continuous case.\n \n The p-value is computed from the asymptotic distribution of the test\n statistic which follows a `chi2` distribution. If the sample size of both\n `x` and `y` is below 25, the small sample correction proposed in [1]_ is\n applied to the test statistic.\n \n The default values of `t` are determined in [1]_ by considering\n various distributions and finding good values that lead to a high power\n of the test in general. Table III in [1]_ gives the optimal values for\n the distributions tested in that study. The values of `t` are scaled by\n the semi-interquartile range in the implementation, see [1]_.\n \n References\n ----------\n .. [1] T. W. Epps and K. J. Singleton, \"An omnibus test for the two-sample\n problem using the empirical characteristic function\", Journal of\n Statistical Computation and Simulation 26, p. 177--203, 1986.\n \n .. [2] S. J. Goerg and J. Kaiser, \"Nonparametric testing of distributions\n - the Epps-Singleton two-sample test using the empirical characteristic\n function\", The Stata Journal 9(3), p. 454--465, 2009.\n \n f_oneway(*args)\n Perform one-way ANOVA.\n \n The one-way ANOVA tests the null hypothesis that two or more groups have\n the same population mean. The test is applied to samples from two or\n more groups, possibly with differing sizes.\n \n Parameters\n ----------\n sample1, sample2, ... : array_like\n The sample measurements for each group.\n \n Returns\n -------\n statistic : float\n The computed F-value of the test.\n pvalue : float\n The associated p-value from the F-distribution.\n \n Notes\n -----\n The ANOVA test has important assumptions that must be satisfied in order\n for the associated p-value to be valid.\n \n 1. The samples are independent.\n 2. Each sample is from a normally distributed population.\n 3. The population standard deviations of the groups are all equal. This\n property is known as homoscedasticity.\n \n If these assumptions are not true for a given set of data, it may still be\n possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although\n with some loss of power.\n \n The algorithm is from Heiman[2], pp.394-7.\n \n References\n ----------\n .. [1] R. Lowry, \"Concepts and Applications of Inferential Statistics\",\n Chapter 14, 2014, http://vassarstats.net/textbook/\n \n .. [2] G.W. Heiman, \"Understanding research methods and statistics: An\n integrated introduction for psychology\", Houghton, Mifflin and\n Company, 2001.\n \n .. [3] G.H. 
McDonald, \"Handbook of Biological Statistics\", One-way ANOVA.\n http://www.biostathandbook.com/onewayanova.html\n \n Examples\n --------\n >>> import scipy.stats as stats\n \n [3]_ Here are some data on a shell measurement (the length of the anterior\n adductor muscle scar, standardized by dividing by length) in the mussel\n Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;\n Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a\n much larger data set used in McDonald et al. (1991).\n \n >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,\n ... 0.0659, 0.0923, 0.0836]\n >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,\n ... 0.0725]\n >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]\n >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,\n ... 0.0689]\n >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]\n >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)\n (7.1210194716424473, 0.00028122423145345439)\n \n find_repeats(arr)\n Find repeats and repeat counts.\n \n Parameters\n ----------\n arr : array_like\n Input array. This is cast to float64.\n \n Returns\n -------\n values : ndarray\n The unique values from the (flattened) input that are repeated.\n \n counts : ndarray\n Number of times the corresponding 'value' is repeated.\n \n Notes\n -----\n In numpy >= 1.9 `numpy.unique` provides similar functionality. The main\n difference is that `find_repeats` only returns repeated values.\n \n Examples\n --------\n >>> from scipy import stats\n >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])\n RepeatedResults(values=array([2.]), counts=array([4]))\n \n >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])\n RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))\n \n fisher_exact(table, alternative='two-sided')\n Perform a Fisher exact test on a 2x2 contingency table.\n \n Parameters\n ----------\n table : array_like of ints\n A 2x2 contingency table. Elements should be non-negative integers.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is 'two-sided'):\n \n * 'two-sided'\n * 'less': one-sided\n * 'greater': one-sided\n \n Returns\n -------\n oddsratio : float\n This is prior odds ratio and not a posterior estimate.\n p_value : float\n P-value, the probability of obtaining a distribution at least as\n extreme as the one that was actually observed, assuming that the\n null hypothesis is true.\n \n See Also\n --------\n chi2_contingency : Chi-square test of independence of variables in a\n contingency table.\n \n Notes\n -----\n The calculated odds ratio is different from the one R uses. This scipy\n implementation returns the (more common) \"unconditional Maximum\n Likelihood Estimate\", while R uses the \"conditional Maximum Likelihood\n Estimate\".\n \n For tables with large numbers, the (inexact) chi-square test implemented\n in the function `chi2_contingency` can also be used.\n \n Examples\n --------\n Say we spend a few days counting whales and sharks in the Atlantic and\n Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the\n Indian ocean 2 whales and 5 sharks. 
Then our contingency table is::\n \n Atlantic Indian\n whales 8 2\n sharks 1 5\n \n We use this table to find the p-value:\n \n >>> import scipy.stats as stats\n >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])\n >>> pvalue\n 0.0349...\n \n The probability that we would observe this or an even more imbalanced ratio\n by chance is about 3.5%. A commonly used significance level is 5%--if we\n adopt that, we can therefore conclude that our observed imbalance is\n statistically significant; whales prefer the Atlantic while sharks prefer\n the Indian ocean.\n \n fligner(*args, **kwds)\n Perform Fligner-Killeen test for equality of variance.\n \n Fligner's test tests the null hypothesis that all input samples\n are from populations with equal variances. Fligner-Killeen's test is\n distribution free when populations are identical [2]_.\n \n Parameters\n ----------\n sample1, sample2, ... : array_like\n Arrays of sample data. Need not be the same length.\n center : {'mean', 'median', 'trimmed'}, optional\n Keyword argument controlling which function of the data is used in\n computing the test statistic. The default is 'median'.\n proportiontocut : float, optional\n When `center` is 'trimmed', this gives the proportion of data points\n to cut from each end. (See `scipy.stats.trim_mean`.)\n Default is 0.05.\n \n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The p-value for the hypothesis test.\n \n See Also\n --------\n bartlett : A parametric test for equality of k variances in normal samples\n levene : A robust parametric test for equality of k variances\n \n Notes\n -----\n As with Levene's test there are three variants of Fligner's test that\n differ by the measure of central tendency used in the test. See `levene`\n for more information.\n \n Conover et al. (1981) examine many of the existing parametric and\n nonparametric tests by extensive simulations and they conclude that the\n tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be\n superior in terms of robustness of departures from normality and power [3]_.\n \n References\n ----------\n .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and\n Hypothesis Testing based on Quadratic Inference Function. Technical\n Report #99-03, Center for Likelihood Studies, Pennsylvania State\n University.\n https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf\n \n .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample\n tests for scale. 'Journal of the American Statistical Association.'\n 71(353), 210-213.\n \n .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and\n Hypothesis Testing based on Quadratic Inference Function. Technical\n Report #99-03, Center for Likelihood Studies, Pennsylvania State\n University.\n \n .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A\n comparative study of tests for homogeneity of variances, with\n applications to the outer continental shelf biding data.\n Technometrics, 23(4), 351-361.\n \n friedmanchisquare(*args)\n Compute the Friedman test for repeated measurements.\n \n The Friedman test tests the null hypothesis that repeated measurements of\n the same individuals have the same distribution. 
It is often used\n to test for consistency among measurements obtained in different ways.\n For example, if two measurement techniques are used on the same set of\n individuals, the Friedman test can be used to determine if the two\n measurement techniques are consistent.\n \n Parameters\n ----------\n measurements1, measurements2, measurements3... : array_like\n Arrays of measurements. All of the arrays must have the same number\n of elements. At least 3 sets of measurements must be given.\n \n Returns\n -------\n statistic : float\n The test statistic, correcting for ties.\n pvalue : float\n The associated p-value assuming that the test statistic has a chi\n squared distribution.\n \n Notes\n -----\n Due to the assumption that the test statistic has a chi squared\n distribution, the p-value is only reliable for n > 10 and more than\n 6 repeated measurements.\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Friedman_test\n \n gmean(a, axis=0, dtype=None)\n Compute the geometric mean along the specified axis.\n \n Return the geometric average of the array elements.\n That is: n-th root of (x1 * x2 * ... * xn)\n \n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. In that case, the default\n platform integer is used.\n \n Returns\n -------\n gmean : ndarray\n See `dtype` parameter above.\n \n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n \n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n \n Use masked arrays to ignore any non-finite values in the input or that\n arise in the calculations such as Not a Number and infinity because masked\n arrays automatically mask any non-finite values.\n \n Examples\n --------\n >>> from scipy.stats import gmean\n >>> gmean([1, 4])\n 2.0\n >>> gmean([1, 2, 3, 4, 5, 6, 7])\n 3.3800151591412964\n \n gstd(a, axis=0, ddof=1)\n Calculate the geometric standard deviation of an array.\n \n The geometric standard deviation describes the spread of a set of numbers\n where the geometric mean is preferred. It is a multiplicative factor, and\n so a dimensionless quantity.\n \n It is defined as the exponent of the standard deviation of ``log(a)``.\n Mathematically the population geometric standard deviation can be\n evaluated as::\n \n gstd = exp(std(log(a)))\n \n .. versionadded:: 1.3.0\n \n Parameters\n ----------\n a : array_like\n An array like object containing the sample data.\n axis : int, tuple or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n ddof : int, optional\n Degree of freedom correction in the calculation of the\n geometric standard deviation. Default is 1.\n \n Returns\n -------\n ndarray or float\n An array of the geometric standard deviation. 
If `axis` is None or `a`\n is a 1d array a float is returned.\n \n Notes\n -----\n As the calculation requires the use of logarithms the geometric standard\n deviation only supports strictly positive values. Any non-positive or\n infinite values will raise a `ValueError`.\n The geometric standard deviation is sometimes confused with the exponent of\n the standard deviation, ``exp(std(a))``. Instead the geometric standard\n deviation is ``exp(std(log(a)))``.\n The default value for `ddof` is different to the default value (0) used\n by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.\n \n Examples\n --------\n Find the geometric standard deviation of a log-normally distributed sample.\n Note that the standard deviation of the distribution is one, on a\n log scale this evaluates to approximately ``exp(1)``.\n \n >>> from scipy.stats import gstd\n >>> np.random.seed(123)\n >>> sample = np.random.lognormal(mean=0, sigma=1, size=1000)\n >>> gstd(sample)\n 2.7217860664589946\n \n Compute the geometric standard deviation of a multidimensional array and\n of a given axis.\n \n >>> a = np.arange(1, 25).reshape(2, 3, 4)\n >>> gstd(a, axis=None)\n 2.2944076136018947\n >>> gstd(a, axis=2)\n array([[1.82424757, 1.22436866, 1.13183117],\n [1.09348306, 1.07244798, 1.05914985]])\n >>> gstd(a, axis=(1,2))\n array([2.12939215, 1.22120169])\n \n The geometric standard deviation further handles masked arrays.\n \n >>> a = np.arange(1, 25).reshape(2, 3, 4)\n >>> ma = np.ma.masked_where(a > 16, a)\n >>> ma\n masked_array(\n data=[[[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12]],\n [[13, 14, 15, 16],\n [--, --, --, --],\n [--, --, --, --]]],\n mask=[[[False, False, False, False],\n [False, False, False, False],\n [False, False, False, False]],\n [[False, False, False, False],\n [ True, True, True, True],\n [ True, True, True, True]]],\n fill_value=999999)\n >>> gstd(ma, axis=2)\n masked_array(\n data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],\n [1.0934830582350938, --, --]],\n mask=[[False, False, False],\n [False, True, True]],\n fill_value=999999)\n \n hmean(a, axis=0, dtype=None)\n Calculate the harmonic mean along the specified axis.\n \n That is: n / (1/x1 + 1/x2 + ... + 1/xn)\n \n Parameters\n ----------\n a : array_like\n Input array, masked array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the harmonic mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If `dtype` is not specified, it defaults to the\n dtype of `a`, unless `a` has an integer `dtype` with a precision less\n than that of the default platform integer. 
In that case, the default\n platform integer is used.\n \n Returns\n -------\n hmean : ndarray\n See `dtype` parameter above.\n \n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n gmean : Geometric mean\n \n Notes\n -----\n The harmonic mean is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n \n Use masked arrays to ignore any non-finite values in the input or that\n arise in the calculations such as Not a Number and infinity.\n \n Examples\n --------\n >>> from scipy.stats import hmean\n >>> hmean([1, 4])\n 1.6000000000000001\n >>> hmean([1, 2, 3, 4, 5, 6, 7])\n 2.6997245179063363\n \n iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate', interpolation='linear', keepdims=False)\n Compute the interquartile range of the data along the specified axis.\n \n The interquartile range (IQR) is the difference between the 75th and\n 25th percentile of the data. It is a measure of the dispersion\n similar to standard deviation or variance, but is much more robust\n against outliers [2]_.\n \n The ``rng`` parameter allows this function to compute other\n percentile ranges than the actual IQR. For example, setting\n ``rng=(0, 100)`` is equivalent to `numpy.ptp`.\n \n The IQR of an empty array is `np.nan`.\n \n .. versionadded:: 0.18.0\n \n Parameters\n ----------\n x : array_like\n Input array or object that can be converted to an array.\n axis : int or sequence of int, optional\n Axis along which the range is computed. The default is to\n compute the IQR for the entire array.\n rng : Two-element sequence containing floats in range of [0,100] optional\n Percentiles over which to compute the range. Each must be\n between 0 and 100, inclusive. The default is the true IQR:\n `(25, 75)`. The order of the elements is not important.\n scale : scalar or str, optional\n The numerical value of scale will be divided out of the final\n result. The following string values are recognized:\n \n 'raw' : No scaling, just return the raw IQR.\n 'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.\n \n The default is 'raw'. Array-like scale is also allowed, as long\n as it broadcasts correctly to the output such that\n ``out / scale`` is a valid operation. The output dimensions\n depend on the input array, `x`, the `axis` argument, and the\n `keepdims` flag.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional\n Specifies the interpolation method to use when the percentile\n boundaries lie between two data points `i` and `j`.\n The following options are available (default is 'linear'):\n \n * 'linear': `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * 'lower': `i`.\n * 'higher': `j`.\n * 'nearest': `i` or `j` whichever is nearest.\n * 'midpoint': `(i + j) / 2`.\n \n keepdims : bool, optional\n If this is set to `True`, the reduced axes are left in the\n result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original array `x`.\n \n Returns\n -------\n iqr : scalar or ndarray\n If ``axis=None``, a scalar is returned. If the input contains\n integers or floats of smaller precision than ``np.float64``, then the\n output data-type is ``np.float64``. Otherwise, the output data-type is\n the same as that of the input.\n \n See Also\n --------\n numpy.std, numpy.var\n \n Notes\n -----\n This function is heavily dependent on the version of `numpy` that is\n installed. Versions greater than 1.11.0b3 are highly recommended, as they\n include a number of enhancements and fixes to `numpy.percentile` and\n `numpy.nanpercentile` that affect the operation of this function. The\n following modifications apply:\n \n Below 1.10.0 : `nan_policy` is poorly defined.\n The default behavior of `numpy.percentile` is used for 'propagate'. This\n is a hybrid of 'omit' and 'propagate' that mostly yields a skewed\n version of 'omit' since NaNs are sorted to the end of the data. A\n warning is raised if there are NaNs in the data.\n Below 1.9.0: `numpy.nanpercentile` does not exist.\n This means that `numpy.percentile` is used regardless of `nan_policy`\n and a warning is issued. See previous item for a description of the\n behavior.\n Below 1.9.0: `keepdims` and `interpolation` are not supported.\n The keywords get ignored with a warning if supplied with non-default\n values. However, multiple axes are still supported.\n \n References\n ----------\n .. [1] \"Interquartile range\" https://en.wikipedia.org/wiki/Interquartile_range\n .. [2] \"Robust measures of scale\" https://en.wikipedia.org/wiki/Robust_measures_of_scale\n .. [3] \"Quantile\" https://en.wikipedia.org/wiki/Quantile\n \n Examples\n --------\n >>> from scipy.stats import iqr\n >>> x = np.array([[10, 7, 4], [3, 2, 1]])\n >>> x\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> iqr(x)\n 4.0\n >>> iqr(x, axis=0)\n array([ 3.5, 2.5, 1.5])\n >>> iqr(x, axis=1)\n array([ 3., 1.])\n >>> iqr(x, axis=1, keepdims=True)\n array([[ 3.],\n [ 1.]])\n \n itemfreq(*args, **kwds)\n `itemfreq` is deprecated!\n `itemfreq` is deprecated and will be removed in a future version. Use instead `np.unique(..., return_counts=True)`\n \n Return a 2-D array of item frequencies.\n \n Parameters\n ----------\n a : (N,) array_like\n Input array.\n \n Returns\n -------\n itemfreq : (K, 2) ndarray\n A 2-D frequency table. Column 1 contains sorted, unique values from\n `a`, column 2 contains their respective counts.\n \n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])\n >>> stats.itemfreq(a)\n array([[ 0., 2.],\n [ 1., 4.],\n [ 2., 2.],\n [ 4., 1.],\n [ 5., 1.]])\n >>> np.bincount(a)\n array([2, 4, 2, 0, 1, 1])\n \n >>> stats.itemfreq(a/10.)\n array([[ 0. , 2. ],\n [ 0.1, 4. ],\n [ 0.2, 2. ],\n [ 0.4, 1. ],\n [ 0.5, 1. ]])\n \n jarque_bera(x)\n Perform the Jarque-Bera goodness of fit test on sample data.\n \n The Jarque-Bera test tests whether the sample data has the skewness and\n kurtosis matching a normal distribution.\n \n Note that this test only works for a large enough number of data samples\n (>2000) as the test statistic asymptotically has a Chi-squared distribution\n with 2 degrees of freedom.\n \n Parameters\n ----------\n x : array_like\n Observations of a random variable.\n \n Returns\n -------\n jb_value : float\n The test statistic.\n p : float\n The p-value for the hypothesis test.\n \n References\n ----------\n .. [1] Jarque, C. and Bera, A. 
(1980) \"Efficient tests for normality,\n homoscedasticity and serial independence of regression residuals\",\n 6 Econometric Letters 255-259.\n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(987654321)\n >>> x = np.random.normal(0, 1, 100000)\n >>> y = np.random.rayleigh(1, 100000)\n >>> stats.jarque_bera(x)\n (4.7165707989581342, 0.09458225503041906)\n >>> stats.jarque_bera(y)\n (6713.7098548143422, 0.0)\n \n kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto')\n Calculate Kendall's tau, a correlation measure for ordinal data.\n \n Kendall's tau is a measure of the correspondence between two rankings.\n Values close to 1 indicate strong agreement, values close to -1 indicate\n strong disagreement. This is the 1945 \"tau-b\" version of Kendall's\n tau [2]_, which can account for ties and which reduces to the 1938 \"tau-a\"\n version [1]_ in absence of ties.\n \n Parameters\n ----------\n x, y : array_like\n Arrays of rankings, of the same shape. If arrays are not 1-D, they will\n be flattened to 1-D.\n initial_lexsort : bool, optional\n Unused (deprecated).\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n method : {'auto', 'asymptotic', 'exact'}, optional\n Defines which method is used to calculate the p-value [5]_.\n The following options are available (default is 'auto'):\n \n * 'auto': selects the appropriate method based on a trade-off between\n speed and accuracy\n * 'asymptotic': uses a normal approximation valid for large samples\n * 'exact': computes the exact p-value, but can only be used if no ties\n are present\n \n Returns\n -------\n correlation : float\n The tau statistic.\n pvalue : float\n The two-sided p-value for a hypothesis test whose null hypothesis is\n an absence of association, tau = 0.\n \n See Also\n --------\n spearmanr : Calculates a Spearman rank-order correlation coefficient.\n theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).\n weightedtau : Computes a weighted version of Kendall's tau.\n \n Notes\n -----\n The definition of Kendall's tau that is used is [2]_::\n \n tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))\n \n where P is the number of concordant pairs, Q the number of discordant\n pairs, T the number of ties only in `x`, and U the number of ties only in\n `y`. If a tie occurs for the same pair in both `x` and `y`, it is not\n added to either T or U.\n \n References\n ----------\n .. [1] Maurice G. Kendall, \"A New Measure of Rank Correlation\", Biometrika\n Vol. 30, No. 1/2, pp. 81-93, 1938.\n .. [2] Maurice G. Kendall, \"The treatment of ties in ranking problems\",\n Biometrika Vol. 33, No. 3, pp. 239-251. 1945.\n .. [3] Gottfried E. Noether, \"Elements of Nonparametric Statistics\", John\n Wiley & Sons, 1967.\n .. [4] Peter M. Fenwick, \"A new data structure for cumulative frequency\n tables\", Software: Practice and Experience, Vol. 24, No. 3,\n pp. 327-336, 1994.\n .. [5] Maurice G. 
Kendall, \"Rank Correlation Methods\" (4th Edition),\n Charles Griffin & Co., 1970.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x1 = [12, 2, 1, 12, 2]\n >>> x2 = [1, 4, 7, 1, 0]\n >>> tau, p_value = stats.kendalltau(x1, x2)\n >>> tau\n -0.47140452079103173\n >>> p_value\n 0.2827454599327748\n \n kruskal(*args, **kwargs)\n Compute the Kruskal-Wallis H-test for independent samples.\n \n The Kruskal-Wallis H-test tests the null hypothesis that the population\n median of all of the groups are equal. It is a non-parametric version of\n ANOVA. The test works on 2 or more independent samples, which may have\n different sizes. Note that rejecting the null hypothesis does not\n indicate which of the groups differs. Post hoc comparisons between\n groups are required to determine which groups are different.\n \n Parameters\n ----------\n sample1, sample2, ... : array_like\n Two or more arrays with the sample measurements can be given as\n arguments.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float\n The Kruskal-Wallis H statistic, corrected for ties.\n pvalue : float\n The p-value for the test using the assumption that H has a chi\n square distribution.\n \n See Also\n --------\n f_oneway : 1-way ANOVA.\n mannwhitneyu : Mann-Whitney rank test on two samples.\n friedmanchisquare : Friedman test for repeated measurements.\n \n Notes\n -----\n Due to the assumption that H has a chi square distribution, the number\n of samples in each group must not be too small. A typical rule is\n that each sample must have at least 5 measurements.\n \n References\n ----------\n .. [1] W. H. Kruskal & W. W. Wallis, \"Use of Ranks in\n One-Criterion Variance Analysis\", Journal of the American Statistical\n Association, Vol. 47, Issue 260, pp. 583-621, 1952.\n .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = [1, 3, 5, 7, 9]\n >>> y = [2, 4, 6, 8, 10]\n >>> stats.kruskal(x, y)\n KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)\n \n >>> x = [1, 1, 1]\n >>> y = [2, 2, 2]\n >>> z = [2, 2]\n >>> stats.kruskal(x, y, z)\n KruskalResult(statistic=7.0, pvalue=0.0301973834223185)\n \n ks_2samp(data1, data2, alternative='two-sided', mode='auto')\n Compute the Kolmogorov-Smirnov statistic on 2 samples.\n \n This is a two-sided test for the null hypothesis that 2 independent samples\n are drawn from the same continuous distribution. 
The alternative hypothesis\n can be either 'two-sided' (default), 'less' or 'greater'.\n \n Parameters\n ----------\n data1, data2 : sequence of 1-D ndarrays\n Two arrays of sample observations assumed to be drawn from a continuous\n distribution, sample sizes can be different.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is 'two-sided'):\n \n * 'two-sided'\n * 'less': one-sided, see explanation in Notes\n * 'greater': one-sided, see explanation in Notes\n mode : {'auto', 'exact', 'asymp'}, optional\n Defines the method used for calculating the p-value.\n The following options are available (default is 'auto'):\n \n * 'auto' : use 'exact' for small size arrays, 'asymp' for large\n * 'exact' : use approximation to exact distribution of test statistic\n * 'asymp' : use asymptotic distribution of test statistic\n \n Returns\n -------\n statistic : float\n KS statistic.\n pvalue : float\n Two-tailed p-value.\n \n See Also\n --------\n kstest\n \n Notes\n -----\n This tests whether 2 samples are drawn from the same distribution. Note\n that, like in the case of the one-sample KS test, the distribution is\n assumed to be continuous.\n \n In the one-sided test, the alternative is that the empirical\n cumulative distribution function F(x) of the data1 variable is \"less\"\n or \"greater\" than the empirical cumulative distribution function G(x)\n of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.\n \n If the KS statistic is small or the p-value is high, then we cannot\n reject the hypothesis that the distributions of the two samples\n are the same.\n \n If the mode is 'auto', the computation is exact if the sample sizes are\n less than 10000. For larger sizes, the computation uses the\n Kolmogorov-Smirnov distributions to compute an approximate value.\n \n We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.\n \n References\n ----------\n .. [1] Hodges, J.L. Jr., \"The Significance Probability of the Smirnov\n Two-Sample Test,\" Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.\n \n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678) #fix random seed to get the same result\n >>> n1 = 200 # size of first sample\n >>> n2 = 300 # size of second sample\n \n For a different distribution, we can reject the null hypothesis since the\n pvalue is below 1%:\n \n >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)\n >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)\n >>> stats.ks_2samp(rvs1, rvs2)\n (0.20833333333333334, 5.129279597781977e-05)\n \n For a slightly different distribution, we cannot reject the null hypothesis\n at a 10% or lower alpha since the p-value at 0.144 is higher than 10%\n \n >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)\n >>> stats.ks_2samp(rvs1, rvs3)\n (0.10333333333333333, 0.14691437867433876)\n \n For an identical distribution, we cannot reject the null hypothesis since\n the p-value is high, 41%:\n \n >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)\n >>> stats.ks_2samp(rvs1, rvs4)\n (0.07999999999999996, 0.41126949729859719)\n \n kstat(data, n=2)\n Return the nth k-statistic (1<=n<=4 so far).\n \n The nth k-statistic k_n is the unique symmetric unbiased estimator of the\n nth cumulant kappa_n.\n \n Parameters\n ----------\n data : array_like\n Input array. 
Note that n-D input gets flattened.\n n : int, {1, 2, 3, 4}, optional\n Default is equal to 2.\n \n Returns\n -------\n kstat : float\n The nth k-statistic.\n \n See Also\n --------\n kstatvar: Returns an unbiased estimator of the variance of the k-statistic.\n moment: Returns the n-th central moment about the mean for a sample.\n \n Notes\n -----\n For a sample size n, the first few k-statistics are given by:\n \n .. math::\n \n k_{1} = \\mu\n k_{2} = \\frac{n}{n-1} m_{2}\n k_{3} = \\frac{ n^{2} } {(n-1) (n-2)} m_{3}\n k_{4} = \\frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}\n \n where :math:`\\mu` is the sample mean, :math:`m_2` is the sample\n variance, and :math:`m_i` is the i-th sample central moment.\n \n References\n ----------\n http://mathworld.wolfram.com/k-Statistic.html\n \n http://mathworld.wolfram.com/Cumulant.html\n \n Examples\n --------\n >>> from scipy import stats\n >>> rndm = np.random.RandomState(1234)\n \n As sample size increases, n-th moment and n-th k-statistic converge to the\n same number (although they aren't identical). In the case of the normal\n distribution, they converge to zero.\n \n >>> for n in [2, 3, 4, 5, 6, 7]:\n ... x = rndm.normal(size=10**n)\n ... m, k = stats.moment(x, 3), stats.kstat(x, 3)\n ... print(\"%.3g %.3g %.3g\" % (m, k, m-k))\n -0.631 -0.651 0.0194\n 0.0282 0.0283 -8.49e-05\n -0.0454 -0.0454 1.36e-05\n 7.53e-05 7.53e-05 -2.26e-09\n 0.00166 0.00166 -4.99e-09\n -2.88e-06 -2.88e-06 8.63e-13\n \n kstatvar(data, n=2)\n Return an unbiased estimator of the variance of the k-statistic.\n \n See `kstat` for more details of the k-statistic.\n \n Parameters\n ----------\n data : array_like\n Input array. Note that n-D input gets flattened.\n n : int, {1, 2}, optional\n Default is equal to 2.\n \n Returns\n -------\n kstatvar : float\n The nth k-statistic variance.\n \n See Also\n --------\n kstat: Returns the n-th k-statistic.\n moment: Returns the n-th central moment about the mean for a sample.\n \n Notes\n -----\n The variances of the first few k-statistics are given by:\n \n .. math::\n \n var(k_{1}) = \\frac{\\kappa^2}{n}\n var(k_{2}) = \\frac{\\kappa^4}{n} + \\frac{2\\kappa^2_{2}}{n - 1}\n var(k_{3}) = \\frac{\\kappa^6}{n} + \\frac{9 \\kappa_2 \\kappa_4}{n - 1} +\n \\frac{9 \\kappa^2_{3}}{n - 1} +\n \\frac{6 n \\kappa^3_{2}}{(n-1) (n-2)}\n var(k_{4}) = \\frac{\\kappa^8}{n} + \\frac{16 \\kappa_2 \\kappa_6}{n - 1} +\n \\frac{48 \\kappa_{3} \\kappa_5}{n - 1} +\n \\frac{34 \\kappa^2_{4}}{n-1} + \\frac{72 n \\kappa^2_{2} \\kappa_4}{(n - 1) (n - 2)} +\n \\frac{144 n \\kappa_{2} \\kappa^2_{3}}{(n - 1) (n - 2)} +\n \\frac{24 (n + 1) n \\kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}\n \n kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx')\n Perform the Kolmogorov-Smirnov test for goodness of fit.\n \n This performs a test of the distribution F(x) of an observed\n random variable against a given distribution G(x). Under the null\n hypothesis, the two distributions are identical, F(x)=G(x). The\n alternative hypothesis can be either 'two-sided' (default), 'less'\n or 'greater'. 
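[Editor's aside — a short sketch, assumed usage rather than text from the original help dump: kstatvar() above has no Examples section, so this pairs it with kstat() as the docstrings describe — an unbiased cumulant estimate together with the variance of that estimate. The seed and sample size are arbitrary.]

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
data = rng.normal(size=1000)

k2 = stats.kstat(data, n=2)         # unbiased estimate of the 2nd cumulant (the variance)
k2_var = stats.kstatvar(data, n=2)  # variance of that k-statistic
print(k2, k2_var)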
The KS test is only valid for continuous distributions.\n \n Parameters\n ----------\n rvs : str, array_like, or callable\n If a string, it should be the name of a distribution in `scipy.stats`.\n If an array, it should be a 1-D array of observations of random\n variables.\n If a callable, it should be a function to generate random variables;\n it is required to have a keyword argument `size`.\n cdf : str or callable\n If a string, it should be the name of a distribution in `scipy.stats`.\n If `rvs` is a string then `cdf` can be False or the same as `rvs`.\n If a callable, that callable is used to calculate the cdf.\n args : tuple, sequence, optional\n Distribution parameters, used if `rvs` or `cdf` are strings.\n N : int, optional\n Sample size if `rvs` is string or callable. Default is 20.\n alternative : {'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is 'two-sided'):\n \n * 'two-sided'\n * 'less': one-sided, see explanation in Notes\n * 'greater': one-sided, see explanation in Notes\n mode : {'approx', 'asymp'}, optional\n Defines the distribution used for calculating the p-value.\n The following options are available (default is 'approx'):\n \n * 'approx': use approximation to exact distribution of test statistic\n * 'asymp': use asymptotic distribution of test statistic\n \n Returns\n -------\n statistic : float\n KS test statistic, either D, D+ or D-.\n pvalue : float\n One-tailed or two-tailed p-value.\n \n See Also\n --------\n ks_2samp\n \n Notes\n -----\n In the one-sided test, the alternative is that the empirical\n cumulative distribution function of the random variable is \"less\"\n or \"greater\" than the cumulative distribution function G(x) of the\n hypothesis, ``F(x)<=G(x)``, resp. 
``F(x)>=G(x)``.\n \n Examples\n --------\n >>> from scipy import stats\n \n >>> x = np.linspace(-15, 15, 9)\n >>> stats.kstest(x, 'norm')\n (0.44435602715924361, 0.038850142705171065)\n \n >>> np.random.seed(987654321) # set random seed to get the same result\n >>> stats.kstest('norm', False, N=100)\n (0.058352892479417884, 0.88531190944151261)\n \n The above lines are equivalent to:\n \n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.norm.rvs(size=100), 'norm')\n (0.058352892479417884, 0.88531190944151261)\n \n *Test against one-sided alternative hypothesis*\n \n Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:\n \n >>> np.random.seed(987654321)\n >>> x = stats.norm.rvs(loc=0.2, size=100)\n >>> stats.kstest(x,'norm', alternative = 'less')\n (0.12464329735846891, 0.040989164077641749)\n \n Reject equal distribution against alternative hypothesis: less\n \n >>> stats.kstest(x,'norm', alternative = 'greater')\n (0.0072115233216311081, 0.98531158590396395)\n \n Don't reject equal distribution against alternative hypothesis: greater\n \n >>> stats.kstest(x,'norm', mode='asymp')\n (0.12464329735846891, 0.08944488871182088)\n \n *Testing t distributed random variables against normal distribution*\n \n With 100 degrees of freedom the t distribution looks close to the normal\n distribution, and the K-S test does not reject the hypothesis that the\n sample came from the normal distribution:\n \n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.t.rvs(100,size=100),'norm')\n (0.072018929165471257, 0.67630062862479168)\n \n With 3 degrees of freedom the t distribution looks sufficiently different\n from the normal distribution, that we can reject the hypothesis that the\n sample came from the normal distribution at the 10% level:\n \n >>> np.random.seed(987654321)\n >>> stats.kstest(stats.t.rvs(3,size=100),'norm')\n (0.131016895759829, 0.058826222555312224)\n \n kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate')\n Compute the kurtosis (Fisher or Pearson) of a dataset.\n \n Kurtosis is the fourth central moment divided by the square of the\n variance. If Fisher's definition is used, then 3.0 is subtracted from\n the result to give 0.0 for a normal distribution.\n \n If bias is False then the kurtosis is calculated using k statistics to\n eliminate bias coming from biased moment estimators\n \n Use `kurtosistest` to see if result is close enough to normal.\n \n Parameters\n ----------\n a : array\n Data for which the kurtosis is calculated.\n axis : int or None, optional\n Axis along which the kurtosis is calculated. Default is 0.\n If None, compute over the whole array `a`.\n fisher : bool, optional\n If True, Fisher's definition is used (normal ==> 0.0). If False,\n Pearson's definition is used (normal ==> 3.0).\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n \n Returns\n -------\n kurtosis : array\n The kurtosis of values along an axis. If all values are equal,\n return -3 for Fisher's definition and 0 for Pearson's definition.\n \n References\n ----------\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 
2000.\n \n Examples\n --------\n In Fisher's definiton, the kurtosis of the normal distribution is zero.\n In the following example, the kurtosis is close to zero, because it was\n calculated from the dataset, not from the continuous distribution.\n \n >>> from scipy.stats import norm, kurtosis\n >>> data = norm.rvs(size=1000, random_state=3)\n >>> kurtosis(data)\n -0.06928694200380558\n \n The distribution with a higher kurtosis has a heavier tail.\n The zero valued kurtosis of the normal distribution in Fisher's definition\n can serve as a reference point.\n \n >>> import matplotlib.pyplot as plt\n >>> import scipy.stats as stats\n >>> from scipy.stats import kurtosis\n \n >>> x = np.linspace(-5, 5, 100)\n >>> ax = plt.subplot()\n >>> distnames = ['laplace', 'norm', 'uniform']\n \n >>> for distname in distnames:\n ... if distname == 'uniform':\n ... dist = getattr(stats, distname)(loc=-2, scale=4)\n ... else:\n ... dist = getattr(stats, distname)\n ... data = dist.rvs(size=1000)\n ... kur = kurtosis(data, fisher=True)\n ... y = dist.pdf(x)\n ... ax.plot(x, y, label=\"{}, {}\".format(distname, round(kur, 3)))\n ... ax.legend()\n \n The Laplace distribution has a heavier tail than the normal distribution.\n The uniform distribution (which has negative kurtosis) has the thinnest\n tail.\n \n kurtosistest(a, axis=0, nan_policy='propagate')\n Test whether a dataset has normal kurtosis.\n \n This function tests the null hypothesis that the kurtosis\n of the population from which the sample was drawn is that\n of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.\n \n Parameters\n ----------\n a : array\n Array of the sample data.\n axis : int or None, optional\n Axis along which to compute test. Default is 0. If None,\n compute over the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float\n The computed z-score for this test.\n pvalue : float\n The two-sided p-value for the hypothesis test.\n \n Notes\n -----\n Valid only for n>20. This function uses the method described in [1]_.\n \n References\n ----------\n .. [1] see e.g. F. J. Anscombe, W. J. Glynn, \"Distribution of the kurtosis\n statistic b2 for normal samples\", Biometrika, vol. 70, pp. 227-234, 1983.\n \n Examples\n --------\n >>> from scipy.stats import kurtosistest\n >>> kurtosistest(list(range(20)))\n KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)\n \n >>> np.random.seed(28041990)\n >>> s = np.random.normal(0, 1, 1000)\n >>> kurtosistest(s)\n KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895)\n \n levene(*args, **kwds)\n Perform Levene test for equal variances.\n \n The Levene test tests the null hypothesis that all input samples\n are from populations with equal variances. Levene's test is an\n alternative to Bartlett's test `bartlett` in the case where\n there are significant deviations from normality.\n \n Parameters\n ----------\n sample1, sample2, ... : array_like\n The sample data, possibly with different lengths. Only one-dimensional\n samples are accepted.\n center : {'mean', 'median', 'trimmed'}, optional\n Which function of the data to use in the test. 
The default\n is 'median'.\n proportiontocut : float, optional\n When `center` is 'trimmed', this gives the proportion of data points\n to cut from each end. (See `scipy.stats.trim_mean`.)\n Default is 0.05.\n \n Returns\n -------\n statistic : float\n The test statistic.\n pvalue : float\n The p-value for the test.\n \n Notes\n -----\n Three variations of Levene's test are possible. The possibilities\n and their recommended usages are:\n \n * 'median' : Recommended for skewed (non-normal) distributions>\n * 'mean' : Recommended for symmetric, moderate-tailed distributions.\n * 'trimmed' : Recommended for heavy-tailed distributions.\n \n The test version using the mean was proposed in the original article\n of Levene ([2]_) while the median and trimmed mean have been studied by\n Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe\n test.\n \n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\n .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:\n Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,\n Stanford University Press, pp. 278-292.\n .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American\n Statistical Association, 69, 364-367\n \n linregress(x, y=None)\n Calculate a linear least-squares regression for two sets of measurements.\n \n Parameters\n ----------\n x, y : array_like\n Two sets of measurements. Both arrays should have the same length. If\n only `x` is given (and ``y=None``), then it must be a two-dimensional\n array where one dimension has length 2. The two sets of measurements\n are then found by splitting the array along the length-2 dimension. In\n the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is\n equivalent to ``linregress(x[0], x[1])``.\n \n Returns\n -------\n slope : float\n Slope of the regression line.\n intercept : float\n Intercept of the regression line.\n rvalue : float\n Correlation coefficient.\n pvalue : float\n Two-sided p-value for a hypothesis test whose null hypothesis is\n that the slope is zero, using Wald Test with t-distribution of\n the test statistic.\n stderr : float\n Standard error of the estimated gradient.\n \n See also\n --------\n :func:`scipy.optimize.curve_fit` : Use non-linear\n least squares to fit a function to data.\n :func:`scipy.optimize.leastsq` : Minimize the sum of\n squares of a set of equations.\n \n Notes\n -----\n Missing values are considered pair-wise: if a value is missing in `x`,\n the corresponding value in `y` is masked.\n \n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy import stats\n \n Generate some data:\n \n >>> np.random.seed(12345678)\n >>> x = np.random.random(10)\n >>> y = 1.6*x + np.random.random(10)\n \n Perform the linear regression:\n \n >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n >>> print(\"slope: %f intercept: %f\" % (slope, intercept))\n slope: 1.944864 intercept: 0.268578\n \n To get coefficient of determination (R-squared):\n \n >>> print(\"R-squared: %f\" % r_value**2)\n R-squared: 0.735498\n \n Plot the data along with the fitted line:\n \n >>> plt.plot(x, y, 'o', label='original data')\n >>> plt.plot(x, intercept + slope*x, 'r', label='fitted line')\n >>> plt.legend()\n >>> plt.show()\n \n Example for the case where only x is provided as a 2x2 array:\n \n >>> x = np.array([[0, 1], [0, 2]])\n >>> r = stats.linregress(x)\n >>> r.slope, r.intercept\n (2.0, 0.0)\n \n mannwhitneyu(x, y, use_continuity=True, 
alternative=None)\n Compute the Mann-Whitney rank test on samples x and y.\n \n Parameters\n ----------\n x, y : array_like\n Array of samples, should be one-dimensional.\n use_continuity : bool, optional\n Whether a continuity correction (1/2.) should be taken into\n account. Default is True.\n alternative : {None, 'two-sided', 'less', 'greater'}, optional\n Defines the alternative hypothesis.\n The following options are available (default is None):\n \n * None: computes p-value half the size of the 'two-sided' p-value and\n a different U statistic. The default behavior is not the same as\n using 'less' or 'greater'; it only exists for backward compatibility\n and is deprecated.\n * 'two-sided'\n * 'less': one-sided\n * 'greater': one-sided\n \n Use of the None option is deprecated.\n \n Returns\n -------\n statistic : float\n The Mann-Whitney U statistic, equal to min(U for x, U for y) if\n `alternative` is equal to None (deprecated; exists for backward\n compatibility), and U for y otherwise.\n pvalue : float\n p-value assuming an asymptotic normal distribution. One-sided or\n two-sided, depending on the choice of `alternative`.\n \n Notes\n -----\n Use only when the number of observation in each sample is > 20 and\n you have 2 independent samples of ranks. Mann-Whitney U is\n significant if the u-obtained is LESS THAN or equal to the critical\n value of U.\n \n This test corrects for ties and by default uses a continuity correction.\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test\n \n .. [2] H.B. Mann and D.R. Whitney, \"On a Test of Whether one of Two Random\n Variables is Stochastically Larger than the Other,\" The Annals of\n Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947.\n \n median_absolute_deviation(x, axis=0, center=<function median at 0x0000023F3B6994C8>, scale=1.4826, nan_policy='propagate')\n Compute the median absolute deviation of the data along the given axis.\n \n The median absolute deviation (MAD, [1]_) computes the median over the\n absolute deviations from the median. It is a measure of dispersion\n similar to the standard deviation but more robust to outliers [2]_.\n \n The MAD of an empty array is ``np.nan``.\n \n .. versionadded:: 1.3.0\n \n Parameters\n ----------\n x : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the range is computed. Default is 0. If None, compute\n the MAD over the entire array.\n center : callable, optional\n A function that will return the central value. The default is to use\n np.median. Any user defined function used will need to have the function\n signature ``func(arr, axis)``.\n scale : int, optional\n The scaling factor applied to the MAD. The default scale (1.4826)\n ensures consistency with the standard deviation for normally distributed\n data.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n mad : scalar or ndarray\n If ``axis=None``, a scalar is returned. If the input contains\n integers or floats of smaller precision than ``np.float64``, then the\n output data-type is ``np.float64``. 
Otherwise, the output data-type is\n the same as that of the input.\n \n See Also\n --------\n numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,\n scipy.stats.tstd, scipy.stats.tvar\n \n Notes\n -----\n The `center` argument only affects the calculation of the central value\n around which the MAD is calculated. That is, passing in ``center=np.mean``\n will calculate the MAD around the mean - it will not calculate the *mean*\n absolute deviation.\n \n References\n ----------\n .. [1] \"Median absolute deviation\" https://en.wikipedia.org/wiki/Median_absolute_deviation\n .. [2] \"Robust measures of scale\" https://en.wikipedia.org/wiki/Robust_measures_of_scale\n \n Examples\n --------\n When comparing the behavior of `median_absolute_deviation` with ``np.std``,\n the latter is affected when we change a single value of an array to have an\n outlier value while the MAD hardly changes:\n \n >>> from scipy import stats\n >>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)\n >>> x.std()\n 0.9973906394005013\n >>> stats.median_absolute_deviation(x)\n 1.2280762773108278\n >>> x[0] = 345.6\n >>> x.std()\n 34.42304872314415\n >>> stats.median_absolute_deviation(x)\n 1.2340335571164334\n \n Axis handling example:\n \n >>> x = np.array([[10, 7, 4], [3, 2, 1]])\n >>> x\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> stats.median_absolute_deviation(x)\n array([5.1891, 3.7065, 2.2239])\n >>> stats.median_absolute_deviation(x, axis=None)\n 2.9652\n \n median_test(*args, **kwds)\n Perform a Mood's median test.\n \n Test that two or more samples come from populations with the same median.\n \n Let ``n = len(args)`` be the number of samples. The \"grand median\" of\n all the data is computed, and a contingency table is formed by\n classifying the values in each sample as being above or below the grand\n median. The contingency table, along with `correction` and `lambda_`,\n are passed to `scipy.stats.chi2_contingency` to compute the test statistic\n and p-value.\n \n Parameters\n ----------\n sample1, sample2, ... : array_like\n The set of samples. There must be at least two samples.\n Each sample must be a one-dimensional sequence containing at least\n one value. The samples are not required to have the same length.\n ties : str, optional\n Determines how values equal to the grand median are classified in\n the contingency table. The string must be one of::\n \n \"below\":\n Values equal to the grand median are counted as \"below\".\n \"above\":\n Values equal to the grand median are counted as \"above\".\n \"ignore\":\n Values equal to the grand median are not counted.\n \n The default is \"below\".\n correction : bool, optional\n If True, *and* there are just two samples, apply Yates' correction\n for continuity when computing the test statistic associated with\n the contingency table. Default is True.\n lambda_ : float or str, optional\n By default, the statistic computed in this test is Pearson's\n chi-squared statistic. `lambda_` allows a statistic from the\n Cressie-Read power divergence family to be used instead. See\n `power_divergence` for details.\n Default is 1 (Pearson's chi-squared statistic).\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n \n Returns\n -------\n stat : float\n The test statistic. The statistic that is returned is determined by\n `lambda_`. 
The default is Pearson's chi-squared statistic.\n p : float\n The p-value of the test.\n m : float\n The grand median.\n table : ndarray\n The contingency table. The shape of the table is (2, n), where\n n is the number of samples. The first row holds the counts of the\n values above the grand median, and the second row holds the counts\n of the values below the grand median. The table allows further\n analysis with, for example, `scipy.stats.chi2_contingency`, or with\n `scipy.stats.fisher_exact` if there are two samples, without having\n to recompute the table. If ``nan_policy`` is \"propagate\" and there\n are nans in the input, the return value for ``table`` is ``None``.\n \n See Also\n --------\n kruskal : Compute the Kruskal-Wallis H-test for independent samples.\n mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.\n \n Notes\n -----\n .. versionadded:: 0.15.0\n \n References\n ----------\n .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill\n (1950), pp. 394-399.\n .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).\n See Sections 8.12 and 10.15.\n \n Examples\n --------\n A biologist runs an experiment in which there are three groups of plants.\n Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.\n Each plant produces a number of seeds. The seed counts for each group\n are::\n \n Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49\n Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99\n Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84\n \n The following code applies Mood's median test to these samples.\n \n >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]\n >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]\n >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]\n >>> from scipy.stats import median_test\n >>> stat, p, med, tbl = median_test(g1, g2, g3)\n \n The median is\n \n >>> med\n 34.0\n \n and the contingency table is\n \n >>> tbl\n array([[ 5, 10, 7],\n [11, 5, 10]])\n \n `p` is too large to conclude that the medians are not the same:\n \n >>> p\n 0.12609082774093244\n \n The \"G-test\" can be performed by passing ``lambda_=\"log-likelihood\"`` to\n `median_test`.\n \n >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_=\"log-likelihood\")\n >>> p\n 0.12224779737117837\n \n The median occurs several times in the data, so we'll get a different\n result if, for example, ``ties=\"above\"`` is used:\n \n >>> stat, p, med, tbl = median_test(g1, g2, g3, ties=\"above\")\n >>> p\n 0.063873276069553273\n \n >>> tbl\n array([[ 5, 11, 9],\n [11, 4, 8]])\n \n This example demonstrates that if the data set is not large and there\n are values equal to the median, the p-value can be sensitive to the\n choice of `ties`.\n \n mode(a, axis=0, nan_policy='propagate')\n Return an array of the modal (most common) value in the passed array.\n \n If there is more than one such value, only the smallest is returned.\n The bin-count for the modal bins is also returned.\n \n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. 
If None, compute over\n the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n mode : ndarray\n Array of modal values.\n count : ndarray\n Array of counts for each mode.\n \n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n ... [3, 2, 1, 7],\n ... [8, 1, 8, 4],\n ... [5, 3, 0, 5],\n ... [4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))\n \n To get mode of whole array, specify ``axis=None``:\n \n >>> stats.mode(a, axis=None)\n (array([3]), array([3]))\n \n moment(a, moment=1, axis=0, nan_policy='propagate')\n Calculate the nth moment about the mean for a sample.\n \n A moment is a specific quantitative measure of the shape of a set of\n points. It is often used to calculate coefficients of skewness and kurtosis\n due to its close relationship with them.\n \n Parameters\n ----------\n a : array_like\n Input array.\n moment : int or array_like of ints, optional\n Order of central moment that is returned. Default is 1.\n axis : int or None, optional\n Axis along which the central moment is computed. Default is 0.\n If None, compute over the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n n-th central moment : ndarray or float\n The appropriate moment along the given axis or over all values if axis\n is None. The denominator for the moment calculation is the number of\n observations, no degrees of freedom correction is done.\n \n See Also\n --------\n kurtosis, skew, describe\n \n Notes\n -----\n The k-th central moment of a data sample is:\n \n .. math::\n \n m_k = \\frac{1}{n} \\sum_{i = 1}^n (x_i - \\bar{x})^k\n \n Where n is the number of samples and x-bar is the mean. This function uses\n exponentiation by squares [1]_ for efficiency.\n \n References\n ----------\n .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms\n \n Examples\n --------\n >>> from scipy.stats import moment\n >>> moment([1, 2, 3, 4, 5], moment=1)\n 0.0\n >>> moment([1, 2, 3, 4, 5], moment=2)\n 2.0\n \n mood(x, y, axis=0)\n Perform Mood's test for equal scale parameters.\n \n Mood's two-sample test for scale parameters is a non-parametric\n test for the null hypothesis that two samples are drawn from the\n same distribution with the same scale parameter.\n \n Parameters\n ----------\n x, y : array_like\n Arrays of sample data.\n axis : int, optional\n The axis along which the samples are tested. `x` and `y` can be of\n different length along `axis`.\n If `axis` is None, `x` and `y` are flattened and the test is done on\n all values in the flattened arrays.\n \n Returns\n -------\n z : scalar or ndarray\n The z-score for the hypothesis test. 
For 1-D inputs a scalar is\n returned.\n p-value : scalar ndarray\n The p-value for the hypothesis test.\n \n See Also\n --------\n fligner : A non-parametric test for the equality of k variances\n ansari : A non-parametric test for the equality of 2 variances\n bartlett : A parametric test for equality of k variances in normal samples\n levene : A parametric test for equality of k variances\n \n Notes\n -----\n The data are assumed to be drawn from probability distributions ``f(x)``\n and ``f(x/s) / s`` respectively, for some probability density function f.\n The null hypothesis is that ``s == 1``.\n \n For multi-dimensional arrays, if the inputs are of shapes\n ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the\n resulting z and p values will have shape ``(n0, n2, n3)``. Note that\n ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.\n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(1234)\n >>> x2 = np.random.randn(2, 45, 6, 7)\n >>> x1 = np.random.randn(2, 30, 6, 7)\n >>> z, p = stats.mood(x1, x2, axis=1)\n >>> p.shape\n (2, 6, 7)\n \n Find the number of points where the difference in scale is not significant:\n \n >>> (p > 0.1).sum()\n 74\n \n Perform the test with different scales:\n \n >>> x1 = np.random.randn(2, 30)\n >>> x2 = np.random.randn(2, 35) * 10.0\n >>> stats.mood(x1, x2, axis=1)\n (array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))\n \n multiscale_graphcorr(x, y, compute_distance=<function _euclidean_dist at 0x0000023F4CB22048>, reps=1000, workers=1, is_twosamp=False, random_state=None)\n Computes the Multiscale Graph Correlation (MGC) test statistic.\n \n Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for\n one property (e.g. cloud density), and the :math:`l`-nearest neighbors for\n the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is\n called the \"scale\". A priori, however, it is not know which scales will be\n most informative. So, MGC computes all distance pairs, and then efficiently\n computes the distance correlations for all scales. The local correlations\n illustrate which scales are relatively informative about the relationship.\n The key, therefore, to successfully discover and decipher relationships\n between disparate data modalities is to adaptively determine which scales\n are the most informative, and the geometric implication for the most\n informative scales. Doing so not only provides an estimate of whether the\n modalities are related, but also provides insight into how the\n determination was made. This is especially important in high-dimensional\n data, where simple visualizations do not reveal relationships to the\n unaided human eye. Characterizations of this implementation in particular\n have been derived from and benchmarked within in [2]_.\n \n Parameters\n ----------\n x, y : ndarray\n If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is\n the number of samples and `p` and `q` are the number of dimensions,\n then the MGC independence test will be run. Alternatively, ``x`` and\n ``y`` can have shapes ``(n, n)`` if they are distance or similarity\n matrices, and ``compute_distance`` must be sent to ``None``. If ``x``\n and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired\n two-sample MGC test will be run.\n compute_distance : callable, optional\n A function that computes the distance or similarity among the samples\n within each data matrix. 
Set to ``None`` if ``x`` and ``y`` are\n already distance matrices. The default uses the euclidean norm metric.\n If you are calling a custom function, either create the distance\n matrix before-hand or create a function of the form\n ``compute_distance(x)`` where `x` is the data matrix for which\n pairwise distances are calculated.\n reps : int, optional\n The number of replications used to estimate the null when using the\n permutation test. The default is ``1000``.\n workers : int or map-like callable, optional\n If ``workers`` is an int the population is subdivided into ``workers``\n sections and evaluated in parallel (uses ``multiprocessing.Pool\n <multiprocessing>``). Supply ``-1`` to use all cores available to the\n Process. Alternatively supply a map-like callable, such as\n ``multiprocessing.Pool.map`` for evaluating the p-value in parallel.\n This evaluation is carried out as ``workers(func, iterable)``.\n Requires that `func` be pickleable. The default is ``1``.\n is_twosamp : bool, optional\n If `True`, a two sample test will be run. If ``x`` and ``y`` have\n shapes ``(n, p)`` and ``(m, p)``, this optional will be overriden and\n set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes\n ``(n, p)`` and a two sample test is desired. The default is ``False``.\n random_state : int or np.random.RandomState instance, optional\n If already a RandomState instance, use it.\n If seed is an int, return a new RandomState instance seeded with seed.\n If None, use np.random.RandomState. Default is None.\n \n Returns\n -------\n stat : float\n The sample MGC test statistic within `[-1, 1]`.\n pvalue : float\n The p-value obtained via permutation.\n mgc_dict : dict\n Contains additional useful additional returns containing the following\n keys:\n \n - mgc_map : ndarray\n A 2D representation of the latent geometry of the relationship.\n of the relationship.\n - opt_scale : (int, int)\n The estimated optimal scale as a `(x, y)` pair.\n - null_dist : list\n The null distribution derived from the permuted matrices\n \n See Also\n --------\n pearsonr : Pearson correlation coefficient and p-value for testing\n non-correlation.\n kendalltau : Calculates Kendall's tau.\n spearmanr : Calculates a Spearman rank-order correlation coefficient.\n \n Notes\n -----\n A description of the process of MGC and applications on neuroscience data\n can be found in [1]_. It is performed using the following steps:\n \n #. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and\n modified to be mean zero columnwise. This results in two\n :math:`n \\times n` distance matrices :math:`A` and :math:`B` (the\n centering and unbiased modification) [3]_.\n \n #. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,\n \n * The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs\n are calculated for each property. Here, :math:`G_k (i, j)` indicates\n the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`\n and :math:`H_l (i, j)` indicates the :math:`l` smallested values of\n the :math:`i`-th row of :math:`B`\n \n * Let :math:`\\circ` denotes the entry-wise matrix product, then local\n correlations are summed and normalized using the following statistic:\n \n .. math::\n \n c^{kl} = \\frac{\\sum_{ij} A G_k B H_l}\n {\\sqrt{\\sum_{ij} A^2 G_k \\times \\sum_{ij} B^2 H_l}}\n \n #. The MGC test statistic is the smoothed optimal local correlation of\n :math:`\\{ c^{kl} \\}`. 
Denote the smoothing operation as :math:`R(\\cdot)`\n (which essentially set all isolated large correlations) as 0 and\n connected large correlations the same as before, see [3]_.) MGC is,\n \n .. math::\n \n MGC_n (x, y) = \\max_{(k, l)} R \\left(c^{kl} \\left( x_n, y_n \\right)\n \\right)\n \n The test statistic returns a value between :math:`(-1, 1)` since it is\n normalized.\n \n The p-value returned is calculated using a permutation test. This process\n is completed by first randomly permuting :math:`y` to estimate the null\n distribution and then calculating the probability of observing a test\n statistic, under the null, at least as extreme as the observed test\n statistic.\n \n MGC requires at least 5 samples to run with reliable results. It can also\n handle high-dimensional data sets.\n \n In addition, by manipulating the input data matrices, the two-sample\n testing problem can be reduced to the independence testing problem [4]_.\n Given sample data :math:`U` and :math:`V` of sizes :math:`p \\times n`\n :math:`p \\times m`, data matrix :math:`X` and :math:`Y` can be created as\n follows:\n \n .. math::\n \n X = [U | V] \\in \\mathcal{R}^{p \\times (n + m)}\n \n Y = [0_{1 \\times n} | 1_{1 \\times m}] \\in \\mathcal{R}^{(n + m)}\n \n Then, the MGC statistic can be calculated as normal. This methodology can\n be extended to similar tests such as distance correlation [4]_.\n \n .. versionadded:: 1.4.0\n \n References\n ----------\n .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,\n Maggioni, M., & Shen, C. (2019). Discovering and deciphering\n relationships across disparate data modalities. ELife.\n .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,\n Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).\n mgcpy: A Comprehensive High Dimensional Independence Testing Python\n Package. ArXiv:1907.02088 [Cs, Stat].\n .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance\n correlation to multiscale graph correlation. Journal of the American\n Statistical Association.\n .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of\n Distance and Kernel Methods for Hypothesis Testing. ArXiv:1806.05514\n [Cs, Stat].\n \n Examples\n --------\n >>> from scipy.stats import multiscale_graphcorr\n >>> x = np.arange(100)\n >>> y = x\n >>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)\n >>> '%.1f, %.3f' % (stat, pvalue)\n '1.0, 0.001'\n \n Alternatively,\n \n >>> x = np.arange(100)\n >>> y = x\n >>> mgc = multiscale_graphcorr(x, y)\n >>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)\n '1.0, 0.001'\n \n To run an unpaired two-sample test,\n \n >>> x = np.arange(100)\n >>> y = np.arange(79)\n >>> mgc = multiscale_graphcorr(x, y, random_state=1)\n >>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue)\n '0.033, 0.02'\n \n or, if shape of the inputs are the same,\n \n >>> x = np.arange(100)\n >>> y = x\n >>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)\n >>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue)\n '-0.008, 1.0'\n \n mvsdist(data)\n 'Frozen' distributions for mean, variance, and standard deviation of data.\n \n Parameters\n ----------\n data : array_like\n Input array. 
Converted to 1-D using ravel.\n Requires 2 or more data-points.\n \n Returns\n -------\n mdist : \"frozen\" distribution object\n Distribution object representing the mean of the data.\n vdist : \"frozen\" distribution object\n Distribution object representing the variance of the data.\n sdist : \"frozen\" distribution object\n Distribution object representing the standard deviation of the data.\n \n See Also\n --------\n bayes_mvs\n \n Notes\n -----\n The return values from ``bayes_mvs(data)`` is equivalent to\n ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.\n \n In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``\n on the three distribution objects returned from this function will give\n the same results that are returned from `bayes_mvs`.\n \n References\n ----------\n T.E. Oliphant, \"A Bayesian perspective on estimating mean, variance, and\n standard-deviation from data\", https://scholarsarchive.byu.edu/facpub/278,\n 2006.\n \n Examples\n --------\n >>> from scipy import stats\n >>> data = [6, 9, 12, 7, 8, 8, 13]\n >>> mean, var, std = stats.mvsdist(data)\n \n We now have frozen distribution objects \"mean\", \"var\" and \"std\" that we can\n examine:\n \n >>> mean.mean()\n 9.0\n >>> mean.interval(0.95)\n (6.6120585482655692, 11.387941451734431)\n >>> mean.std()\n 1.1952286093343936\n \n normaltest(a, axis=0, nan_policy='propagate')\n Test whether a sample differs from a normal distribution.\n \n This function tests the null hypothesis that a sample comes\n from a normal distribution. It is based on D'Agostino and\n Pearson's [1]_, [2]_ test that combines skew and kurtosis to\n produce an omnibus test of normality.\n \n Parameters\n ----------\n a : array_like\n The array containing the sample to be tested.\n axis : int or None, optional\n Axis along which to compute test. Default is 0. If None,\n compute over the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float or array\n ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and\n ``k`` is the z-score returned by `kurtosistest`.\n pvalue : float or array\n A 2-sided chi squared probability for the hypothesis test.\n \n References\n ----------\n .. [1] D'Agostino, R. B. (1971), \"An omnibus test of normality for\n moderate and large sample size\", Biometrika, 58, 341-348\n \n .. [2] D'Agostino, R. and Pearson, E. S. (1973), \"Tests for departure from\n normality\", Biometrika, 60, 613-622\n \n Examples\n --------\n >>> from scipy import stats\n >>> pts = 1000\n >>> np.random.seed(28041990)\n >>> a = np.random.normal(0, 1, size=pts)\n >>> b = np.random.normal(2, 1, size=pts)\n >>> x = np.concatenate((a, b))\n >>> k2, p = stats.normaltest(x)\n >>> alpha = 1e-3\n >>> print(\"p = {:g}\".format(p))\n p = 3.27207e-11\n >>> if p < alpha: # null hypothesis: x comes from a normal distribution\n ... print(\"The null hypothesis can be rejected\")\n ... else:\n ... 
print(\"The null hypothesis cannot be rejected\")\n The null hypothesis can be rejected\n \n obrientransform(*args)\n Compute the O'Brien transform on input data (any number of arrays).\n \n Used to test for homogeneity of variance prior to running one-way stats.\n Each array in ``*args`` is one level of a factor.\n If `f_oneway` is run on the transformed data and found significant,\n the variances are unequal. From Maxwell and Delaney [1]_, p.112.\n \n Parameters\n ----------\n args : tuple of array_like\n Any number of arrays.\n \n Returns\n -------\n obrientransform : ndarray\n Transformed data for use in an ANOVA. The first dimension\n of the result corresponds to the sequence of transformed\n arrays. If the arrays given are all 1-D of the same length,\n the return value is a 2-D array; otherwise it is a 1-D array\n of type object, with each element being an ndarray.\n \n References\n ----------\n .. [1] S. E. Maxwell and H. D. Delaney, \"Designing Experiments and\n Analyzing Data: A Model Comparison Perspective\", Wadsworth, 1990.\n \n Examples\n --------\n We'll test the following data sets for differences in their variance.\n \n >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]\n >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]\n \n Apply the O'Brien transform to the data.\n \n >>> from scipy.stats import obrientransform\n >>> tx, ty = obrientransform(x, y)\n \n Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the\n transformed data.\n \n >>> from scipy.stats import f_oneway\n >>> F, p = f_oneway(tx, ty)\n >>> p\n 0.1314139477040335\n \n If we require that ``p < 0.05`` for significance, we cannot conclude\n that the variances are different.\n \n pearsonr(x, y)\n Pearson correlation coefficient and p-value for testing non-correlation.\n \n The Pearson correlation coefficient [1]_ measures the linear relationship\n between two datasets. The calculation of the p-value relies on the\n assumption that each dataset is normally distributed. (See Kowalski [3]_\n for a discussion of the effects of non-normality of the input on the\n distribution of the correlation coefficient.) Like other correlation\n coefficients, this one varies between -1 and +1 with 0 implying no\n correlation. Correlations of -1 or +1 imply an exact linear relationship.\n Positive correlations imply that as x increases, so does y. Negative\n correlations imply that as x increases, y decreases.\n \n The p-value roughly indicates the probability of an uncorrelated system\n producing datasets that have a Pearson correlation at least as extreme\n as the one computed from these datasets.\n \n Parameters\n ----------\n x : (N,) array_like\n Input array.\n y : (N,) array_like\n Input array.\n \n Returns\n -------\n r : float\n Pearson's correlation coefficient.\n p-value : float\n Two-tailed p-value.\n \n Warns\n -----\n PearsonRConstantInputWarning\n Raised if an input is a constant array. The correlation coefficient\n is not defined in this case, so ``np.nan`` is returned.\n \n PearsonRNearConstantInputWarning\n Raised if an input is \"nearly\" constant. The array ``x`` is considered\n nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.\n Numerical errors in the calculation ``x - mean(x)`` in this case might\n result in an inaccurate calculation of r.\n \n See Also\n --------\n spearmanr : Spearman rank-order correlation coefficient.\n kendalltau : Kendall's tau, a correlation measure for ordinal data.\n \n Notes\n -----\n The correlation coefficient is calculated as follows:\n \n .. 
math::\n \n r = \\frac{\\sum (x - m_x) (y - m_y)}\n {\\sqrt{\\sum (x - m_x)^2 \\sum (y - m_y)^2}}\n \n where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is\n the mean of the vector :math:`y`.\n \n Under the assumption that x and y are drawn from independent normal\n distributions (so the population correlation coefficient is 0), the\n probability density function of the sample correlation coefficient r\n is ([1]_, [2]_)::\n \n (1 - r**2)**(n/2 - 2)\n f(r) = ---------------------\n B(1/2, n/2 - 1)\n \n where n is the number of samples, and B is the beta function. This\n is sometimes referred to as the exact distribution of r. This is\n the distribution that is used in `pearsonr` to compute the p-value.\n The distribution is a beta distribution on the interval [-1, 1],\n with equal shape parameters a = b = n/2 - 1. In terms of SciPy's\n implementation of the beta distribution, the distribution of r is::\n \n dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)\n \n The p-value returned by `pearsonr` is a two-sided p-value. For a\n given sample with correlation coefficient r, the p-value is\n the probability that abs(r') of a random sample x' and y' drawn from\n the population with zero correlation would be greater than or equal\n to abs(r). In terms of the object ``dist`` shown above, the p-value\n for a given r and length n can be computed as::\n \n p = 2*dist.cdf(-abs(r))\n \n When n is 2, the above continuous distribution is not well-defined.\n One can interpret the limit of the beta distribution as the shape\n parameters a and b approach a = b = 0 as a discrete distribution with\n equal probability masses at r = 1 and r = -1. More directly, one\n can observe that, given the data x = [x1, x2] and y = [y1, y2], and\n assuming x1 != x2 and y1 != y2, the only possible values for r are 1\n and -1. Because abs(r') for any sample x' and y' with length 2 will\n be 1, the two-sided p-value for a sample of length 2 is always 1.\n \n References\n ----------\n .. [1] \"Pearson correlation coefficient\", Wikipedia,\n https://en.wikipedia.org/wiki/Pearson_correlation_coefficient\n .. [2] Student, \"Probable error of a correlation coefficient\",\n Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.\n .. [3] C. J. Kowalski, \"On the Effects of Non-Normality on the Distribution\n of the Sample Product-Moment Correlation Coefficient\"\n Journal of the Royal Statistical Society. Series C (Applied\n Statistics), Vol. 21, No. 1 (1972), pp. 1-12.\n \n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([0, 0, 0, 1, 1, 1, 1])\n >>> b = np.arange(7)\n >>> stats.pearsonr(a, b)\n (0.8660254037844386, 0.011724811003954649)\n \n >>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])\n (-0.7426106572325057, 0.1505558088534455)\n \n percentileofscore(a, score, kind='rank')\n Compute the percentile rank of a score relative to a list of scores.\n \n A `percentileofscore` of, for example, 80% means that 80% of the\n scores in `a` are below the given score. In the case of gaps or\n ties, the exact definition depends on the optional keyword, `kind`.\n \n Parameters\n ----------\n a : array_like\n Array of scores to which `score` is compared.\n score : int or float\n Score that is compared to the elements in `a`.\n kind : {'rank', 'weak', 'strict', 'mean'}, optional\n Specifies the interpretation of the resulting score.\n The following options are available (default is 'rank'):\n \n * 'rank': Average percentage ranking of score. 
In case of multiple\n matches, average the percentage rankings of all matching scores.\n * 'weak': This kind corresponds to the definition of a cumulative\n distribution function. A percentileofscore of 80% means that 80%\n of values are less than or equal to the provided score.\n * 'strict': Similar to \"weak\", except that only values that are\n strictly less than the given score are counted.\n * 'mean': The average of the \"weak\" and \"strict\" scores, often used\n in testing. See https://en.wikipedia.org/wiki/Percentile_rank\n \n Returns\n -------\n pcos : float\n Percentile-position of score (0-100) relative to `a`.\n \n See Also\n --------\n numpy.percentile\n \n Examples\n --------\n Three-quarters of the given values lie below a given score:\n \n >>> from scipy import stats\n >>> stats.percentileofscore([1, 2, 3, 4], 3)\n 75.0\n \n With multiple matches, note how the scores of the two matches, 0.6\n and 0.8 respectively, are averaged:\n \n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)\n 70.0\n \n Only 2/5 values are strictly less than 3:\n \n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')\n 40.0\n \n But 4/5 values are less than or equal to 3:\n \n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')\n 80.0\n \n The average between the weak and the strict scores is:\n \n >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')\n 60.0\n \n pointbiserialr(x, y)\n Calculate a point biserial correlation coefficient and its p-value.\n \n The point biserial correlation is used to measure the relationship\n between a binary variable, x, and a continuous variable, y. Like other\n correlation coefficients, this one varies between -1 and +1 with 0\n implying no correlation. Correlations of -1 or +1 imply a determinative\n relationship.\n \n This function uses a shortcut formula but produces the same result as\n `pearsonr`.\n \n Parameters\n ----------\n x : array_like of bools\n Input array.\n y : array_like\n Input array.\n \n Returns\n -------\n correlation : float\n R value.\n pvalue : float\n Two-sided p-value.\n \n Notes\n -----\n `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.\n It is equivalent to `pearsonr.`\n \n The value of the point-biserial correlation can be calculated from:\n \n .. math::\n \n r_{pb} = \\frac{\\overline{Y_{1}} -\n \\overline{Y_{0}}}{s_{y}}\\sqrt{\\frac{N_{1} N_{2}}{N (N - 1))}}\n \n Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric\n observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`\n are number of observations coded 0 and 1 respectively; :math:`N` is the\n total number of observations and :math:`s_{y}` is the standard\n deviation of all the metric observations.\n \n A value of :math:`r_{pb}` that is significantly different from zero is\n completely equivalent to a significant difference in means between the two\n groups. Thus, an independent groups t Test with :math:`N-2` degrees of\n freedom may be used to test whether :math:`r_{pb}` is nonzero. The\n relation between the t-statistic for comparing two independent groups and\n :math:`r_{pb}` is given by:\n \n .. math::\n \n t = \\sqrt{N - 2}\\frac{r_{pb}}{\\sqrt{1 - r^{2}_{pb}}}\n \n References\n ----------\n .. [1] J. Lev, \"The Point Biserial Coefficient of Correlation\", Ann. Math.\n Statist., Vol. 20, no.1, pp. 125-126, 1949.\n \n .. [2] R.F. Tate, \"Correlation Between a Discrete and a Continuous\n Variable. Point-Biserial Correlation.\", Ann. Math. Statist., Vol. 25,\n np. 3, pp. 603-607, 1954.\n \n .. [3] D. 
Kornbrot \"Point Biserial Correlation\", In Wiley StatsRef:\n Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.\n https://doi.org/10.1002/9781118445112.stat06227\n \n Examples\n --------\n >>> from scipy import stats\n >>> a = np.array([0, 0, 0, 1, 1, 1, 1])\n >>> b = np.arange(7)\n >>> stats.pointbiserialr(a, b)\n (0.8660254037844386, 0.011724811003954652)\n >>> stats.pearsonr(a, b)\n (0.86602540378443871, 0.011724811003954626)\n >>> np.corrcoef(a, b)\n array([[ 1. , 0.8660254],\n [ 0.8660254, 1. ]])\n \n power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None)\n Cressie-Read power divergence statistic and goodness of fit test.\n \n This function tests the null hypothesis that the categorical data\n has the given frequencies, using the Cressie-Read power divergence\n statistic.\n \n Parameters\n ----------\n f_obs : array_like\n Observed frequencies in each category.\n f_exp : array_like, optional\n Expected frequencies in each category. By default the categories are\n assumed to be equally likely.\n ddof : int, optional\n \"Delta degrees of freedom\": adjustment to the degrees of freedom\n for the p-value. The p-value is computed using a chi-squared\n distribution with ``k - 1 - ddof`` degrees of freedom, where `k`\n is the number of observed frequencies. The default value of `ddof`\n is 0.\n axis : int or None, optional\n The axis of the broadcast result of `f_obs` and `f_exp` along which to\n apply the test. If axis is None, all values in `f_obs` are treated\n as a single data set. Default is 0.\n lambda_ : float or str, optional\n The power in the Cressie-Read power divergence statistic. The default\n is 1. For convenience, `lambda_` may be assigned one of the following\n strings, in which case the corresponding numerical value is used::\n \n String Value Description\n \"pearson\" 1 Pearson's chi-squared statistic.\n In this case, the function is\n equivalent to `stats.chisquare`.\n \"log-likelihood\" 0 Log-likelihood ratio. Also known as\n the G-test [3]_.\n \"freeman-tukey\" -1/2 Freeman-Tukey statistic.\n \"mod-log-likelihood\" -1 Modified log-likelihood ratio.\n \"neyman\" -2 Neyman's statistic.\n \"cressie-read\" 2/3 The power recommended in [5]_.\n \n Returns\n -------\n statistic : float or ndarray\n The Cressie-Read power divergence test statistic. The value is\n a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D.\n pvalue : float or ndarray\n The p-value of the test. The value is a float if `ddof` and the\n return value `stat` are scalars.\n \n See Also\n --------\n chisquare\n \n Notes\n -----\n This test is invalid when the observed or expected frequencies in each\n category are too small. A typical rule is that all of the observed\n and expected frequencies should be at least 5.\n \n When `lambda_` is less than zero, the formula for the statistic involves\n dividing by `f_obs`, so a warning or error may be generated if any value\n in `f_obs` is 0.\n \n Similarly, a warning or error may be generated if any value in `f_exp` is\n zero when `lambda_` >= 0.\n \n The default degrees of freedom, k-1, are for the case when no parameters\n of the distribution are estimated. If p parameters are estimated by\n efficient maximum likelihood then the correct degrees of freedom are\n k-1-p. If the parameters are estimated in a different way, then the\n dof can be between k-1-p and k-1. However, it is also possible that\n the asymptotic distribution is not a chisquare, in which case this\n test is not appropriate.\n \n This function handles masked arrays. 
If an element of `f_obs` or `f_exp`\n is masked, then data at that position is ignored, and does not count\n towards the size of the data set.\n \n .. versionadded:: 0.13.0\n \n References\n ----------\n .. [1] Lowry, Richard. \"Concepts and Applications of Inferential\n Statistics\". Chapter 8.\n https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html\n .. [2] \"Chi-squared test\", https://en.wikipedia.org/wiki/Chi-squared_test\n .. [3] \"G-test\", https://en.wikipedia.org/wiki/G-test\n .. [4] Sokal, R. R. and Rohlf, F. J. \"Biometry: the principles and\n practice of statistics in biological research\", New York: Freeman\n (1981)\n .. [5] Cressie, N. and Read, T. R. C., \"Multinomial Goodness-of-Fit\n Tests\", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),\n pp. 440-464.\n \n Examples\n --------\n (See `chisquare` for more examples.)\n \n When just `f_obs` is given, it is assumed that the expected frequencies\n are uniform and given by the mean of the observed frequencies. Here we\n perform a G-test (i.e. use the log-likelihood ratio statistic):\n \n >>> from scipy.stats import power_divergence\n >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')\n (2.006573162632538, 0.84823476779463769)\n \n The expected frequencies can be given with the `f_exp` argument:\n \n >>> power_divergence([16, 18, 16, 14, 12, 12],\n ... f_exp=[16, 16, 16, 16, 16, 8],\n ... lambda_='log-likelihood')\n (3.3281031458963746, 0.6495419288047497)\n \n When `f_obs` is 2-D, by default the test is applied to each column.\n \n >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T\n >>> obs.shape\n (6, 2)\n >>> power_divergence(obs, lambda_=\"log-likelihood\")\n (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))\n \n By setting ``axis=None``, the test is applied to all data in the array,\n which is equivalent to applying the test to the flattened array.\n \n >>> power_divergence(obs, axis=None)\n (23.31034482758621, 0.015975692534127565)\n >>> power_divergence(obs.ravel())\n (23.31034482758621, 0.015975692534127565)\n \n `ddof` is the change to make to the default degrees of freedom.\n \n >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)\n (2.0, 0.73575888234288467)\n \n The calculation of the p-values is done by broadcasting the\n test statistic with `ddof`.\n \n >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])\n (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))\n \n `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has\n shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting\n `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared\n statistics, we must use ``axis=1``:\n \n >>> power_divergence([16, 18, 16, 14, 12, 12],\n ... f_exp=[[16, 16, 16, 16, 16, 8],\n ... [8, 20, 20, 16, 12, 12]],\n ... axis=1)\n (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))\n \n ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda')\n Calculate the shape parameter that maximizes the PPCC.\n \n The probability plot correlation coefficient (PPCC) plot can be used to\n determine the optimal shape parameter for a one-parameter family of\n distributions. ppcc_max returns the shape parameter that would maximize the\n probability plot correlation coefficient for the given data to a\n one-parameter family of distributions.\n \n Parameters\n ----------\n x : array_like\n Input array.\n brack : tuple, optional\n Triple (a,b,c) where (a<b<c). 
If bracket consists of two numbers (a, c)\n then they are assumed to be a starting interval for a downhill bracket\n search (see `scipy.optimize.brent`).\n dist : str or stats.distributions instance, optional\n Distribution or distribution function name. Objects that look enough\n like a stats.distributions instance (i.e. they have a ``ppf`` method)\n are also accepted. The default is ``'tukeylambda'``.\n \n Returns\n -------\n shape_value : float\n The shape parameter at which the probability plot correlation\n coefficient reaches its max value.\n \n See Also\n --------\n ppcc_plot, probplot, boxcox\n \n Notes\n -----\n The brack keyword serves as a starting point which is useful in corner\n cases. One can use a plot to obtain a rough visual estimate of the location\n for the maximum to start the search near it.\n \n References\n ----------\n .. [1] J.J. Filliben, \"The Probability Plot Correlation Coefficient Test for\n Normality\", Technometrics, Vol. 17, pp. 111-117, 1975.\n \n .. [2] https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm\n \n Examples\n --------\n First we generate some random data from a Tukey-Lambda distribution,\n with shape parameter -0.7:\n \n >>> from scipy import stats\n >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,\n ... random_state=1234567) + 1e4\n \n Now we explore this data with a PPCC plot as well as the related\n probability plot and Box-Cox normplot. A red line is drawn where we\n expect the PPCC value to be maximal (at the shape parameter -0.7 used\n above):\n \n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure(figsize=(8, 6))\n >>> ax = fig.add_subplot(111)\n >>> res = stats.ppcc_plot(x, -5, 5, plot=ax)\n \n We calculate the value where the shape should reach its maximum and a red\n line is drawn there. The line should coincide with the highest point in the\n ppcc_plot.\n \n >>> max = stats.ppcc_max(x)\n >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')\n \n >>> plt.show()\n \n ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80)\n Calculate and optionally plot probability plot correlation coefficient.\n \n The probability plot correlation coefficient (PPCC) plot can be used to\n determine the optimal shape parameter for a one-parameter family of\n distributions. It cannot be used for distributions without shape parameters\n (like the normal distribution) or with multiple shape parameters.\n \n By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A\n Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed\n distributions via an approximately normal one, and is therefore particularly\n useful in practice.\n \n Parameters\n ----------\n x : array_like\n Input array.\n a, b : scalar\n Lower and upper bounds of the shape parameter to use.\n dist : str or stats.distributions instance, optional\n Distribution or distribution function name. Objects that look enough\n like a stats.distributions instance (i.e. they have a ``ppf`` method)\n are also accepted. 
The default is ``'tukeylambda'``.\n plot : object, optional\n If given, plots PPCC against the shape parameter.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n N : int, optional\n Number of points on the horizontal axis (equally distributed from\n `a` to `b`).\n \n Returns\n -------\n svals : ndarray\n The shape values for which `ppcc` was calculated.\n ppcc : ndarray\n The calculated probability plot correlation coefficient values.\n \n See Also\n --------\n ppcc_max, probplot, boxcox_normplot, tukeylambda\n \n References\n ----------\n J.J. Filliben, \"The Probability Plot Correlation Coefficient Test for\n Normality\", Technometrics, Vol. 17, pp. 111-117, 1975.\n \n Examples\n --------\n First we generate some random data from a Tukey-Lambda distribution,\n with shape parameter -0.7:\n \n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(1234567)\n >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4\n \n Now we explore this data with a PPCC plot as well as the related\n probability plot and Box-Cox normplot. A red line is drawn where we\n expect the PPCC value to be maximal (at the shape parameter -0.7 used\n above):\n \n >>> fig = plt.figure(figsize=(12, 4))\n >>> ax1 = fig.add_subplot(131)\n >>> ax2 = fig.add_subplot(132)\n >>> ax3 = fig.add_subplot(133)\n >>> res = stats.probplot(x, plot=ax1)\n >>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)\n >>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)\n >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')\n >>> plt.show()\n \n probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False)\n Calculate quantiles for a probability plot, and optionally show the plot.\n \n Generates a probability plot of sample data against the quantiles of a\n specified theoretical distribution (the normal distribution by default).\n `probplot` optionally calculates a best-fit line for the data and plots the\n results using Matplotlib or a given plot function.\n \n Parameters\n ----------\n x : array_like\n Sample/response data from which `probplot` creates the plot.\n sparams : tuple, optional\n Distribution-specific shape parameters (shape parameters plus location\n and scale).\n dist : str or stats.distributions instance, optional\n Distribution or distribution function name. The default is 'norm' for a\n normal probability plot. Objects that look enough like a\n stats.distributions instance (i.e. they have a ``ppf`` method) are also\n accepted.\n fit : bool, optional\n Fit a least-squares regression (best-fit) line to the sample data if\n True (default).\n plot : object, optional\n If given, plots the quantiles and least squares fit.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n \n Returns\n -------\n (osm, osr) : tuple of ndarrays\n Tuple of theoretical quantiles (osm, or order statistic medians) and\n ordered responses (osr). `osr` is simply sorted input `x`.\n For details on how `osm` is calculated see the Notes section.\n (slope, intercept, r) : tuple of floats, optional\n Tuple containing the result of the least-squares fit, if that is\n performed by `probplot`. 
`r` is the square root of the coefficient of\n determination. If ``fit=False`` and ``plot=None``, this tuple is not\n returned.\n \n Notes\n -----\n Even if `plot` is given, the figure is not shown or saved by `probplot`;\n ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after\n calling `probplot`.\n \n `probplot` generates a probability plot, which should not be confused with\n a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this\n type, see ``statsmodels.api.ProbPlot``.\n \n The formula used for the theoretical quantiles (horizontal axis of the\n probability plot) is Filliben's estimate::\n \n quantiles = dist.ppf(val), for\n \n 0.5**(1/n), for i = n\n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1\n 1 - 0.5**(1/n), for i = 1\n \n where ``i`` indicates the i-th ordered value and ``n`` is the total number\n of values.\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> nsample = 100\n >>> np.random.seed(7654321)\n \n A t distribution with small degrees of freedom:\n \n >>> ax1 = plt.subplot(221)\n >>> x = stats.t.rvs(3, size=nsample)\n >>> res = stats.probplot(x, plot=plt)\n \n A t distribution with larger degrees of freedom:\n \n >>> ax2 = plt.subplot(222)\n >>> x = stats.t.rvs(25, size=nsample)\n >>> res = stats.probplot(x, plot=plt)\n \n A mixture of two normal distributions with broadcasting:\n \n >>> ax3 = plt.subplot(223)\n >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],\n ... size=(nsample//2,2)).ravel()\n >>> res = stats.probplot(x, plot=plt)\n \n A standard normal distribution:\n \n >>> ax4 = plt.subplot(224)\n >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)\n >>> res = stats.probplot(x, plot=plt)\n \n Produce a new figure with a loggamma distribution, using the ``dist`` and\n ``sparams`` keywords:\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> x = stats.loggamma.rvs(c=2.5, size=500)\n >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)\n >>> ax.set_title(\"Probplot for loggamma dist with shape parameter 2.5\")\n \n Show the results with Matplotlib:\n \n >>> plt.show()\n \n rankdata(a, method='average')\n Assign ranks to data, dealing with ties appropriately.\n \n Ranks begin at 1. The `method` argument controls how ranks are assigned\n to equal values. See [1]_ for further discussion of ranking methods.\n \n Parameters\n ----------\n a : array_like\n The array of values to be ranked. The array is first flattened.\n method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional\n The method used to assign ranks to tied elements.\n The following methods are available (default is 'average'):\n \n * 'average': The average of the ranks that would have been assigned to\n all the tied values is assigned to each value.\n * 'min': The minimum of the ranks that would have been assigned to all\n the tied values is assigned to each value. (This is also\n referred to as \"competition\" ranking.)\n * 'max': The maximum of the ranks that would have been assigned to all\n the tied values is assigned to each value.\n * 'dense': Like 'min', but the rank of the next highest element is\n assigned the rank immediately after those assigned to the tied\n elements.\n * 'ordinal': All values are given a distinct rank, corresponding to\n the order that the values occur in `a`.\n \n Returns\n -------\n ranks : ndarray\n An array of length equal to the size of `a`, containing rank\n scores.\n \n References\n ----------\n .. 
[1] \"Ranking\", https://en.wikipedia.org/wiki/Ranking\n \n Examples\n --------\n >>> from scipy.stats import rankdata\n >>> rankdata([0, 2, 3, 2])\n array([ 1. , 2.5, 4. , 2.5])\n >>> rankdata([0, 2, 3, 2], method='min')\n array([ 1, 2, 4, 2])\n >>> rankdata([0, 2, 3, 2], method='max')\n array([ 1, 3, 4, 3])\n >>> rankdata([0, 2, 3, 2], method='dense')\n array([ 1, 2, 3, 2])\n >>> rankdata([0, 2, 3, 2], method='ordinal')\n array([ 1, 2, 4, 3])\n \n ranksums(x, y)\n Compute the Wilcoxon rank-sum statistic for two samples.\n \n The Wilcoxon rank-sum test tests the null hypothesis that two sets\n of measurements are drawn from the same distribution. The alternative\n hypothesis is that values in one sample are more likely to be\n larger than the values in the other sample.\n \n This test should be used to compare two samples from continuous\n distributions. It does not handle ties between measurements\n in x and y. For tie-handling and an optional continuity correction\n see `scipy.stats.mannwhitneyu`.\n \n Parameters\n ----------\n x,y : array_like\n The data from the two samples.\n \n Returns\n -------\n statistic : float\n The test statistic under the large-sample approximation that the\n rank sum statistic is normally distributed.\n pvalue : float\n The two-sided p-value of the test.\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test\n \n relfreq(a, numbins=10, defaultreallimits=None, weights=None)\n Return a relative frequency histogram, using the histogram function.\n \n A relative frequency histogram is a mapping of the number of\n observations in each of the bins relative to the total of observations.\n \n Parameters\n ----------\n a : array_like\n Input array.\n numbins : int, optional\n The number of bins to use for the histogram. Default is 10.\n defaultreallimits : tuple (lower, upper), optional\n The lower and upper values for the range of the histogram.\n If no value is given, a range slightly larger than the range of the\n values in a is used. Specifically ``(a.min() - s, a.max() + s)``,\n where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.\n weights : array_like, optional\n The weights for each value in `a`. Default is None, which gives each\n value a weight of 1.0\n \n Returns\n -------\n frequency : ndarray\n Binned values of relative frequency.\n lowerlimit : float\n Lower real limit.\n binsize : float\n Width of each bin.\n extrapoints : int\n Extra points.\n \n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy import stats\n >>> a = np.array([2, 4, 1, 2, 3, 2])\n >>> res = stats.relfreq(a, numbins=4)\n >>> res.frequency\n array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])\n >>> np.sum(res.frequency) # relative frequencies should add up to 1\n 1.0\n \n Create a normal distribution with 1000 random values\n \n >>> rng = np.random.RandomState(seed=12345)\n >>> samples = stats.norm.rvs(size=1000, random_state=rng)\n \n Calculate relative frequencies\n \n >>> res = stats.relfreq(samples, numbins=25)\n \n Calculate space of values for x\n \n >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,\n ... 
res.frequency.size)\n \n Plot relative frequency histogram\n \n >>> fig = plt.figure(figsize=(5, 4))\n >>> ax = fig.add_subplot(1, 1, 1)\n >>> ax.bar(x, res.frequency, width=res.binsize)\n >>> ax.set_title('Relative frequency histogram')\n >>> ax.set_xlim([x.min(), x.max()])\n \n >>> plt.show()\n \n rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None)\n Generate random samples from a probability density function using the\n ratio-of-uniforms method.\n \n Parameters\n ----------\n pdf : callable\n A function with signature `pdf(x)` that is the probability\n density function of the distribution.\n umax : float\n The upper bound of the bounding rectangle in the u-direction.\n vmin : float\n The lower bound of the bounding rectangle in the v-direction.\n vmax : float\n The upper bound of the bounding rectangle in the v-direction.\n size : int or tuple of ints, optional\n Defining number of random variates (default is 1).\n c : float, optional.\n Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.\n random_state : int or np.random.RandomState instance, optional\n If already a RandomState instance, use it.\n If seed is an int, return a new RandomState instance seeded with seed.\n If None, use np.random.RandomState. Default is None.\n \n Returns\n -------\n rvs : ndarray\n The random variates distributed according to the probability\n distribution defined by the pdf.\n \n Notes\n -----\n Given a univariate probability density function `pdf` and a constant `c`,\n define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.\n If `(U, V)` is a random vector uniformly distributed over `A`,\n then `V/U + c` follows a distribution according to `pdf`.\n \n The above result (see [1]_, [2]_) can be used to sample random variables\n using only the pdf, i.e. no inversion of the cdf is required. Typical\n choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of\n the rectangle ``R = [0, umax] x [vmin, vmax]`` where\n \n - ``umax = sup sqrt(pdf(x))``\n - ``vmin = inf (x - c) sqrt(pdf(x))``\n - ``vmax = sup (x - c) sqrt(pdf(x))``\n \n In particular, these values are finite if `pdf` is bounded and\n ``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).\n One can generate `(U, V)` uniformly on `R` and return\n `V/U + c` if `(U, V)` are also in `A` which can be directly\n verified.\n \n Intuitively, the method works well if `A` fills up most of the\n enclosing rectangle such that the probability is high that `(U, V)`\n lies in `A` whenever it lies in `R` as the number of required\n iterations becomes too large otherwise. To be more precise, note that\n the expected number of iterations to draw `(U, V)` uniformly\n distributed on `R` such that `(U, V)` is also in `A` is given by\n the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin)``, using the fact\n that the area of `A` is equal to 1/2 (Theorem 7.1 in [1]_). A warning\n is displayed if this ratio is larger than 20. Moreover, if the sampling\n fails to generate a single random variate after 50000 iterations (i.e.\n not a single draw is in `A`), an exception is raised.\n \n If the bounding rectangle is not correctly specified (i.e. if it does not\n contain `A`), the algorithm samples from a distribution different from\n the one given by `pdf`. It is therefore recommended to perform a\n test such as `~scipy.stats.kstest` as a check.\n \n References\n ----------\n .. [1] L. Devroye, \"Non-Uniform Random Variate Generation\",\n Springer-Verlag, 1986.\n \n .. [2] W. Hoermann and J. 
Leydold, \"Generating generalized inverse Gaussian\n random variates\", Statistics and Computing, 24(4), p. 547--557, 2014.\n \n .. [3] A.J. Kinderman and J.F. Monahan, \"Computer Generation of Random\n Variables Using the Ratio of Uniform Deviates\",\n ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.\n \n Examples\n --------\n >>> from scipy import stats\n \n Simulate normally distributed random variables. It is easy to compute the\n bounding rectangle explicitly in that case.\n \n >>> f = stats.norm.pdf\n >>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)\n >>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound\n >>> np.random.seed(12345)\n >>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500)\n \n The K-S test confirms that the random variates are indeed normally\n distributed (normality is not rejected at 5% significance level):\n \n >>> stats.kstest(rvs, 'norm')[1]\n 0.3420173467307603\n \n The exponential distribution provides another example where the bounding\n rectangle can be determined explicitly.\n \n >>> np.random.seed(12345)\n >>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,\n ... vmin=0, vmax=2*np.exp(-1), size=1000)\n >>> stats.kstest(rvs, 'expon')[1]\n 0.928454552559516\n \n Sometimes it can be helpful to use a non-zero shift parameter `c`, see e.g.\n [2]_ above in the case of the generalized inverse Gaussian distribution.\n \n scoreatpercentile(a, per, limit=(), interpolation_method='fraction', axis=None)\n Calculate the score at a given percentile of the input sequence.\n \n For example, the score at `per=50` is the median. If the desired quantile\n lies between two data points, we interpolate between them, according to\n the value of `interpolation`. If the parameter `limit` is provided, it\n should be a tuple (lower, upper) of two values.\n \n Parameters\n ----------\n a : array_like\n A 1-D array of values from which to extract score.\n per : array_like\n Percentile(s) at which to extract score. Values should be in range\n [0,100].\n limit : tuple, optional\n Tuple of two scalars, the lower and upper limits within which to\n compute the percentile. Values of `a` outside\n this (closed) interval will be ignored.\n interpolation_method : {'fraction', 'lower', 'higher'}, optional\n Specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`\n The following options are available (default is 'fraction'):\n \n * 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the\n fractional part of the index surrounded by ``i`` and ``j``\n * 'lower': ``i``\n * 'higher': ``j``\n \n axis : int, optional\n Axis along which the percentiles are computed. Default is None. If\n None, compute over the whole array `a`.\n \n Returns\n -------\n score : float or ndarray\n Score at percentile(s).\n \n See Also\n --------\n percentileofscore, numpy.percentile\n \n Notes\n -----\n This function will become obsolete in the future.\n For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality\n that `scoreatpercentile` provides. 
And it's significantly faster.\n Therefore it's recommended to use `numpy.percentile` for users that have\n numpy >= 1.9.\n \n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(100)\n >>> stats.scoreatpercentile(a, 50)\n 49.5\n \n sem(a, axis=0, ddof=1, nan_policy='propagate')\n Compute standard error of the mean.\n \n Calculate the standard error of the mean (or standard error of\n measurement) of the values in the input array.\n \n Parameters\n ----------\n a : array_like\n An array containing the values for which the standard error is\n returned.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n ddof : int, optional\n Delta degrees-of-freedom. How many degrees of freedom to adjust\n for bias in limited samples relative to the population estimate\n of variance. Defaults to 1.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n s : ndarray or float\n The standard error of the mean in the sample(s), along the input axis.\n \n Notes\n -----\n The default value for `ddof` is different to the default (0) used by other\n ddof containing routines, such as np.std and np.nanstd.\n \n Examples\n --------\n Find standard error along the first axis:\n \n >>> from scipy import stats\n >>> a = np.arange(20).reshape(5,4)\n >>> stats.sem(a)\n array([ 2.8284, 2.8284, 2.8284, 2.8284])\n \n Find standard error across the whole array, using n degrees of freedom:\n \n >>> stats.sem(a, axis=None, ddof=0)\n 1.2893796958227628\n \n shapiro(x)\n Perform the Shapiro-Wilk test for normality.\n \n The Shapiro-Wilk test tests the null hypothesis that the\n data was drawn from a normal distribution.\n \n Parameters\n ----------\n x : array_like\n Array of sample data.\n \n Returns\n -------\n W : float\n The test statistic.\n p-value : float\n The p-value for the hypothesis test.\n \n See Also\n --------\n anderson : The Anderson-Darling test for normality\n kstest : The Kolmogorov-Smirnov test for goodness of fit.\n \n Notes\n -----\n The algorithm used is described in [4]_ but censoring parameters as\n described are not implemented. For N > 5000 the W test statistic is accurate\n but the p-value may not be.\n \n The chance of rejecting the null hypothesis when it is true is close to 5%\n regardless of sample size.\n \n References\n ----------\n .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm\n .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for\n normality (complete samples), Biometrika, Vol. 52, pp. 591-611.\n .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,\n Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of\n Statistical Modeling and Analytics, Vol. 2, pp. 21-33.\n .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 
4.\n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678)\n >>> x = stats.norm.rvs(loc=5, scale=3, size=100)\n >>> stats.shapiro(x)\n (0.9772805571556091, 0.08144091814756393)\n \n siegelslopes(y, x=None, method='hierarchical')\n Computes the Siegel estimator for a set of points (x, y).\n \n `siegelslopes` implements a method for robust linear regression\n using repeated medians (see [1]_) to fit a line to the points (x, y).\n The method is robust to outliers with an asymptotic breakdown point\n of 50%.\n \n Parameters\n ----------\n y : array_like\n Dependent variable.\n x : array_like or None, optional\n Independent variable. If None, use ``arange(len(y))`` instead.\n method : {'hierarchical', 'separate'}\n If 'hierarchical', estimate the intercept using the estimated\n slope ``medslope`` (default option).\n If 'separate', estimate the intercept independent of the estimated\n slope. See Notes for details.\n \n Returns\n -------\n medslope : float\n Estimate of the slope of the regression line.\n medintercept : float\n Estimate of the intercept of the regression line.\n \n See also\n --------\n theilslopes : a similar technique without repeated medians\n \n Notes\n -----\n With ``n = len(y)``, compute ``m_j`` as the median of\n the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.\n ``medslope`` is then the median of all slopes ``m_j``.\n Two ways are given to estimate the intercept in [1]_ which can be chosen\n via the parameter ``method``.\n The hierarchical approach uses the estimated slope ``medslope``\n and computes ``medintercept`` as the median of ``y - medslope*x``.\n The other approach estimates the intercept separately as follows: for\n each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`\n lines through the remaining points and take the median ``i_j``.\n ``medintercept`` is the median of the ``i_j``.\n \n The implementation computes `n` times the median of a vector of size `n`\n which can be slow for large vectors. There are more efficient algorithms\n (see [2]_) which are not implemented here.\n \n References\n ----------\n .. [1] A. Siegel, \"Robust Regression Using Repeated Medians\",\n Biometrika, Vol. 69, pp. 242-244, 1982.\n \n .. [2] A. Stein and M. Werman, \"Finding the repeated median regression\n line\", Proceedings of the Third Annual ACM-SIAM Symposium on\n Discrete Algorithms, pp. 409-413, 1992.\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n >>> x = np.linspace(-5, 5, num=150)\n >>> y = x + np.random.normal(size=x.size)\n >>> y[11:15] += 10 # add outliers\n >>> y[-5:] -= 7\n \n Compute the slope and intercept. For comparison, also compute the\n least-squares fit with `linregress`:\n \n >>> res = stats.siegelslopes(y, x)\n >>> lsq_res = stats.linregress(x, y)\n \n Plot the results. The Siegel regression line is shown in red. The green\n line shows the least-squares fit for comparison.\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(x, y, 'b.')\n >>> ax.plot(x, res[1] + res[0] * x, 'r-')\n >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')\n >>> plt.show()\n \n sigmaclip(a, low=4.0, high=4.0)\n Perform iterative sigma-clipping of array elements.\n \n Starting from the full sample, all elements outside the critical range are\n removed, i.e. 
all elements of the input array `c` that satisfy either of\n the following conditions::\n \n c < mean(c) - std(c)*low\n c > mean(c) + std(c)*high\n \n The iteration continues with the updated sample until no\n elements are outside the (updated) range.\n \n Parameters\n ----------\n a : array_like\n Data array, will be raveled if not 1-D.\n low : float, optional\n Lower bound factor of sigma clipping. Default is 4.\n high : float, optional\n Upper bound factor of sigma clipping. Default is 4.\n \n Returns\n -------\n clipped : ndarray\n Input array with clipped elements removed.\n lower : float\n Lower threshold value use for clipping.\n upper : float\n Upper threshold value use for clipping.\n \n Examples\n --------\n >>> from scipy.stats import sigmaclip\n >>> a = np.concatenate((np.linspace(9.5, 10.5, 31),\n ... np.linspace(0, 20, 5)))\n >>> fact = 1.5\n >>> c, low, upp = sigmaclip(a, fact, fact)\n >>> c\n array([ 9.96666667, 10. , 10.03333333, 10. ])\n >>> c.var(), c.std()\n (0.00055555555555555165, 0.023570226039551501)\n >>> low, c.mean() - fact*c.std(), c.min()\n (9.9646446609406727, 9.9646446609406727, 9.9666666666666668)\n >>> upp, c.mean() + fact*c.std(), c.max()\n (10.035355339059327, 10.035355339059327, 10.033333333333333)\n \n >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),\n ... np.linspace(-100, -50, 3)))\n >>> c, low, upp = sigmaclip(a, 1.8, 1.8)\n >>> (c == np.linspace(9.5, 10.5, 11)).all()\n True\n \n skew(a, axis=0, bias=True, nan_policy='propagate')\n Compute the sample skewness of a data set.\n \n For normally distributed data, the skewness should be about zero. For\n unimodal continuous distributions, a skewness value greater than zero means\n that there is more weight in the right tail of the distribution. The\n function `skewtest` can be used to determine if the skewness value\n is close enough to zero, statistically speaking.\n \n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int or None, optional\n Axis along which skewness is calculated. Default is 0.\n If None, compute over the whole array `a`.\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n skewness : ndarray\n The skewness of values along an axis, returning 0 where all values are\n equal.\n \n Notes\n -----\n The sample skewness is computed as the Fisher-Pearson coefficient\n of skewness, i.e.\n \n .. math:: \n \n g_1=\\frac{m_3}{m_2^{3/2}}\n \n where\n \n .. math:: \n \n m_i=\\frac{1}{N}\\sum_{n=1}^N(x[n]-\\bar{x})^i\n \n is the biased sample :math:`i\\texttt{th}` central moment, and :math:`\\bar{x}` is\n the sample mean. If ``bias`` is False, the calculations are\n corrected for bias and the value computed is the adjusted\n Fisher-Pearson standardized moment coefficient, i.e.\n \n .. math:: \n \n G_1=\\frac{k_3}{k_2^{3/2}}=\n \\frac{\\sqrt{N(N-1)}}{N-2}\\frac{m_3}{m_2^{3/2}}.\n \n References\n ----------\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 
2000.\n Section 2.2.24.1\n \n Examples\n --------\n >>> from scipy.stats import skew\n >>> skew([1, 2, 3, 4, 5])\n 0.0\n >>> skew([2, 8, 0, 4, 1, 9, 9, 0])\n 0.2650554122698573\n \n skewtest(a, axis=0, nan_policy='propagate')\n Test whether the skew is different from the normal distribution.\n \n This function tests the null hypothesis that the skewness of\n the population that the sample was drawn from is the same\n as that of a corresponding normal distribution.\n \n Parameters\n ----------\n a : array\n The data to be tested.\n axis : int or None, optional\n Axis along which statistics are calculated. Default is 0.\n If None, compute over the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float\n The computed z-score for this test.\n pvalue : float\n Two-sided p-value for the hypothesis test.\n \n Notes\n -----\n The sample size must be at least 8.\n \n References\n ----------\n .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,\n \"A suggestion for using powerful and informative tests of\n normality\", American Statistician 44, pp. 316-321, 1990.\n \n Examples\n --------\n >>> from scipy.stats import skewtest\n >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])\n SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)\n >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])\n SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)\n >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])\n SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)\n >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])\n SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)\n \n spearmanr(a, b=None, axis=0, nan_policy='propagate')\n Calculate a Spearman correlation coefficient with associated p-value.\n \n The Spearman rank-order correlation coefficient is a nonparametric measure\n of the monotonicity of the relationship between two datasets. Unlike the\n Pearson correlation, the Spearman correlation does not assume that both\n datasets are normally distributed. Like other correlation coefficients,\n this one varies between -1 and +1 with 0 implying no correlation.\n Correlations of -1 or +1 imply an exact monotonic relationship. Positive\n correlations imply that as x increases, so does y. Negative correlations\n imply that as x increases, y decreases.\n \n The p-value roughly indicates the probability of an uncorrelated system\n producing datasets that have a Spearman correlation at least as extreme\n as the one computed from these datasets. The p-values are not entirely\n reliable but are probably reasonable for datasets larger than 500 or so.\n \n Parameters\n ----------\n a, b : 1D or 2D array_like, b is optional\n One or two 1-D or 2-D arrays containing multiple variables and\n observations. When these are 1-D, each represents a vector of\n observations of a single variable. For the behavior in the 2-D case,\n see under ``axis``, below.\n Both arrays need to have the same length in the ``axis`` dimension.\n axis : int or None, optional\n If axis=0 (default), then each column represents a variable, with\n observations in the rows. 
If axis=1, the relationship is transposed:\n each row represents a variable, while the columns contain observations.\n If axis=None, then both arrays will be raveled.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n correlation : float or ndarray (2-D square)\n Spearman correlation matrix or correlation coefficient (if only 2\n variables are given as parameters. Correlation matrix is square with\n length equal to total number of variables (columns or rows) in ``a``\n and ``b`` combined.\n pvalue : float\n The two-sided p-value for a hypothesis test whose null hypothesis is\n that two sets of data are uncorrelated, has same dimension as rho.\n \n References\n ----------\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 2000.\n Section 14.7\n \n Examples\n --------\n >>> from scipy import stats\n >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])\n (0.82078268166812329, 0.088587005313543798)\n >>> np.random.seed(1234321)\n >>> x2n = np.random.randn(100, 2)\n >>> y2n = np.random.randn(100, 2)\n >>> stats.spearmanr(x2n)\n (0.059969996999699973, 0.55338590803773591)\n >>> stats.spearmanr(x2n[:,0], x2n[:,1])\n (0.059969996999699973, 0.55338590803773591)\n >>> rho, pval = stats.spearmanr(x2n, y2n)\n >>> rho\n array([[ 1. , 0.05997 , 0.18569457, 0.06258626],\n [ 0.05997 , 1. , 0.110003 , 0.02534653],\n [ 0.18569457, 0.110003 , 1. , 0.03488749],\n [ 0.06258626, 0.02534653, 0.03488749, 1. ]])\n >>> pval\n array([[ 0. , 0.55338591, 0.06435364, 0.53617935],\n [ 0.55338591, 0. , 0.27592895, 0.80234077],\n [ 0.06435364, 0.27592895, 0. , 0.73039992],\n [ 0.53617935, 0.80234077, 0.73039992, 0. ]])\n >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)\n >>> rho\n array([[ 1. , 0.05997 , 0.18569457, 0.06258626],\n [ 0.05997 , 1. , 0.110003 , 0.02534653],\n [ 0.18569457, 0.110003 , 1. , 0.03488749],\n [ 0.06258626, 0.02534653, 0.03488749, 1. ]])\n >>> stats.spearmanr(x2n, y2n, axis=None)\n (0.10816770419260482, 0.1273562188027364)\n >>> stats.spearmanr(x2n.ravel(), y2n.ravel())\n (0.10816770419260482, 0.1273562188027364)\n \n >>> xint = np.random.randint(10, size=(100, 2))\n >>> stats.spearmanr(xint)\n (0.052760927029710199, 0.60213045837062351)\n \n theilslopes(y, x=None, alpha=0.95)\n Computes the Theil-Sen estimator for a set of points (x, y).\n \n `theilslopes` implements a method for robust linear regression. It\n computes the slope as the median of all slopes between paired values.\n \n Parameters\n ----------\n y : array_like\n Dependent variable.\n x : array_like or None, optional\n Independent variable. If None, use ``arange(len(y))`` instead.\n alpha : float, optional\n Confidence degree between 0 and 1. Default is 95% confidence.\n Note that `alpha` is symmetric around 0.5, i.e. 
both 0.1 and 0.9 are\n interpreted as \"find the 90% confidence interval\".\n \n Returns\n -------\n medslope : float\n Theil slope.\n medintercept : float\n Intercept of the Theil line, as ``median(y) - medslope*median(x)``.\n lo_slope : float\n Lower bound of the confidence interval on `medslope`.\n up_slope : float\n Upper bound of the confidence interval on `medslope`.\n \n See also\n --------\n siegelslopes : a similar technique using repeated medians\n \n Notes\n -----\n The implementation of `theilslopes` follows [1]_. The intercept is\n not defined in [1]_, and here it is defined as ``median(y) -\n medslope*median(x)``, which is given in [3]_. Other definitions of\n the intercept exist in the literature. A confidence interval for\n the intercept is not given as this question is not addressed in\n [1]_.\n \n References\n ----------\n .. [1] P.K. Sen, \"Estimates of the regression coefficient based on Kendall's tau\",\n J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.\n .. [2] H. Theil, \"A rank-invariant method of linear and polynomial\n regression analysis I, II and III\", Nederl. Akad. Wetensch., Proc.\n 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.\n .. [3] W.L. Conover, \"Practical nonparametric statistics\", 2nd ed.,\n John Wiley and Sons, New York, pp. 493.\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n >>> x = np.linspace(-5, 5, num=150)\n >>> y = x + np.random.normal(size=x.size)\n >>> y[11:15] += 10 # add outliers\n >>> y[-5:] -= 7\n \n Compute the slope, intercept and 90% confidence interval. For comparison,\n also compute the least-squares fit with `linregress`:\n \n >>> res = stats.theilslopes(y, x, 0.90)\n >>> lsq_res = stats.linregress(x, y)\n \n Plot the results. The Theil-Sen regression line is shown in red, with the\n dashed red lines illustrating the confidence interval of the slope (note\n that the dashed red lines are not the confidence interval of the regression\n as the confidence interval of the intercept is not included). The green\n line shows the least-squares fit for comparison.\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(x, y, 'b.')\n >>> ax.plot(x, res[1] + res[0] * x, 'r-')\n >>> ax.plot(x, res[1] + res[2] * x, 'r--')\n >>> ax.plot(x, res[1] + res[3] * x, 'r--')\n >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')\n >>> plt.show()\n \n tiecorrect(rankvals)\n Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.\n \n Parameters\n ----------\n rankvals : array_like\n A 1-D sequence of ranks. Typically this will be the array\n returned by `~scipy.stats.rankdata`.\n \n Returns\n -------\n factor : float\n Correction factor for U or H.\n \n See Also\n --------\n rankdata : Assign ranks to the data\n mannwhitneyu : Mann-Whitney rank test\n kruskal : Kruskal-Wallis H test\n \n References\n ----------\n .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral\n Sciences. New York: McGraw-Hill.\n \n Examples\n --------\n >>> from scipy.stats import tiecorrect, rankdata\n >>> tiecorrect([1, 2.5, 2.5, 4])\n 0.9\n >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])\n >>> ranks\n array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. 
, 5.5])\n >>> tiecorrect(ranks)\n 0.9833333333333333\n \n tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate')\n Compute the trimmed maximum.\n \n This function computes the maximum value of an array along a given axis,\n while ignoring values larger than a specified upper limit.\n \n Parameters\n ----------\n a : array_like\n Array of values.\n upperlimit : None or float, optional\n Values in the input array greater than the given limit will be ignored.\n When upperlimit is None, then all values are used. The default value\n is None.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the\n whole array `a`.\n inclusive : {True, False}, optional\n This flag determines whether values exactly equal to the upper limit\n are included. The default value is True.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n tmax : float, int or ndarray\n Trimmed maximum.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.tmax(x)\n 19\n \n >>> stats.tmax(x, 13)\n 13\n \n >>> stats.tmax(x, 13, inclusive=False)\n 12\n \n tmean(a, limits=None, inclusive=(True, True), axis=None)\n Compute the trimmed mean.\n \n This function finds the arithmetic mean of given values, ignoring values\n outside the given `limits`.\n \n Parameters\n ----------\n a : array_like\n Array of values.\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None (default), then all\n values are used. Either of the limit values in the tuple can also be\n None representing a half-open interval.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n axis : int or None, optional\n Axis along which to compute test. Default is None.\n \n Returns\n -------\n tmean : float\n Trimmed mean.\n \n See Also\n --------\n trim_mean : Returns mean after trimming a proportion from both tails.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.tmean(x)\n 9.5\n >>> stats.tmean(x, (3,17))\n 10.0\n \n tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate')\n Compute the trimmed minimum.\n \n This function finds the miminum value of an array `a` along the\n specified axis, but only considering values greater than a specified\n lower limit.\n \n Parameters\n ----------\n a : array_like\n Array of values.\n lowerlimit : None or float, optional\n Values in the input array less than the given limit will be ignored.\n When lowerlimit is None, then all values are used. The default value\n is None.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the\n whole array `a`.\n inclusive : {True, False}, optional\n This flag determines whether values exactly equal to the lower limit\n are included. 
The default value is True.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n tmin : float, int or ndarray\n Trimmed minimum.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.tmin(x)\n 0\n \n >>> stats.tmin(x, 13)\n 13\n \n >>> stats.tmin(x, 13, inclusive=False)\n 14\n \n trim1(a, proportiontocut, tail='right', axis=0)\n Slice off a proportion from ONE end of the passed array distribution.\n \n If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'\n 10% of scores. The lowest or highest values are trimmed (depending on\n the tail).\n Slice off less if proportion results in a non-integer slice index\n (i.e. conservatively slices off `proportiontocut` ).\n \n Parameters\n ----------\n a : array_like\n Input array.\n proportiontocut : float\n Fraction to cut off of 'left' or 'right' of distribution.\n tail : {'left', 'right'}, optional\n Defaults to 'right'.\n axis : int or None, optional\n Axis along which to trim data. Default is 0. If None, compute over\n the whole array `a`.\n \n Returns\n -------\n trim1 : ndarray\n Trimmed version of array `a`. The order of the trimmed content is\n undefined.\n \n trim_mean(a, proportiontocut, axis=0)\n Return mean of array after trimming distribution from both tails.\n \n If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of\n scores. The input is sorted before slicing. Slices off less if proportion\n results in a non-integer slice index (i.e., conservatively slices off\n `proportiontocut` ).\n \n Parameters\n ----------\n a : array_like\n Input array.\n proportiontocut : float\n Fraction to cut off of both tails of the distribution.\n axis : int or None, optional\n Axis along which the trimmed means are computed. Default is 0.\n If None, compute over the whole array `a`.\n \n Returns\n -------\n trim_mean : ndarray\n Mean of trimmed array.\n \n See Also\n --------\n trimboth\n tmean : Compute the trimmed mean ignoring values outside given `limits`.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.trim_mean(x, 0.1)\n 9.5\n >>> x2 = x.reshape(5, 4)\n >>> x2\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15],\n [16, 17, 18, 19]])\n >>> stats.trim_mean(x2, 0.25)\n array([ 8., 9., 10., 11.])\n >>> stats.trim_mean(x2, 0.25, axis=1)\n array([ 1.5, 5.5, 9.5, 13.5, 17.5])\n \n trimboth(a, proportiontocut, axis=0)\n Slice off a proportion of items from both ends of an array.\n \n Slice off the passed proportion of items from both ends of the passed\n array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**\n rightmost 10% of scores). The trimmed values are the lowest and\n highest ones.\n Slice off less if proportion results in a non-integer slice index (i.e.\n conservatively slices off `proportiontocut`).\n \n Parameters\n ----------\n a : array_like\n Data to trim.\n proportiontocut : float\n Proportion (in range 0-1) of total data set to trim of each end.\n axis : int or None, optional\n Axis along which to trim data. Default is 0. If None, compute over\n the whole array `a`.\n \n Returns\n -------\n out : ndarray\n Trimmed version of array `a`. 
The order of the trimmed content\n is undefined.\n \n See Also\n --------\n trim_mean\n \n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(20)\n >>> b = stats.trimboth(a, 0.1)\n >>> b.shape\n (16,)\n \n tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1)\n Compute the trimmed standard error of the mean.\n \n This function finds the standard error of the mean for given\n values, ignoring values outside the given `limits`.\n \n Parameters\n ----------\n a : array_like\n Array of values.\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None, then all values are\n used. Either of the limit values in the tuple can also be None\n representing a half-open interval. The default value is None.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the\n whole array `a`.\n ddof : int, optional\n Delta degrees of freedom. Default is 1.\n \n Returns\n -------\n tsem : float\n Trimmed standard error of the mean.\n \n Notes\n -----\n `tsem` uses unbiased sample standard deviation, i.e. it uses a\n correction factor ``n / (n - 1)``.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.tsem(x)\n 1.3228756555322954\n >>> stats.tsem(x, (3,17))\n 1.1547005383792515\n \n tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1)\n Compute the trimmed sample standard deviation.\n \n This function finds the sample standard deviation of given values,\n ignoring values outside the given `limits`.\n \n Parameters\n ----------\n a : array_like\n Array of values.\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None, then all values are\n used. Either of the limit values in the tuple can also be None\n representing a half-open interval. The default value is None.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the\n whole array `a`.\n ddof : int, optional\n Delta degrees of freedom. Default is 1.\n \n Returns\n -------\n tstd : float\n Trimmed sample standard deviation.\n \n Notes\n -----\n `tstd` computes the unbiased sample standard deviation, i.e. it uses a\n correction factor ``n / (n - 1)``.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.tstd(x)\n 5.9160797830996161\n >>> stats.tstd(x, (3,17))\n 4.4721359549995796\n \n ttest_1samp(a, popmean, axis=0, nan_policy='propagate')\n Calculate the T-test for the mean of ONE group of scores.\n \n This is a two-sided test for the null hypothesis that the expected value\n (mean) of a sample of independent observations `a` is equal to the given\n population mean, `popmean`.\n \n Parameters\n ----------\n a : array_like\n Sample observation.\n popmean : float or array_like\n Expected value in null hypothesis. 
If array_like, then it must have the\n same shape as `a` excluding the axis dimension.\n axis : int or None, optional\n Axis along which to compute test. If None, compute over the whole\n array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float or array\n t-statistic.\n pvalue : float or array\n Two-sided p-value.\n \n Examples\n --------\n >>> from scipy import stats\n \n >>> np.random.seed(7654567) # fix seed to get the same result\n >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))\n \n Test if mean of random sample is equal to true mean, and different mean.\n We reject the null hypothesis in the second case and don't reject it in\n the first case.\n \n >>> stats.ttest_1samp(rvs,5.0)\n (array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))\n >>> stats.ttest_1samp(rvs,0.0)\n (array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))\n \n Examples using axis and non-scalar dimension for population mean.\n \n >>> stats.ttest_1samp(rvs,[5.0,0.0])\n (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))\n >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)\n (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))\n >>> stats.ttest_1samp(rvs,[[5.0],[0.0]])\n (array([[-0.68014479, -0.04323899],\n [ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],\n [ 7.89094663e-03, 1.49986458e-04]]))\n \n ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate')\n Calculate the T-test for the means of *two independent* samples of scores.\n \n This is a two-sided test for the null hypothesis that 2 independent samples\n have identical average (expected) values. This test assumes that the\n populations have identical variances by default.\n \n Parameters\n ----------\n a, b : array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int or None, optional\n Axis along which to compute test. If None, compute over the whole\n arrays, `a`, and `b`.\n equal_var : bool, optional\n If True (default), perform a standard independent 2 sample test\n that assumes equal population variances [1]_.\n If False, perform Welch's t-test, which does not assume equal\n population variance [2]_.\n \n .. versionadded:: 0.11.0\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n \n Returns\n -------\n statistic : float or array\n The calculated t-statistic.\n pvalue : float or array\n The two-tailed p-value.\n \n Notes\n -----\n We can use this test, if we observe two independent samples from\n the same or different population, e.g. exam scores of boys and\n girls or of two ethnic groups. The test measures whether the\n average (expected) value differs significantly across samples. If\n we observe a large p-value, for example larger than 0.05 or 0.1,\n then we cannot reject the null hypothesis of identical average scores.\n If the p-value is smaller than the threshold, e.g. 
1%, 5% or 10%,\n then we reject the null hypothesis of equal averages.\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test\n \n .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test\n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678)\n \n Test with sample with identical means:\n \n >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)\n >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)\n >>> stats.ttest_ind(rvs1,rvs2)\n (0.26833823296239279, 0.78849443369564776)\n >>> stats.ttest_ind(rvs1,rvs2, equal_var = False)\n (0.26833823296239279, 0.78849452749500748)\n \n `ttest_ind` underestimates p for unequal variances:\n \n >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)\n >>> stats.ttest_ind(rvs1, rvs3)\n (-0.46580283298287162, 0.64145827413436174)\n >>> stats.ttest_ind(rvs1, rvs3, equal_var = False)\n (-0.46580283298287162, 0.64149646246569292)\n \n When n1 != n2, the equal variance t-statistic is no longer equal to the\n unequal variance t-statistic:\n \n >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)\n >>> stats.ttest_ind(rvs1, rvs4)\n (-0.99882539442782481, 0.3182832709103896)\n >>> stats.ttest_ind(rvs1, rvs4, equal_var = False)\n (-0.69712570584654099, 0.48716927725402048)\n \n T-test with different means, variance, and n:\n \n >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)\n >>> stats.ttest_ind(rvs1, rvs5)\n (-1.4679669854490653, 0.14263895620529152)\n >>> stats.ttest_ind(rvs1, rvs5, equal_var = False)\n (-0.94365973617132992, 0.34744170334794122)\n \n ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True)\n T-test for means of two independent samples from descriptive statistics.\n \n This is a two-sided test for the null hypothesis that two independent\n samples have identical average (expected) values.\n \n Parameters\n ----------\n mean1 : array_like\n The mean(s) of sample 1.\n std1 : array_like\n The standard deviation(s) of sample 1.\n nobs1 : array_like\n The number(s) of observations of sample 1.\n mean2 : array_like\n The mean(s) of sample 2.\n std2 : array_like\n The standard deviations(s) of sample 2.\n nobs2 : array_like\n The number(s) of observations of sample 2.\n equal_var : bool, optional\n If True (default), perform a standard independent 2 sample test\n that assumes equal population variances [1]_.\n If False, perform Welch's t-test, which does not assume equal\n population variance [2]_.\n \n Returns\n -------\n statistic : float or array\n The calculated t-statistics.\n pvalue : float or array\n The two-tailed p-value.\n \n See Also\n --------\n scipy.stats.ttest_ind\n \n Notes\n -----\n .. versionadded:: 0.16.0\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test\n \n .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test\n \n Examples\n --------\n Suppose we have the summary data for two samples, as follows::\n \n Sample Sample\n Size Mean Variance\n Sample 1 13 15.0 87.5\n Sample 2 11 12.0 39.0\n \n Apply the t-test to this data (with the assumption that the population\n variances are equal):\n \n >>> from scipy.stats import ttest_ind_from_stats\n >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,\n ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)\n Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)\n \n For comparison, here is the data from which those summary statistics\n were taken. 
With this data, we can compute the same result using\n `scipy.stats.ttest_ind`:\n \n >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])\n >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])\n >>> from scipy.stats import ttest_ind\n >>> ttest_ind(a, b)\n Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)\n \n Suppose we instead have binary data and would like to apply a t-test to\n compare the proportion of 1s in two independent groups::\n \n Number of Sample Sample\n Size ones Mean Variance\n Sample 1 150 30 0.2 0.16\n Sample 2 200 45 0.225 0.174375\n \n The sample mean :math:`\\hat{p}` is the proportion of ones in the sample\n and the variance for a binary observation is estimated by\n :math:`\\hat{p}(1-\\hat{p})`.\n \n >>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,\n ... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)\n Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)\n \n For comparison, we could compute the t statistic and p-value using\n arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above.\n \n >>> group1 = np.array([1]*30 + [0]*(150-30))\n >>> group2 = np.array([1]*45 + [0]*(200-45))\n >>> ttest_ind(group1, group2)\n Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)\n \n ttest_rel(a, b, axis=0, nan_policy='propagate')\n Calculate the t-test on TWO RELATED samples of scores, a and b.\n \n This is a two-sided test for the null hypothesis that 2 related or\n repeated samples have identical average (expected) values.\n \n Parameters\n ----------\n a, b : array_like\n The arrays must have the same shape.\n axis : int or None, optional\n Axis along which to compute test. If None, compute over the whole\n arrays, `a`, and `b`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n statistic : float or array\n t-statistic.\n pvalue : float or array\n Two-sided p-value.\n \n Notes\n -----\n Examples for use are scores of the same set of student in\n different exams, or repeated sampling from the same units. The\n test measures whether the average score differs significantly\n across samples (e.g. exams). If we observe a large p-value, for\n example greater than 0.05 or 0.1 then we cannot reject the null\n hypothesis of identical average scores. If the p-value is smaller\n than the threshold, e.g. 1%, 5% or 10%, then we reject the null\n hypothesis of equal averages. Small p-values are associated with\n large t-statistics.\n \n References\n ----------\n https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples\n \n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678) # fix random seed to get same numbers\n \n >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)\n >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +\n ... stats.norm.rvs(scale=0.2,size=500))\n >>> stats.ttest_rel(rvs1,rvs2)\n (0.24101764965300962, 0.80964043445811562)\n >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +\n ... 
stats.norm.rvs(scale=0.2,size=500))\n >>> stats.ttest_rel(rvs1,rvs3)\n (-3.9995108708727933, 7.3082402191726459e-005)\n \n tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1)\n Compute the trimmed variance.\n \n This function computes the sample variance of an array of values,\n while ignoring values which are outside of given `limits`.\n \n Parameters\n ----------\n a : array_like\n Array of values.\n limits : None or (lower limit, upper limit), optional\n Values in the input array less than the lower limit or greater than the\n upper limit will be ignored. When limits is None, then all values are\n used. Either of the limit values in the tuple can also be None\n representing a half-open interval. The default value is None.\n inclusive : (bool, bool), optional\n A tuple consisting of the (lower flag, upper flag). These flags\n determine whether values exactly equal to the lower or upper limits\n are included. The default value is (True, True).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over the\n whole array `a`.\n ddof : int, optional\n Delta degrees of freedom. Default is 1.\n \n Returns\n -------\n tvar : float\n Trimmed variance.\n \n Notes\n -----\n `tvar` computes the unbiased sample variance, i.e. it uses a correction\n factor ``n / (n - 1)``.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = np.arange(20)\n >>> stats.tvar(x)\n 35.0\n >>> stats.tvar(x, (3,17))\n 20.0\n \n variation(a, axis=0, nan_policy='propagate')\n Compute the coefficient of variation.\n \n The coefficient of variation is the ratio of the biased standard\n deviation to the mean.\n \n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate the coefficient of variation. Default\n is 0. If None, compute over the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n \n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n \n Returns\n -------\n variation : ndarray\n The calculated variation along the requested axis.\n \n References\n ----------\n .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard\n Probability and Statistics Tables and Formulae. Chapman & Hall: New\n York. 2000.\n \n Examples\n --------\n >>> from scipy.stats import variation\n >>> variation([1, 2, 3, 4, 5])\n 0.47140452079103173\n \n wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None)\n Compute the first Wasserstein distance between two 1D distributions.\n \n This distance is also known as the earth mover's distance, since it can be\n seen as the minimum amount of \"work\" required to transform :math:`u` into\n :math:`v`, where \"work\" is measured as the amount of distribution weight\n that must be moved, multiplied by the distance it has to be moved.\n \n .. versionadded:: 1.0.0\n \n Parameters\n ----------\n u_values, v_values : array_like\n Values observed in the (empirical) distribution.\n u_weights, v_weights : array_like, optional\n Weight for each value. If unspecified, each value is assigned the same\n weight.\n `u_weights` (resp. `v_weights`) must have the same length as\n `u_values` (resp. `v_values`). 
If the weight sum differs from 1, it\n must still be positive and finite so that the weights can be normalized\n to sum to 1.\n \n Returns\n -------\n distance : float\n The computed distance between the distributions.\n \n Notes\n -----\n The first Wasserstein distance between the distributions :math:`u` and\n :math:`v` is:\n \n .. math::\n \n l_1 (u, v) = \\inf_{\\pi \\in \\Gamma (u, v)} \\int_{\\mathbb{R} \\times\n \\mathbb{R}} |x-y| \\mathrm{d} \\pi (x, y)\n \n where :math:`\\Gamma (u, v)` is the set of (probability) distributions on\n :math:`\\mathbb{R} \\times \\mathbb{R}` whose marginals are :math:`u` and\n :math:`v` on the first and second factors respectively.\n \n If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and\n :math:`v`, this distance also equals to:\n \n .. math::\n \n l_1(u, v) = \\int_{-\\infty}^{+\\infty} |U-V|\n \n See [2]_ for a proof of the equivalence of both definitions.\n \n The input distributions can be empirical, therefore coming from samples\n whose values are effectively inputs of the function, or they can be seen as\n generalized functions, in which case they are weighted sums of Dirac delta\n functions located at the specified values.\n \n References\n ----------\n .. [1] \"Wasserstein metric\", https://en.wikipedia.org/wiki/Wasserstein_metric\n .. [2] Ramdas, Garcia, Cuturi \"On Wasserstein Two Sample Testing and Related\n Families of Nonparametric Tests\" (2015). :arXiv:`1509.02237`.\n \n Examples\n --------\n >>> from scipy.stats import wasserstein_distance\n >>> wasserstein_distance([0, 1, 3], [5, 6, 8])\n 5.0\n >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])\n 0.25\n >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],\n ... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])\n 4.0781331438047861\n \n weightedtau(x, y, rank=True, weigher=None, additive=True)\n Compute a weighted version of Kendall's :math:`\\tau`.\n \n The weighted :math:`\\tau` is a weighted version of Kendall's\n :math:`\\tau` in which exchanges of high weight are more influential than\n exchanges of low weight. The default parameters compute the additive\n hyperbolic version of the index, :math:`\\tau_\\mathrm h`, which has\n been shown to provide the best balance between important and\n unimportant elements [1]_.\n \n The weighting is defined by means of a rank array, which assigns a\n nonnegative rank to each element, and a weigher function, which\n assigns a weight based from the rank to each element. The weight of an\n exchange is then the sum or the product of the weights of the ranks of\n the exchanged elements. The default parameters compute\n :math:`\\tau_\\mathrm h`: an exchange between elements with rank\n :math:`r` and :math:`s` (starting from zero) has weight\n :math:`1/(r+1) + 1/(s+1)`.\n \n Specifying a rank array is meaningful only if you have in mind an\n external criterion of importance. If, as it usually happens, you do\n not have in mind a specific rank, the weighted :math:`\\tau` is\n defined by averaging the values obtained using the decreasing\n lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the\n behavior with default parameters.\n \n Note that if you are computing the weighted :math:`\\tau` on arrays of\n ranks, rather than of scores (i.e., a larger value implies a lower\n rank) you must negate the ranks, so that elements of higher rank are\n associated with a larger value.\n \n Parameters\n ----------\n x, y : array_like\n Arrays of scores, of the same shape. 
If arrays are not 1-D, they will\n be flattened to 1-D.\n rank : array_like of ints or bool, optional\n A nonnegative rank assigned to each element. If it is None, the\n decreasing lexicographical rank by (`x`, `y`) will be used: elements of\n higher rank will be those with larger `x`-values, using `y`-values to\n break ties (in particular, swapping `x` and `y` will give a different\n result). If it is False, the element indices will be used\n directly as ranks. The default is True, in which case this\n function returns the average of the values obtained using the\n decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).\n weigher : callable, optional\n The weigher function. Must map nonnegative integers (zero\n representing the most important element) to a nonnegative weight.\n The default, None, provides hyperbolic weighing, that is,\n rank :math:`r` is mapped to weight :math:`1/(r+1)`.\n additive : bool, optional\n If True, the weight of an exchange is computed by adding the\n weights of the ranks of the exchanged elements; otherwise, the weights\n are multiplied. The default is True.\n \n Returns\n -------\n correlation : float\n The weighted :math:`\\tau` correlation index.\n pvalue : float\n Presently ``np.nan``, as the null statistics is unknown (even in the\n additive hyperbolic case).\n \n See Also\n --------\n kendalltau : Calculates Kendall's tau.\n spearmanr : Calculates a Spearman rank-order correlation coefficient.\n theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).\n \n Notes\n -----\n This function uses an :math:`O(n \\log n)`, mergesort-based algorithm\n [1]_ that is a weighted extension of Knight's algorithm for Kendall's\n :math:`\\tau` [2]_. It can compute Shieh's weighted :math:`\\tau` [3]_\n between rankings without ties (i.e., permutations) by setting\n `additive` and `rank` to False, as the definition given in [1]_ is a\n generalization of Shieh's.\n \n NaNs are considered the smallest possible score.\n \n .. versionadded:: 0.19.0\n \n References\n ----------\n .. [1] Sebastiano Vigna, \"A weighted correlation index for rankings with\n ties\", Proceedings of the 24th international conference on World\n Wide Web, pp. 1166-1176, ACM, 2015.\n .. [2] W.R. Knight, \"A Computer Method for Calculating Kendall's Tau with\n Ungrouped Data\", Journal of the American Statistical Association,\n Vol. 61, No. 314, Part 1, pp. 436-439, 1966.\n .. [3] Grace S. Shieh. \"A weighted Kendall's tau statistic\", Statistics &\n Probability Letters, Vol. 39, No. 1, pp. 
17-24, 1998.\n \n Examples\n --------\n >>> from scipy import stats\n >>> x = [12, 2, 1, 12, 2]\n >>> y = [1, 4, 7, 1, 0]\n >>> tau, p_value = stats.weightedtau(x, y)\n >>> tau\n -0.56694968153682723\n >>> p_value\n nan\n >>> tau, p_value = stats.weightedtau(x, y, additive=False)\n >>> tau\n -0.62205716951801038\n \n NaNs are considered the smallest possible score:\n \n >>> x = [12, 2, 1, 12, 2]\n >>> y = [1, 4, 7, 1, np.nan]\n >>> tau, _ = stats.weightedtau(x, y)\n >>> tau\n -0.56694968153682723\n \n This is exactly Kendall's tau:\n \n >>> x = [12, 2, 1, 12, 2]\n >>> y = [1, 4, 7, 1, 0]\n >>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)\n >>> tau\n -0.47140452079103173\n \n >>> x = [12, 2, 1, 12, 2]\n >>> y = [1, 4, 7, 1, 0]\n >>> stats.weightedtau(x, y, rank=None)\n WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)\n >>> stats.weightedtau(y, x, rank=None)\n WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)\n \n wilcoxon(x, y=None, zero_method='wilcox', correction=False, alternative='two-sided')\n Calculate the Wilcoxon signed-rank test.\n \n The Wilcoxon signed-rank test tests the null hypothesis that two\n related paired samples come from the same distribution. In particular,\n it tests whether the distribution of the differences x - y is symmetric\n about zero. It is a non-parametric version of the paired T-test.\n \n Parameters\n ----------\n x : array_like\n Either the first set of measurements (in which case `y` is the second\n set of measurements), or the differences between two sets of\n measurements (in which case `y` is not to be specified.) Must be\n one-dimensional.\n y : array_like, optional\n Either the second set of measurements (if `x` is the first set of\n measurements), or not specified (if `x` is the differences between\n two sets of measurements.) Must be one-dimensional.\n zero_method : {'pratt', 'wilcox', 'zsplit'}, optional\n The following options are available (default is 'wilcox'):\n \n * 'pratt': Includes zero-differences in the ranking process,\n but drops the ranks of the zeros, see [4]_, (more conservative).\n * 'wilcox': Discards all zero-differences, the default.\n * 'zsplit': Includes zero-differences in the ranking process and \n split the zero rank between positive and negative ones.\n correction : bool, optional\n If True, apply continuity correction by adjusting the Wilcoxon rank\n statistic by 0.5 towards the mean value when computing the\n z-statistic. Default is False.\n alternative : {\"two-sided\", \"greater\", \"less\"}, optional\n The alternative hypothesis to be tested, see Notes. Default is\n \"two-sided\".\n \n Returns\n -------\n statistic : float\n If `alternative` is \"two-sided\", the sum of the ranks of the\n differences above or below zero, whichever is smaller.\n Otherwise the sum of the ranks of the differences above zero.\n pvalue : float\n The p-value for the test depending on `alternative`.\n \n See Also\n --------\n kruskal, mannwhitneyu\n \n Notes\n -----\n The test has been introduced in [4]_. Given n independent samples\n (xi, yi) from a bivariate distribution (i.e. paired samples),\n it computes the differences di = xi - yi. One assumption of the test\n is that the differences are symmetric, see [2]_.\n The two-sided test has the null hypothesis that the median of the\n differences is zero against the alternative that it is different from\n zero. 
The one-sided test has the null hypothesis that the median is \n positive against the alternative that it is negative \n (``alternative == 'less'``), or vice versa (``alternative == 'greater.'``).\n \n The test uses a normal approximation to derive the p-value (if\n ``zero_method == 'pratt'``, the approximation is adjusted as in [5]_).\n A typical rule is to require that n > 20 ([2]_, p. 383). For smaller n,\n exact tables can be used to find critical values.\n \n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test\n .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.\n .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed\n Rank Procedures, Journal of the American Statistical Association,\n Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`\n .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,\n Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`\n .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank\n Sampling Distribution When Zero Differences are Present,\n Journal of the American Statistical Association, Vol. 62, 1967,\n pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`\n \n Examples\n --------\n In [4]_, the differences in height between cross- and self-fertilized\n corn plants is given as follows:\n \n >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]\n \n Cross-fertilized plants appear to be be higher. To test the null\n hypothesis that there is no height difference, we can apply the\n two-sided test:\n \n >>> from scipy.stats import wilcoxon\n >>> w, p = wilcoxon(d)\n >>> w, p\n (24.0, 0.04088813291185591)\n \n Hence, we would reject the null hypothesis at a confidence level of 5%,\n concluding that there is a difference in height between the groups.\n To confirm that the median of the differences can be assumed to be\n positive, we use:\n \n >>> w, p = wilcoxon(d, alternative='greater')\n >>> w, p\n (96.0, 0.020444066455927955)\n \n This shows that the null hypothesis that the median is negative can be\n rejected at a confidence level of 5% in favor of the alternative that\n the median is greater than zero. The p-value based on the approximation\n is within the range of 0.019 and 0.054 given in [2]_.\n Note that the statistic changed to 96 in the one-sided case (the sum\n of ranks of positive differences) whereas it is 24 in the two-sided\n case (the minimum of sum of ranks above and below zero).\n \n yeojohnson(x, lmbda=None)\n Return a dataset transformed by a Yeo-Johnson power transformation.\n \n Parameters\n ----------\n x : ndarray\n Input array. 
Should be 1-dimensional.\n lmbda : float, optional\n If ``lmbda`` is ``None``, find the lambda that maximizes the\n log-likelihood function and return it as the second output argument.\n Otherwise the transformation is done for the given value.\n \n Returns\n -------\n yeojohnson: ndarray\n Yeo-Johnson power transformed array.\n maxlog : float, optional\n If the `lmbda` parameter is None, the second returned argument is\n the lambda that maximizes the log-likelihood function.\n \n See Also\n --------\n probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox\n \n Notes\n -----\n The Yeo-Johnson transform is given by::\n \n y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0\n log(x + 1), for x >= 0, lmbda = 0\n -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2\n -log(-x + 1), for x < 0, lmbda = 2\n \n Unlike `boxcox`, `yeojohnson` does not require the input data to be\n positive.\n \n .. versionadded:: 1.2.0\n \n \n References\n ----------\n I. Yeo and R.A. Johnson, \"A New Family of Power Transformations to\n Improve Normality or Symmetry\", Biometrika 87.4 (2000):\n \n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n We generate some random variates from a non-normal distribution and make a\n probability plot for it, to show it is non-normal in the tails:\n \n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(211)\n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)\n >>> ax1.set_xlabel('')\n >>> ax1.set_title('Probplot against normal distribution')\n \n We now use `yeojohnson` to transform the data so it's closest to normal:\n \n >>> ax2 = fig.add_subplot(212)\n >>> xt, lmbda = stats.yeojohnson(x)\n >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)\n >>> ax2.set_title('Probplot after Yeo-Johnson transformation')\n \n >>> plt.show()\n \n yeojohnson_llf(lmb, data)\n The yeojohnson log-likelihood function.\n \n Parameters\n ----------\n lmb : scalar\n Parameter for Yeo-Johnson transformation. See `yeojohnson` for\n details.\n data : array_like\n Data to calculate Yeo-Johnson log-likelihood for. If `data` is\n multi-dimensional, the log-likelihood is calculated along the first\n axis.\n \n Returns\n -------\n llf : float\n Yeo-Johnson log-likelihood of `data` given `lmb`.\n \n See Also\n --------\n yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax\n \n Notes\n -----\n The Yeo-Johnson log-likelihood function is defined here as\n \n .. math::\n \n llf = N/2 \\log(\\hat{\\sigma}^2) + (\\lambda - 1)\n \\sum_i \\text{ sign }(x_i)\\log(|x_i| + 1)\n \n where :math:`\\hat{\\sigma}^2` is estimated variance of the the Yeo-Johnson\n transformed input data ``x``.\n \n .. versionadded:: 1.2.0\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n >>> np.random.seed(1245)\n \n Generate some random variates and calculate Yeo-Johnson log-likelihood\n values for them for a range of ``lmbda`` values:\n \n >>> x = stats.loggamma.rvs(5, loc=10, size=1000)\n >>> lmbdas = np.linspace(-2, 10)\n >>> llf = np.zeros(lmbdas.shape, dtype=float)\n >>> for ii, lmbda in enumerate(lmbdas):\n ... llf[ii] = stats.yeojohnson_llf(lmbda, x)\n \n Also find the optimal lmbda value with `yeojohnson`:\n \n >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x)\n \n Plot the log-likelihood as function of lmbda. 
Add the optimal lmbda as a\n horizontal line to check that that's really the optimum:\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> ax.plot(lmbdas, llf, 'b.-')\n >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r')\n >>> ax.set_xlabel('lmbda parameter')\n >>> ax.set_ylabel('Yeo-Johnson log-likelihood')\n \n Now add some probability plots to show that where the log-likelihood is\n maximized the data transformed with `yeojohnson` looks closest to normal:\n \n >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'\n >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):\n ... xt = stats.yeojohnson(x, lmbda=lmbda)\n ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)\n ... ax_inset = inset_axes(ax, width=\"20%\", height=\"20%\", loc=loc)\n ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')\n ... ax_inset.set_xticklabels([])\n ... ax_inset.set_yticklabels([])\n ... ax_inset.set_title(r'$\\lambda=%1.2f$' % lmbda)\n \n >>> plt.show()\n \n yeojohnson_normmax(x, brack=(-2, 2))\n Compute optimal Yeo-Johnson transform parameter.\n \n Compute optimal Yeo-Johnson transform parameter for input data, using\n maximum likelihood estimation.\n \n Parameters\n ----------\n x : array_like\n Input array.\n brack : 2-tuple, optional\n The starting interval for a downhill bracket search with\n `optimize.brent`. Note that this is in most cases not critical; the\n final result is allowed to be outside this bracket.\n \n Returns\n -------\n maxlog : float\n The optimal transform parameter found.\n \n See Also\n --------\n yeojohnson, yeojohnson_llf, yeojohnson_normplot\n \n Notes\n -----\n .. versionadded:: 1.2.0\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(1234) # make this example reproducible\n \n Generate some data and determine optimal ``lmbda``\n \n >>> x = stats.loggamma.rvs(5, size=30) + 5\n >>> lmax = stats.yeojohnson_normmax(x)\n \n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)\n >>> ax.axvline(lmax, color='r')\n \n >>> plt.show()\n \n yeojohnson_normplot(x, la, lb, plot=None, N=80)\n Compute parameters for a Yeo-Johnson normality plot, optionally show it.\n \n A Yeo-Johnson normality plot shows graphically what the best\n transformation parameter is to use in `yeojohnson` to obtain a\n distribution that is close to normal.\n \n Parameters\n ----------\n x : array_like\n Input array.\n la, lb : scalar\n The lower and upper bounds for the ``lmbda`` values to pass to\n `yeojohnson` for Yeo-Johnson transformations. 
These are also the\n limits of the horizontal axis of the plot if that is generated.\n plot : object, optional\n If given, plots the quantiles and least squares fit.\n `plot` is an object that has to have methods \"plot\" and \"text\".\n The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,\n or a custom object with the same methods.\n Default is None, which means that no plot is created.\n N : int, optional\n Number of points on the horizontal axis (equally distributed from\n `la` to `lb`).\n \n Returns\n -------\n lmbdas : ndarray\n The ``lmbda`` values for which a Yeo-Johnson transform was done.\n ppcc : ndarray\n Probability Plot Correlelation Coefficient, as obtained from `probplot`\n when fitting the Box-Cox transformed input `x` against a normal\n distribution.\n \n See Also\n --------\n probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max\n \n Notes\n -----\n Even if `plot` is given, the figure is not shown or saved by\n `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``\n should be used after calling `probplot`.\n \n .. versionadded:: 1.2.0\n \n Examples\n --------\n >>> from scipy import stats\n >>> import matplotlib.pyplot as plt\n \n Generate some non-normally distributed data, and create a Yeo-Johnson plot:\n \n >>> x = stats.loggamma.rvs(5, size=500) + 5\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax)\n \n Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in\n the same plot:\n \n >>> _, maxlog = stats.yeojohnson(x)\n >>> ax.axvline(maxlog, color='r')\n \n >>> plt.show()\n \n zmap(scores, compare, axis=0, ddof=0)\n Calculate the relative z-scores.\n \n Return an array of z-scores, i.e., scores that are standardized to\n zero mean and unit variance, where mean and variance are calculated\n from the comparison array.\n \n Parameters\n ----------\n scores : array_like\n The input for which z-scores are calculated.\n compare : array_like\n The input from which the mean and standard deviation of the\n normalization are taken; assumed to have the same dimension as\n `scores`.\n axis : int or None, optional\n Axis over which mean and variance of `compare` are calculated.\n Default is 0. If None, compute over the whole array `scores`.\n ddof : int, optional\n Degrees of freedom correction in the calculation of the\n standard deviation. Default is 0.\n \n Returns\n -------\n zscore : array_like\n Z-scores, in the same shape as `scores`.\n \n Notes\n -----\n This function preserves ndarray subclasses, and works also with\n matrices and masked arrays (it uses `asanyarray` instead of\n `asarray` for parameters).\n \n Examples\n --------\n >>> from scipy.stats import zmap\n >>> a = [0.5, 2.0, 2.5, 3]\n >>> b = [0, 1, 2, 3, 4]\n >>> zmap(a, b)\n array([-1.06066017, 0. , 0.35355339, 0.70710678])\n \n zscore(a, axis=0, ddof=0, nan_policy='propagate')\n Compute the z score.\n \n Compute the z score of each value in the sample, relative to the\n sample mean and standard deviation.\n \n Parameters\n ----------\n a : array_like\n An array like object containing the sample data.\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n ddof : int, optional\n Degrees of freedom correction in the calculation of the\n standard deviation. Default is 0.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan. 
'propagate' returns nan,\n 'raise' throws an error, 'omit' performs the calculations ignoring nan\n values. Default is 'propagate'.\n \n Returns\n -------\n zscore : array_like\n The z-scores, standardized by mean and standard deviation of\n input array `a`.\n \n Notes\n -----\n This function preserves ndarray subclasses, and works also with\n matrices and masked arrays (it uses `asanyarray` instead of\n `asarray` for parameters).\n \n Examples\n --------\n >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,\n ... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])\n >>> from scipy import stats\n >>> stats.zscore(a)\n array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,\n 0.6748, -1.1488, -1.3324])\n \n Computing along a specified axis, using n-1 degrees of freedom\n (``ddof=1``) to calculate the standard deviation:\n \n >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],\n ... [ 0.7149, 0.0775, 0.6072, 0.9656],\n ... [ 0.6341, 0.1403, 0.9759, 0.4064],\n ... [ 0.5918, 0.6948, 0.904 , 0.3721],\n ... [ 0.0921, 0.2481, 0.1188, 0.1366]])\n >>> stats.zscore(b, axis=1, ddof=1)\n array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],\n [ 0.33048416, -1.37380874, 0.04251374, 1.00081084],\n [ 0.26796377, -1.12598418, 1.23283094, -0.37481053],\n [-0.22095197, 0.24468594, 1.19042819, -1.21416216],\n [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])\n\nDATA\n __all__ = ['PearsonRConstantInputWarning', 'PearsonRNearConstantInputW...\n absolute_import = _Feature((2, 5, 0, 'alpha', 1), (3, 0, 0, 'alpha', 0...\n alpha = <scipy.stats._continuous_distns.alpha_gen object>\n anglit = <scipy.stats._continuous_distns.anglit_gen object>\n arcsine = <scipy.stats._continuous_distns.arcsine_gen object>\n argus = <scipy.stats._continuous_distns.argus_gen object>\n bernoulli = <scipy.stats._discrete_distns.bernoulli_gen object>\n beta = <scipy.stats._continuous_distns.beta_gen object>\n betabinom = <scipy.stats._discrete_distns.betabinom_gen object>\n betaprime = <scipy.stats._continuous_distns.betaprime_gen object>\n binom = <scipy.stats._discrete_distns.binom_gen object>\n boltzmann = <scipy.stats._discrete_distns.boltzmann_gen object>\n bradford = <scipy.stats._continuous_distns.bradford_gen object>\n burr = <scipy.stats._continuous_distns.burr_gen object>\n burr12 = <scipy.stats._continuous_distns.burr12_gen object>\n cauchy = <scipy.stats._continuous_distns.cauchy_gen object>\n chi = <scipy.stats._continuous_distns.chi_gen object>\n chi2 = <scipy.stats._continuous_distns.chi2_gen object>\n cosine = <scipy.stats._continuous_distns.cosine_gen object>\n crystalball = <scipy.stats._continuous_distns.crystalball_gen object>\n dgamma = <scipy.stats._continuous_distns.dgamma_gen object>\n dirichlet = <scipy.stats._multivariate.dirichlet_gen object>\n division = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 8192...\n dlaplace = <scipy.stats._discrete_distns.dlaplace_gen object>\n dweibull = <scipy.stats._continuous_distns.dweibull_gen object>\n erlang = <scipy.stats._continuous_distns.erlang_gen object>\n expon = <scipy.stats._continuous_distns.expon_gen object>\n exponnorm = <scipy.stats._continuous_distns.exponnorm_gen object>\n exponpow = <scipy.stats._continuous_distns.exponpow_gen object>\n exponweib = <scipy.stats._continuous_distns.exponweib_gen object>\n f = <scipy.stats._continuous_distns.f_gen object>\n fatiguelife = <scipy.stats._continuous_distns.fatiguelife_gen object>\n fisk = <scipy.stats._continuous_distns.fisk_gen object>\n foldcauchy = 
<scipy.stats._continuous_distns.foldcauchy_gen object>\n foldnorm = <scipy.stats._continuous_distns.foldnorm_gen object>\n frechet_l = <scipy.stats._continuous_distns.frechet_l_gen object>\n frechet_r = <scipy.stats._continuous_distns.frechet_r_gen object>\n gamma = <scipy.stats._continuous_distns.gamma_gen object>\n gausshyper = <scipy.stats._continuous_distns.gausshyper_gen object>\n genexpon = <scipy.stats._continuous_distns.genexpon_gen object>\n genextreme = <scipy.stats._continuous_distns.genextreme_gen object>\n gengamma = <scipy.stats._continuous_distns.gengamma_gen object>\n genhalflogistic = <scipy.stats._continuous_distns.genhalflogistic_gen ...\n geninvgauss = <scipy.stats._continuous_distns.geninvgauss_gen object>\n genlogistic = <scipy.stats._continuous_distns.genlogistic_gen object>\n gennorm = <scipy.stats._continuous_distns.gennorm_gen object>\n genpareto = <scipy.stats._continuous_distns.genpareto_gen object>\n geom = <scipy.stats._discrete_distns.geom_gen object>\n gilbrat = <scipy.stats._continuous_distns.gilbrat_gen object>\n gompertz = <scipy.stats._continuous_distns.gompertz_gen object>\n gumbel_l = <scipy.stats._continuous_distns.gumbel_l_gen object>\n gumbel_r = <scipy.stats._continuous_distns.gumbel_r_gen object>\n halfcauchy = <scipy.stats._continuous_distns.halfcauchy_gen object>\n halfgennorm = <scipy.stats._continuous_distns.halfgennorm_gen object>\n halflogistic = <scipy.stats._continuous_distns.halflogistic_gen object...\n halfnorm = <scipy.stats._continuous_distns.halfnorm_gen object>\n hypergeom = <scipy.stats._discrete_distns.hypergeom_gen object>\n hypsecant = <scipy.stats._continuous_distns.hypsecant_gen object>\n invgamma = <scipy.stats._continuous_distns.invgamma_gen object>\n invgauss = <scipy.stats._continuous_distns.invgauss_gen object>\n invweibull = <scipy.stats._continuous_distns.invweibull_gen object>\n invwishart = <scipy.stats._multivariate.invwishart_gen object>\n johnsonsb = <scipy.stats._continuous_distns.johnsonsb_gen object>\n johnsonsu = <scipy.stats._continuous_distns.johnsonsu_gen object>\n kappa3 = <scipy.stats._continuous_distns.kappa3_gen object>\n kappa4 = <scipy.stats._continuous_distns.kappa4_gen object>\n ksone = <scipy.stats._continuous_distns.ksone_gen object>\n kstwobign = <scipy.stats._continuous_distns.kstwobign_gen object>\n laplace = <scipy.stats._continuous_distns.laplace_gen object>\n levy = <scipy.stats._continuous_distns.levy_gen object>\n levy_l = <scipy.stats._continuous_distns.levy_l_gen object>\n levy_stable = <scipy.stats._continuous_distns.levy_stable_gen object>\n loggamma = <scipy.stats._continuous_distns.loggamma_gen object>\n logistic = <scipy.stats._continuous_distns.logistic_gen object>\n loglaplace = <scipy.stats._continuous_distns.loglaplace_gen object>\n lognorm = <scipy.stats._continuous_distns.lognorm_gen object>\n logser = <scipy.stats._discrete_distns.logser_gen object>\n loguniform = <scipy.stats._continuous_distns.reciprocal_gen object>\n lomax = <scipy.stats._continuous_distns.lomax_gen object>\n matrix_normal = <scipy.stats._multivariate.matrix_normal_gen object>\n maxwell = <scipy.stats._continuous_distns.maxwell_gen object>\n mielke = <scipy.stats._continuous_distns.mielke_gen object>\n moyal = <scipy.stats._continuous_distns.moyal_gen object>\n multinomial = <scipy.stats._multivariate.multinomial_gen object>\n multivariate_normal = <scipy.stats._multivariate.multivariate_normal_g...\n nakagami = <scipy.stats._continuous_distns.nakagami_gen object>\n nbinom = 
<scipy.stats._discrete_distns.nbinom_gen object>\n ncf = <scipy.stats._continuous_distns.ncf_gen object>\n nct = <scipy.stats._continuous_distns.nct_gen object>\n ncx2 = <scipy.stats._continuous_distns.ncx2_gen object>\n norm = <scipy.stats._continuous_distns.norm_gen object>\n norminvgauss = <scipy.stats._continuous_distns.norminvgauss_gen object...\n ortho_group = <scipy.stats._multivariate.ortho_group_gen object>\n pareto = <scipy.stats._continuous_distns.pareto_gen object>\n pearson3 = <scipy.stats._continuous_distns.pearson3_gen object>\n planck = <scipy.stats._discrete_distns.planck_gen object>\n poisson = <scipy.stats._discrete_distns.poisson_gen object>\n powerlaw = <scipy.stats._continuous_distns.powerlaw_gen object>\n powerlognorm = <scipy.stats._continuous_distns.powerlognorm_gen object...\n powernorm = <scipy.stats._continuous_distns.powernorm_gen object>\n print_function = _Feature((2, 6, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0)...\n randint = <scipy.stats._discrete_distns.randint_gen object>\n random_correlation = <scipy.stats._multivariate.random_correlation_gen...\n rayleigh = <scipy.stats._continuous_distns.rayleigh_gen object>\n rdist = <scipy.stats._continuous_distns.rdist_gen object>\n recipinvgauss = <scipy.stats._continuous_distns.recipinvgauss_gen obje...\n reciprocal = <scipy.stats._continuous_distns.reciprocal_gen object>\n rice = <scipy.stats._continuous_distns.rice_gen object>\n semicircular = <scipy.stats._continuous_distns.semicircular_gen object...\n skellam = <scipy.stats._discrete_distns.skellam_gen object>\n skewnorm = <scipy.stats._continuous_distns.skew_norm_gen object>\n special_ortho_group = <scipy.stats._multivariate.special_ortho_group_g...\n t = <scipy.stats._continuous_distns.t_gen object>\n trapz = <scipy.stats._continuous_distns.trapz_gen object>\n triang = <scipy.stats._continuous_distns.triang_gen object>\n truncexpon = <scipy.stats._continuous_distns.truncexpon_gen object>\n truncnorm = <scipy.stats._continuous_distns.truncnorm_gen object>\n tukeylambda = <scipy.stats._continuous_distns.tukeylambda_gen object>\n uniform = <scipy.stats._continuous_distns.uniform_gen object>\n unitary_group = <scipy.stats._multivariate.unitary_group_gen object>\n vonmises = <scipy.stats._continuous_distns.vonmises_gen object>\n vonmises_line = <scipy.stats._continuous_distns.vonmises_gen object>\n wald = <scipy.stats._continuous_distns.wald_gen object>\n weibull_max = <scipy.stats._continuous_distns.weibull_max_gen object>\n weibull_min = <scipy.stats._continuous_distns.weibull_min_gen object>\n wishart = <scipy.stats._multivariate.wishart_gen object>\n wrapcauchy = <scipy.stats._continuous_distns.wrapcauchy_gen object>\n yulesimon = <scipy.stats._discrete_distns.yulesimon_gen object>\n zipf = <scipy.stats._discrete_distns.zipf_gen object>\n\nFILE\n c:\\users\\rosha\\appdata\\roaming\\python\\python37\\site-packages\\scipy\\stats\\__init__.py\n\n\n" ] ], [ [ "Let's plot some probability density functions of the Gaussian distribution:", "_____no_output_____" ] ], [ [ "from scipy.stats import norm\n\nx = np.linspace(-5,5,num=200)\n\nfig = plt.figure(figsize=(12,6))\nfor mu, s in zip([0.5, 0.5, 0.5], [0.2, 0.5, 0.8]):\n plt.plot(x, norm.pdf(x,mu,s), lw=2, \n label=\"$\\mu={0:.1f}, s={1:.1f}$\".format(mu, s))\n plt.fill_between(x, norm.pdf(x, mu, s), alpha = .4)\n\nplt.xlim([-5,5])\nplt.legend(loc=0)\nplt.ylabel(\"pdf at $x$\")\nplt.xlabel(\"$x$\")\nplt.show()", "_____no_output_____" ] ], [ [ "Let's create an interactive plot of the Gamma distribution:", 
"_____no_output_____" ] ], [ [ "%%capture\n\nfrom ipywidgets import interactive\nfrom scipy.stats import gamma\n\nx = np.arange(0, 40, 0.005)\n\nshape, scale = 5, 0.5\nfig, ax = plt.subplots()\ny = gamma.pdf(x, shape, scale=scale)\nline = ax.plot(x, y)\nax.set_ylim((0,0.5))\n\ndef gamma_update(shape, scale):\n y = gamma.pdf(x, shape, scale=scale)\n line[0].set_ydata(y)\n fig.canvas.draw()\n display(fig)\n", "_____no_output_____" ], [ "interactive(gamma_update, shape=(0.1, 10.0), scale=(0.3, 3.0))", "_____no_output_____" ] ], [ [ "## 5.3 Seaborn", "_____no_output_____" ], [ "Seaborn is a Python data visualization library based on `matplotlib`. It is the equivalent to `R`'s package `ggplot2` and provides a high-level interface for drawing attractive and informative statistical graphics.", "_____no_output_____" ] ], [ [ "import seaborn as sns", "_____no_output_____" ] ], [ [ "We will create some basic `seaborn` plots. A gallery is alvailable here: http://seaborn.pydata.org/examples/index.html.", "_____no_output_____" ], [ "A scatterplot of a bivariate normal distribution:", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nmean, cov = [0, 1], [(1, .5), (.5, 1)]\ndata = np.random.multivariate_normal(mean, cov, 500)\ndf = pd.DataFrame(data, columns=[\"x\", \"y\"])", "_____no_output_____" ], [ "sns.jointplot(x=\"x\", y=\"y\", data=df)", "_____no_output_____" ] ], [ [ "A scatterplot matrix:", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df = sns.load_dataset(\"iris\")\nsns.pairplot(df, hue=\"species\")", "_____no_output_____" ], [ "tips = sns.load_dataset(\"tips\")\ntips", "_____no_output_____" ] ], [ [ "A linear model plot:", "_____no_output_____" ] ], [ [ "sns.lmplot(x=\"total_bill\", y=\"tip\", data=tips, hue=\"smoker\")", "_____no_output_____" ] ], [ [ "## 5.4 Statistical Models", "_____no_output_____" ], [ "Statsmodels is a Python package that allows users to explore data, estimate statistical models, and perform statistical tests. An extensive list of descriptive statistics, statistical tests, plotting functions, and result statistics are available for different types of data and each estimator. It complements SciPy's stats module.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport statsmodels.api as sm", "_____no_output_____" ] ], [ [ "The user guide can be found here: https://www.statsmodels.org/stable/user-guide.html.", "_____no_output_____" ], [ "Let's explore our `iris` dataset again:", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ] ], [ [ "We would like to know whether the `sepal_length` depends on the explanatory variable `species`. Let's create a boxplot:", "_____no_output_____" ] ], [ [ "sns.boxplot(x=\"species\", y=\"sepal_length\", data=df)", "_____no_output_____" ] ], [ [ "It seems like this is indeed the case. However, we need to perform some statistical test to conclude this. Let's do some ANOVA (see syllabus Statistical Models, M. de Gunst):", "_____no_output_____" ] ], [ [ "lm = sm.OLS.from_formula('sepal_length ~ species', data=df)\nfitted_model = lm.fit()\nprint(sm.stats.anova_lm(fitted_model))", " df sum_sq mean_sq F PR(>F)\nspecies 2.0 63.212133 31.606067 119.264502 1.669669e-31\nResidual 147.0 38.956200 0.265008 NaN NaN\n" ] ], [ [ "We conclude that `species` is a significant explanatory variable for `sepal_length`. 
We can find the coefficients using the following code:", "_____no_output_____" ] ], [ [ "print(fitted_model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: sepal_length R-squared: 0.619\nModel: OLS Adj. R-squared: 0.614\nMethod: Least Squares F-statistic: 119.3\nDate: Sun, 16 Aug 2020 Prob (F-statistic): 1.67e-31\nTime: 17:21:10 Log-Likelihood: -111.73\nNo. Observations: 150 AIC: 229.5\nDf Residuals: 147 BIC: 238.5\nDf Model: 2 \nCovariance Type: nonrobust \n=========================================================================================\n coef std err t P>|t| [0.025 0.975]\n-----------------------------------------------------------------------------------------\nIntercept 5.0060 0.073 68.762 0.000 4.862 5.150\nspecies[T.versicolor] 0.9300 0.103 9.033 0.000 0.727 1.133\nspecies[T.virginica] 1.5820 0.103 15.366 0.000 1.379 1.785\n==============================================================================\nOmnibus: 1.188 Durbin-Watson: 2.043\nProb(Omnibus): 0.552 Jarque-Bera (JB): 0.785\nSkew: 0.119 Prob(JB): 0.675\nKurtosis: 3.263 Cond. No. 3.73\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ] ], [ [ "Now let's explore a dataset from `statsmodels`:", "_____no_output_____" ] ], [ [ "spector_data = sm.datasets.spector.load_pandas().data\nspector_data", "_____no_output_____" ] ], [ [ "We will again do some ANOVA:", "_____no_output_____" ] ], [ [ "m = sm.OLS.from_formula('GRADE ~ GPA + TUCE', spector_data)", "_____no_output_____" ], [ "print(m.df_model, m.df_resid)\nprint(m.endog_names, m.exog_names)", "2.0 29.0\nGRADE ['Intercept', 'GPA', 'TUCE']\n" ], [ "res = m.fit()", "_____no_output_____" ], [ "# res.summary()\nprint(res.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: GRADE R-squared: 0.262\nModel: OLS Adj. R-squared: 0.211\nMethod: Least Squares F-statistic: 5.136\nDate: Sun, 16 Aug 2020 Prob (F-statistic): 0.0123\nTime: 17:21:15 Log-Likelihood: -16.730\nNo. Observations: 32 AIC: 39.46\nDf Residuals: 29 BIC: 43.86\nDf Model: 2 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -1.4494 0.578 -2.506 0.018 -2.632 -0.266\nGPA 0.4619 0.179 2.582 0.015 0.096 0.828\nTUCE 0.0161 0.021 0.752 0.458 -0.028 0.060\n==============================================================================\nOmnibus: 2.432 Durbin-Watson: 1.813\nProb(Omnibus): 0.296 Jarque-Bera (JB): 2.143\nSkew: 0.607 Prob(JB): 0.343\nKurtosis: 2.638 Cond. No. 175.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ] ], [ [ "From this table, we conclude that `GPA` is a significant factor but `TUCE` is not. 
We can extract the coefficients of our fitted model as follows:", "_____no_output_____" ] ], [ [ "res.params # parameters", "_____no_output_____" ] ], [ [ "Given the values `GPA` and `TUCE`, we can get a predicted value for `GRADE`:", "_____no_output_____" ] ], [ [ "m.predict(res.params, [1, 4.0, 25])", "_____no_output_____" ] ], [ [ "We predict `GRADE = 1`.", "_____no_output_____" ], [ "We can also perform some _Fisher tests_ to check whether the explanatory variables are significant:", "_____no_output_____" ] ], [ [ "a = res.f_test(\"GPA = 0\")\na.summary()", "_____no_output_____" ], [ "b = res.f_test(\"GPA = TUCE = 0\")\nb.summary()", "_____no_output_____" ] ], [ [ "Now let's take the full model:", "_____no_output_____" ] ], [ [ "spector_data", "_____no_output_____" ], [ "m = sm.OLS.from_formula('GRADE ~ GPA + TUCE + PSI', spector_data)\nres1 = m.fit()", "_____no_output_____" ], [ "print(res1.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: GRADE R-squared: 0.416\nModel: OLS Adj. R-squared: 0.353\nMethod: Least Squares F-statistic: 6.646\nDate: Sun, 16 Aug 2020 Prob (F-statistic): 0.00157\nTime: 17:21:21 Log-Likelihood: -12.978\nNo. Observations: 32 AIC: 33.96\nDf Residuals: 28 BIC: 39.82\nDf Model: 3 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -1.4980 0.524 -2.859 0.008 -2.571 -0.425\nGPA 0.4639 0.162 2.864 0.008 0.132 0.796\nTUCE 0.0105 0.019 0.539 0.594 -0.029 0.050\nPSI 0.3786 0.139 2.720 0.011 0.093 0.664\n==============================================================================\nOmnibus: 0.176 Durbin-Watson: 2.346\nProb(Omnibus): 0.916 Jarque-Bera (JB): 0.167\nSkew: 0.141 Prob(JB): 0.920\nKurtosis: 2.786 Cond. No. 176.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ] ], [ [ "As we can see, `PSI` is an important explanatory variable! We compare our models using the information criteria, or by performing some other tests:", "_____no_output_____" ] ], [ [ "res1.compare_f_test(res) # res1 better", "_____no_output_____" ], [ "res1.compare_lm_test(res)", "_____no_output_____" ], [ "res1.compare_lr_test(res)", "_____no_output_____" ], [ "help(sm)", "Help on module statsmodels.api in statsmodels:\n\nNAME\n statsmodels.api\n\nDESCRIPTION\n # -*- coding: utf-8 -*-\n # flake8: noqa\n\nVERSION\n 0.11.1\n\nFILE\n c:\\programdata\\anaconda3\\lib\\site-packages\\statsmodels\\api.py\n\n\n" ] ], [ [ "We can also use a generalized linear model using the `sm.GLM` function or do some time series analysis using the `sm.tsa` subpackage. The investigation of this is left to the entusiastic reader. An introduction video can be found here:", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\nYouTubeVideo('o7Ux5jKEbcw', width=533, height=300)", "_____no_output_____" ] ], [ [ "## 5.5 Python vs. R", "_____no_output_____" ], [ "There’s a lot of recurrent discussion on the right tool to use for statistics and machine learning. `R` and `Python` are often considered alternatives: they are both good for statistics and machine learning tasks. But which one is the fastest? 
For a benchmark, it is relatively hard to make it fair: the speed of execution may well depend on the code, or the speed of the different libraries used. We decide to do classification on the Iris dataset. It is a relatively easy Machine Learning project, which seems to make for a fair comparison. We use the commonly used libraries in both `R` and `Python`. The following steps are executed:\n\n1. Read a csv file with the iris data.\n2. Randomly split the data in 80% training data and 20% test data.\n3. Fit a number of models (logistic regression, linear discriminant analysis, k-nearest neighbors, and support vector machines) on the training data using built-in grid-search and cross-validation methods\n4. Evaluate each of those best models on the test data and select the best model\n\nWe get the following results:", "_____no_output_____" ] ], [ [ "# %load resources/python_vs_R.py\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import GridSearchCV, KFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\n\ndef main():\n\n names = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\", \"Name\"]\n iris_data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", names = names)\n train, test = train_test_split(iris_data, test_size=0.2)\n\n X_train = train.drop('Name', axis=1)\n y_train = train['Name']\n X_test = test.drop('Name', axis=1)\n y_test = test['Name']\n\n # logistic regression\n lr = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000)\n lr.fit(X_train, y_train)\n\n # linear discriminant analysis\n lda = LinearDiscriminantAnalysis()\n lda.fit(X_train,y_train)\n\n # KNN (k-nearest neighbours)\n parameters = {'n_neighbors': range(1,11)}\n knn = GridSearchCV(KNeighborsClassifier(), parameters, scoring = 'accuracy', cv = KFold(n_splits=5))\n knn.fit(X_train,y_train)\n\n # SVM\n parameters = {'C': range(1,11)}\n svc = GridSearchCV(svm.SVC(kernel = 'linear'), parameters, scoring = 'accuracy', cv = KFold(n_splits=5))\n svc.fit(X_train,y_train)\n\n # evaluate\n lr_test_acc = lr.score(X_test,y_test)\n lda_test_acc = lda.score(X_test,y_test)\n knn_test_acc = knn.best_estimator_.score(X_test,y_test)\n svc_test_acc= svc.best_estimator_.score(X_test,y_test)\n\n # print(lr_test_acc, lda_test_acc, knn_test_acc, svc_test_acc)\n", "_____no_output_____" ], [ "from datetime import datetime as dt", "_____no_output_____" ], [ "now = dt.now()\nfor i in range(5):\n main()\nprint(dt.now() - now)", "0:00:05.675866\n" ] ], [ [ "It seems that the `Python` code runs a little bit faster. However, when we make the model more complex, or use multiprocessing, the difference is even higher! If speed matters, using `Python` is the best alternative.", "_____no_output_____" ], [ "### 🔴 *Next Week:*", "_____no_output_____" ] ], [ [ "np.random.choice(['Machine learning 2','Something else'], p=[0.99,0.01])", "_____no_output_____" ] ] ]
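The information criteria mentioned in the notebook above can also be read off the fitted results directly. As a quick check (assuming the `res` and `res1` OLS results fitted in the cells above are still in scope), the `aic` and `bic` attributes reproduce the values printed in the summaries, and the lower values favor the model that includes `PSI`:

# Illustrative sketch: compare the two fitted OLS models by information criteria.
# `res` is the GPA + TUCE model, `res1` is the model that also includes PSI (both fitted above).
print(res.aic, res1.aic)   # Akaike information criterion; lower is preferred
print(res.bic, res1.bic)   # Bayesian information criterion; lower is preferred

The summaries above already report AIC 39.46 versus 33.96, so this simply confirms the conclusion of the F-, LM- and LR-tests that the model with `PSI` fits better.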
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a4269d738078166195b6807556f2a0372a6d9a5
169,567
ipynb
Jupyter Notebook
Intro to Deep Learning/2 Deep Neural Networks/exercise-deep-neural-networks.ipynb
mattborghi/kaggle-courses
b56b9e67210a409e5a0d633a7a0a8fbcf090c10f
[ "MIT" ]
null
null
null
Intro to Deep Learning/2 Deep Neural Networks/exercise-deep-neural-networks.ipynb
mattborghi/kaggle-courses
b56b9e67210a409e5a0d633a7a0a8fbcf090c10f
[ "MIT" ]
null
null
null
Intro to Deep Learning/2 Deep Neural Networks/exercise-deep-neural-networks.ipynb
mattborghi/kaggle-courses
b56b9e67210a409e5a0d633a7a0a8fbcf090c10f
[ "MIT" ]
null
null
null
169,567
169,567
0.94023
[ [ [ "**This notebook is an exercise in the [Intro to Deep Learning](https://www.kaggle.com/learn/intro-to-deep-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/deep-neural-networks).**\n\n---\n", "_____no_output_____" ], [ "# Introduction #\n\nIn the tutorial, we saw how to build deep neural networks by stacking layers inside a `Sequential` model. By adding an *activation function* after the hidden layers, we gave the network the ability to learn more complex (non-linear) relationships in the data.\n\nIn these exercises, you'll build a neural network with several hidden layers and then explore some activation functions beyond ReLU. Run this next cell to set everything up!", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\n# Setup plotting\nimport matplotlib.pyplot as plt\n\nplt.style.use('seaborn-whitegrid')\n# Set Matplotlib defaults\nplt.rc('figure', autolayout=True)\nplt.rc('axes', labelweight='bold', labelsize='large',\n titleweight='bold', titlesize=18, titlepad=10)\n\n# Setup feedback system\nfrom learntools.core import binder\nbinder.bind(globals())\nfrom learntools.deep_learning_intro.ex2 import *", "_____no_output_____" ] ], [ [ "In the *Concrete* dataset, your task is to predict the compressive strength of concrete manufactured according to various recipes.\n\nRun the next code cell without changes to load the dataset.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nconcrete = pd.read_csv('../input/dl-course-data/concrete.csv')\nconcrete.head()", "_____no_output_____" ] ], [ [ "# 1) Input Shape #\n\nThe target for this task is the column `'CompressiveStrength'`. The remaining columns are the features we'll use as inputs.\n\nWhat would be the input shape for this dataset?", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\ninput_shape = [concrete.shape[1] - 1]\n\n# Check your answer\nq_1.check()", "_____no_output_____" ], [ "# Lines below will give you a hint or solution code\n#q_1.hint()\n#q_1.solution()", "_____no_output_____" ] ], [ [ "# 2) Define a Model with Hidden Layers #\n\nNow create a model with three hidden layers, each having 512 units and the ReLU activation. Be sure to include an output layer of one unit and no activation, and also `input_shape` as an argument to the first layer.", "_____no_output_____" ] ], [ [ "from tensorflow import keras\nfrom tensorflow.keras import layers\n\n# YOUR CODE HERE\nmodel = keras.Sequential([\n layers.Dense(units=512, activation='relu', input_shape=input_shape),\n layers.Dense(units=512, activation='relu'),\n layers.Dense(units=512, activation='relu'),\n layers.Dense(units=1)\n])\n\n# Check your answer\nq_2.check()", "_____no_output_____" ], [ "model.weights[0]", "_____no_output_____" ], [ "for layer in model.weights:\n print(layer.shape)", "(8, 512)\n(512,)\n(512, 512)\n(512,)\n(512, 512)\n(512,)\n(512, 1)\n(1,)\n" ], [ "# Lines below will give you a hint or solution code\n#q_2.hint()\n#q_2.solution()", "_____no_output_____" ] ], [ [ "# 3) Activation Layers #\n\nLet's explore activations functions some.\n\nThe usual way of attaching an activation function to a `Dense` layer is to include it as part of the definition with the `activation` argument. Sometimes though you'll want to put some other layer between the `Dense` layer and its activation function. (We'll see an example of this in Lesson 5 with *batch normalization*.) 
In this case, we can define the activation in its own `Activation` layer, like so:\n\n```\nlayers.Dense(units=8),\nlayers.Activation('relu')\n```\n\nThis is completely equivalent to the ordinary way: `layers.Dense(units=8, activation='relu')`.\n\nRewrite the following model so that each activation is in its own `Activation` layer.", "_____no_output_____" ] ], [ [ "### YOUR CODE HERE: rewrite this to use activation layers\nmodel = keras.Sequential([\n layers.Dense(32, input_shape=[8]),\n layers.Activation('relu'),\n layers.Dense(32),\n layers.Activation('relu'),\n layers.Dense(1),\n])\n\n# Check your answer\nq_3.check()", "_____no_output_____" ], [ "# Lines below will give you a hint or solution code\n#q_3.hint()\n#q_3.solution()", "_____no_output_____" ] ], [ [ "# Optional: Alternatives to ReLU #\n\nThere is a whole family of variants of the `'relu'` activation -- `'elu'`, `'selu'`, and `'swish'`, among others -- all of which you can use in Keras. Sometimes one activation will perform better than another on a given task, so you could consider experimenting with activations as you develop a model. The ReLU activation tends to do well on most problems, so it's a good one to start with.\n\nLet's look at the graphs of some of these. Change the activation from `'relu'` to one of the others named above. Then run the cell to see the graph. (Check out the [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/activations) for more ideas.)", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE: Change 'relu' to 'elu', 'selu', 'swish'... or something else\nactivation_layers = ['relu', 'elu', 'selu', 'swish', 'sigmoid', 'tanh']\n\nfor activation_layer in activation_layers:\n x = tf.linspace(-3.0, 3.0, 100)\n y = layers.Activation(activation_layer)(x)\n \n plt.figure(dpi=100)\n plt.title(activation_layer)\n plt.plot(x, y)\n plt.xlim(-3, 3)\n plt.xlabel(\"Input\")\n plt.ylabel(\"Output\")\n plt.show()", "_____no_output_____" ] ], [ [ "# Keep Going #\n\nNow move on to Lesson 3 and [**learn how to train neural networks**](https://www.kaggle.com/ryanholbrook/stochastic-gradient-descent) with stochastic gradient descent.", "_____no_output_____" ], [ "---\n\n\n\n\n*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/191966) to chat with other Learners.*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a426eb3bf31910b4eeb24109af5fcb53f5a6f40
18,875
ipynb
Jupyter Notebook
xgboost.ipynb
rakshitraj/kaggle
83bdfb2caa8826af7eea751d1b2f335d781fc6c3
[ "MIT" ]
null
null
null
xgboost.ipynb
rakshitraj/kaggle
83bdfb2caa8826af7eea751d1b2f335d781fc6c3
[ "MIT" ]
null
null
null
xgboost.ipynb
rakshitraj/kaggle
83bdfb2caa8826af7eea751d1b2f335d781fc6c3
[ "MIT" ]
null
null
null
37.524851
404
0.589881
[ [ [ "import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Read the data\ndata = pd.read_csv('~/kaggle/input/melbourne-housing-snapshot/melb_data.csv')\n\n# Select subset of predictors\ncols_to_use = ['Rooms', 'Distance', 'Landsize', 'BuildingArea', 'YearBuilt']\nX = data[cols_to_use]\n\n# Select target\ny = data.Price\n\n# Separate data into training and validation sets\nX_train, X_valid, y_train, y_valid = train_test_split(X, y)", "_____no_output_____" ], [ "# import XGBoost - extreme gradient boosting\nfrom xgboost import XGBRegressor\n\nmy_model = XGBRegressor()\nmy_model.fit(X_train, y_train)", "/home/raxit/anaconda3/lib/python3.7/site-packages/xgboost/core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\n" ], [ "from sklearn.metrics import mean_absolute_error\n\npredictions = my_model.predict(X_valid)\nmae = str(mean_absolute_error(y_valid, predictions))\nprint(\"Mean Absolute Error: \"+ mae)", "Mean Absolute Error: 269567.0099318851\n" ] ], [ [ "## Parameter Tuning\n\nThe various parameter that can be tuned to dramatically affect accuracy and training speed are\n\n- `n_estimators`\n \n- `early_stopping_rounds`\n\n- `learning_rate`\n\n- `n_jobs`", "_____no_output_____" ], [ "### n_estimators\n\nspecifies how mant times to go through the modelling cycle, thus equal to the number of models included in the ensemble", "_____no_output_____" ] ], [ [ "# n_estimators\nmy_model1 = XGBRegressor(n_estimators=500)\nmy_model1.fit(X_train, y_train)", "/home/raxit/anaconda3/lib/python3.7/site-packages/xgboost/core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\n" ], [ "predictions = my_model1.predict(X_valid)\nmae = str(mean_absolute_error(y_valid, predictions))\nprint(\"Mean Absolute Error: \"+ mae)", "Mean Absolute Error: 247134.31409011415\n" ] ], [ [ "### early_stopping_rounds\n\n`early_stopping_rounds` offers a way to automatically find the ideal value for n_estimators. Early stopping causes the model to stop iterating when the validation score stops improving, even if we aren't at the hard stop for `n_estimators`. 
\n\n\nSince random chance sometimes causes a single round where validation scores don't improve, you need to specify a number for how many rounds of straight deterioration to allow before stopping.\n\n\nWhen using `early_stopping_rounds`, you also need to set aside some data for calculating the validation scores - this is done by setting the `eval_set` parameter.", "_____no_output_____" ] ], [ [ "# early_stopping_rounds\nmy_model2 = XGBRegressor(n_estimators=500)\nmy_model2.fit(X_train, y_train,\n early_stopping_rounds=5,\n eval_set=[(X_valid, y_valid)],\n verbose=False)", "/home/raxit/anaconda3/lib/python3.7/site-packages/xgboost/core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\n" ], [ "predictions = my_model2.predict(X_valid)\nmae = str(mean_absolute_error(y_valid, predictions))\nprint(\"Mean Absolute Error: \"+ mae)", "Mean Absolute Error: 251656.57691458028\n" ] ], [ [ "### learning_rate\n\nStep size of gradient descent, multiplied to predictions of each ensembled model before they are added(ensembled).\n\nAllows us to set a higher value of `n_estimators` without overfitting.", "_____no_output_____" ] ], [ [ "# learning_rate\nmy_model3 = XGBRegressor(n_estimators=1000,\n learning_rate=0.2)\nmy_model3.fit(X_train, y_train,\n early_stopping_rounds=5,\n eval_set=[(X_valid, y_valid)],\n verbose=False)", "/home/raxit/anaconda3/lib/python3.7/site-packages/xgboost/core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\n" ], [ "predictions = my_model3.predict(X_valid)\nmae = str(mean_absolute_error(y_valid, predictions))\nprint(\"Mean Absolute Error: \"+ mae)", "Mean Absolute Error: 249474.64732488035\n" ] ], [ [ "### n_jobs\n\nImplements parallelism to reduce runtime while fitting and training model. 
Usually set to the number of cores and isn't particularly helpful on smaller models.", "_____no_output_____" ] ], [ [ "my_model4 = XGBRegressor(n_estimators=1000,\n learning_rate=0.2, n_jobs=6)\nmy_model4.fit(X_train, y_train, \n early_stopping_rounds=5, \n eval_set=[(X_valid, y_valid)],\n verbose=False)", "[22:27:44] WARNING: /home/conda/feedstock_root/build_artifacts/xgboost_1566327313563/work/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n" ], [ "predictions = my_model4.predict(X_valid)\nmae = str(mean_absolute_error(y_valid, predictions))\nprint(\"Mean Absolute Error: \"+ mae)", "Mean Absolute Error: 249474.64732488035\n" ], [ "def get_score(n_estimators, model):\n\n predictions = model.predict(X_valid)\n mae = str(mean_absolute_error(y_valid, predictions))\n return mae", "_____no_output_____" ], [ "models = [ my_model1, my_model2, my_model3, my_model4 ]\nmodel_results = {}\nfor model in models:\n results = {}\n for i in range(1,11):\n results[100*i] = get_score(100*i, model)\n print(results)\n model_results[model] = results\nprint('\\n') \nprint(model_results)", "{100: '247134.31409011415', 200: '247134.31409011415', 300: '247134.31409011415', 400: '247134.31409011415', 500: '247134.31409011415', 600: '247134.31409011415', 700: '247134.31409011415', 800: '247134.31409011415', 900: '247134.31409011415', 1000: '247134.31409011415'}\n{100: '251656.57691458028', 200: '251656.57691458028', 300: '251656.57691458028', 400: '251656.57691458028', 500: '251656.57691458028', 600: '251656.57691458028', 700: '251656.57691458028', 800: '251656.57691458028', 900: '251656.57691458028', 1000: '251656.57691458028'}\n{100: '249474.64732488035', 200: '249474.64732488035', 300: '249474.64732488035', 400: '249474.64732488035', 500: '249474.64732488035', 600: '249474.64732488035', 700: '249474.64732488035', 800: '249474.64732488035', 900: '249474.64732488035', 1000: '249474.64732488035'}\n{100: '249474.64732488035', 200: '249474.64732488035', 300: '249474.64732488035', 400: '249474.64732488035', 500: '249474.64732488035', 600: '249474.64732488035', 700: '249474.64732488035', 800: '249474.64732488035', 900: '249474.64732488035', 1000: '249474.64732488035'}\n\n\n{XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0,\n importance_type='gain', learning_rate=0.1, max_delta_step=0,\n max_depth=3, min_child_weight=1, missing=None, n_estimators=500,\n n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,\n silent=None, subsample=1, verbosity=1): {100: '247134.31409011415', 200: '247134.31409011415', 300: '247134.31409011415', 400: '247134.31409011415', 500: '247134.31409011415', 600: '247134.31409011415', 700: '247134.31409011415', 800: '247134.31409011415', 900: '247134.31409011415', 1000: '247134.31409011415'}, XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0,\n importance_type='gain', learning_rate=0.1, max_delta_step=0,\n max_depth=3, min_child_weight=1, missing=None, n_estimators=500,\n n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,\n silent=None, subsample=1, verbosity=1): {100: '251656.57691458028', 200: '251656.57691458028', 300: '251656.57691458028', 400: '251656.57691458028', 500: '251656.57691458028', 600: '251656.57691458028', 700: '251656.57691458028', 800: 
'251656.57691458028', 900: '251656.57691458028', 1000: '251656.57691458028'}, XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0,\n importance_type='gain', learning_rate=0.2, max_delta_step=0,\n max_depth=3, min_child_weight=1, missing=None, n_estimators=1000,\n n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,\n silent=None, subsample=1, verbosity=1): {100: '249474.64732488035', 200: '249474.64732488035', 300: '249474.64732488035', 400: '249474.64732488035', 500: '249474.64732488035', 600: '249474.64732488035', 700: '249474.64732488035', 800: '249474.64732488035', 900: '249474.64732488035', 1000: '249474.64732488035'}, XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0,\n importance_type='gain', learning_rate=0.2, max_delta_step=0,\n max_depth=3, min_child_weight=1, missing=None, n_estimators=1000,\n n_jobs=6, nthread=None, objective='reg:linear', random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,\n silent=None, subsample=1, verbosity=1): {100: '249474.64732488035', 200: '249474.64732488035', 300: '249474.64732488035', 400: '249474.64732488035', 500: '249474.64732488035', 600: '249474.64732488035', 700: '249474.64732488035', 800: '249474.64732488035', 900: '249474.64732488035', 1000: '249474.64732488035'}}\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a4283f9fa7e57905e630f2eb119ebee7c74ba8e
388,385
ipynb
Jupyter Notebook
experiments/analysis.ipynb
Nagasaki45/floor-control
208e0c02167dbd67425ad1d43d120050cf04a6ee
[ "MIT" ]
1
2019-10-23T09:23:40.000Z
2019-10-23T09:23:40.000Z
experiments/analysis.ipynb
Nagasaki45/floor-control
208e0c02167dbd67425ad1d43d120050cf04a6ee
[ "MIT" ]
2
2019-10-22T14:42:13.000Z
2019-10-25T11:02:16.000Z
experiments/analysis.ipynb
Nagasaki45/floor-control
208e0c02167dbd67425ad1d43d120050cf04a6ee
[ "MIT" ]
null
null
null
294.677542
73,208
0.918877
[ [ [ "# Analysis for the floor control detection (FCD) model and competitor models\n\nThis notebook analyses the predictions of the FCD model and the competitor models discussed in the paper and show how they are compared over a few performance measurements. It also includes some stats about the dataset and the annotated floor properties, and an optimised FCD model for highest accuracy.", "_____no_output_____" ] ], [ [ "import itertools\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pyjags\nfrom scipy import optimize as soptimize\n\nimport predict_fcd\nimport utils.annotated_floor\nimport utils.iteration\nimport utils.mcmc_plot\nimport utils.path", "_____no_output_____" ], [ "%load_ext autoreload\n\n%autoreload 2\n\nplt.style.use('ggplot')\nplt.rcParams.update({'axes.titlesize': 'large'})\nnp.random.seed(1234)", "_____no_output_____" ], [ "FEATURES_DIR = pathlib.Path('features')\nPREDICTIONS_DIR = pathlib.Path('predictions')\n\nANALYSIS_SAMPLE_RATE = 10\nSAMPLE_RATE = {\n 'fcd': 50,\n 'optimised_fcd': 50,\n 'lstm': 20,\n 'partial_lstm': 20,\n 'vad': 50,\n 'random': ANALYSIS_SAMPLE_RATE,\n}\nMODELS = list(SAMPLE_RATE.keys())\n\nDEFAULT_FCD_PARAMS = (0.35, 0.1)\nOPTIMISED_FCD_PARAMS = (1.78924915, 1.06722576) # Overriden by lengthy optimisation below\n\nCHAINS = 4\nITERATIONS = 10_000", "_____no_output_____" ] ], [ [ "# Utilities\n\nUtility functions and generator functions that are used throughout the code and use the constants declared above. More utilities are imported from the `util` package. These are considered more generic.", "_____no_output_____" ], [ "### General utilities", "_____no_output_____" ] ], [ [ "def array_to_series(x, name, sample_rate):\n '''\n Convert a numpy array to a pandas series\n with time index.\n '''\n x = x[::sample_rate // ANALYSIS_SAMPLE_RATE]\n return pd.Series(\n x,\n index=np.arange(len(x)) / ANALYSIS_SAMPLE_RATE,\n name=name,\n )", "_____no_output_____" ], [ "def utterances_to_floor(utterances_df):\n '''\n Calculate the floor timeseries from a dataframe\n of utterances (every row has start_time, end_time,\n and participant).\n '''\n return array_to_series(\n list(\n utils.annotated_floor.gen(\n utterances_df,\n sample_rate=ANALYSIS_SAMPLE_RATE,\n )\n ),\n name='floor',\n sample_rate=ANALYSIS_SAMPLE_RATE,\n )", "_____no_output_____" ] ], [ [ "### Random model utilities", "_____no_output_____" ] ], [ [ "def _generate_random_model_intervals(average_floor_duration):\n floor_holder = np.random.randint(2)\n previous_timestamp = 0\n while True:\n samples = np.random.exponential(average_floor_duration, 100)\n timestamps = samples.cumsum() + previous_timestamp\n for timestamp in timestamps:\n yield {\n 'start_time': previous_timestamp,\n 'end_time': timestamp,\n 'participant': floor_holder,\n }\n floor_holder = (floor_holder * -1) + 1\n previous_timestamp = timestamp\n\n\ndef calculate_random_model(average_floor_duration, part_duration):\n '''\n Calculate a random floor array with turns duration distributin\n exponentially with `average_floor_duration` as mean.\n '''\n gen = _generate_random_model_intervals(average_floor_duration)\n gen = itertools.takewhile(lambda i: i['start_time'] < part_duration, gen)\n return list(\n utils.iteration.intervals_to_values_gen(\n gen,\n sample_rate=ANALYSIS_SAMPLE_RATE,\n key='participant',\n )\n )", "_____no_output_____" ] ], [ [ "### Dataset stats utilities", "_____no_output_____" ] ], [ [ "def dataset_stats_gen():\n '''\n Calculate basic stats about the annotated floor.\n 
'''\n for part in utils.path.session_parts_gen(train_set=True, test_set=True):\n utterances_df = pd.read_csv(FEATURES_DIR / 'utterances' / f'{part}.csv')\n floor_intervals = list(utils.annotated_floor.utterances_to_floor_intervals_gen(utterances_df))\n floor = utterances_to_floor(utterances_df)\n yield {\n 'competition_for_floor': np.isnan(floor).mean(),\n 'average_floor_duration': np.mean([i['end_time'] - i['start_time'] for i in floor_intervals]),\n 'average_part_duration': utterances_df['end_time'].max(),\n }", "_____no_output_____" ] ], [ [ "### Performance measurment generator functions", "_____no_output_____" ] ], [ [ "def accuracy(model, floor):\n '''\n Every 10 seconds, if defined floor (no competition nor silence)\n yields 1 if the model and the floor agrees, 0 otherwise. 10 seconds\n jumps are used to make sure the samples are independent.\n '''\n jump = 10 * ANALYSIS_SAMPLE_RATE\n both = pd.concat([model, floor], axis=1)[::jump].dropna()\n yield from (both.iloc[:, 0] == both.iloc[:, 1]).astype(int)", "_____no_output_____" ], [ "def backchannels(model, utterances_df):\n '''\n For each backchannel yield 1 if the model report a floor\n for the partner, 0 otherwise.\n '''\n backchannels = utterances_df[utterances_df['backchannel']]\n for _, bc in backchannels.iterrows():\n bc_timestamp = bc['start_time']\n prediction_at_bc = model[bc_timestamp:].values[0]\n if prediction_at_bc:\n yield int(prediction_at_bc != bc['participant'])", "_____no_output_____" ], [ "def _floor_holder_changes(array):\n array = array[~np.isnan(array)]\n items = utils.iteration.dedup(array)\n return len(list(items)) - 1 # number of changes is number of values minus 1\n\n\ndef stability(model, floor):\n '''\n Ratio of actual floor changes vs. predicted floor changes.\n '''\n annotated_floor_changes = _floor_holder_changes(floor)\n model_floor_changes = _floor_holder_changes(model)\n yield annotated_floor_changes / model_floor_changes", "_____no_output_____" ], [ "def lag(model, floor):\n '''\n Yield positive lags in seconds.\n '''\n model_change = pd.Series(dict(utils.iteration.dedup(model.dropna().iteritems(), key=lambda x: x[1])))\n floor_change = pd.Series(dict(utils.iteration.dedup(floor.dropna().iteritems(), key=lambda x: x[1])))\n\n visited_timestamps = set()\n for timestamp, prediction in model_change.iteritems():\n previous_floors = floor_change[:timestamp]\n if not previous_floors.empty:\n current_floor_timestamp = previous_floors.index[-1]\n current_floor_value = previous_floors.values[-1]\n if (current_floor_value == prediction and current_floor_timestamp not in visited_timestamps):\n yield (timestamp - current_floor_timestamp)\n visited_timestamps.add(current_floor_timestamp)", "_____no_output_____" ] ], [ [ "### Models' performance (stats) collection utilities", "_____no_output_____" ] ], [ [ "def _part_models_stats_gen(part, average_floor_duration):\n utterances_df = pd.read_csv(FEATURES_DIR / 'utterances' / f'{part}.csv')\n floor = utterances_to_floor(utterances_df)\n rms = np.load(FEATURES_DIR / 'FCD' / f'{part}.npy')\n\n models = {\n 'fcd': np.load(PREDICTIONS_DIR / 'FCD' / f'{part}.npy'),\n 'optimised_fcd': list(predict_fcd.gen_from_rms(rms, *OPTIMISED_FCD_PARAMS)),\n 'lstm': np.load(PREDICTIONS_DIR / 'LSTM' / f'full-{part}.npy'),\n 'partial_lstm': np.load(PREDICTIONS_DIR / 'LSTM' / f'partial-{part}.npy'),\n 'vad': np.load(PREDICTIONS_DIR / 'VAD' / f'{part}.npy'),\n 'random': calculate_random_model(\n average_floor_duration,\n part_duration=floor.index[-1],\n ),\n }\n\n models_df = pd.concat(\n 
[array_to_series(x, name=n, sample_rate=SAMPLE_RATE[n]) for n, x in models.items()],\n axis=1,\n )\n\n measurement_functions_and_args = {\n backchannels: utterances_df,\n **{f: floor for f in [accuracy, stability, lag]},\n }\n\n for model in models:\n for f, arg in measurement_functions_and_args.items():\n for value in f(models_df[model], arg):\n yield {\n 'part': part,\n 'model': model,\n 'measurement': f.__name__,\n 'value': value,\n }\n\n\ndef models_stats_gen(average_floor_duration):\n '''\n Calculate the performance measure for each model accross the\n test-set.\n '''\n for part in utils.path.session_parts_gen(train_set=False, test_set=True):\n yield from _part_models_stats_gen(part, average_floor_duration)", "_____no_output_____" ] ], [ [ "### Bayesian analysis utilities", "_____no_output_____" ] ], [ [ "def gamma_template(mode, sd):\n '''\n Return a string template with shape and rate from mode and sd.\n '''\n rate = f'({mode} + sqrt({mode} ^ 2 + 4 * {sd} ^ 2)) / (2 * {sd} ^ 2)'\n shape = f'1 + {mode} * {rate}'\n return f'{shape}, {rate}'\n\n\ndef beta_template(mode, k):\n '''\n Return a string template with a and b from mode and concentration.\n '''\n a = f'{mode} * ({k} - 2) + 1'\n b = f'(1 - {mode}) * ({k} - 2) + 1'\n return f'{a}, {b}'\n\n\ndef run_model(code, data):\n '''\n Create and sample a JAGS model.\n '''\n model = pyjags.Model(code=code, data=data, chains=CHAINS)\n return model.sample(ITERATIONS, vars=['mode'])\n\n\ndef mode_comparison(trace, models, diag_xlim, comp_xlim):\n utils.mcmc_plot.param_comparison(\n trace,\n 'mode',\n comparison=[MODELS.index(m) for m in models],\n names=models,\n diag_xlim=diag_xlim,\n comp_xlim=comp_xlim,\n )\n \n\ndef compare_two(models, traces, xlim):\n _, axes = plt.subplots(ncols=len(traces), figsize=(8, 2))\n for ax, (measurement, trace) in zip(axes, traces.items()):\n m1, m2 = [MODELS.index(m) for m in models]\n ax.set(title=measurement)\n ax.axvline(0, linestyle='--', c='grey')\n utils.mcmc_plot.dist(\n trace['mode'][m1].reshape(-1) - trace['mode'][m2].reshape(-1),\n histplot_kwargs={'binrange': xlim},\n ax=ax,\n )\n\n\ndef _hdi_as_dict(model, samples):\n return {\n 'model': model,\n 'hdi_start': np.percentile(samples, 2.5),\n 'hdi_end': np.percentile(samples, 97.5),\n }\n\n\ndef hdi_summary(models, trace):\n for m in models:\n samples = trace['mode'][MODELS.index(m)].reshape(-1)\n yield _hdi_as_dict(m, samples)\n for m1, m2 in itertools.combinations(models, 2):\n samples_m1 = trace['mode'][MODELS.index(m1)].reshape(-1)\n samples_m2 = trace['mode'][MODELS.index(m2)].reshape(-1)\n diff = samples_m1 - samples_m2\n yield _hdi_as_dict(f'{m1} - {m2}', diff)", "_____no_output_____" ] ], [ [ "# Analysis starts here!", "_____no_output_____" ], [ "## Dataset stats", "_____no_output_____" ] ], [ [ "dataset_stats_df = pd.DataFrame(dataset_stats_gen())\ndataset_stats_df.describe()", "_____no_output_____" ], [ "# Keep the average floor duration for later, for the random model\naverage_floor_duration = dataset_stats_df['average_floor_duration'].mean()", "_____no_output_____" ] ], [ [ "## Optimising FCD parameters for accuracy\n\nThis is done on the train set.", "_____no_output_____" ] ], [ [ "optimisation_data = []\nfor part in utils.path.session_parts_gen(train_set=True, test_set=False):\n utterances_df = pd.read_csv(FEATURES_DIR / 'utterances' / f'{part}.csv')\n floor = utterances_to_floor(utterances_df)\n rms = np.load(FEATURES_DIR / 'FCD' / f'{part}.npy')\n optimisation_data.append((rms, floor))", "_____no_output_____" ], [ "def 
get_negative_accuracy_from_model(params):\n\n accuracies = []\n for rms, floor in optimisation_data:\n fcd_gen = predict_fcd.gen_from_rms(rms, *params)\n fcd = array_to_series(list(fcd_gen), name='fcd', sample_rate=SAMPLE_RATE['fcd'])\n accuracies.append(np.mean(list(accuracy(fcd, floor))))\n \n return -np.mean(accuracies)", "_____no_output_____" ] ], [ [ "**Note!** This cell takes a while to run. It is commented out as the entire notebook can be executed without it. The default optimised parameters (declared at the top of the notebook) are used in that case.", "_____no_output_____" ] ], [ [ "# %%time\n\n# res = soptimize.basinhopping(\n# get_negative_accuracy_from_model,\n# DEFAULT_FCD_PARAMS,\n# seed=1234,\n# )\n# OPTIMISED_FCD_PARAMS = res.x\n# res", "_____no_output_____" ] ], [ [ "**Example of the output of the cell above for reference**\n```\nCPU times: user 1h 7min 23s, sys: 24.2 s, total: 1h 7min 47s\nWall time: 1h 7min 40s\n\n fun: -0.890908193538182\n lowest_optimization_result: fun: -0.890908193538182\n hess_inv: array([[1, 0],\n [0, 1]])\n jac: array([0., 0.])\n message: 'Optimization terminated successfully.'\n nfev: 3\n nit: 0\n njev: 1\n status: 0\n success: True\n x: array([1.78924915, 1.06722576])\n message: ['requested number of basinhopping iterations completed successfully']\n minimization_failures: 0\n nfev: 303\n nit: 100\n njev: 101\n x: array([1.78924915, 1.06722576])\n```", "_____no_output_____" ], [ "## The average of the models' performance on each measurement", "_____no_output_____" ] ], [ [ "models_stats_df = pd.DataFrame(models_stats_gen(average_floor_duration))\nmodels_stats_df['model'] = pd.Categorical(\n models_stats_df['model'],\n categories=MODELS,\n ordered=True,\n)\nfor c in ['part', 'measurement']:\n models_stats_df[c] = models_stats_df[c].astype('category')", "_____no_output_____" ], [ "(\n models_stats_df\n # Average within parts\n .groupby(['model', 'measurement', 'part'])\n .mean()\n # Average accross parts\n .reset_index()\n .pivot_table(index='model', columns='measurement', values='value')\n)", "_____no_output_____" ] ], [ [ "## Bayesian analysis of differences between the models\n\nHere we estimate the mode of the accuracy, backchannels classification, stability, and lag, for each model. 
The Bayesian method provides a direct way to estimate the differences between the modes.", "_____no_output_____" ] ], [ [ "group_by_measurement = models_stats_df.groupby('measurement')", "_____no_output_____" ] ], [ [ "### Accuracy", "_____no_output_____" ] ], [ [ "hierarchical_beta_code = f\"\"\"\nmodel {{\n for (m in 1:n_models) {{\n for (p in 1:n_parts) {{\n correct[m, p] ~ dbin(part_mode[m, p], attempts[m, p])\n part_mode[m, p] ~ dbeta({beta_template('mode[m]', 'concentration[m]')})\n }}\n \n mode[m] ~ dunif(0, 1)\n concentration[m] = concentration_minus_two[m] + 2\n concentration_minus_two[m] ~ dgamma({gamma_template(20, 20)})\n }}\n}}\n\"\"\"", "_____no_output_____" ], [ "_df = group_by_measurement.get_group('accuracy')\naccuracy_data = {\n 'n_parts': len(_df['part'].unique()),\n 'n_models': len(_df['model'].unique()),\n 'correct': _df.pivot_table(index='model', columns='part', values='value', aggfunc='sum'),\n 'attempts': _df.pivot_table(index='model', columns='part', values='value', aggfunc='count'),\n}", "_____no_output_____" ], [ "accuracy_trace = run_model(code=hierarchical_beta_code, data=accuracy_data)", "adapting: iterations 4000 of 4000, elapsed 0:00:01, remaining 0:00:00\nsampling: iterations 40000 of 40000, elapsed 0:00:06, remaining 0:00:00\nsampling: iterations 40000 of 40000, elapsed 0:00:06, remaining 0:00:00\n" ], [ "mode_comparison(accuracy_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 1), comp_xlim=(-0.6, 0.6))", "_____no_output_____" ] ], [ [ "### Backchannels categorisation", "_____no_output_____" ] ], [ [ "_df = group_by_measurement.get_group('backchannels')\nbc_data = {\n 'n_parts': len(_df['part'].unique()),\n 'n_models': len(_df['model'].unique()),\n 'correct': _df.pivot_table(index='model', columns='part', values='value', aggfunc='sum'),\n 'attempts': _df.pivot_table(index='model', columns='part', values='value', aggfunc='count'),\n}", "_____no_output_____" ], [ "bc_trace = run_model(code=hierarchical_beta_code, data=bc_data)", "adapting: iterations 4000 of 4000, elapsed 0:00:01, remaining 0:00:00\nsampling: iterations 40000 of 40000, elapsed 0:00:07, remaining 0:00:00\nsampling: iterations 40000 of 40000, elapsed 0:00:07, remaining 0:00:00\n" ], [ "mode_comparison(bc_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 1), comp_xlim=(-0.6, 0.6))", "_____no_output_____" ] ], [ [ "### Stability", "_____no_output_____" ] ], [ [ "stability_code = f\"\"\"\nmodel {{\n for (m in 1:n_models) {{\n for (p in 1:n_parts) {{\n stability[m, p] ~ dgamma({gamma_template('mode[m]', 'sd[m]')})\n }}\n \n mode[m] ~ dgamma({gamma_template(1, 1)})\n sd[m] ~ dgamma({gamma_template(1, 1)})\n }}\n}}\n\"\"\"", "_____no_output_____" ], [ "_df = group_by_measurement.get_group('stability')\nstability_data = {\n 'n_parts': len(_df['part'].unique()),\n 'n_models': len(_df['model'].unique()),\n 'stability': _df.pivot(index='model', columns='part', values='value'),\n}", "_____no_output_____" ], [ "stability_trace = run_model(code=stability_code, data=stability_data)", "adapting: iterations 4000 of 4000, elapsed 0:00:00, remaining 0:00:00\nsampling: iterations 40000 of 40000, elapsed 0:00:03, remaining 0:00:00\n" ], [ "mode_comparison(stability_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 1.25), comp_xlim=(-1.2, 1.2))", "_____no_output_____" ] ], [ [ "### Lag", "_____no_output_____" ] ], [ [ "lag_code = f\"\"\"\nmodel {{\n for (i in 1:n_lags) {{\n lag[i] ~ dexp(1 / part_mean[models[i], part[i]])\n }}\n for (i in 1:n_models) {{\n for (j in 1:n_parts) {{\n part_mean[i, j] ~ 
dgamma({gamma_template('mode[i]', 'sd[i]')})\n }}\n \n mode[i] ~ dgamma({gamma_template(0.5, 1)})\n sd[i] ~ dgamma({gamma_template(1, 1)})\n }}\n}}\n\"\"\"", "_____no_output_____" ], [ "_df = group_by_measurement.get_group('lag')\nlag_data = {\n 'n_parts': len(_df['part'].unique()),\n 'n_models': len(_df['model'].unique()),\n 'n_lags': len(_df),\n 'lag': _df['value'],\n 'models': _df['model'].cat.codes + 1,\n 'part': _df['part'].cat.codes + 1,\n}", "_____no_output_____" ], [ "lag_trace = run_model(code=lag_code, data=lag_data)", "adapting: iterations 4000 of 4000, elapsed 0:00:03, remaining 0:00:00\nsampling: iterations 12672 of 40000, elapsed 0:00:09, remaining 0:00:20\nsampling: iterations 19512 of 40000, elapsed 0:00:14, remaining 0:00:15\nsampling: iterations 26348 of 40000, elapsed 0:00:20, remaining 0:00:10\nsampling: iterations 32952 of 40000, elapsed 0:00:26, remaining 0:00:06\nsampling: iterations 39268 of 40000, elapsed 0:00:32, remaining 0:00:01\nsampling: iterations 40000 of 40000, elapsed 0:00:32, remaining 0:00:00\n" ], [ "mode_comparison(lag_trace, ['fcd', 'lstm', 'random'], diag_xlim=(0, 2.1), comp_xlim=(-2.2, 2.2))", "_____no_output_____" ] ], [ [ "### FCD with default params vs. optimised FCD", "_____no_output_____" ] ], [ [ "traces = {\n 'accuracy': accuracy_trace,\n 'backchannels': bc_trace,\n 'stability': stability_trace,\n 'lag': lag_trace,\n}", "_____no_output_____" ], [ "compare_two(['fcd', 'optimised_fcd'], traces, xlim=(-0.75, 0.75))", "_____no_output_____" ] ], [ [ "### LSTM vs. partial-LSTM", "_____no_output_____" ] ], [ [ "compare_two(['lstm', 'partial_lstm'], traces, xlim=(-0.75, 0.75))", "_____no_output_____" ] ], [ [ "### Optimised FCD vs. LSTM\n\nThis is marely to see if the lag of the optimised FCD is better.", "_____no_output_____" ] ], [ [ "compare_two(['optimised_fcd', 'lstm'], traces, xlim=(-0.75, 0.75))", "_____no_output_____" ] ], [ [ "### HDIs summary", "_____no_output_____" ] ], [ [ "models = ['fcd', 'lstm', 'random']\n\ncomp_values = [0.5, 0.5, 1, average_floor_duration / 2]\n\nfig, axes = plt.subplots(nrows=len(traces), figsize=(8, 8), sharex=True)\n\nfor ax, (measurement, trace), comp_value in zip(axes, traces.items(), comp_values):\n yticks = {}\n ax.axvline(0, linestyle='--', c='grey')\n if comp_value:\n ax.axvline(comp_value, linestyle='dotted', c='grey')\n for i, row in enumerate(hdi_summary(models, trace)):\n ax.plot((row['hdi_start'], row['hdi_end']), (-i, -i), linewidth=4, c='k')\n for tail, alignment in zip(['hdi_start', 'hdi_end'], ['right', 'left']):\n s = format(row[tail], '.2f').replace('-0', '-').lstrip('0')\n ax.text(row[tail], -i + 0.1, s, horizontalalignment=alignment)\n yticks[-i] = row['model']\n ax.set(title=measurement)\n ax.set_yticks(list(yticks.keys()))\n ax.set_yticklabels(list(yticks.values()))\nfig.tight_layout()\nfig.savefig('graphics/hdis.svg')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a4284deabc289f6c339bc02e1be59f000bee8ba
4,515
ipynb
Jupyter Notebook
tutorials/object_detection_evaluation.ipynb
argoai/argoverse2-api
c37b85af96ca0188530032d573255bb195a722fd
[ "MIT" ]
26
2022-03-17T13:09:18.000Z
2022-03-21T15:06:22.000Z
tutorials/object_detection_evaluation.ipynb
argoai/av2-api
c37b85af96ca0188530032d573255bb195a722fd
[ "MIT" ]
9
2022-03-21T16:03:54.000Z
2022-03-29T02:28:56.000Z
tutorials/object_detection_evaluation.ipynb
argoai/av2-api
c37b85af96ca0188530032d573255bb195a722fd
[ "MIT" ]
6
2022-03-22T08:16:29.000Z
2022-03-30T21:11:33.000Z
31.354167
181
0.619712
[ [ [ "# 3D Object Detection Evaluation Tutorial\n\nWelcome to the 3D object detection evaluation tutorial! We'll walk through the steps to submit your detections to the competition server.", "_____no_output_____" ] ], [ [ "from av2.evaluation.detection.eval import evaluate\nfrom av2.evaluation.detection.utils import DetectionCfg\nfrom pathlib import Path\nfrom av2.utils.io import read_feather, read_all_annotations", "_____no_output_____" ] ], [ [ "### Constructing the evaluation configuration\n\nThe `DetectionCfg` class stores the configuration for the 3D object detection challenge.\n\n- During evaluation, we remove _all_ cuboids which are not within the region-of-interest (ROI) which spatially is a 5 meter dilation of the drivable area isocontour. \n\n- **NOTE**: If you would like to _locally_ enable this behavior, you **must** pass in the directory to sensor dataset (to build the raster maps from the included vector maps).", "_____no_output_____" ] ], [ [ "dataset_dir = Path.home() / \"data\" / \"datasets\" / \"av2\" / \"sensor\" # Path to your AV2 sensor dataset directory.\ncompetition_cfg = DetectionCfg(dataset_dir=dataset_dir) # Defaults to competition parameters.", "_____no_output_____" ], [ "split = \"val\"\ngts = read_all_annotations(dataset_dir=dataset_dir, split=split) # Contains all annotations in a particular split.\ndisplay(gts)", "_____no_output_____" ] ], [ [ "## Preparing detections for submission.\n\nThe evaluation expects the following 14 fields within a `pandas.DataFrame`:\n\n- `tx_m`: x-component of the object translation in the egovehicle reference frame.\n- `ty_m`: y-component of the object translation in the egovehicle reference frame.\n- `tz_m`: z-component of the object translation in the egovehicle reference frame.\n- `length_m`: Object extent along the x-axis in meters.\n- `width_m`: Object extent along the y-axis in meters.\n- `height_m`: Object extent along the z-axis in meters.\n- `qw`: Real quaternion coefficient.\n- `qx`: First quaternion coefficient.\n- `qy`: Second quaternion coefficient.\n- `qz`: Third quaternion coefficient.\n- `score`: Object confidence.\n- `log_id`: Log id associated with the detection.\n- `timestamp_ns`: Timestamp associated with the detection.\n- `category`: Object category.\n\nAdditional details can be found in [SUBMISSION_FORMAT.md](../src/av2/evaluation/detection/SUBMISSION_FORMAT.md).", "_____no_output_____" ] ], [ [ "# If you've already aggregated your detections into one file.\ndts_path = Path(\"detections.feather\")\ndts = read_feather(dts_path)\n\ndts, gts, metrics = evaluate(dts, gts, cfg=competition_cfg) # Evaluate instances.", "_____no_output_____" ], [ "display(metrics)", "_____no_output_____" ] ], [ [ "Finally, if you would like to submit to the evaluation server, you just need to export your detections into a `.feather` file. This can be done by:\n\n```python\ndts.to_feather(\"detections.feather\")\n```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a42a1a8b6d7f031dd65946d62548e00d6b0bf87
593,920
ipynb
Jupyter Notebook
notebooks/cordex-preprocessing.ipynb
ludwiglierhammer/py-cordex
0896017ac3be444e62600921cf586b6445a2b108
[ "MIT" ]
null
null
null
notebooks/cordex-preprocessing.ipynb
ludwiglierhammer/py-cordex
0896017ac3be444e62600921cf586b6445a2b108
[ "MIT" ]
null
null
null
notebooks/cordex-preprocessing.ipynb
ludwiglierhammer/py-cordex
0896017ac3be444e62600921cf586b6445a2b108
[ "MIT" ]
null
null
null
55.688701
6,818
0.517856
[ [ [ "# Cordex preprocessing", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import xclim\nxclim.__version__", "_____no_output_____" ], [ "import os\nimport intake\nimport xarray as xr\nimport numpy as np\nfrom tqdm.notebook import tqdm\n\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n\nxr.set_options(keep_attrs=True)\nprint(np.__version__)\nprint(xr.__version__)\n\nimport intake_esm\nprint(intake_esm.__version__)\n\nimport cordex as cx\nfrom cordex.preprocessing import preprocessing as preproc", "1.20.1\n0.20.1\n2021.1.15\n" ], [ "eur11 = cx.cordex_domain('EUR-11')", "_____no_output_____" ], [ "!echo $HDF5_USE_FILE_LOCKING", "FALSE\n" ], [ "from dask.distributed import Client, progress\nclient = Client()", "/work/ch0636/g300046/conda_envs/cmip6-xclim/lib/python3.9/site-packages/distributed/node.py:151: UserWarning: Port 8787 is already in use.\nPerhaps you already have a cluster running?\nHosting the HTTP server on port 39975 instead\n warnings.warn(\n" ], [ "client", "_____no_output_____" ], [ "#client.get_versions(check=True)", "_____no_output_____" ] ], [ [ "## Data access", "_____no_output_____" ] ], [ [ "url = \"/work/kd0956/Catalogs/mistral-cordex.json\" \ncat = intake.open_esm_datastore(url)\ncat", "_____no_output_____" ], [ "models = ['CLMcom-CCLM4-8-17',\n 'CLMcom-BTU-CCLM4-8-17',\n 'CLMcom-ETH-COSMO-crCLIM-v1-1',\n 'CNRM-ALADIN53',\n 'CNRM-ALADIN63',\n 'DMI-HIRHAM5',\n 'GERICS-REMO2015',\n 'ICTP-RegCM4-6', # regcm seems to make trouble when opening with to_dataset_dict..\n 'IPSL-INERIS-WRF331F',\n 'KNMI-RACMO22E',\n 'MOHC-HadREM3-GA7-05',\n 'MPI-CSC-REMO2009',\n 'RMIB-UGent-ALARO-0',\n 'SMHI-RCA4', \n 'UHOH-WRF361H']", "_____no_output_____" ], [ "models = ['UHOH-WRF361H']\n#models = 'ICTP-RegCM4-6'", "_____no_output_____" ], [ "#institute_ids = ['CLMcom', 'CLMcom-BTU', 'CLMcom-ETH', 'CNRM', 'DMI', 'GERICS',\n# 'IPSL-INERIS', 'KNMI', 'MOHC', 'MPI-CSC', 'RMIB-UGent', 'SMHI',\n# 'UHOH', 'z_GERICS_KB']\n# there are some missleading entries in the catalog, so we state all institute ids explicitly here\n# to avoid wrong ones...\ninstitute_ids = ['CLMcom', 'CLMcom-BTU', 'CLMcom-ETH', 'CNRM', 'DMI', 'GERICS',\n 'ICTP', 'IPSL-INERIS', 'KNMI', 'MOHC', 'MPI-CSC', 'RMIB-UGent',\n 'SMHI', 'UHOH', 'z_GERICS_KB']", "_____no_output_____" ], [ "scens = ['rcp26', 'rcp45', 'rcp85']\nexpts = ['historical'] + scens\nattrs = {'variable_id': ['tas'] , 'frequency': 'mon', 'CORDEX_domain': 'EUR-11', \n 'experiment_id': expts, 'rcm_version_id': 'v1', 'model_id': models,}\n # 'institute_id': institute_ids}", "_____no_output_____" ], [ "import pandas as pd\npd.set_option('display.max_rows', None)\nselection = cat.search(**attrs)\nselection.df.groupby(['model_id', 'institute_id', 'experiment_id', 'driving_model_id', 'member', 'frequency', 'rcm_version_id', 'version'])['variable_id'].unique().apply(list).to_frame()", "_____no_output_____" ], [ "selection.df.institute_id.unique()", "_____no_output_____" ], [ "#selection['EUR-11.MIROC-MIROC5.UHOH.UHOH-WRF361H.rcp85.mon'].df", "_____no_output_____" ], [ "preproc.rename_cordex", "_____no_output_____" ], [ "dset_dict = selection.to_dataset_dict(cdf_kwargs = {\"use_cftime\": True, \"chunks\": {}}, preprocess=preproc.rename_cordex)", "\n--> The keys in the returned dictionary of datasets are constructed as follows:\n\t'CORDEX_domain.driving_model_id.institute_id.model_id.experiment_id.frequency'\n" ], [ "dset_dict_flatten = preproc.member_id_to_dset_id(dset_dict)", "_____no_output_____" ], [ 
"dset_dict.keys()", "_____no_output_____" ], [ "sort = preproc.sort_ds_dict_by_attr(dset_dict, 'model_id')", "_____no_output_____" ], [ "for ds_id, ds in dset_dict.items():\n print(ds_id)\n print(ds.tas.dims)", "EUR-11.MPI-M-MPI-ESM-LR.UHOH.UHOH-WRF361H.historical.mon\n('member', 'time', 'rlat', 'rlon')\nEUR-11.ICHEC-EC-EARTH.UHOH.UHOH-WRF361H.historical.mon\n('member', 'time', 'rlat', 'rlon')\nEUR-11.MOHC-HadGEM2-ES.UHOH.UHOH-WRF361H.historical.mon\n('member', 'time', 'rlat', 'rlon')\nEUR-11.MIROC-MIROC5.UHOH.UHOH-WRF361H.historical.mon\n('member', 'time', 'height', 'rlat', 'rlon')\nEUR-11.MOHC-HadGEM2-ES.UHOH.UHOH-WRF361H.rcp85.mon\n('member', 'time', 'rlat', 'rlon')\nEUR-11.MIROC-MIROC5.UHOH.UHOH-WRF361H.rcp85.mon\n('member', 'time', 'height', 'lat', 'lon')\nEUR-11.MPI-M-MPI-ESM-LR.UHOH.UHOH-WRF361H.rcp26.mon\n('member', 'time', 'rlat', 'rlon')\nEUR-11.MPI-M-MPI-ESM-LR.UHOH.UHOH-WRF361H.rcp85.mon\n('member', 'time', 'rlat', 'rlon')\nEUR-11.ICHEC-EC-EARTH.UHOH.UHOH-WRF361H.rcp85.mon\n('member', 'time', 'rlat', 'rlon')\n" ], [ "dset_sorted = preproc.sort_ds_dict_by_attr(dset_dict_flatten, 'experiment_id')", "_____no_output_____" ], [ "dset_sorted['rcp45'].keys()", "_____no_output_____" ], [ "ds_list = []\nfor ds in dset_sorted['rcp85'].values():\n ds = preproc.replace_rlon_rlat(ds)\n ds = preproc.replace_lon_lat(ds)\n ds_list.append(ds)", "_____no_output_____" ], [ "from xclim.ensembles import create_ensemble", "_____no_output_____" ], [ "rcp85 = create_ensemble(ds_list, resample_freq='MS')", "/work/ch0636/g300046/conda_envs/cmip6-xclim/lib/python3.9/site-packages/xarray/core/indexing.py:1369: PerformanceWarning: Slicing is producing a large chunk. To accept the large\nchunk and silence this warning, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n ... array[indexer]\n\nTo avoid creating the large chunks, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n ... array[indexer]\n return self.array[key]\n/work/ch0636/g300046/conda_envs/cmip6-xclim/lib/python3.9/site-packages/xarray/core/indexing.py:1369: PerformanceWarning: Slicing is producing a large chunk. To accept the large\nchunk and silence this warning, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n ... array[indexer]\n\nTo avoid creating the large chunks, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n ... array[indexer]\n return self.array[key]\n/work/ch0636/g300046/conda_envs/cmip6-xclim/lib/python3.9/site-packages/xarray/core/indexing.py:1369: PerformanceWarning: Slicing is producing a large chunk. To accept the large\nchunk and silence this warning, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n ... array[indexer]\n\nTo avoid creating the large chunks, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n ... array[indexer]\n return self.array[key]\n/work/ch0636/g300046/conda_envs/cmip6-xclim/lib/python3.9/site-packages/xarray/core/indexing.py:1369: PerformanceWarning: Slicing is producing a large chunk. To accept the large\nchunk and silence this warning, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n ... array[indexer]\n\nTo avoid creating the large chunks, set the option\n >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n ... 
array[indexer]\n return self.array[key]\n" ], [ "for key, ds in dset_dict.items():\n print(key)\n print(list(ds.dims))\n ds = preproc.rename_cordex(ds)\n preproc.check_domain(ds)", "_____no_output_____" ], [ "ds = dset_dict['EUR-11.NCC-NorESM1-M.z_GERICS_KB.GERICS-REMO2015.historical.mon']\nds = preproc.rename_cordex(ds)\nds = preproc.promote_empty_dims(ds)\nds = preproc.replace_rlon_rlat(ds)\nds", "_____no_output_____" ], [ "ds = dset_dict['EUR-11.NCC-NorESM1-M.CNRM.CNRM-ALADIN63.historical.mon']\nds\npreproc.get_grid_mapping(ds)", "_____no_output_____" ], [ "ds = dset_dict['EUR-11.MPI-M-MPI-ESM-LR.GERICS.GERICS-REMO2015.historical.mon']\nds\npreproc.get_grid_mapping(ds)", "_____no_output_____" ], [ "preproc.get_grid_mapping(ds)", "_____no_output_____" ], [ "preproc.remap_lambert_conformal(ds)", "/work/ch0636/g300046/conda_envs/cmip6-processing/lib/python3.9/site-packages/xesmf/frontend.py:464: FutureWarning: ``output_sizes`` should be given in the ``dask_gufunc_kwargs`` parameter. It will be removed as direct parameter in a future version.\n dr_out = xr.apply_ufunc(\n" ], [ "preproc.regridder is None", "_____no_output_____" ], [ "preproc._init_regridder(ds, eur11)", "_____no_output_____" ], [ "preproc.regridder", "_____no_output_____" ], [ "preproc.cordex_dataset_id(ds)", "_____no_output_____" ], [ "ds = dset_dict['EUR-11.IPSL-IPSL-CM5A-LR.GERICS.GERICS-REMO2015.historical.mon']\nds", "_____no_output_____" ] ], [ [ "We move the member id from a coordinate to the dsets key so that all datasets have the same structure...", "_____no_output_____" ], [ "we concentrate on rotated pole grids for now. there are just a handful of lambert conformal projection models that we can deal later with.", "_____no_output_____" ] ], [ [ "from cordex.preprocessing import preprocessing as preproc", "_____no_output_____" ], [ "dset_dict['EUR-11.ICHEC-EC-EARTH.DMI.DMI-HIRHAM5.historical.mon']", "_____no_output_____" ] ], [ [ "Now, we concatenate scenarion data with historical data for easier comparisons:", "_____no_output_____" ], [ "Now, we have to align the different coordinats. The ensemble members might have slightly different rotated coordinates and also the projection to the global coordinates might be different. 
We use a reference dataset here, from wich we copy the coordinates to all datasets to make them comparable.", "_____no_output_____" ], [ "The idea to make the whole dataset id a coordinate, so that xarray will automatically do all stuff for all datasets...", "_____no_output_____" ] ], [ [ "# make the dataset id a coordinate for easier access\ndims = {}\nfor rcp, dsets in dsets_sorted.items():\n dset_ids = list(dsets.keys())\n dim = xr.DataArray(dset_ids, dims='dset_id', name='dset_id',\n coords={'dset_id': dset_ids})\n dims[rcp] = dim", "_____no_output_____" ], [ "preproc.dset_ids_to_coord(dset_dict)", "_____no_output_____" ] ], [ [ "we create three big datasets for each senario one:", "_____no_output_____" ] ], [ [ "from cordex import cordex_domain\n\n\ndef create_test_ds(name, pol_name='rotated_latitude_longitude'):\n domain = cordex_domain(name, mapping_name=pol_name, dummy=True, add_vertices=True)\n domain.dummy\n return domain\n \n", "_____no_output_____" ], [ "dm = create_test_ds('EUR-11', 'rotated_pole')\n#dm = dm.drop(('lon', 'lat'))\n#dm.rename({'rlon': 'lon', 'rlat': 'lat'})\ndm['dummy'] = xr.DataArray(dm.dummy.values, dims=('lat', 'lon'), attrs=dm.dummy.attrs)\ndm", "_____no_output_____" ], [ "preproc.rename_cordex(dm)", "_____no_output_____" ], [ "preproc.rename_cordex(dm).equals(create_test_ds('EUR-11'))", "_____no_output_____" ], [ "dm = create_test_ds('EUR-11', 'rotated_pole')\n#dm = dm.rename({'lon_vertices': 'longitude_vertices', 'lat_vertices': 'latitude_vertices'})\ndm", "_____no_output_____" ], [ "dm.drop_vars(('rlon', 'rlat'))", "_____no_output_____" ], [ "preproc.rename_cordex(dm)", "_____no_output_____" ], [ "preproc.rename_cordex(ds)", "_____no_output_____" ], [ "xr.Dataset(dict)", "_____no_output_____" ], [ "preproc.rename_cordex(dm)", "_____no_output_____" ], [ "dm.dummy.reset_coords().assign_coords({'lat': dm.rlat.values, 'lon': dm.rlon.values})", "_____no_output_____" ], [ "dm.dummy.assign_coords({'rlat': dm.rlat.values, 'rlon': dm.rlon.values})", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a42a4e6fa9d7e7f9afb4c97ab0d8f594e420228
10,887
ipynb
Jupyter Notebook
Synthetic_wedge_short.ipynb
EvanBianco/geocomputing_demos
96dd8e0d7293077d39099d13f102709fb6812961
[ "Apache-2.0" ]
2
2021-07-29T15:17:08.000Z
2021-07-29T15:17:15.000Z
Synthetic_wedge_short.ipynb
EvanBianco/geocomputing_demos
96dd8e0d7293077d39099d13f102709fb6812961
[ "Apache-2.0" ]
null
null
null
Synthetic_wedge_short.ipynb
EvanBianco/geocomputing_demos
96dd8e0d7293077d39099d13f102709fb6812961
[ "Apache-2.0" ]
1
2021-07-30T18:03:32.000Z
2021-07-30T18:03:32.000Z
22.083164
318
0.490953
[ [ [ "# Synthetic seismic: wedge\n\nWe're going to make the famous wedge model, which interpreters can use to visualize the tuning effect. Then we can extend the idea to other kinds of model.\n\n## Make a wedge earth model", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "length = 80 # x range\ndepth = 200 # z range", "_____no_output_____" ] ], [ [ "### EXERCISE\n\nMake a NumPy array of integers with these dimensions, placing a boundary at a 'depth' of 66 and another at a depth of 133.\n\nA plot of a vertical section through this array should look something like:\n\n |\n |\n ---\n |\n |\n ---\n |\n |", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n\n", "_____no_output_____" ], [ "# We have to pass dtype=int or we get floats.\n# We need ints because we're going to use for indexing later.\nmodel = 1 + np.tri(depth, length, -depth//3, dtype=int)\n\nplt.imshow(model)\nplt.colorbar()\nplt.show()", "_____no_output_____" ] ], [ [ "Now set the upper part of the model — above the wedge — to zero.", "_____no_output_____" ] ], [ [ "model[:depth//3,:] = 0\n\nplt.imshow(model)\nplt.colorbar()\nplt.show()", "_____no_output_____" ] ], [ [ "Now we can make some Vp-rho pairs (rock 0, rock 1, and rock 2).", "_____no_output_____" ] ], [ [ "rocks = np.array([[2540, 2550], # <-- Upper layer\n [2400, 2450], # <-- Wedge\n [2650, 2800]]) # <-- Lower layer", "_____no_output_____" ] ], [ [ "Now we can use ['fancy indexing'](http://docs.scipy.org/doc/numpy/user/basics.indexing.html) to use `model`, which is an array of 0, 1, and 2, as the indices of the rock property pairs to 'grab' from `rocks`.", "_____no_output_____" ] ], [ [ "earth = rocks[model]", "_____no_output_____" ] ], [ [ "Now apply `np.prod` (product) to those Vp-rho pairs to get impedance at every sample.", "_____no_output_____" ] ], [ [ "imp = np.apply_along_axis(np.prod, arr=earth, axis=-1)", "_____no_output_____" ] ], [ [ "## Model seismic reflections\n\nNow we have an earth model — giving us acoustic impedance everywhere in this 2D grid — we define a function to compute reflection coefficients for every trace.", "_____no_output_____" ], [ "### EXERCISE\n\nCan you write a function to compute the reflection coefficients in this model?\n\nIt should implement this equation, where $Z$ is acoustic impedance and :\n\n$$ R = \\frac{Z_\\mathrm{lower} - Z_\\mathrm{upper}}{Z_\\mathrm{lower} + Z_\\mathrm{upper}} $$\n\nThe result should be a sparse 2D array of shape (199, 80). 
The upper interface of the wedge should be positive.", "_____no_output_____" ] ], [ [ "def make_rc(imp):\n \n # YOUR CODE HERE\n \n return rc\n\nrc = make_rc(imp)", "_____no_output_____" ], [ "def make_rc(imp):\n \"\"\"\n Compute reflection coefficients.\n \"\"\"\n upper = imp[ :-1, :]\n lower = imp[1: , :]\n \n return (lower - upper) / (lower + upper)\n\nrc = make_rc(imp)", "_____no_output_____" ] ], [ [ "You should be able to plot the RC series like so:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,4))\nplt.imshow(rc, aspect='auto')\nplt.colorbar()\nplt.show()", "_____no_output_____" ] ], [ [ "### EXERCISE\n\nImplement a Ricker wavelet of frequency $f$ with amplitude $A$ at time $t$ given by:\n\n$$ \\mathbf{a}(\\mathbf{t}) = (1-2 \\pi^2 f^2 \\mathbf{t}^2) \\mathrm{e}^{-\\pi^2 f^2 \\mathbf{t}^2} $$", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n\n", "_____no_output_____" ] ], [ [ "There is an implementation in `scipy.signal` but it has a 'width parameter' instead of 'frequency' so it's harder to parameterize.\n\nInstead, we'll use `bruges` to make a wavelet:", "_____no_output_____" ] ], [ [ "from bruges.filters import ricker\n\nf = 25 # We'll use this later.\nw, t = ricker(duration=0.128, dt=0.001, f=f, return_t=True)\n\nplt.plot(t, w)\nplt.show()", "_____no_output_____" ] ], [ [ "### EXERCISE\n\nMake an RC series 200 samples long, with one positive and one negative RC. Make a corresponding time array.\n\nPass the RC series to `np.convolve()` along with the wavelet, then plot the resulting synthetic seismogram.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n\n", "_____no_output_____" ], [ "temp = np.zeros(200)\ntemp[66] = 1\ntemp[133] = -0.5\n\ntr = np.convolve(temp, w, mode='same')\n\nplt.plot(tr)", "_____no_output_____" ] ], [ [ "## Synthetic wedge", "_____no_output_____" ], [ "It's only a little trickier for us to apply 1D convolution to every trace in our 2D reflection coeeficient matrix. NumPy provides a function, `apply_along_axis()` to apply any function along any one axis of an n-dimensional array. I don't think it's much faster than looping, but I find it easier to think about.", "_____no_output_____" ] ], [ [ "def convolve(trace, wavelet):\n return np.convolve(trace, wavelet, mode='same')\n\nsynth = np.apply_along_axis(convolve,\n axis=0,\n arr=rc,\n wavelet=w)\n\nplt.figure(figsize=(12,6))\nplt.imshow(synth, cmap=\"Greys\", aspect=0.2)\nplt.colorbar()\nplt.show()", "_____no_output_____" ] ], [ [ "### EXERCISE\n\nUse `ipywidgets.interact` to turn this into an interactive plot, so that we can vary the frequency of the wavelet and see the effect on the synthetic.\n\nHere's a reminder of how to use it:\n\n from ipywidgets import interact\n\n @interact(a=(0, 10, 1), b=(0, 100, 10))\n def main(a, b):\n \"\"\"Do the things!\"\"\"\n print(a + b)\n return", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n\n", "_____no_output_____" ], [ "from ipywidgets import interact\n\n@interact(f=(4, 100, 4))\ndef show(f):\n w, t = ricker(duration=0.128, dt=0.001, f=f, return_t=True)\n synth = np.apply_along_axis(convolve,\n axis=0,\n arr=rc,\n wavelet=w)\n plt.figure(figsize=(12,6))\n plt.imshow(synth, cmap=\"Greys\", aspect=0.2)\n plt.colorbar()\n plt.show()", "_____no_output_____" ] ], [ [ "<hr />\n\n<div>\n<img src=\"https://avatars1.githubusercontent.com/u/1692321?s=50\"><p style=\"text-align:center\">© Agile Scientific 2020</p>\n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
4a42a75dc1906ead39f0e1251ac89b96071f8ef1
4,750
ipynb
Jupyter Notebook
03-Add-Some-Data.ipynb
meatballs/sql_python_notebooks
185750ddd2d6ee27ef014e313a29ff7a7d2053b0
[ "MIT" ]
1
2018-09-04T13:26:37.000Z
2018-09-04T13:26:37.000Z
03-Add-Some-Data.ipynb
meatballs/sql_python_notebooks
185750ddd2d6ee27ef014e313a29ff7a7d2053b0
[ "MIT" ]
2
2018-09-17T14:00:23.000Z
2018-09-17T14:35:49.000Z
03-Add-Some-Data.ipynb
meatballs/sql_python_notebooks
185750ddd2d6ee27ef014e313a29ff7a7d2053b0
[ "MIT" ]
1
2018-05-13T08:28:29.000Z
2018-05-13T08:28:29.000Z
27.142857
266
0.551158
[ [ [ "# Add Some Data\n\nNow that we have a table, it's time to add some data into it.\n\nFirst, let's connect to our database:", "_____no_output_____" ] ], [ [ "import sqlalchemy as sa\nengine = sa.create_engine('sqlite:///flight.db')\nconnection = engine.connect()", "_____no_output_____" ] ], [ [ "To add data, we use an 'INSERT' statment.\n\nWe specify the columns for which we have data and then the values for the record we want to create.\n\nIn our case, we'll provide values for the flight name, timestamp and the teperature, pressure and humidity readings but we'll leave the accelerometer columns to have the default value of 0:\n\n```sql\nINSERT INTO readings(flight, ts, temp, pressure, humidity)\nVALUES ('hab1', '2015-01-01 09:00:00', 25.5, 1020, 40)\n```\n\nLet's execute that statement:", "_____no_output_____" ] ], [ [ "sql = \"\"\"\n INSERT INTO readings(flight, ts, temp, pressure, humidity)\n VALUES ('hab1', '2015-01-01 09:00:00', 25.5, 1020, 40)\n\"\"\"\nconnection.execute(sql)", "_____no_output_____" ] ], [ [ "A single INSERT statement can create multiple records:\n \n```sql\nINSERT INTO readings(flight, ts, temp, pressure, humidity)\nVALUES\n ('hab1', '2015-01-01 09:01:00', 25.5, 1019, 40),\n ('hab1', '2015-01-01 09:02:00', 25.5, 1019, 41)\n```\n\nLet's execute this statment too so that our table has three records in total:", "_____no_output_____" ] ], [ [ "sql = \"\"\"\n INSERT INTO readings(flight, ts, temp, pressure, humidity)\n VALUES\n ('hab1', '2015-01-01 09:01:00', 25.5, 1019, 40),\n ('hab1', '2015-01-01 09:02:00', 25.5, 1019, 41)\n\"\"\"\nconnection.execute(sql)", "_____no_output_____" ] ], [ [ "If you opted to [run the code on your own computer](http://owencampbell.me.uk/pages/howto#howto-local) and also installed the graphical tool, you can now click the 'Browse Data' button to view the three records you just inserted.\n\nOur table has check constraints to validate the records we insert. What happens when we try to insert invalid records?\n\nLet's try to create a record with a negative pressure reading:", "_____no_output_____" ] ], [ [ "sql = \"\"\"\n INSERT INTO readings(flight, ts, temp, pressure, humidity)\n VALUES ('hab1', '2015-01-01 09:03:00', 25.5, -1000, 40)\n\"\"\"\ntry:\n connection.execute(sql)\nexcept Exception as e:\n print(e)\n ", "(sqlite3.IntegrityError) CHECK constraint failed: pres_ck [SQL: \"\\n INSERT INTO readings(flight, ts, temp, pressure, humidity)\\n VALUES ('hab1', '2015-01-01 09:03:00', 25.5, -1000, 40)\\n\"] (Background on this error at: http://sqlalche.me/e/gkpj)\n" ] ], [ [ "We see that SQLAlchemy raises an IntegrityError and the insert fails as expected.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]