Dataset schema (one record per notebook; ⌀ marks nullable columns):

| column | type / range |
|---|---|
| hexsha | string, length 40 |
| size | int64, 6 to 14.9M |
| ext | string, 1 class |
| lang | string, 1 class |
| max_stars_repo_path | string, length 6 to 260 |
| max_stars_repo_name | string, length 6 to 119 |
| max_stars_repo_head_hexsha | string, length 40 to 41 |
| max_stars_repo_licenses | list |
| max_stars_count | int64, 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string, length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string, length 24, nullable (⌀) |
| max_issues_repo_path | string, length 6 to 260 |
| max_issues_repo_name | string, length 6 to 119 |
| max_issues_repo_head_hexsha | string, length 40 to 41 |
| max_issues_repo_licenses | list |
| max_issues_count | int64, 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string, length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string, length 24, nullable (⌀) |
| max_forks_repo_path | string, length 6 to 260 |
| max_forks_repo_name | string, length 6 to 119 |
| max_forks_repo_head_hexsha | string, length 40 to 41 |
| max_forks_repo_licenses | list |
| max_forks_count | int64, 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string, length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string, length 24, nullable (⌀) |
| avg_line_length | float64, 2 to 1.04M |
| max_line_length | int64, 2 to 11.2M |
| alphanum_fraction | float64, 0 to 1 |
| cells | list |
| cell_types | list |
| cell_type_groups | list |
Record 1:

hexsha: 4a0ec661d069c8a2337c99a24677bb3214f5e845 | size: 33,298 | ext: ipynb | lang: Jupyter Notebook
repo path (same for stars/issues/forks): create_model/collect_image_with_metadata.ipynb | repo name: justcho5/instagrammability | head hexsha: fe023314ccb32135781e6f9ac988f06d9cb07629 | licenses: ["MIT"]
max_stars_count: 1 | stars event min/max datetime: 2019-01-04T14:10:15.000Z (both) | max_issues_count: null | max_forks_count: null
avg_line_length: 33.805076 | max_line_length: 261 | alphanum_fraction: 0.463151

cells:
[
[
[
"## In this notebook, images and their corresponding metadata are organized. We take note of the actual existing images, combine with available metadata, and scraped follower counts. After merging and dropping image duplicates, we obtain 7702 total images.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport os\nfrom PIL import Image\nimport json\nfrom pandas.io.json import json_normalize\nimport ast\n\nIMAGE_DIR = \"./images/training/resized/\"",
"_____no_output_____"
]
],
[
[
"### Dataframe (df_imagename) of all existing images: 11181 Images\n",
"_____no_output_____"
]
],
[
[
"# Directory of museum folders\nim_dirs = os.listdir(IMAGE_DIR)\n\nfolder = []\nfor f in im_dirs:\n if f != '.DS_Store':\n print(IMAGE_DIR+f)\n folder = folder + os.listdir(IMAGE_DIR+f) \n \n# df_imagename : Dataframe of existing images \ndf_imagename = pd.DataFrame({\"filename\": folder})\ndf_imagename.head()\nprint(\"Number of existing images: {}\".format(df_imagename.filename.size))",
"./data/data/cablausanne\n./data/data/elyseemusee\n./data/data/espacearlaud\n./data/data/hermitage_lausanne\n./data/data/mcbalausanne\n./data/data/mudaclausanne\n./data/data/olympicmuseum\nNumber of existing images: 11181\n"
],
[
"# Takes metadata for museum and returns a dataframe\ndef load_metadata(file, folder):\n data = json.load(file)\n df = pd.DataFrame.from_dict(json_normalize(data), orient = 'columns')\n df['museum'] = folder\n df = df.rename(index=str, columns={\"id\": \"insta_id\"})\n df.drop(labels = ['comments_disabled', 'edge_media_preview_like.count',\n 'edge_media_to_caption.edges', 'edge_media_to_comment.count', 'is_video', 'thumbnail_resources', 'thumbnail_src', 'urls',\n 'video_view_count'], axis = 1, inplace = True)\n df['display_url'] = df['display_url'].str.split('/').str[-1]\n return df\n ",
"_____no_output_____"
]
],
[
[
"### Dataframe (df) of images in metadata: Metadata for 8362 images\n",
"_____no_output_____"
]
],
[
[
"# Load all the metadata\ndf = pd.DataFrame()\nfor folder in im_dirs:\n if folder != \".DS_Store\":\n print(\"Loading {} images\".format(folder))\n meta_file = open(\"{image_dir}{folder}/{folder}.json\".format(image_dir=IMAGE_DIR, folder = folder))\n if df.empty:\n df = load_metadata(meta_file, folder)\n else:\n df = pd.concat([df, load_metadata(meta_file, folder)], ignore_index = True)\n columns = ['height',\n 'width',\n 'filename',\n 'liked_count',\n 'insta_id',\n 'user_id',\n 'shortcode',\n 'tags',\n 'timestamp',\n 'museum'] \n \ndf.to_csv('./images/training/data/merged_metadata.csv', header = columns)\ndf.head()\nprint(\"Number of images in metadata: {}\".format(df.shortcode.size))",
"Loading cablausanne images\nLoading elyseemusee images\nLoading espacearlaud images\nLoading hermitage_lausanne images\nLoading mcbalausanne images\nLoading mudaclausanne images\nLoading olympicmuseum images\nNumber of images in metadata: 8362\n"
]
],
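One detail worth noting in the cell above: `DataFrame.to_csv` accepts a list of strings for `header`, which it writes as aliases for the column names; the in-memory frame keeps its original names. A minimal sketch (the file name is hypothetical):

```python
import pandas as pd

# header=<list> writes aliases for the column names into the CSV;
# the DataFrame itself is not renamed.
df = pd.DataFrame({"a": [1], "b": [2]})
df.to_csv("out.csv", header=["height", "width"], index=False)
print(open("out.csv").read())  # height,width / 1,2
```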
[
[
"## Script for scraping follower counts. Some of the shortcodes used were not valid, possibly because the images were removed.",
"_____no_output_____"
]
],
[
[
"from selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom lxml import html\n\nimport csv\ndef write_out_csv(data, filename_base, fieldnames):\n print(\"Writing to output file %s.csv\" % filename_base)\n with open(\"%s.csv\" % filename_base, \"w\") as csvfile:\n fields = fieldnames\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writeheader()\n for row in data:\n writer.writerow(row)\n \ndef scrape_followers(lst, output_filename):\n instagram_data = []\n error_sc = []\n for code in lst:\n url = \"https://instagram.com/p/\" + code\n try:\n browser.get(url)\n elem = wait.until(\n EC.element_to_be_clickable(\n (By.XPATH, '//div[@class = \"e1e1d\"]//a[@class = \"FPmhX notranslate nJAzx\"]')\n )\n )\n\n elem.click()\n elem = wait.until(\n EC.element_to_be_clickable((By.XPATH, '//div[@class = \"v9tJq \"]'))\n )\n el = browser.find_element_by_xpath(\"//*\")\n parser = html.fromstring(el.get_attribute(\"outerHTML\"))\n # print(el.get_attribute(\"outerHTML\"))\n raw_followers = parser.xpath(\n './/ul[@class=\"k9GMp \"]/li[position()=2]//span[@class = \"g47SY \"]/@title'\n )[0].replace(\",\", \"\")\n\n data = {\"shortcode\": code, \"followers\": int(raw_followers)}\n instagram_data.append(data)\n except:\n error_sc.append(code)\n pass\n\n\n browser.close()\n fields = [\"shortcode\", \"followers\"]\n print(error_sc)\n write_out_csv(instagram_data, \"{}\".format(output_filename), fields)\n",
"_____no_output_____"
],
[
"# Uncomment the code below to run scraping for a list of shortcodes\n# Load the shortcodes of images for which the followers was not scraped\n\n# with open('error_sc4.txt', 'r') as f:\n# error_sc4 = ast.literal_eval(f.read())\n \n# print(len(error_sc4))\n\n# browser = webdriver.Chrome()\n# wait = WebDriverWait(browser, 15)\n# scrape_followers(error_sc3, \"followers4\")",
"_____no_output_____"
]
],
[
[
"### Dataframe (df_followers) of follower number for each shortcode: 8138 counts, 8068 shortcodes are unique\n",
"_____no_output_____"
]
],
[
[
"# Follower counts are merged\n# lst_followers = [pd.read_csv(\"followers.csv\"), pd.read_csv(\"followers2.csv\"), pd.read_csv(\"followers3.csv\"), pd.read_csv(\"followers4.csv\")]\n# df_followers = pd.concat(lst_followers, ignore_index = True)\n# df_followers.to_csv(\"scraped_follower_counts.csv\")\n\n\n# Follower count df: df_followers\n# Metadata df: df_images\ndf_followers = pd.read_csv(\"./images/training/data/scraped_follower_counts.csv\")\ndf_images = pd.read_csv(\"./images/training/data/merged_metadata.csv\")",
"_____no_output_____"
],
[
"print(\"Number of Follower counts\", df_followers.shortcode.size)\nprint(\"Number of Follower counts based on unique shortcodes\", df_followers.shortcode.unique().size)\nprint(\"Number of Images with metadata\", df_images.shortcode.size)\nprint(\"Number of actual Images\", df_imagename.size)",
"Number of Follower counts 8138\nNumber of Follower counts based on unique shortcodes 8068\nNumber of Images with metadata 8362\nNumber of actual Images 11181\n"
]
],
[
[
"### Dataframe (df_final): merge metadata with scraped followers counts.",
"_____no_output_____"
]
],
[
[
"df_final = df_followers.merge(df_images, on = \"shortcode\")",
"_____no_output_____"
],
[
"print(\"From Metadata - Number of unique filenames: {}\".format(df_images.filename.unique().size))\nprint(\"From Metadata - Number of filenames: {}\".format(df_images.filename.size))\nprint(\"Metadata + Followers - Number of unique filenames : {}\".format(df_final.filename.unique().size))\nprint(\"Metadata + Followers - Number of filenames: {}\".format(df_final.filename.size))",
"From Metadata - Number of unique filenames: 8292\nFrom Metadata - Number of filenames: 8362\nMetadata + Followers - Number of unique filenames : 8068\nMetadata + Followers - Number of filenames: 8280\n"
],
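The counts above follow from pandas merge semantics: `merge` performs an inner join by default, so shortcodes missing from either frame are dropped, and duplicated shortcodes multiply rows. A minimal sketch with hypothetical toy data:

```python
import pandas as pd

followers = pd.DataFrame({"shortcode": ["a", "b", "b"], "followers": [10, 20, 20]})
images = pd.DataFrame({"shortcode": ["a", "b", "c"], "filename": ["1.jpg", "2.jpg", "3.jpg"]})

# Inner join: 'c' disappears (no follower count), 'b' appears twice
# (duplicate scrape) -- hence the drop_duplicates call afterwards.
print(followers.merge(images, on="shortcode"))
```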
[
"df_final.drop_duplicates(subset = [\"shortcode\"], inplace = True)\ndf_final.shortcode.unique().size\ndf_final.shortcode.size\ndf_final['score'] = df_final.liked_count/df_final.followers\ndf_final = df_final[df_final['score'] != float('inf')]\nprint(\"min: {}, max: {}\".format(min(df_final.score), max(df_final.score)))\ndf_final['norm_score'] = (df_final['score'] - min(df_final.score))/(max(df_final.score) - min(df_final.score))\nprint(\"normalized - min: {}, max: {}\".format(min(df_final.norm_score), max(df_final.norm_score)))\n\ndf_final.head()",
"min: 0.0, max: 5.5\nnormalized - min: 0.0, max: 1.0\n"
]
],
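For reference, `norm_score` above is a standard min-max rescale. With the recorded min of 0.0 and max of 5.5, a quick check (a sketch, not from the notebook):

```python
# Min-max rescale using the min/max printed above (0.0 and 5.5).
def norm_score(score, lo=0.0, hi=5.5):
    return (score - lo) / (hi - lo)

assert norm_score(0.0) == 0.0 and norm_score(5.5) == 1.0
print(norm_score(2.75))  # 0.5: the midpoint of the score range
```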
[
[
"### Dataframe (df_final) -- existing images merged with metadata images: ",
"_____no_output_____"
]
],
[
[
"df_final = df_final.merge(df_imagename, on=\"filename\")\nprint(df_imagename.filename.unique().size)\nprint(df_imagename.filename.size)\ndf_final.filename.unique().size\ndf_final = df_final.sort_values(by = \"score\", ascending=False)[['filename', 'museum', 'score', 'liked_count', 'followers', 'norm_score']]\ndf_final.drop_duplicates(subset = \"filename\", inplace = True)\nprint(\"Number of existing images merged with metadata: {}\".format(df_final.filename.size))\ndf_final.to_csv('./images/training/data/image_data_final.csv')",
"11161\n11181\nNumber of existing images merged with metadata: 7702\n"
],
[
"df_final.read_csv('./images/training/data/image_data_final.csv')",
"_____no_output_____"
],
[
"df.filename.size",
"_____no_output_____"
],
[
"# Dataframe of follower counts\ndf_followers = pd.read_csv(\"./images/training/data/scraped_follower_counts.csv\")\ndf_followers.head()",
"_____no_output_____"
],
[
"# Dataframe of metadata\ndf_images = pd.read_csv(\"./images/training/data/merged_metadata.csv\")\ndf_images.head()",
"_____no_output_____"
],
[
"# Final dataframe of images that are existing and have follower counts and metadata\ndf_final = pd.read_csv('./images/training/data/image_data_final.csv')\ndf_final.head()",
"_____no_output_____"
]
]
] |

cell_types:
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |

cell_type_groups:
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
Record 2:

hexsha: 4a0ed84c7d011aa4432457c2f98154733f4c73b8 | size: 7,809 | ext: ipynb | lang: Jupyter Notebook
repo path (same for stars/issues/forks): IntroODEs/intro_to_ODEs2.ipynb | repo name: andrew-lundgren/ComputationalPhysics | head hexsha: 2dc20e0794b60d4d890ad9470b8d1f84871e1acd | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
avg_line_length: 28.815498 | max_line_length: 312 | alphanum_fraction: 0.549622

cells:
[
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\n# In addition to the imports, we'll also import some constants\n# And also define our own\nfrom scipy.constants import gravitational_constant, au\nyear = 365.25*24*3600\nmass_sun = 1.989e30\nmars_distance = 227.9*1.e9\n# NOTE: All units in SI",
"_____no_output_____"
]
],
[
[
"## Exercise 3 - Gravity!\n\nContinuing from the previous notebook, now we're going to try a more difficult problem: gravity! We need to do this in two dimensions, so now we've got more variables. It's still ordinary differential equations though. The only derivative is a time derivative.\n\nNow we want to solve a vector equation:\n\n$$\\vec{F~} = - \\frac{G~M~m}{r^2} \\hat{r~}$$\n\nWe'll take this to be the force on $m$, so $F = m a$. In terms of the unnormalized vector $\\vec{r~}$, we have\n\n$$\\vec{a~} = - \\frac{G~M}{r^2} \\frac{\\vec{r~}}{r}$$\n\nwhere $r$ is the length of $\\vec{r~}$.",
"_____no_output_____"
],
[
"So how do we put this into the form scipy expects? We define the position of the little object by\n$$\\vec{r~} = (x, y)$$\nThen the length is\n$$r = \\sqrt{x^2 + y^2}$$\nWe have second-order differential equations for both $x$ and $y$. We need four variables $x$, $y$, $v_x$, $v_y$.\n\nWe also need to rescale our variables. Kilograms, meters, and seconds aren't great for describing orbits. We'll get a lot of huge numbers. Let's define a rescaling:\n$$t = T~\\tau$$\n$$r = R~\\rho$$\nSo the differential equation looks something like\n$$\\frac{d^2 r}{d t^2} = \\frac{R}{T^2} \\frac{d^2 \\rho}{d \\tau^2} = - \\frac{G~M}{(R~\\rho)^2}$$\nor\n$$\\frac{d^2 \\rho}{d \\tau^2} = - \\left( \\frac{G~M~T^2}{R^3}\\right) ~ \\frac{1}{\\rho^2}$$\nAll the units have been collected into one single factor. If we choose $R = 1~\\mathrm{AU}$ and $T = 1~\\mathrm{yr}$, this factor becomes a nice number close to $1$.",
"_____no_output_____"
]
],
[
[
"# Calculate the factor above\ngee_msol = gravitational_constant*mass_sun\nscale_factor = (gee_msol/au/au/au) * year * year\nprint(scale_factor)",
"_____no_output_____"
]
],
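For reference (not part of the notebook): with $R = 1~\mathrm{AU}$ and $T = 1~\mathrm{yr}$ around the Sun, Kepler's third law says this factor should come out to $4\pi^2 \approx 39.48$. A quick self-contained check:

```python
import numpy as np
from scipy.constants import gravitational_constant, au

# Recompute G*M*T^2/R^3 with R = 1 AU, T = 1 yr and compare with 4*pi^2.
year = 365.25 * 24 * 3600   # seconds
mass_sun = 1.989e30         # kg
factor = gravitational_constant * mass_sun * year**2 / au**3
print(factor, 4 * np.pi**2)  # ~39.48 vs ~39.478
```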
[
[
"Now we're ready to define the gravitational acceleration and start some calculations.",
"_____no_output_____"
]
],
[
[
"# Gravitational acceleration in 2D\ndef fgrav(vec, t):\n x, y, vx, vy = vec\n r = # FIXME: Calculate the distance from x and y\n acc = # FIXME: Calculate the magnitude of the acceleration\n return (vx, vy, -acc*x/r, -acc*y/r) # Turn the calculations above into the acceleration vector",
"_____no_output_____"
],
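One possible completion of the FIXMEs above, for readers working outside the course (the blanks are left for you to fill in as the exercise; this sketch assumes the scale factor computed earlier):

```python
import numpy as np

SCALE = 4 * np.pi**2  # G*M*T^2/R^3 for the Sun in AU/yr units, ~39.48

def fgrav(vec, t):
    x, y, vx, vy = vec
    r = np.sqrt(x**2 + y**2)   # distance from the Sun in AU
    acc = SCALE / r**2         # magnitude of the acceleration
    return (vx, vy, -acc*x/r, -acc*y/r)
```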
[
"r_init = (1., 0., 0., 1.) # Starting values at t = 0\ntimes = np.linspace(0., 4., 10000)\nrarr = odeint(fgrav, r_init, times)",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,8))\nplt.scatter(rarr[:,0], rarr[:,1], s=5)\nplt.scatter(0., 0., c='y', s=50)\nplt.gca().set_aspect('equal', 'datalim')",
"_____no_output_____"
]
],
[
[
"We just guessed at the initial conditions, and we get a very elliptical orbit. Using the formula for acceleration on a circle\n\n$$v^2/r = G~M/r^2$$\n\nSo the velocity on a circular orbit should be\n\n$$v = \\sqrt{G~M/r}$$\n\nWe can use that to get the initial conditions correct.",
"_____no_output_____"
],
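In the scaled units, $G~M = 4\pi^2$, so at $\rho = 1$ the circular-orbit speed works out to $2\pi$: one circumference ($2\pi r$) per year, as it should for the Earth. A quick check (this effectively gives away Exercise 3.1, so look away if you want to derive it yourself):

```python
import numpy as np

# v = sqrt(G*M/r) with G*M = 4*pi^2 and r = 1 in AU/yr units.
v_circ = np.sqrt(4 * np.pi**2 / 1.0)
print(v_circ)  # ~6.283 = 2*pi
```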
[
"**Exercise 3.1**: Fill in the initial condition below to get a circular orbit at $r = 1$.",
"_____no_output_____"
]
],
[
[
"fIr_init1 = (1., 0., 0., 1.) # FIXME: Change the last value\ntimes = np.linspace(0., 4., 10000)\nrarr1 = odeint(fgrav, r_init1, times)",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,8))\nplt.scatter(rarr1[:,0], rarr1[:,1], s=5)\nplt.scatter(0., 0., c='y', s=50)\nplt.gca().set_aspect('equal', 'datalim')",
"_____no_output_____"
]
],
[
[
"**Exercise 3.2**: How long does a single orbit take? Does this make sense?",
"_____no_output_____"
],
[
"**Exercise 3.3**: Play with the conditions below, shooting the planet toward the sun but offset a bit in $y$ so it doesn't go straight through the center. What kind of shapes do you get? Note that we use a different `times` array than the others, so orbits that go way off can be stopped early if you want.",
"_____no_output_____"
]
],
[
[
"r_init2 = (4., 0.5, -10., 0.) # FIXME: Try different values\ntimes2 = np.linspace(0., 2, 1000)\nrarr2 = odeint(fgrav, r_init2, times)",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,8))\nplt.scatter(rarr2[:,0], rarr2[:,1], s=5)\nplt.scatter(0., 0., c='y', s=50)\nplt.gca().set_aspect('equal', 'datalim')",
"_____no_output_____"
]
],
[
[
"**Exercise 3.4**: I've defined the distance from Mars to the Sun in kilometers as `mars_distance`. Define `r_mars` in our units (the ones where the Earth is at $r = 1$, and change the initial conditions below to add Mars to the plot.",
"_____no_output_____"
]
],
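A possible setup for Exercise 3.4 (a sketch; `mars_distance` is in meters per the SI note in the first cell, and the circular-speed formula from above applies):

```python
import numpy as np
from scipy.constants import au

mars_distance = 227.9e9        # meters, as defined in the first cell
r_mars = mars_distance / au    # ~1.52 in units where Earth is at r = 1
v_mars = np.sqrt(4 * np.pi**2 / r_mars)  # circular-orbit speed, ~5.09
r_init3 = (r_mars, 0., 0., v_mars)
print(r_mars, v_mars)
```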
[
[
"r_init3 = (1, 0., 0., 1.) # FIXME: Set correct x and vy for Mars\nrarr3 = odeint(fgrav, r_init3, times)",
"_____no_output_____"
],
[
"plt.figure(figsize=(8,8))\nplt.scatter(rarr1[:,0], rarr1[:,1], s=5)\nplt.scatter(rarr3[:,0], rarr3[:,1], c='r', s=4)\nplt.scatter(0., 0., c='y', s=50)\nplt.gca().set_aspect('equal', 'datalim')",
"_____no_output_____"
]
]
] |

cell_types:
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |

cell_type_groups:
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
Record 3:

hexsha: 4a0edaa571e5a6c48c9aaba77bd0ded6d67ebed9 | size: 347,084 | ext: ipynb | lang: Jupyter Notebook
repo path (same for stars/issues/forks): Residual+Networks+-+v2.ipynb | repo name: AGrosserHH/Coursera_Convolutional_Networks | head hexsha: 126519a9bceb12114299addd576e1d0179b42f03 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
avg_line_length: 109.248977 | max_line_length: 110,302 | alphanum_fraction: 0.705725

cells:
[
[
[
"# Residual Networks\n\nWelcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible.\n\n**In this assignment, you will:**\n- Implement the basic building blocks of ResNets. \n- Put together these building blocks to implement and train a state-of-the-art neural network for image classification. \n\nThis assignment will be done in Keras. \n\nBefore jumping into the problem, let's run the cell below to load the required packages.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom keras import layers\nfrom keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\nfrom keras.models import Model, load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nimport pydot\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom resnets_utils import *\nfrom keras.initializers import glorot_uniform\nimport scipy.misc\nfrom matplotlib.pyplot import imshow\n%matplotlib inline\n\nimport keras.backend as K\nK.set_image_data_format('channels_last')\nK.set_learning_phase(1)",
"_____no_output_____"
]
],
[
[
"## 1 - The problem of very deep neural networks\n\nLast week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.\n\nThe main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the lower layers) to very complex features (at the deeper layers). However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent unbearably slow. More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and \"explode\" to take very large values). \n\nDuring training, you might therefore see the magnitude (or norm) of the gradient for the earlier layers descrease to zero very rapidly as training proceeds: ",
"_____no_output_____"
],
[
"<img src=\"images/vanishing_grad_kiank.png\" style=\"width:450px;height:220px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Vanishing gradient** <br> The speed of learning decreases very rapidly for the early layers as the network trains </center></caption>\n\nYou are now going to solve this problem by building a Residual Network!",
"_____no_output_____"
],
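A tiny numeric illustration of this effect (a sketch, not part of the assignment): pushing a signal through many layers of small random weights shrinks its norm exponentially, and large weights make it explode instead.

```python
import numpy as np

rng = np.random.RandomState(0)
g = np.ones(64)                      # stand-in for a backpropagated gradient
for depth in range(1, 51):
    W = rng.randn(64, 64) * 0.05     # small random weights
    g = W @ g                        # one matrix multiplication per layer
    if depth % 10 == 0:
        print(depth, np.linalg.norm(g))  # norm collapses toward zero
```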
[
"## 2 - Building a Residual Network\n\nIn ResNets, a \"shortcut\" or a \"skip connection\" allows the gradient to be directly backpropagated to earlier layers: \n\n<img src=\"images/skip_connection_kiank.png\" style=\"width:650px;height:200px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : A ResNet block showing a **skip-connection** <br> </center></caption>\n\nThe image on the left shows the \"main path\" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. \n\nWe also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function--even more than skip connections helping with vanishing gradients--accounts for ResNets' remarkable performance.)\n\nTwo main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them. ",
"_____no_output_____"
],
[
"### 2.1 - The identity block\n\nThe identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps:\n\n<img src=\"images/idblock2_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Identity block.** Skip connection \"skips over\" 2 layers. </center></caption>\n\nThe upper path is the \"shortcut path.\" The lower path is the \"main path.\" In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! \n\nIn this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection \"skips over\" 3 hidden layers rather than 2 layers. It looks like this: \n\n<img src=\"images/idblock3_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Identity block.** Skip connection \"skips over\" 3 layers.</center></caption>\n\nHere're the individual steps.\n\nFirst component of main path: \n- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. \n- The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nSecond component of main path:\n- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is \"same\" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. \n- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nThird component of main path:\n- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. \n- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. \n\nFinal step: \n- The shortcut and the input are added together.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\n**Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read over this carefully to make sure you understand what it is doing. You should implement the rest. \n- To implement the Conv2D step: [See reference](https://keras.io/layers/convolutional/#conv2d)\n- To implement BatchNorm: [See reference](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the channels axis))\n- For the activation, use: `Activation('relu')(X)`\n- To add the value passed forward by the shortcut: [See reference](https://keras.io/layers/merge/#add)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: identity_block\n\ndef identity_block(X, f, filters, stage, block):\n \"\"\"\n Implementation of the identity block as defined in Figure 3\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n \n Returns:\n X -- output of the identity block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value. You'll need this later to add back to the main path. \n X_shortcut = X\n \n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n ### START CODE HERE ### \n # Second component of main path (≈3 lines)\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path (≈2 lines)\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n ### END CODE HERE ###\n \n return X",
"_____no_output_____"
],
[
"tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(1)\n A_prev = tf.placeholder(\"float\", [3, 4, 4, 6])\n X = np.random.randn(3, 4, 4, 6)\n A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')\n test.run(tf.global_variables_initializer())\n out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})\n print(\"out = \" + str(out[0][1][1][0]))",
"out = [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **out**\n </td>\n <td>\n [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"## 2.2 - The convolutional block\n\nYou've implemented the ResNet identity block. Next, the ResNet \"convolutional block\" is the other type of block. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path: \n\n<img src=\"images/convblock_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Convolutional block** </center></caption>\n\nThe CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. \n\nThe details of the convolutional block are as follows. \n\nFirst component of main path:\n- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is \"valid\" and its name should be `conv_name_base + '2a'`. \n- The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nSecond component of main path:\n- The second CONV2D has $F_2$ filters of (f,f) and a stride of (1,1). Its padding is \"same\" and it's name should be `conv_name_base + '2b'`.\n- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nThird component of main path:\n- The third CONV2D has $F_3$ filters of (1,1) and a stride of (1,1). Its padding is \"valid\" and it's name should be `conv_name_base + '2c'`.\n- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. \n\nShortcut path:\n- The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is \"valid\" and its name should be `conv_name_base + '1'`.\n- The BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '1'`. \n\nFinal step: \n- The shortcut and the main path values are added together.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n \n**Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader.\n- [Conv Hint](https://keras.io/layers/convolutional/#conv2d)\n- [BatchNorm Hint](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))\n- For the activation, use: `Activation('relu')(X)`\n- [Addition Hint](https://keras.io/layers/merge/#add)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: convolutional_block\n\ndef convolutional_block(X, f, filters, stage, block, s = 2):\n \"\"\"\n Implementation of the convolutional block as defined in Figure 4\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n s -- Integer, specifying the stride to be used\n \n Returns:\n X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value\n X_shortcut = X\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', padding = 'valid', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n ### START CODE HERE ###\n\n # Second component of main path (≈3 lines)\n X = Conv2D(F2, (f, f), strides = (1,1), name = conv_name_base + '2b', padding = 'same', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n \n # Third component of main path (≈2 lines)\n X = Conv2D(F3, (1, 1), strides = (1,1), name = conv_name_base + '2c', padding = 'valid', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n ##### SHORTCUT PATH #### (≈2 lines)\n X_shortcut = Conv2D(F3, (1, 1), strides = (s,s), name = conv_name_base + '2d', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '2d')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n ### END CODE HERE ###\n \n return X",
"_____no_output_____"
],
[
"tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(1)\n A_prev = tf.placeholder(\"float\", [3, 4, 4, 6])\n X = np.random.randn(3, 4, 4, 6)\n A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')\n test.run(tf.global_variables_initializer())\n out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})\n print(\"out = \" + str(out[0][1][1][0]))",
"out = [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **out**\n </td>\n <td>\n [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"## 3 - Building your first ResNet model (50 layers)\n\nYou now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. \"ID BLOCK\" in the diagram stands for \"Identity block,\" and \"ID BLOCK x3\" means you should stack 3 identity blocks together.\n\n<img src=\"images/resnet_kiank.png\" style=\"width:850px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 5** </u><font color='purple'> : **ResNet-50 model** </center></caption>\n\nThe details of this ResNet-50 model are:\n- Zero-padding pads the input with a pad of (3,3)\n- Stage 1:\n - The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2). Its name is \"conv1\".\n - BatchNorm is applied to the channels axis of the input.\n - MaxPooling uses a (3,3) window and a (2,2) stride.\n- Stage 2:\n - The convolutional block uses three set of filters of size [64,64,256], \"f\" is 3, \"s\" is 1 and the block is \"a\".\n - The 2 identity blocks use three set of filters of size [64,64,256], \"f\" is 3 and the blocks are \"b\" and \"c\".\n- Stage 3:\n - The convolutional block uses three set of filters of size [128,128,512], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 3 identity blocks use three set of filters of size [128,128,512], \"f\" is 3 and the blocks are \"b\", \"c\" and \"d\".\n- Stage 4:\n - The convolutional block uses three set of filters of size [256, 256, 1024], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 5 identity blocks use three set of filters of size [256, 256, 1024], \"f\" is 3 and the blocks are \"b\", \"c\", \"d\", \"e\" and \"f\".\n- Stage 5:\n - The convolutional block uses three set of filters of size [512, 512, 2048], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 2 identity blocks use three set of filters of size [512, 512, 2048], \"f\" is 3 and the blocks are \"b\" and \"c\".\n- The 2D Average Pooling uses a window of shape (2,2) and its name is \"avg_pool\".\n- The flatten doesn't have any hyperparameters or name.\n- The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation. Its name should be `'fc' + str(classes)`.\n\n**Exercise**: Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2.) Make sure you follow the naming convention in the text above. \n\nYou'll need to use this function: \n- Average pooling [see reference](https://keras.io/layers/pooling/#averagepooling2d)\n\nHere're some other functions we used in the code below:\n- Conv2D: [See reference](https://keras.io/layers/convolutional/#conv2d)\n- BatchNorm: [See reference](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))\n- Zero padding: [See reference](https://keras.io/layers/convolutional/#zeropadding2d)\n- Max pooling: [See reference](https://keras.io/layers/pooling/#maxpooling2d)\n- Fully conected layer: [See reference](https://keras.io/layers/core/#dense)\n- Addition: [See reference](https://keras.io/layers/merge/#add)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: ResNet50\n\ndef ResNet50(input_shape = (64, 64, 3), classes = 6):\n \"\"\"\n Implementation of the popular ResNet50 the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n \n # Define the input as a tensor with shape input_shape\n X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')\n\n ### START CODE HERE ###\n\n # Stage 3 (≈4 lines)\n X = convolutional_block(X, f = 3, filters = [128,128,512], stage = 3, block='a', s = 2)\n X = identity_block(X, 3, [128,128,512], stage=3, block='b')\n X = identity_block(X, 3, [128,128,512], stage=3, block='c')\n X = identity_block(X, 3, [128,128,512], stage=3, block='d')\n\n # Stage 4 (≈6 lines)\n X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')\n\n # Stage 5 (≈3 lines)\n X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2)\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')\n\n # AVGPOOL (≈1 line). Use \"X = AveragePooling2D(...)(X)\"\n X = AveragePooling2D(pool_size=(2, 2))(X)\n \n ### END CODE HERE ###\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet50')\n\n return model",
"_____no_output_____"
]
],
[
[
"Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below.",
"_____no_output_____"
]
],
[
[
"model = ResNet50(input_shape = (64, 64, 3), classes = 6)",
"_____no_output_____"
]
],
[
[
"As seen in the Keras Tutorial Notebook, prior training a model, you need to configure the learning process by compiling the model.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"The model is now ready to be trained. The only thing you need is a dataset.",
"_____no_output_____"
],
[
"Let's load the SIGNS Dataset.\n\n<img src=\"images/signs_data_kiank.png\" style=\"width:450px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 6** </u><font color='purple'> : **SIGNS dataset** </center></caption>\n",
"_____no_output_____"
]
],
[
[
"X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()\n\n# Normalize image vectors\nX_train = X_train_orig/255.\nX_test = X_test_orig/255.\n\n# Convert training and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 6).T\nY_test = convert_to_one_hot(Y_test_orig, 6).T\n\nprint (\"number of training examples = \" + str(X_train.shape[0]))\nprint (\"number of test examples = \" + str(X_test.shape[0]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))",
"number of training examples = 1080\nnumber of test examples = 120\nX_train shape: (1080, 64, 64, 3)\nY_train shape: (1080, 6)\nX_test shape: (120, 64, 64, 3)\nY_test shape: (120, 6)\n"
]
],
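`convert_to_one_hot` comes from `resnets_utils`; a typical implementation (a sketch, assuming integer labels in 0..C-1) is an identity-matrix lookup:

```python
import numpy as np

def convert_to_one_hot(Y, C):
    # Row i of np.eye(C) is the one-hot vector for class i.
    return np.eye(C)[Y.reshape(-1)].T

Y = np.array([[0, 2, 1]])
print(convert_to_one_hot(Y, 3).T)  # shape (3, 3): one one-hot row per example
```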
[
[
"Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch. ",
"_____no_output_____"
]
],
[
[
"model.fit(X_train, Y_train, epochs = 2, batch_size = 32)",
"Epoch 1/2\n1080/1080 [==============================] - 289s - loss: 2.9504 - acc: 0.2657 \nEpoch 2/2\n1080/1080 [==============================] - 261s - loss: 2.3924 - acc: 0.3435 \n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n ** Epoch 1/2**\n </td>\n <td>\n loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours.\n </td>\n </tr>\n <tr>\n <td>\n ** Epoch 2/2**\n </td>\n <td>\n loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing.\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"Let's see how this model (trained on only two epochs) performs on the test set.",
"_____no_output_____"
]
],
[
[
"preds = model.evaluate(X_test, Y_test)\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))",
"120/120 [==============================] - 9s \nLoss = 2.22657113075\nTest Accuracy = 0.166666666667\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Test Accuracy**\n </td>\n <td>\n between 0.16 and 0.25\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"For the purpose of this assignment, we've asked you to train the model only for two epochs. You can see that it achieves poor performances. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well.",
"_____no_output_____"
],
[
"After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get a lot better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU. \n\nUsing a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take ≈1min to load the model.",
"_____no_output_____"
]
],
[
[
"model = load_model('ResNet50.h5') ",
"_____no_output_____"
],
[
"preds = model.evaluate(X_test, Y_test)\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))",
"120/120 [==============================] - 10s \nLoss = 0.530178320408\nTest Accuracy = 0.866666662693\n"
]
],
[
[
"ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy.\n\nCongratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system! ",
"_____no_output_____"
],
[
"## 4 - Test on your own image (Optional/Ungraded)",
"_____no_output_____"
],
[
"If you wish, you can also take a picture of your own hand and see the output of the model. To do this:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the following code\n 4. Run the code and check if the algorithm is right! ",
"_____no_output_____"
]
],
[
[
"img_path = 'images/my_image.jpg'\nimg = image.load_img(img_path, target_size=(64, 64))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\nprint('Input image shape:', x.shape)\nmy_image = scipy.misc.imread(img_path)\nimshow(my_image)\nprint(\"class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = \")\nprint(model.predict(x))",
"Input image shape: (1, 64, 64, 3)\nclass prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = \n[[ 1. 0. 0. 0. 0. 0.]]\n"
]
],
[
[
"You can also print a summary of your model by running the following code.",
"_____no_output_____"
]
],
[
[
"model.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_1 (InputLayer) (None, 64, 64, 3) 0 \n____________________________________________________________________________________________________\nzero_padding2d_1 (ZeroPadding2D) (None, 70, 70, 3) 0 input_1[0][0] \n____________________________________________________________________________________________________\nconv1 (Conv2D) (None, 32, 32, 64) 9472 zero_padding2d_1[0][0] \n____________________________________________________________________________________________________\nbn_conv1 (BatchNormalization) (None, 32, 32, 64) 256 conv1[0][0] \n____________________________________________________________________________________________________\nactivation_4 (Activation) (None, 32, 32, 64) 0 bn_conv1[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 15, 15, 64) 0 activation_4[0][0] \n____________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 15, 15, 64) 4160 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_5 (Activation) (None, 15, 15, 64) 0 bn2a_branch2a[0][0] \n____________________________________________________________________________________________________\nres2a_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_5[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_6 (Activation) (None, 15, 15, 64) 0 bn2a_branch2b[0][0] \n____________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_6[0][0] \n____________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 15, 15, 256) 16640 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalization (None, 15, 15, 256) 1024 res2a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_2 (Add) (None, 15, 15, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_7 (Activation) (None, 15, 15, 256) 0 add_2[0][0] \n____________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_7[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2a 
(BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_8 (Activation) (None, 15, 15, 64) 0 bn2b_branch2a[0][0] \n____________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_8[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_9 (Activation) (None, 15, 15, 64) 0 bn2b_branch2b[0][0] \n____________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_9[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_3 (Add) (None, 15, 15, 256) 0 bn2b_branch2c[0][0] \n activation_7[0][0] \n____________________________________________________________________________________________________\nactivation_10 (Activation) (None, 15, 15, 256) 0 add_3[0][0] \n____________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_10[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_11 (Activation) (None, 15, 15, 64) 0 bn2c_branch2a[0][0] \n____________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_11[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_12 (Activation) (None, 15, 15, 64) 0 bn2c_branch2b[0][0] \n____________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_12[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_4 (Add) (None, 15, 15, 256) 0 bn2c_branch2c[0][0] \n activation_10[0][0] \n____________________________________________________________________________________________________\nactivation_13 (Activation) (None, 15, 15, 256) 0 add_4[0][0] \n____________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 8, 8, 128) 32896 activation_13[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 
res3a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_14 (Activation) (None, 8, 8, 128) 0 bn3a_branch2a[0][0] \n____________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_14[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_15 (Activation) (None, 8, 8, 128) 0 bn3a_branch2b[0][0] \n____________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_15[0][0] \n____________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 8, 8, 512) 131584 activation_13[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalization (None, 8, 8, 512) 2048 res3a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_5 (Add) (None, 8, 8, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_16 (Activation) (None, 8, 8, 512) 0 add_5[0][0] \n____________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_16[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_17 (Activation) (None, 8, 8, 128) 0 bn3b_branch2a[0][0] \n____________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_17[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_18 (Activation) (None, 8, 8, 128) 0 bn3b_branch2b[0][0] \n____________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_18[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_6 (Add) (None, 8, 8, 512) 0 bn3b_branch2c[0][0] \n activation_16[0][0] \n____________________________________________________________________________________________________\nactivation_19 (Activation) (None, 8, 8, 512) 0 add_6[0][0] 
\n____________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_19[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_20 (Activation) (None, 8, 8, 128) 0 bn3c_branch2a[0][0] \n____________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_20[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_21 (Activation) (None, 8, 8, 128) 0 bn3c_branch2b[0][0] \n____________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_21[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_7 (Add) (None, 8, 8, 512) 0 bn3c_branch2c[0][0] \n activation_19[0][0] \n____________________________________________________________________________________________________\nactivation_22 (Activation) (None, 8, 8, 512) 0 add_7[0][0] \n____________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_22[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_23 (Activation) (None, 8, 8, 128) 0 bn3d_branch2a[0][0] \n____________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_23[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_24 (Activation) (None, 8, 8, 128) 0 bn3d_branch2b[0][0] \n____________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_24[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3d_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_8 (Add) (None, 8, 8, 512) 0 bn3d_branch2c[0][0] \n activation_22[0][0] \n____________________________________________________________________________________________________\nactivation_25 (Activation) (None, 8, 8, 512) 0 add_8[0][0] 
\n____________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 4, 4, 256) 131328 activation_25[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_26 (Activation) (None, 4, 4, 256) 0 bn4a_branch2a[0][0] \n____________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_26[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_27 (Activation) (None, 4, 4, 256) 0 bn4a_branch2b[0][0] \n____________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_27[0][0] \n____________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 4, 4, 1024) 525312 activation_25[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalization (None, 4, 4, 1024) 4096 res4a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_9 (Add) (None, 4, 4, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_28 (Activation) (None, 4, 4, 1024) 0 add_9[0][0] \n____________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_28[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_29 (Activation) (None, 4, 4, 256) 0 bn4b_branch2a[0][0] \n____________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_29[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_30 (Activation) (None, 4, 4, 256) 0 bn4b_branch2b[0][0] \n____________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_30[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4b_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_10 (Add) (None, 4, 4, 1024) 0 bn4b_branch2c[0][0] \n activation_28[0][0] \n____________________________________________________________________________________________________\nactivation_31 (Activation) (None, 4, 4, 1024) 0 add_10[0][0] \n____________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_31[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_32 (Activation) (None, 4, 4, 256) 0 bn4c_branch2a[0][0] \n____________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_32[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_33 (Activation) (None, 4, 4, 256) 0 bn4c_branch2b[0][0] \n____________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_33[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_11 (Add) (None, 4, 4, 1024) 0 bn4c_branch2c[0][0] \n activation_31[0][0] \n____________________________________________________________________________________________________\nactivation_34 (Activation) (None, 4, 4, 1024) 0 add_11[0][0] \n____________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_34[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_35 (Activation) (None, 4, 4, 256) 0 bn4d_branch2a[0][0] \n____________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_35[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_36 (Activation) (None, 4, 4, 256) 0 bn4d_branch2b[0][0] \n____________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_36[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4d_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_12 (Add) (None, 4, 4, 1024) 0 bn4d_branch2c[0][0] \n activation_34[0][0] \n____________________________________________________________________________________________________\nactivation_37 (Activation) (None, 4, 4, 1024) 0 add_12[0][0] \n____________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_37[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_38 (Activation) (None, 4, 4, 256) 0 bn4e_branch2a[0][0] \n____________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_38[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_39 (Activation) (None, 4, 4, 256) 0 bn4e_branch2b[0][0] \n____________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_39[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4e_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_13 (Add) (None, 4, 4, 1024) 0 bn4e_branch2c[0][0] \n activation_37[0][0] \n____________________________________________________________________________________________________\nactivation_40 (Activation) (None, 4, 4, 1024) 0 add_13[0][0] \n____________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_40[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_41 (Activation) (None, 4, 4, 256) 0 bn4f_branch2a[0][0] \n____________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_41[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_42 (Activation) (None, 4, 4, 256) 0 bn4f_branch2b[0][0] \n____________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_42[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4f_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_14 (Add) (None, 4, 4, 1024) 0 bn4f_branch2c[0][0] \n activation_40[0][0] \n____________________________________________________________________________________________________\nactivation_43 (Activation) (None, 4, 4, 1024) 0 add_14[0][0] \n____________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 2, 2, 512) 524800 activation_43[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_44 (Activation) (None, 2, 2, 512) 0 bn5a_branch2a[0][0] \n____________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_44[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_45 (Activation) (None, 2, 2, 512) 0 bn5a_branch2b[0][0] \n____________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_45[0][0] \n____________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 2, 2, 2048) 2099200 activation_43[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalization (None, 2, 2, 2048) 8192 res5a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_15 (Add) (None, 2, 2, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_46 (Activation) (None, 2, 2, 2048) 0 add_15[0][0] \n____________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_46[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_47 (Activation) (None, 2, 2, 512) 0 bn5b_branch2a[0][0] \n____________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_47[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_48 (Activation) (None, 2, 2, 512) 0 bn5b_branch2b[0][0] 
\n____________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_48[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_16 (Add) (None, 2, 2, 2048) 0 bn5b_branch2c[0][0] \n activation_46[0][0] \n____________________________________________________________________________________________________\nactivation_49 (Activation) (None, 2, 2, 2048) 0 add_16[0][0] \n____________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_49[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_50 (Activation) (None, 2, 2, 512) 0 bn5c_branch2a[0][0] \n____________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_50[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_51 (Activation) (None, 2, 2, 512) 0 bn5c_branch2b[0][0] \n____________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_51[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_17 (Add) (None, 2, 2, 2048) 0 bn5c_branch2c[0][0] \n activation_49[0][0] \n____________________________________________________________________________________________________\nactivation_52 (Activation) (None, 2, 2, 2048) 0 add_17[0][0] \n____________________________________________________________________________________________________\navg_pool (AveragePooling2D) (None, 1, 1, 2048) 0 activation_52[0][0] \n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 2048) 0 avg_pool[0][0] \n____________________________________________________________________________________________________\nfc6 (Dense) (None, 6) 12294 flatten_1[0][0] \n====================================================================================================\nTotal params: 23,600,006\nTrainable params: 23,546,886\nNon-trainable params: 53,120\n____________________________________________________________________________________________________\n"
]
],
[
[
"Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to \"File -> Open...-> model.png\".",
"_____no_output_____"
]
],
[
[
"plot_model(model, to_file='model.png')\nSVG(model_to_dot(model).create(prog='dot', format='svg'))",
"_____no_output_____"
]
],
[
[
"<font color='blue'>\n**What you should remember:**\n- Very deep \"plain\" networks don't work in practice because they are hard to train due to vanishing gradients. \n- The skip-connections help to address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function. \n- There are two main type of blocks: The identity block and the convolutional block. \n- Very deep Residual Networks are built by stacking these blocks together.",
"_____no_output_____"
],
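        [
            "As a concrete illustration of the skip-connection idea above, here is a minimal sketch of an identity block in the Keras functional style. This is a sketch only: the layer names and filter sizes are illustrative assumptions (and it assumes the block's input already has `filters` channels so the shapes match), not the graded implementation from this assignment.\n\n```python\nfrom keras.layers import Conv2D, BatchNormalization, Activation, Add\n\ndef identity_block_sketch(X, filters):\n    X_shortcut = X                                  # save the input for the skip-connection\n    X = Conv2D(filters, (3, 3), padding='same')(X)  # main path\n    X = BatchNormalization(axis=3)(X)\n    X = Activation('relu')(X)\n    X = Conv2D(filters, (3, 3), padding='same')(X)\n    X = BatchNormalization(axis=3)(X)\n    X = Add()([X, X_shortcut])                      # skip-connection: add the input back\n    return Activation('relu')(X)\n```\n\nBecause the shortcut path is the identity, the block can easily learn to pass its input through unchanged, which is one reason very deep stacks of such blocks remain trainable.",
            "_____no_output_____"
        ],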
[
"### References \n\nThis notebook presents the ResNet algorithm due to He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the github repository of Francois Chollet: \n\n- Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385)\n- Francois Chollet's github repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a0ee0b4c80ab59e5d753c22df7f25bf275f382c
| 11,426 |
ipynb
|
Jupyter Notebook
|
notebooks/road_following/live_demo_trt.ipynb
|
lohitslohit/Jetson
|
6b4dca2efc8df070d00b83d087ad1d2050439b9e
|
[
"MIT"
] | null | null | null |
notebooks/road_following/live_demo_trt.ipynb
|
lohitslohit/Jetson
|
6b4dca2efc8df070d00b83d087ad1d2050439b9e
|
[
"MIT"
] | null | null | null |
notebooks/road_following/live_demo_trt.ipynb
|
lohitslohit/Jetson
|
6b4dca2efc8df070d00b83d087ad1d2050439b9e
|
[
"MIT"
] | null | null | null | 31.916201 | 289 | 0.587082 |
[
[
[
"# Road Following - Live demo (TensorRT)",
"_____no_output_____"
],
[
"In this notebook, we will use model we trained to move JetBot smoothly on track. ",
"_____no_output_____"
],
[
"# TensorRT",
"_____no_output_____"
]
],
[
[
"import torch\ndevice = torch.device('cuda')",
"_____no_output_____"
]
],
[
[
"Load the TRT optimized model by executing the cell below",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torch2trt import TRTModule\n\nmodel_trt = TRTModule()\nmodel_trt.load_state_dict(torch.load('best_steering_model_xy_trt.pth'))",
"_____no_output_____"
]
],
[
[
"### Creating the Pre-Processing Function",
"_____no_output_____"
],
[
"We have now loaded our model, but there's a slight issue. The format that we trained our model doesn't exactly match the format of the camera. To do that, we need to do some preprocessing. This involves the following steps:\n\n1. Convert from HWC layout to CHW layout\n2. Normalize using same parameters as we did during training (our camera provides values in [0, 255] range and training loaded images in [0, 1] range so we need to scale by 255.0\n3. Transfer the data from CPU memory to GPU memory\n4. Add a batch dimension",
"_____no_output_____"
]
],
[
[
"import torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport cv2\nimport PIL.Image\nimport numpy as np\n\nmean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()\nstd = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()\n\ndef preprocess(image):\n image = PIL.Image.fromarray(image)\n image = transforms.functional.to_tensor(image).to(device).half()\n image.sub_(mean[:, None, None]).div_(std[:, None, None])\n return image[None, ...]",
"_____no_output_____"
]
],
[
[
"Awesome! We've now defined our pre-processing function which can convert images from the camera format to the neural network input format.\n\nNow, let's start and display our camera. You should be pretty familiar with this by now. ",
"_____no_output_____"
]
],
[
[
"from IPython.display import display\nimport ipywidgets\nimport traitlets\nfrom jetbot import Camera, bgr8_to_jpeg\n\ncamera = Camera()\n",
"_____no_output_____"
],
[
"image_widget = ipywidgets.Image()\n\ntraitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)\n\ndisplay(image_widget)",
"_____no_output_____"
]
],
[
[
"We'll also create our robot instance which we'll need to drive the motors.",
"_____no_output_____"
]
],
[
[
"from jetbot import Robot\n\nrobot = Robot()",
"_____no_output_____"
]
],
[
[
"Now, we will define sliders to control JetBot\n> Note: We have initialize the slider values for best known configurations, however these might not work for your dataset, therefore please increase or decrease the sliders according to your setup and environment\n\n1. Speed Control (speed_gain_slider): To start your JetBot increase ``speed_gain_slider`` \n2. Steering Gain Control (steering_gain_slider): If you see JetBot is wobbling, you need to reduce ``steering_gain_slider`` till it is smooth\n3. Steering Bias control (steering_bias_slider): If you see JetBot is biased towards extreme right or extreme left side of the track, you should control this slider till JetBot start following line or track in the center. This accounts for motor biases as well as camera offsets\n\n> Note: You should play around above mentioned sliders with lower speed to get smooth JetBot road following behavior.",
"_____no_output_____"
]
],
[
[
"speed_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, description='speed gain')\nsteering_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.2, description='steering gain')\nsteering_dgain_slider = ipywidgets.FloatSlider(min=0.0, max=0.5, step=0.001, value=0.0, description='steering kd')\nsteering_bias_slider = ipywidgets.FloatSlider(min=-0.3, max=0.3, step=0.01, value=0.0, description='steering bias')\n\ndisplay(speed_gain_slider, steering_gain_slider, steering_dgain_slider, steering_bias_slider)",
"_____no_output_____"
]
],
[
[
"Next, let's display some sliders that will let us see what JetBot is thinking. The x and y sliders will display the predicted x, y values.\n\nThe steering slider will display our estimated steering value. Please remember, this value isn't the actual angle of the target, but simply a value that is\nnearly proportional. When the actual angle is ``0``, this will be zero, and it will increase / decrease with the actual angle. ",
"_____no_output_____"
]
],
[
[
"x_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='x')\ny_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='vertical', description='y')\nsteering_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='steering')\nspeed_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='vertical', description='speed')\n\ndisplay(ipywidgets.HBox([y_slider, speed_slider]))\ndisplay(x_slider, steering_slider)",
"_____no_output_____"
]
],
[
[
"Next, we'll create a function that will get called whenever the camera's value changes. This function will do the following steps\n\n1. Pre-process the camera image\n2. Execute the neural network\n3. Compute the approximate steering value\n4. Control the motors using proportional / derivative control (PD)",
"_____no_output_____"
]
],
[
[
"angle = 0.0\nangle_last = 0.0\n\ndef execute(change):\n global angle, angle_last\n image = change['new']\n xy = model_trt(preprocess(image)).detach().float().cpu().numpy().flatten()\n x = xy[0]\n y = (0.5 - xy[1]) / 2.0\n \n x_slider.value = x\n y_slider.value = y\n \n speed_slider.value = speed_gain_slider.value\n \n angle = np.arctan2(x, y)\n pid = angle * steering_gain_slider.value + (angle - angle_last) * steering_dgain_slider.value\n angle_last = angle\n \n steering_slider.value = pid + steering_bias_slider.value\n \n robot.left_motor.value = max(min(speed_slider.value + steering_slider.value, 1.0), 0.0)\n robot.right_motor.value = max(min(speed_slider.value - steering_slider.value, 1.0), 0.0)\n \nexecute({'new': camera.value})",
"_____no_output_____"
]
],
[
[
"Cool! We've created our neural network execution function, but now we need to attach it to the camera for processing.\n\nWe accomplish that with the observe function.",
"_____no_output_____"
],
[
WARNING: This code will move the robot!!">
            ">WARNING: This code will move the robot!! Please make sure your robot has clearance and is on the Lego track (or the track you collected data on). The road follower should work, but the neural network is only as good as the data it's trained on!",
"_____no_output_____"
]
],
[
[
"camera.observe(execute, names='value')",
"_____no_output_____"
]
],
[
[
"Awesome! If your robot is plugged in it should now be generating new commands with each new camera frame. \n\nYou can now place JetBot on Lego or Track you have collected data on and see whether it can follow track.\n\nIf you want to stop this behavior, you can unattach this callback by executing the code below.",
"_____no_output_____"
]
],
[
[
"import time\n\ncamera.unobserve(execute, names='value')\n\ntime.sleep(0.1) # add a small sleep to make sure frames have finished processing\n\nrobot.stop()",
"_____no_output_____"
]
],
[
[
"Again, let's close the camera conneciton properly so that we can use the camera in other notebooks.",
"_____no_output_____"
]
],
[
[
"camera.stop()",
"_____no_output_____"
]
],
[
[
"### Conclusion\nThat's it for this live demo! Hopefully you had some fun seeing your JetBot moving smoothly on track following the road!!!\n\nIf your JetBot wasn't following road very well, try to spot where it fails. The beauty is that we can collect more data for these failure scenarios and the JetBot should get even better :)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a0ee91abaf2b5019382d25abd5e7c01fb9c9be5
| 591,680 |
ipynb
|
Jupyter Notebook
|
wechat_tool_py3_local/lesson_5_py3_local.ipynb
|
telescopeuser/workshop_blog
|
3680173f5f22c564700f24a8def2a231cf1a0ccc
|
[
"MIT"
] | 226 |
2017-04-23T07:37:25.000Z
|
2022-01-06T05:22:02.000Z
|
wechat_tool_py3_local/lesson_5_py3_local.ipynb
|
telescopeuser/workshop_blog
|
3680173f5f22c564700f24a8def2a231cf1a0ccc
|
[
"MIT"
] | 4 |
2017-07-27T13:47:02.000Z
|
2019-04-11T03:31:07.000Z
|
wechat_tool_py3_local/lesson_5_py3_local.ipynb
|
telescopeuser/workshop_blog
|
3680173f5f22c564700f24a8def2a231cf1a0ccc
|
[
"MIT"
] | 71 |
2017-04-22T12:19:33.000Z
|
2021-03-07T10:19:56.000Z
| 306.252588 | 467,203 | 0.894 |
[
[
[
"<img src='https://www.iss.nus.edu.sg/Sitefinity/WebsiteTemplates/ISS/App_Themes/ISS/Images/branding-iss.png' width=15% style=\"float: right;\">\n<img src='https://www.iss.nus.edu.sg/Sitefinity/WebsiteTemplates/ISS/App_Themes/ISS/Images/branding-nus.png' width=15% style=\"float: right;\">\n",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
]
],
[
[
"import IPython.display\nIPython.display.YouTubeVideo('leVZjVahdKs')",
"_____no_output_____"
]
],
[
[
"# 如何使用和开发微信聊天机器人的系列教程\n# A workshop to develop & use an intelligent and interactive chat-bot in WeChat",
"_____no_output_____"
],
[
"### WeChat is a popular social media app, which has more than 800 million monthly active users.\n\n<img src='https://www.iss.nus.edu.sg/images/default-source/About-Us/7.6.1-teaching-staff/sam-website.tmb-.png' width=8% style=\"float: right;\">\n<img src='reference/WeChat_SamGu_QR.png' width=10% style=\"float: right;\">\n\n\nby: GU Zhan (Sam)\n\n\nOctober 2018 : Update to support Python 3 in local machine, e.g. iss-vm.\n\n\nApril 2017 ======= Scan the QR code to become trainer's friend in WeChat =====>>",
"_____no_output_____"
],
[
"### 第五课:视频识别和处理\n### Lesson 5: Video Recognition & Processing\n\n* 识别视频消息中的物体名字 (Label Detection: Detect entities within the video, such as \"dog\", \"flower\" or \"car\")\n* 识别视频的场景片段 (Shot Change Detection: Detect scene changes within the video)\n* 识别受限内容 (Explicit Content Detection: Detect adult content within a video)\n* 生成视频字幕 (Video Transcription BETA: Transcribes video content in English)\n",
"_____no_output_____"
],
[
"### Using Google Cloud Platform's Machine Learning APIs",
"_____no_output_____"
],
[
"From the same API console, choose \"Dashboard\" on the left-hand menu and \"Enable API\".\n\nEnable the following APIs for your project (search for them) if they are not already enabled:\n<ol>\n**<li> Google Cloud Video Intelligence API </li>**\n</ol>\n\nFinally, because we are calling the APIs from Python (clients in many other languages are available), let's install the Python package (it's not installed by default on Datalab)",
"_____no_output_____"
]
],
[
[
"# Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); \n# !pip install --upgrade google-api-python-client",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"### 短片预览 / Video viewing",
"_____no_output_____"
]
],
[
[
"# 多媒体文件的二进制base64码转换 (Define media pre-processing functions)\n\n# Import the base64 encoding library.\nimport base64, io, sys, IPython.display\n\n# Python 2\nif sys.version_info[0] < 3:\n import urllib2\n# Python 3\nelse:\n import urllib.request\n\n# Pass the media data to an encoding function.\ndef encode_media(media_file):\n with io.open(media_file, \"rb\") as media_file:\n media_content = media_file.read()\n# Python 2\n if sys.version_info[0] < 3:\n return base64.b64encode(media_content).decode('ascii')\n# Python 3\n else:\n return base64.b64encode(media_content).decode('utf-8')",
"_____no_output_____"
],
[
"video_file = 'reference/video_IPA.mp4'\n# video_file = 'reference/SampleVideo_360x240_1mb.mp4'\n# video_file = 'reference/SampleVideo_360x240_2mb.mp4'",
"_____no_output_____"
],
[
"IPython.display.HTML(data=\n '''<video alt=\"test\" controls><source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" /></video>'''\n .format(encode_media(video_file)))",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## <span style=\"color:blue\">Install the client library</span> for Video Intelligence / Processing",
"_____no_output_____"
]
],
[
[
"!pip install --upgrade google-cloud-videointelligence",
"Requirement already up-to-date: google-cloud-videointelligence in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (1.5.0)\nRequirement already satisfied, skipping upgrade: google-api-core[grpc]<2.0.0dev,>=0.1.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-cloud-videointelligence) (1.5.0)\nRequirement already satisfied, skipping upgrade: protobuf>=3.4.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (3.6.1)\nRequirement already satisfied, skipping upgrade: pytz in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (2018.3)\nRequirement already satisfied, skipping upgrade: google-auth<2.0.0dev,>=0.4.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (1.5.1)\nRequirement already satisfied, skipping upgrade: googleapis-common-protos<2.0dev,>=1.5.3 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (1.5.3)\nRequirement already satisfied, skipping upgrade: setuptools>=34.0.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (38.5.1)\nRequirement already satisfied, skipping upgrade: requests<3.0.0dev,>=2.18.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (2.18.4)\nRequirement already satisfied, skipping upgrade: six>=1.10.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (1.11.0)\nRequirement already satisfied, skipping upgrade: grpcio>=1.8.2; extra == \"grpc\" in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (1.10.0)\nRequirement already satisfied, skipping upgrade: cachetools>=2.0.0 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-auth<2.0.0dev,>=0.4.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (2.1.0)\nRequirement already satisfied, skipping upgrade: rsa>=3.1.4 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-auth<2.0.0dev,>=0.4.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (3.4.2)\nRequirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from google-auth<2.0.0dev,>=0.4.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (0.2.1)\nRequirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (3.0.4)\nRequirement already satisfied, skipping upgrade: idna<2.7,>=2.5 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (2.6)\nRequirement already satisfied, skipping upgrade: 
urllib3<1.23,>=1.21.1 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (1.22)\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from requests<3.0.0dev,>=2.18.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (2018.8.13)\nRequirement already satisfied, skipping upgrade: pyasn1>=0.1.3 in /home/iss-user/anaconda3/envs/iss-env-py3/lib/python3.6/site-packages (from rsa>=3.1.4->google-auth<2.0.0dev,>=0.4.0->google-api-core[grpc]<2.0.0dev,>=0.1.0->google-cloud-videointelligence) (0.4.2)\n"
]
],
[
[
"---",
"_____no_output_____"
]
],
[
[
"# Imports the Google Cloud client library\nfrom google.cloud import videointelligence",
"_____no_output_____"
],
[
"# [Optional] Display location of service account API key if defined in GOOGLE_APPLICATION_CREDENTIALS\n!echo $GOOGLE_APPLICATION_CREDENTIALS",
"\r\n"
],
[
"\n##################################################################\n# (1) Instantiates a client - using GOOGLE_APPLICATION_CREDENTIALS\n# video_client = videointelligence.VideoIntelligenceServiceClient()\n\n# \n# (2) Instantiates a client - using 'service account json' file\nvideo_client = videointelligence.VideoIntelligenceServiceClient.from_service_account_json(\n \"/media/sf_vm_shared_folder/000-cloud-api-key/mtech-ai-7b7e049cf5f6.json\")\n##################################################################\n",
"_____no_output_____"
]
],
[
[
"### * 识别视频消息中的物体名字 (Label Detection: Detect entities within the video, such as \"dog\", \"flower\" or \"car\")\nhttps://cloud.google.com/video-intelligence/docs/analyze-labels\n\n\ndidi_video_label_detection()\n",
"_____no_output_____"
]
],
[
[
"from google.cloud import videointelligence\n\ndef didi_video_label_detection(path):\n \"\"\"Detect labels given a local file path. (Demo)\"\"\"\n \"\"\" Detects labels given a GCS path. (Exercise / Workshop Enhancement)\"\"\"\n\n##################################################################\n# (1) Instantiates a client - using GOOGLE_APPLICATION_CREDENTIALS\n# video_client = videointelligence.VideoIntelligenceServiceClient()\n\n# \n# (2) Instantiates a client - using 'service account json' file\n video_client = videointelligence.VideoIntelligenceServiceClient.from_service_account_json(\n \"/media/sf_vm_shared_folder/000-cloud-api-key/mtech-ai-7b7e049cf5f6.json\")\n##################################################################\n\n features = [videointelligence.enums.Feature.LABEL_DETECTION]\n\n with io.open(path, 'rb') as movie:\n input_content = movie.read()\n\n operation = video_client.annotate_video(\n features=features, input_content=input_content)\n print('\\nProcessing video for label annotations:')\n\n result = operation.result(timeout=90)\n print('\\nFinished processing.')\n\n # Process video/segment level label annotations\n segment_labels = result.annotation_results[0].segment_label_annotations\n for i, segment_label in enumerate(segment_labels):\n print('Video label description: {}'.format(\n segment_label.entity.description))\n for category_entity in segment_label.category_entities:\n print('\\tLabel category description: {}'.format(\n category_entity.description))\n\n for i, segment in enumerate(segment_label.segments):\n start_time = (segment.segment.start_time_offset.seconds +\n segment.segment.start_time_offset.nanos / 1e9)\n end_time = (segment.segment.end_time_offset.seconds +\n segment.segment.end_time_offset.nanos / 1e9)\n positions = '{}s to {}s'.format(start_time, end_time)\n confidence = segment.confidence\n print('\\tSegment {}: {}'.format(i, positions))\n print('\\tConfidence: {}'.format(confidence))\n print('\\n')\n\n # Process shot level label annotations\n shot_labels = result.annotation_results[0].shot_label_annotations\n for i, shot_label in enumerate(shot_labels):\n print('Shot label description: {}'.format(\n shot_label.entity.description))\n for category_entity in shot_label.category_entities:\n print('\\tLabel category description: {}'.format(\n category_entity.description))\n\n for i, shot in enumerate(shot_label.segments):\n start_time = (shot.segment.start_time_offset.seconds +\n shot.segment.start_time_offset.nanos / 1e9)\n end_time = (shot.segment.end_time_offset.seconds +\n shot.segment.end_time_offset.nanos / 1e9)\n positions = '{}s to {}s'.format(start_time, end_time)\n confidence = shot.confidence\n print('\\tSegment {}: {}'.format(i, positions))\n print('\\tConfidence: {}'.format(confidence))\n print('\\n')\n\n # Process frame level label annotations\n frame_labels = result.annotation_results[0].frame_label_annotations\n for i, frame_label in enumerate(frame_labels):\n print('Frame label description: {}'.format(\n frame_label.entity.description))\n for category_entity in frame_label.category_entities:\n print('\\tLabel category description: {}'.format(\n category_entity.description))\n\n # Each frame_label_annotation has many frames,\n # here we print information only about the first frame.\n frame = frame_label.frames[0]\n time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9\n print('\\tFirst frame time offset: {}s'.format(time_offset))\n print('\\tFirst frame confidence: {}'.format(frame.confidence))\n print('\\n')\n 
\n return segment_labels, shot_labels, frame_labels",
"_____no_output_____"
],
[
"# video_file = 'reference/video_IPA.mp4'",
"_____no_output_____"
],
[
"didi_segment_labels, didi_shot_labels, didi_frame_labels = didi_video_label_detection(video_file)",
"\nProcessing video for label annotations:\n\nFinished processing.\nVideo label description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9247158169746399\n\n\nVideo label description: lego\n\tLabel category description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9257180094718933\n\n\nVideo label description: robot\n\tLabel category description: technology\n\tLabel category description: machine\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.32479360699653625\n\n\nShot label description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9247158169746399\n\n\nShot label description: lego\n\tLabel category description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9257180094718933\n\n\nShot label description: robot\n\tLabel category description: technology\n\tLabel category description: machine\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.32479360699653625\n\n\n"
],
[
"didi_segment_labels",
"_____no_output_____"
],
[
"didi_shot_labels",
"_____no_output_____"
],
[
"didi_frame_labels",
"_____no_output_____"
]
],
[
[
"### * 识别视频的场景片段 (Shot Change Detection: Detect scene changes within the video)\nhttps://cloud.google.com/video-intelligence/docs/shot_detection\n\n\ndidi_video_shot_detection()\n",
"_____no_output_____"
]
],
[
[
"from google.cloud import videointelligence\n\ndef didi_video_shot_detection(path):\n \"\"\" Detects camera shot changes given a local file path \"\"\"\n\n##################################################################\n# (1) Instantiates a client - using GOOGLE_APPLICATION_CREDENTIALS\n# video_client = videointelligence.VideoIntelligenceServiceClient()\n\n# \n# (2) Instantiates a client - using 'service account json' file\n video_client = videointelligence.VideoIntelligenceServiceClient.from_service_account_json(\n \"/media/sf_vm_shared_folder/000-cloud-api-key/mtech-ai-7b7e049cf5f6.json\")\n##################################################################\n\n features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION]\n# features = [videointelligence.enums.Feature.LABEL_DETECTION]\n\n with io.open(path, 'rb') as movie:\n input_content = movie.read()\n \n# operation = video_client.annotate_video(path, features=features)\n operation = video_client.annotate_video(features=features, input_content=input_content)\n print('\\nProcessing video for shot change annotations:')\n\n result = operation.result(timeout=180)\n print('\\nFinished processing.')\n\n for i, shot in enumerate(result.annotation_results[0].shot_annotations):\n start_time = (shot.start_time_offset.seconds +\n shot.start_time_offset.nanos / 1e9)\n end_time = (shot.end_time_offset.seconds +\n shot.end_time_offset.nanos / 1e9)\n print('\\tShot {}: {} to {}'.format(i, start_time, end_time))\n \n return result\n",
"_____no_output_____"
],
[
"# video_file = 'reference/video_IPA.mp4'",
"_____no_output_____"
],
[
"didi_result = didi_video_shot_detection(video_file)",
"\nProcessing video for shot change annotations:\n\nFinished processing.\n\tShot 0: 0.0 to 5.5\n"
],
[
"didi_result",
"_____no_output_____"
]
],
[
[
"### * 识别受限内容 (Explicit Content Detection: Detect adult content within a video)\n\n\n\ndidi_video_safesearch_detection()\n",
"_____no_output_____"
]
],
[
[
"from google.cloud import videointelligence\n\ndef didi_video_safesearch_detection(path):\n \"\"\" Detects explicit content given a local file path. \"\"\"\n\n##################################################################\n# (1) Instantiates a client - using GOOGLE_APPLICATION_CREDENTIALS\n# video_client = videointelligence.VideoIntelligenceServiceClient()\n\n# \n# (2) Instantiates a client - using 'service account json' file\n video_client = videointelligence.VideoIntelligenceServiceClient.from_service_account_json(\n \"/media/sf_vm_shared_folder/000-cloud-api-key/mtech-ai-7b7e049cf5f6.json\")\n##################################################################\n\n features = [videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION]\n\n with io.open(path, 'rb') as movie:\n input_content = movie.read()\n \n# operation = video_client.annotate_video(path, features=features)\n operation = video_client.annotate_video(features=features, input_content=input_content)\n print('\\nProcessing video for explicit content annotations:')\n\n result = operation.result(timeout=90)\n print('\\nFinished processing.')\n\n likely_string = (\"Unknown\", \"Very unlikely\", \"Unlikely\", \"Possible\",\n \"Likely\", \"Very likely\")\n\n # first result is retrieved because a single video was processed\n for frame in result.annotation_results[0].explicit_annotation.frames:\n frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9\n print('Time: {}s'.format(frame_time))\n print('\\tpornography: {}'.format(\n likely_string[frame.pornography_likelihood]))\n \n return result\n",
"_____no_output_____"
],
[
"# video_file = 'reference/video_IPA.mp4'",
"_____no_output_____"
],
[
"didi_result = didi_video_safesearch_detection(video_file)",
"\nProcessing video for explicit content annotations:\n\nFinished processing.\nTime: 0.070218s\n\tpornography: Very unlikely\nTime: 1.262424s\n\tpornography: Very unlikely\nTime: 2.265889s\n\tpornography: Very unlikely\nTime: 3.2359999999999998s\n\tpornography: Very unlikely\nTime: 4.288838s\n\tpornography: Very unlikely\nTime: 5.358866s\n\tpornography: Very unlikely\n"
]
],
[
[
"### <span style=\"color:red\">[ Beta Features ]</span> * 生成视频字幕 (Video Transcription BETA: Transcribes video content in English)\n\nhttps://cloud.google.com/video-intelligence/docs/beta\n\nCloud Video Intelligence API includes the following beta features in version v1p1beta1:\n\nSpeech Transcription - the Video Intelligence API can transcribe speech to text from the audio in supported video files. Learn more.\n",
"_____no_output_____"
]
],
[
[
"# Beta Features: videointelligence_v1p1beta1\nfrom google.cloud import videointelligence_v1p1beta1 as videointelligence\n\ndef didi_video_speech_transcription(path):\n \"\"\"Transcribe speech given a local file path.\"\"\"\n\n##################################################################\n# (1) Instantiates a client - using GOOGLE_APPLICATION_CREDENTIALS\n# video_client = videointelligence.VideoIntelligenceServiceClient()\n\n# \n# (2) Instantiates a client - using 'service account json' file\n video_client = videointelligence.VideoIntelligenceServiceClient.from_service_account_json(\n \"/media/sf_vm_shared_folder/000-cloud-api-key/mtech-ai-7b7e049cf5f6.json\")\n##################################################################\n\n features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]\n\n with io.open(path, 'rb') as movie:\n input_content = movie.read()\n \n config = videointelligence.types.SpeechTranscriptionConfig(\n language_code='en-US',\n enable_automatic_punctuation=True)\n video_context = videointelligence.types.VideoContext(\n speech_transcription_config=config)\n\n# operation = video_client.annotate_video(\n# input_uri, \n# features=features,\n# video_context=video_context)\n operation = video_client.annotate_video(\n features=features,\n input_content=input_content, \n video_context=video_context)\n\n print('\\nProcessing video for speech transcription.')\n\n result = operation.result(timeout=180) \n \n # There is only one annotation_result since only\n # one video is processed.\n annotation_results = result.annotation_results[0]\n speech_transcription = annotation_results.speech_transcriptions[0]\n \n if str(speech_transcription) == '': # result.annotation_results[0].speech_transcriptions[0] == ''\n print('\\nNOT FOUND: video for speech transcription.')\n else:\n alternative = speech_transcription.alternatives[0]\n print('Transcript: {}'.format(alternative.transcript))\n print('Confidence: {}\\n'.format(alternative.confidence))\n\n print('Word level information:')\n for word_info in alternative.words:\n word = word_info.word\n start_time = word_info.start_time\n end_time = word_info.end_time\n print('\\t{}s - {}s: {}'.format(\n start_time.seconds + start_time.nanos * 1e-9,\n end_time.seconds + end_time.nanos * 1e-9,\n word))\n\n return result\n ",
"_____no_output_____"
],
[
"# video_file = 'reference/video_IPA.mp4'",
"_____no_output_____"
],
[
"didi_result = didi_video_speech_transcription(video_file)",
"\nProcessing video for speech transcription.\nTranscript: Hi everyone. It's great to meet you in intelligent process automation course.\nConfidence: 0.8206615447998047\n\nWord level information:\n\t0.0s - 0.30000000000000004s: Hi\n\t0.30000000000000004s - 0.8s: everyone.\n\t0.8s - 1.6s: It's\n\t1.6s - 1.7000000000000002s: great\n\t1.7000000000000002s - 2.1s: to\n\t2.1s - 2.2s: meet\n\t2.2s - 2.5s: you\n\t2.5s - 2.6s: in\n\t2.6s - 3.5s: intelligent\n\t3.5s - 4.1s: process\n\t4.1s - 4.7s: automation\n\t4.7s - 5.4s: course.\n"
],
[
"didi_result",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## <span style=\"color:blue\">Wrap cloud APIs into Functions() for conversational virtual assistant (VA):</span>\nReuse above defined Functions().",
"_____no_output_____"
]
],
[
[
"def didi_video_processing(video_file):\n didi_video_reply = u'[ Video 视频处理结果 ]\\n\\n'\n \n didi_video_reply += u'[ didi_video_label_detection 识别视频消息中的物体名字 ]\\n\\n' \\\n + str(didi_video_label_detection(video_file)) + u'\\n\\n'\n \n didi_video_reply += u'[ didi_video_shot_detection 识别视频的场景片段 ]\\n\\n' \\\n + str(didi_video_shot_detection(video_file)) + u'\\n\\n'\n \n didi_video_reply += u'[ didi_video_safesearch_detection 识别受限内容 ]\\n\\n' \\\n + str(didi_video_safesearch_detection(video_file)) + u'\\n\\n'\n \n didi_video_reply += u'[ didi_video_speech_transcription 生成视频字幕 ]\\n\\n' \\\n + str(didi_video_speech_transcription(video_file)) + u'\\n\\n'\n \n return didi_video_reply",
"_____no_output_____"
],
[
"# [Optional] Agile testing:\n# parm_video_response = didi_video_processing(video_file)\n# print(parm_video_response)",
"\nProcessing video for label annotations:\n\nFinished processing.\nVideo label description: lego\n\tLabel category description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9257180094718933\n\n\nVideo label description: robot\n\tLabel category description: technology\n\tLabel category description: machine\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.32479360699653625\n\n\nVideo label description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9247158169746399\n\n\nShot label description: lego\n\tLabel category description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9257180094718933\n\n\nShot label description: robot\n\tLabel category description: technology\n\tLabel category description: machine\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.32479360699653625\n\n\nShot label description: toy\n\tSegment 0: 0.0s to 5.5s\n\tConfidence: 0.9247158169746399\n\n\n\nProcessing video for shot change annotations:\n\nFinished processing.\n\tShot 0: 0.0 to 5.5\n\nProcessing video for explicit content annotations:\n\nFinished processing.\nTime: 0.070218s\n\tpornography: Very unlikely\nTime: 1.262424s\n\tpornography: Very unlikely\nTime: 2.265889s\n\tpornography: Very unlikely\nTime: 3.2359999999999998s\n\tpornography: Very unlikely\nTime: 4.288838s\n\tpornography: Very unlikely\nTime: 5.358866s\n\tpornography: Very unlikely\n\nProcessing video for speech transcription.\nTranscript: Hi everyone. It's great to meet you in intelligent process automation course.\nConfidence: 0.8206615447998047\n\nWord level information:\n\t0.0s - 0.30000000000000004s: Hi\n\t0.30000000000000004s - 0.8s: everyone.\n\t0.8s - 1.6s: It's\n\t1.6s - 1.7000000000000002s: great\n\t1.7000000000000002s - 2.1s: to\n\t2.1s - 2.2s: meet\n\t2.2s - 2.5s: you\n\t2.5s - 2.6s: in\n\t2.6s - 3.5s: intelligent\n\t3.5s - 4.1s: process\n\t4.1s - 4.7s: automation\n\t4.7s - 5.4s: course.\n"
]
],
[
[
"**Define a global variable for future 'video search' function enhancement**",
"_____no_output_____"
]
],
[
[
"parm_video_response = {} # Define a global variable for future 'video search' function enhancement",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## <span style=\"color:blue\">Start interactive conversational virtual assistant (VA):</span>",
"_____no_output_____"
],
[
"### Import ItChat, etc. 导入需要用到的一些功能程序库:",
"_____no_output_____"
]
],
[
[
"import itchat\nfrom itchat.content import *",
"_____no_output_____"
]
],
[
[
"### Log in using QR code image / 用微信App扫QR码图片来自动登录",
"_____no_output_____"
]
],
[
[
"# itchat.auto_login(hotReload=True) # hotReload=True: 退出程序后暂存登陆状态。即使程序关闭,一定时间内重新开启也可以不用重新扫码。\nitchat.auto_login(enableCmdQR=-2) # enableCmdQR=-2: 命令行显示QR图片",
"Getting uuid of QR code.\nDownloading QR code.\n"
],
[
"# @itchat.msg_register([VIDEO], isGroupChat=True)\[email protected]_register([VIDEO])\ndef download_files(msg):\n msg.download(msg.fileName)\n print('\\nDownloaded video file name is: %s' % msg['FileName'])\n \n ##############################################################################################################\n # call video analysis APIs #\n ##############################################################################################################\n global parm_video_response # save into global variable, which can be accessed by next WeChat keyword search\n \n # python 2 version WeChat Bot\n # parm_video_response = KudosData_VIDEO_DETECTION(encode_media(msg['FileName']))\n \n # python 3 version WeChat Bot\n parm_video_response = didi_video_processing(msg['FileName'])\n\n \n ##############################################################################################################\n # format video API results #\n ##############################################################################################################\n \n # python 2 version WeChat Bot\n # video_analysis_reply = KudosData_video_generate_reply(parm_video_response)\n\n # python 3 version WeChat Bot\n video_analysis_reply = parm_video_response # Exercise / Workshop Enhancement: To pase and format result nicely.\n \n \n print ('')\n print(video_analysis_reply)\n return video_analysis_reply",
"_____no_output_____"
],
[
"itchat.run()",
"Start auto replying.\n"
]
],
[
[
"---",
"_____no_output_____"
]
],
[
[
"# interupt kernel, then logout\nitchat.logout() # 安全退出",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## <span style=\"color:blue\">Exercise / Workshop Enhancement:</span>\n",
"_____no_output_____"
],
[
"<font color='blue'>\n<font color='blue'>\n[提问 1] 使用文字来搜索视频内容?需要怎么处理? \n[Question 1] Can we use text (keywords) as input to search video content? How?\n</font>\n\n",
"_____no_output_____"
],
[
"<font color='blue'>\n<font color='blue'>\n[提问 2] 使用图片来搜索视频内容?需要怎么处理? \n[Question 2] Can we use an image as input to search video content? How?\n</font>",
"_____no_output_____"
]
],
[
[
"'''\n\n# Private conversational mode / 单聊模式,基于关键词进行视频搜索:\[email protected]_register([TEXT])\ndef text_reply(msg):\n# if msg['isAt']:\n list_keywords = [x.strip() for x in msg['Text'].split(',')]\n # call video search function:\n search_responses = KudosData_search(list_keywords) # return is a list\n # Format search results:\n search_reply = u'[ Video Search 视频搜索结果 ]' + '\\n'\n if len(search_responses) == 0:\n search_reply += u'[ Nill 无结果 ]'\n else:\n for i in range(len(search_responses)): search_reply += '\\n' + str(search_responses[i])\n print ('')\n print (search_reply)\n return search_reply\n \n '''",
"_____no_output_____"
],
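[
"# [Hedged sketch, added for the exercise; not from the original course material]\n# The commented-out handlers in this group call KudosData_search(), which is never\n# defined in this notebook. A minimal assumed implementation is a plain keyword\n# lookup over the saved video-analysis text in the global parm_video_response:\ndef KudosData_search(list_keywords):\n global parm_video_response\n video_text = str(parm_video_response)\n # keep only the keywords that actually appear in the stored analysis results\n return [kw for kw in list_keywords if kw and kw in video_text]",
"_____no_output_____"
],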
[
"'''\n\n# Group conversational mode / 群聊模式,基于关键词进行视频搜索:\[email protected]_register([TEXT], isGroupChat=True)\ndef text_reply(msg):\n if msg['isAt']:\n list_keywords = [x.strip() for x in msg['Text'].split(',')]\n # call video search function:\n search_responses = KudosData_search(list_keywords) # return is a list\n # Format search results:\n search_reply = u'[ Video Search 视频搜索结果 ]' + '\\n'\n if len(search_responses) == 0:\n search_reply += u'[ Nill 无结果 ]'\n else:\n for i in range(len(search_responses)): search_reply += '\\n' + str(search_responses[i])\n print ('')\n print (search_reply)\n return search_reply\n \n '''",
"_____no_output_____"
]
],
[
[
"### 恭喜您!已经完成了:",
"_____no_output_____"
],
[
"### 第五课:视频识别和处理\n### Lesson 5: Video Recognition & Processing\n\n* 识别视频消息中的物体名字 (Label Detection: Detect entities within the video, such as \"dog\", \"flower\" or \"car\")\n* 识别视频的场景片段 (Shot Change Detection: Detect scene changes within the video)\n* 识别受限内容 (Explicit Content Detection: Detect adult content within a video)\n* 生成视频字幕 (Video Transcription BETA: Transcribes video content in English)\n",
"_____no_output_____"
],
[
"### 下一课是:",
"_____no_output_____"
],
[
"### 第六课:交互式虚拟助手的智能应用\n### Lesson 6: Interactive Conversatioinal Virtual Assistant Applications / Intelligent Process Automations\n* 虚拟员工: 贷款填表申请审批一条龙自动化流程 (Virtual Worker: When Chat-bot meets RPA-bot for mortgage loan application automation) \n* 虚拟员工: 文字指令交互(Conversational automation using text/message command) \n* 虚拟员工: 语音指令交互(Conversational automation using speech/voice command) \n* 虚拟员工: 多种语言交互(Conversational automation with multiple languages)",
"_____no_output_____"
],
[
"<img src='reference/WeChat_SamGu_QR.png' width=80% style=\"float: left;\">\n",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a0eee41ced1eb8a9d0c9dee7554bcfbfcf2e78c
| 78,143 |
ipynb
|
Jupyter Notebook
|
seq2seq_with_attention.ipynb
|
YanXuHappygela/NLP-study
|
6df0dd4a0d7d318d1222c5e93fa4e91a0e4eea38
|
[
"MIT"
] | 8 |
2020-07-18T19:14:12.000Z
|
2021-03-02T16:40:55.000Z
|
seq2seq_with_attention.ipynb
|
YanXuHappygela/NLP-study
|
6df0dd4a0d7d318d1222c5e93fa4e91a0e4eea38
|
[
"MIT"
] | null | null | null |
seq2seq_with_attention.ipynb
|
YanXuHappygela/NLP-study
|
6df0dd4a0d7d318d1222c5e93fa4e91a0e4eea38
|
[
"MIT"
] | 7 |
2020-07-18T19:37:01.000Z
|
2022-03-01T15:25:46.000Z
| 51.073856 | 23,310 | 0.640288 |
[
[
[
"# Sequence to Sequence attention model for machine translation\n\nThis notebook trains a sequence to sequence (seq2seq) model with two different attentions implemented for Spanish to English translation.\n\nThe codes are built on TensorFlow Core tutorials: https://www.tensorflow.org/tutorials/text/nmt_with_attention",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nprint(tf.__version__)\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom sklearn.model_selection import train_test_split\n\nimport unicodedata\nimport re\nimport numpy as np\nimport os\nimport io\nimport time",
"2.2.0\n"
]
],
[
[
"\n\n\n# Load data set\n\n\n",
"_____no_output_____"
],
[
"* Clean the sentences by removing special characters.\n* Add a start and end token to each sentence.\n* Create a word index and reverse word index (dictionaries mapping from word → id and id → word).\n* Pad each sentence to a maximum length.",
"_____no_output_____"
]
],
[
[
"# Download the file\npath_to_zip = tf.keras.utils.get_file(\n 'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',\n extract=True)\n\npath_to_file = os.path.dirname(path_to_zip)+\"/spa-eng/spa.txt\"",
"Downloading data from http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip\n2646016/2638744 [==============================] - 0s 0us/step\n"
],
[
"# Converts the unicode file to ascii\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn')\n\n\ndef preprocess_sentence(w):\n w = unicode_to_ascii(w.lower().strip())\n\n # creating a space between a word and the punctuation following it\n # eg: \"he is a boy.\" => \"he is a boy .\"\n # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation\n w = re.sub(r\"([?.!,¿])\", r\" \\1 \", w)\n w = re.sub(r'[\" \"]+', \" \", w)\n\n # replacing everything with space except (a-z, A-Z, \".\", \"?\", \"!\", \",\",\"¿\")\n w = re.sub(r\"[^a-zA-Z?.!,¿]+\", \" \", w)\n\n # remove extra space\n w = w.strip()\n\n # adding a start and an end token to the sentence\n # so that the model know when to start and stop predicting.\n w = '<start> ' + w + ' <end>'\n return w",
"_____no_output_____"
],
[
"en_sentence = u\"May I borrow this @ book?\"\nsp_sentence = u\"¿Puedo tomar prestado este libro?\"\nprint(preprocess_sentence(en_sentence))\nprint(preprocess_sentence(sp_sentence))\nprint(preprocess_sentence(sp_sentence).encode(\"UTF-8\"))",
"<start> may i borrow this book ? <end>\n<start> ¿ puedo tomar prestado este libro ? <end>\nb'<start> \\xc2\\xbf puedo tomar prestado este libro ? <end>'\n"
],
[
"# Return word pairs in the format: [ENGLISH, SPANISH]\ndef create_dataset(path, num_examples):\n lines = io.open(path, encoding='UTF-8').read().strip().split('\\n')\n word_pairs = [[preprocess_sentence(w) for w in l.split('\\t')] for l in lines[:num_examples]]\n return zip(*word_pairs)\n\nen, sp = create_dataset(path_to_file, None)\nprint(en[-1])\nprint(sp[-1])\nprint(len(en), len(sp))",
"<start> if you want to sound like a native speaker , you must be willing to practice saying the same sentence over and over in the same way that banjo players practice the same phrase over and over until they can play it correctly and at the desired tempo . <end>\n<start> si quieres sonar como un hablante nativo , debes estar dispuesto a practicar diciendo la misma frase una y otra vez de la misma manera en que un musico de banjo practica el mismo fraseo una y otra vez hasta que lo puedan tocar correctamente y en el tiempo esperado . <end>\n118964 118964\n"
],
[
"# Tokenize the sentence into list of words(integers) and pad the sequence to the same length\ndef tokenize(lang):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(\n filters='')\n lang_tokenizer.fit_on_texts(lang)\n\n tensor = lang_tokenizer.texts_to_sequences(lang)\n\n tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,\n padding='post')\n return tensor, lang_tokenizer",
"_____no_output_____"
],
[
"def load_dataset(path, num_examples=None):\n # creating cleaned input, output pairs\n targ_lang, inp_lang = create_dataset(path, num_examples)\n\n input_tensor, inp_lang_tokenizer = tokenize(inp_lang)\n target_tensor, targ_lang_tokenizer = tokenize(targ_lang)\n\n return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer",
"_____no_output_____"
],
[
"# Try experimenting with the size of that dataset\nnum_examples = 30000\ninput_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples)\n\n# Calculate max_length of the target tensors\nmax_length_targ, max_length_inp = target_tensor.shape[1], input_tensor.shape[1]\nprint(max_length_targ, max_length_inp)",
"11 16\n"
],
[
"# Creating training and validation sets using an 80-20 split\ninput_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)\n\n# Show length\nprint(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))\nprint(input_tensor_train[0])\nprint(target_tensor_train[0])",
"24000 24000 6000 6000\n[ 1 134 10 4 44 41 3 2 0 0 0 0 0 0 0 0]\n[ 1 4 95 5 80 45 3 2 0 0 0]\n"
]
],
[
[
"# Create a tf.data datasest",
"_____no_output_____"
],
[
"The tf.data.Dataset API supports writing descriptive and efficient input pipelines. Dataset usage follows a common pattern:\n\n\n* Create a source dataset from your input data.\n* Apply dataset transformations to preprocess the data.\n* Iterate over the dataset and process the elements.\n\nIteration happens in a streaming fashion, so the full dataset does not need to fit into memory.",
"_____no_output_____"
]
],
[
[
"# Configuration \nBUFFER_SIZE = len(input_tensor_train)\nBATCH_SIZE = 64\nsteps_per_epoch = len(input_tensor_train)//BATCH_SIZE\nsteps_per_epoch_val = len(input_tensor_val)//BATCH_SIZE\nembedding_dim = 256 # for word embedding\nunits = 1024 # dimensionality of the output space of RNN\nvocab_inp_size = len(inp_lang.word_index)+1\nvocab_tar_size = len(targ_lang.word_index)+1\n\ndataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE, drop_remainder=True)\nvalidation_dataset = tf.data.Dataset.from_tensor_slices((input_tensor_val, target_tensor_val)).shuffle(BUFFER_SIZE)\nvalidation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=True)\n\nexample_input_batch, example_target_batch = next(iter(dataset))\nexample_input_batch.shape, example_target_batch.shape",
"_____no_output_____"
]
],
[
[
"# Basic seq2seq model: encoder and decoder",
"_____no_output_____"
],
[
"Model groups layers into an object with training and inference features. Two ways to define tf model:\n\n\n\nBasic sequence to sequence model without attention:\n\n\n",
"_____no_output_____"
]
],
[
[
"class Encoder(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):\n super(Encoder, self).__init__()\n self.batch_sz = batch_sz\n self.enc_units = enc_units\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = tf.keras.layers.GRU(self.enc_units,\n return_sequences=True, # Whether to return the last output in the output sequence, or the full sequence. \n return_state=True, # Whether to return the last state in addition to the output.\n recurrent_initializer='glorot_uniform')\n\n def call(self, x, hidden):\n x = self.embedding(x)\n output, state = self.gru(x, initial_state = hidden)\n return output, state\n\n def initialize_hidden_state(self):\n return tf.zeros((self.batch_sz, self.enc_units))",
"_____no_output_____"
],
[
"encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)\n\n# sample input\nsample_hidden = encoder.initialize_hidden_state()\nsample_output, sample_hidden = encoder(example_input_batch, sample_hidden)\nprint ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))\nprint ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))",
"Encoder output shape: (batch size, sequence length, units) (64, 16, 1024)\nEncoder Hidden state shape: (batch size, units) (64, 1024)\n"
],
[
"class Decoder(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):\n super(Decoder, self).__init__()\n self.batch_sz = batch_sz\n self.dec_units = dec_units\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = tf.keras.layers.GRU(self.dec_units,\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform')\n self.fc = tf.keras.layers.Dense(vocab_size)\n\n\n def call(self, x, hidden):\n # x shape after passing through embedding == (batch_size, 1, embedding_dim)\n x = self.embedding(x)\n\n # passing the concatenated vector to the GRU\n output, state = self.gru(x, initial_state = hidden)\n\n # output shape == (batch_size * 1, hidden_size)\n output = tf.reshape(output, (-1, output.shape[2]))\n\n # output shape == (batch_size, vocab)\n x = self.fc(output)\n return x, state",
"_____no_output_____"
],
[
"tf.reshape([[1,2,3],[4,5,6]], (-1, 2))",
"_____no_output_____"
],
[
"decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)\n\nsample_decoder_output, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)),\n sample_hidden)\n\nprint ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))",
"Decoder output shape: (batch_size, vocab size) (64, 4935)\n"
]
],
[
[
"# Dot-product attention\n\n\n\n",
"_____no_output_____"
]
],
[
[
"class DotProductAttention(tf.keras.layers.Layer):\n def call(self, query, values):\n # query hidden state shape == (batch_size, hidden size)\n # query_with_time_axis shape == (batch_size, 1, hidden size)\n # values shape == (batch_size, max_len, hidden size)\n # we are doing this to broadcast addition along the time axis to calculate the score\n query_with_time_axis = tf.expand_dims(query, 1)\n\n # inner product, score shape == (batch_size, max_length, 1)\n score = query_with_time_axis * values\n score = tf.reduce_sum(score, axis=2)\n score = tf.expand_dims(score, 2)\n \n # attention_weights shape == (batch_size, max_length, 1)\n attention_weights = tf.nn.softmax(score, axis=1)\n\n # context_vector shape after sum == (batch_size, hidden_size)\n context_vector = attention_weights * values\n context_vector = tf.reduce_sum(context_vector, axis=1)\n\n return context_vector, attention_weights",
"_____no_output_____"
],
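[
"# [Hedged check, added for illustration] The score above is the inner product\n# <query, value_t> and the weights are a softmax over the time axis, so the\n# attention weights of every batch row should sum to 1:\ntmp_attention = DotProductAttention()\n_, tmp_weights = tmp_attention(sample_hidden, sample_output)\nprint(tf.reduce_sum(tmp_weights, axis=1)[:3]) # ~1.0 for each row",
"_____no_output_____"
],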
[
"attention_layer = DotProductAttention()\nattention_result, attention_weights = attention_layer(sample_hidden, sample_output)\n\nprint(\"Attention result shape: (batch size, units) {}\".format(attention_result.shape))\nprint(\"Attention weights shape: (batch_size, sequence_length, 1) {}\".format(attention_weights.shape))",
"Attention result shape: (batch size, units) (64, 1024)\nAttention weights shape: (batch_size, sequence_length, 1) (64, 16, 1)\n"
]
],
[
[
"# Additive attention\n\n",
"_____no_output_____"
]
],
[
[
"class BahdanauAttention(tf.keras.layers.Layer):\n def __init__(self, units):\n super(BahdanauAttention, self).__init__()\n self.W1 = tf.keras.layers.Dense(units)\n self.W2 = tf.keras.layers.Dense(units)\n self.V = tf.keras.layers.Dense(1)\n\n def call(self, query, values):\n # query hidden state shape == (batch_size, hidden size)\n # query_with_time_axis shape == (batch_size, 1, hidden size)\n # values shape == (batch_size, max_len, hidden size)\n query_with_time_axis = tf.expand_dims(query, 1)\n\n # score shape == (batch_size, max_length, 1)\n # we get 1 at the last axis because we are applying score to self.V\n # the shape of the tensor before applying self.V is (batch_size, max_length, units)\n score = self.V(tf.nn.tanh(\n self.W1(values) + self.W2(query_with_time_axis)))\n\n # attention_weights shape == (batch_size, max_length, 1)\n attention_weights = tf.nn.softmax(score, axis=1)\n\n # context_vector shape after sum == (batch_size, hidden_size)\n context_vector = attention_weights * values\n context_vector = tf.reduce_sum(context_vector, axis=1)\n\n return context_vector, attention_weights",
"_____no_output_____"
]
],
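[
[
"# [Hedged check, added for illustration] Bahdanau scores are additive:\n# score_t = V^T tanh(W1 * value_t + W2 * query). As with the dot-product variant,\n# the softmax weights should sum to 1 over the time axis:\nbah_attention = BahdanauAttention(units)\nbah_result, bah_weights = bah_attention(sample_hidden, sample_output)\nprint(bah_result.shape, tf.reduce_sum(bah_weights, axis=1)[:3])",
"_____no_output_____"
]
],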
[
[
"# Decoder layer with attention\n\n",
"_____no_output_____"
]
],
[
[
"class DecoderWithAttention(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz, attention_layer = None):\n super(DecoderWithAttention, self).__init__()\n self.batch_sz = batch_sz\n self.dec_units = dec_units\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.gru = tf.keras.layers.GRU(self.dec_units,\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform')\n self.fc = tf.keras.layers.Dense(vocab_size)\n\n # used for attention\n self.attention = attention_layer\n\n def call(self, x, hidden, enc_output):\n # x shape after passing through embedding == (batch_size, 1, embedding_dim)\n x = self.embedding(x)\n attention_weights = None\n \n if self.attention:\n # enc_output shape == (batch_size, max_length, hidden_size)\n context_vector, attention_weights = self.attention(hidden, enc_output)\n # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)\n x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)\n\n # passing the concatenated vector to the GRU\n output, state = self.gru(x, initial_state = hidden)\n\n # output shape == (batch_size * 1, hidden_size)\n output = tf.reshape(output, (-1, output.shape[2]))\n\n # output shape == (batch_size, vocab)\n x = self.fc(output)\n\n return x, state, attention_weights",
"_____no_output_____"
]
],
[
[
"# Define loss function\n\nCross-entropy loss, or log loss, measures the performance of a classification model whose output is a probability value between 0 and 1. Cross-entropy loss increases as the predicted probability diverges from the actual label. \n",
"_____no_output_____"
]
],
[
[
"loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n\ndef loss_function(real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0)) \n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_mean(loss_)",
"_____no_output_____"
],
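[
"# [Hedged sanity check, added for illustration] Recompute the same cross-entropy by\n# hand: for logits z and true class k, CE = -log(softmax(z)[k]). Labels 1 and 2 are\n# not the padding id 0, so the mask keeps both terms and loss_function is their mean.\nimport numpy as np\nlogits = np.array([0, 0.6, 0.3, 0.1])\nprobs = np.exp(logits) / np.exp(logits).sum()\nprint(-np.log(probs[1]), -np.log(probs[2])) # ~1.0634 and ~1.3634\nprint((-np.log(probs[1]) - np.log(probs[2])) / 2) # ~1.2134, their mean",
"_____no_output_____"
],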
[
"print(loss_object([1,2],[[0,0.6,0.3,0.1],[0,0.6,0.3,0.1]]))\nprint(loss_function([1,2],[[0,0.6,0.3,0.1],[0,0.6,0.3,0.1]]))",
"tf.Tensor([1.063386 1.3633859], shape=(2,), dtype=float32)\ntf.Tensor(1.2133859, shape=(), dtype=float32)\n"
]
],
[
[
"# Training\n\[email protected]\nIn TensorFlow 2, eager execution is turned on by default. The user interface is intuitive and flexible (running one-off operations is much easier and faster), but this can come at the expense of performance and deployability. It is recommended to debug in eager mode, then decorate with @tf.function for better performance.\n\nIn TensorFlow 2.0, users should refactor their code into smaller functions which are called as needed. In general, it's not necessary to decorate each of these smaller functions with tf.function; only use tf.function to decorate high-level computations - for example, one step of training, or the forward pass of your model.\n\nTensorFlow provides the tf.GradientTape API for automatic differentiation; that is, computing the gradient of a computation with respect to some inputs, usually tf.Variables. TensorFlow \"records\" relevant operations executed inside the context of a tf.GradientTape onto a \"tape\". TensorFlow then uses that tape to compute the gradients of a \"recorded\" computation using reverse mode differentiation.",
"_____no_output_____"
]
],
[
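[
"# [Hedged mini-example, added for illustration] tf.GradientTape in isolation:\n# operations run inside the context are recorded on the tape and then\n# differentiated in reverse mode. Here d(x^2)/dx at x = 3.0 is 6.0.\nx_demo = tf.Variable(3.0)\nwith tf.GradientTape() as tape_demo:\n y_demo = x_demo * x_demo\nprint(tape_demo.gradient(y_demo, x_demo)) # tf.Tensor(6.0, shape=(), dtype=float32)",
"_____no_output_____"
],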
[
"optimizer = tf.keras.optimizers.Adam()\n\ndef get_train_step_func():\n\n @tf.function\n def train_step(inp, targ, enc_hidden, encoder, decoder):\n loss = 0\n\n with tf.GradientTape() as tape: # for automatic differentiation\n enc_output, enc_hidden = encoder(inp, enc_hidden)\n\n dec_hidden = enc_hidden\n\n dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)\n\n # Teacher forcing - feeding the target as the next input\n for t in range(1, targ.shape[1]):\n # passing enc_output to the decoder\n predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)\n\n loss += loss_function(targ[:, t], predictions)\n\n # using teacher forcing\n dec_input = tf.expand_dims(targ[:, t], 1)\n\n batch_loss = (loss / int(targ.shape[1]))\n\n variables = encoder.trainable_variables + decoder.trainable_variables\n\n gradients = tape.gradient(loss, variables)\n\n optimizer.apply_gradients(zip(gradients, variables))\n\n return batch_loss\n \n return train_step\n ",
"_____no_output_____"
],
[
"def caculate_validation_loss(inp, targ, enc_hidden, encoder, decoder):\n loss = 0\n enc_output, enc_hidden = encoder(inp, enc_hidden)\n dec_hidden = enc_hidden\n dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)\n\n # Teacher forcing - feeding the target as the next input\n for t in range(1, targ.shape[1]):\n predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)\n loss += loss_function(targ[:, t], predictions)\n dec_input = tf.expand_dims(targ[:, t], 1)\n\n loss = loss / int(targ.shape[1])\n return loss",
"_____no_output_____"
],
[
"def training_seq2seq(epochs, attention):\n encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)\n decoder = DecoderWithAttention(vocab_tar_size, embedding_dim, units, BATCH_SIZE, attention)\n train_step_func = get_train_step_func()\n training_loss = []\n validation_loss = []\n\n for epoch in range(epochs):\n start = time.time()\n enc_hidden = encoder.initialize_hidden_state()\n total_loss = 0\n\n for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):\n batch_loss = train_step_func(inp, targ, enc_hidden, encoder, decoder)\n total_loss += batch_loss\n\n if batch % 100 == 0:\n print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, batch, batch_loss))\n \n enc_hidden = encoder.initialize_hidden_state()\n total_val_loss = 0\n for (batch, (inp, targ)) in enumerate(validation_dataset.take(steps_per_epoch)):\n val_loss = caculate_validation_loss(inp, targ, enc_hidden, encoder, decoder)\n total_val_loss += val_loss\n\n training_loss.append(total_loss / steps_per_epoch)\n validation_loss.append(total_val_loss / steps_per_epoch_val)\n print('Epoch {} Loss {:.4f} Validation Loss {:.4f}'.format(epoch + 1,\n training_loss[-1], validation_loss[-1]))\n print('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))\n return encoder, decoder, training_loss, validation_loss",
"_____no_output_____"
]
],
[
[
"## Training seq2seq without attention\n\n",
"_____no_output_____"
]
],
[
[
"epochs = 10\nattention = None\n\nprint(\"Running seq2seq model without attention\")\nencoder, decoder, training_loss, validation_loss = training_seq2seq(epochs, attention)\n\ntloss = training_loss\nvloss = validation_loss",
"Running seq2seq model without attention\nEpoch 1 Batch 0 Loss 4.6869\nEpoch 1 Batch 100 Loss 2.1395\nEpoch 1 Batch 200 Loss 1.8686\nEpoch 1 Batch 300 Loss 1.6464\nEpoch 1 Loss 1.9497 Validation Loss 1.5812\nTime taken for 1 epoch 37.284785747528076 sec\n\nEpoch 2 Batch 0 Loss 1.5578\nEpoch 2 Batch 100 Loss 1.4306\nEpoch 2 Batch 200 Loss 1.4045\nEpoch 2 Batch 300 Loss 1.3330\nEpoch 2 Loss 1.3965 Validation Loss 1.3412\nTime taken for 1 epoch 29.765738487243652 sec\n\nEpoch 3 Batch 0 Loss 1.1735\nEpoch 3 Batch 100 Loss 1.2294\nEpoch 3 Batch 200 Loss 1.1231\nEpoch 3 Batch 300 Loss 1.0149\nEpoch 3 Loss 1.1215 Validation Loss 1.2057\nTime taken for 1 epoch 29.904013872146606 sec\n\nEpoch 4 Batch 0 Loss 0.8761\nEpoch 4 Batch 100 Loss 0.8823\nEpoch 4 Batch 200 Loss 0.8799\nEpoch 4 Batch 300 Loss 0.9043\nEpoch 4 Loss 0.9073 Validation Loss 1.0975\nTime taken for 1 epoch 30.0055148601532 sec\n\nEpoch 5 Batch 0 Loss 0.6926\nEpoch 5 Batch 100 Loss 0.7090\nEpoch 5 Batch 200 Loss 0.7810\nEpoch 5 Batch 300 Loss 0.7730\nEpoch 5 Loss 0.7218 Validation Loss 1.0257\nTime taken for 1 epoch 29.86213254928589 sec\n\nEpoch 6 Batch 0 Loss 0.4796\nEpoch 6 Batch 100 Loss 0.5501\nEpoch 6 Batch 200 Loss 0.6577\nEpoch 6 Batch 300 Loss 0.5363\nEpoch 6 Loss 0.5660 Validation Loss 0.9892\nTime taken for 1 epoch 29.855602502822876 sec\n\nEpoch 7 Batch 0 Loss 0.4077\nEpoch 7 Batch 100 Loss 0.3633\nEpoch 7 Batch 200 Loss 0.4509\nEpoch 7 Batch 300 Loss 0.4631\nEpoch 7 Loss 0.4390 Validation Loss 0.9606\nTime taken for 1 epoch 30.220492839813232 sec\n\nEpoch 8 Batch 0 Loss 0.2913\nEpoch 8 Batch 100 Loss 0.3730\nEpoch 8 Batch 200 Loss 0.3265\nEpoch 8 Batch 300 Loss 0.3581\nEpoch 8 Loss 0.3365 Validation Loss 0.9582\nTime taken for 1 epoch 30.660627841949463 sec\n\nEpoch 9 Batch 0 Loss 0.2343\nEpoch 9 Batch 100 Loss 0.2408\nEpoch 9 Batch 200 Loss 0.2485\nEpoch 9 Batch 300 Loss 0.2376\nEpoch 9 Loss 0.2550 Validation Loss 0.9644\nTime taken for 1 epoch 30.542608737945557 sec\n\nEpoch 10 Batch 0 Loss 0.2025\nEpoch 10 Batch 100 Loss 0.1809\nEpoch 10 Batch 200 Loss 0.1757\nEpoch 10 Batch 300 Loss 0.2049\nEpoch 10 Loss 0.1909 Validation Loss 0.9752\nTime taken for 1 epoch 30.083377361297607 sec\n\n"
]
],
[
[
"## Training seq2seq with dot product attention",
"_____no_output_____"
]
],
[
[
"attention = DotProductAttention()\nprint(\"Running seq2seq model with dot product attention\")\nencoder_dp, decoder_dp, training_loss, validation_loss = training_seq2seq(epochs, attention)\n\ntloss = np.vstack((tloss, training_loss))\nvloss = np.vstack((vloss, validation_loss))",
"Running seq2seq model with dot product attention\nEpoch 1 Batch 0 Loss 4.6264\nEpoch 1 Batch 100 Loss 2.4235\nEpoch 1 Batch 200 Loss 2.1550\nEpoch 1 Batch 300 Loss 1.8926\nEpoch 1 Loss 2.4485 Validation Loss 1.9181\nTime taken for 1 epoch 42.6515851020813 sec\n\nEpoch 2 Batch 0 Loss 1.8629\nEpoch 2 Batch 100 Loss 1.7040\nEpoch 2 Batch 200 Loss 1.4752\nEpoch 2 Batch 300 Loss 1.5064\nEpoch 2 Loss 1.6576 Validation Loss 1.5580\nTime taken for 1 epoch 34.465418100357056 sec\n\nEpoch 3 Batch 0 Loss 1.3883\nEpoch 3 Batch 100 Loss 1.3110\nEpoch 3 Batch 200 Loss 1.4096\nEpoch 3 Batch 300 Loss 1.3529\nEpoch 3 Loss 1.3195 Validation Loss 1.3515\nTime taken for 1 epoch 34.06086468696594 sec\n\nEpoch 4 Batch 0 Loss 1.1593\nEpoch 4 Batch 100 Loss 1.0606\nEpoch 4 Batch 200 Loss 0.9367\nEpoch 4 Batch 300 Loss 0.9321\nEpoch 4 Loss 1.0839 Validation Loss 1.2153\nTime taken for 1 epoch 34.23249864578247 sec\n\nEpoch 5 Batch 0 Loss 0.8570\nEpoch 5 Batch 100 Loss 0.9223\nEpoch 5 Batch 200 Loss 0.9343\nEpoch 5 Batch 300 Loss 0.7972\nEpoch 5 Loss 0.8963 Validation Loss 1.1206\nTime taken for 1 epoch 34.13781118392944 sec\n\nEpoch 6 Batch 0 Loss 0.6813\nEpoch 6 Batch 100 Loss 0.7029\nEpoch 6 Batch 200 Loss 0.7612\nEpoch 6 Batch 300 Loss 0.7500\nEpoch 6 Loss 0.7414 Validation Loss 1.0458\nTime taken for 1 epoch 33.956538677215576 sec\n\nEpoch 7 Batch 0 Loss 0.6269\nEpoch 7 Batch 100 Loss 0.7352\nEpoch 7 Batch 200 Loss 0.6241\nEpoch 7 Batch 300 Loss 0.5669\nEpoch 7 Loss 0.6110 Validation Loss 0.9914\nTime taken for 1 epoch 33.77721953392029 sec\n\nEpoch 8 Batch 0 Loss 0.4866\nEpoch 8 Batch 100 Loss 0.4470\nEpoch 8 Batch 200 Loss 0.4653\nEpoch 8 Batch 300 Loss 0.5365\nEpoch 8 Loss 0.5020 Validation Loss 0.9507\nTime taken for 1 epoch 33.88840341567993 sec\n\nEpoch 9 Batch 0 Loss 0.4635\nEpoch 9 Batch 100 Loss 0.4269\nEpoch 9 Batch 200 Loss 0.4993\nEpoch 9 Batch 300 Loss 0.4623\nEpoch 9 Loss 0.4111 Validation Loss 0.9175\nTime taken for 1 epoch 33.621456146240234 sec\n\nEpoch 10 Batch 0 Loss 0.4042\nEpoch 10 Batch 100 Loss 0.3281\nEpoch 10 Batch 200 Loss 0.3313\nEpoch 10 Batch 300 Loss 0.2708\nEpoch 10 Loss 0.3350 Validation Loss 0.8871\nTime taken for 1 epoch 33.877196073532104 sec\n\n"
]
],
[
[
"## Training seq2seq with Bahdanau attention",
"_____no_output_____"
]
],
[
[
"epochs = 10\n\nattention = BahdanauAttention(units)\nprint(\"Running seq2seq model with Bahdanau attention\")\nencoder_bah, decoder_bah, training_loss, validation_loss = training_seq2seq(epochs, attention)\n\ntloss = np.vstack((tloss, training_loss))\nvloss = np.vstack((vloss, validation_loss))",
"Running seq2seq model with Bahdanau attention\nEpoch 1 Batch 0 Loss 4.5053\nEpoch 1 Batch 100 Loss 2.4014\nEpoch 1 Batch 200 Loss 1.8638\nEpoch 1 Batch 300 Loss 1.8508\nEpoch 1 Loss 2.2304 Validation Loss 1.6459\nTime taken for 1 epoch 49.266451835632324 sec\n\nEpoch 2 Batch 0 Loss 1.5681\nEpoch 2 Batch 100 Loss 1.4242\nEpoch 2 Batch 200 Loss 1.2935\nEpoch 2 Batch 300 Loss 1.2440\nEpoch 2 Loss 1.3518 Validation Loss 1.2219\nTime taken for 1 epoch 41.31578230857849 sec\n\nEpoch 3 Batch 0 Loss 1.1384\nEpoch 3 Batch 100 Loss 0.9462\nEpoch 3 Batch 200 Loss 1.0665\nEpoch 3 Batch 300 Loss 0.8158\nEpoch 3 Loss 0.9383 Validation Loss 0.9957\nTime taken for 1 epoch 41.691455125808716 sec\n\nEpoch 4 Batch 0 Loss 0.6284\nEpoch 4 Batch 100 Loss 0.6292\nEpoch 4 Batch 200 Loss 0.6123\nEpoch 4 Batch 300 Loss 0.6204\nEpoch 4 Loss 0.6468 Validation Loss 0.8762\nTime taken for 1 epoch 42.22861170768738 sec\n\nEpoch 5 Batch 0 Loss 0.4090\nEpoch 5 Batch 100 Loss 0.4194\nEpoch 5 Batch 200 Loss 0.4316\nEpoch 5 Batch 300 Loss 0.5111\nEpoch 5 Loss 0.4441 Validation Loss 0.8132\nTime taken for 1 epoch 42.191874742507935 sec\n\nEpoch 6 Batch 0 Loss 0.3097\nEpoch 6 Batch 100 Loss 0.2659\nEpoch 6 Batch 200 Loss 0.2879\nEpoch 6 Batch 300 Loss 0.2659\nEpoch 6 Loss 0.3054 Validation Loss 0.7782\nTime taken for 1 epoch 41.95083737373352 sec\n\nEpoch 7 Batch 0 Loss 0.2015\nEpoch 7 Batch 100 Loss 0.1861\nEpoch 7 Batch 200 Loss 0.2536\nEpoch 7 Batch 300 Loss 0.2544\nEpoch 7 Loss 0.2145 Validation Loss 0.7608\nTime taken for 1 epoch 41.63440918922424 sec\n\nEpoch 8 Batch 0 Loss 0.1759\nEpoch 8 Batch 100 Loss 0.1875\nEpoch 8 Batch 200 Loss 0.1559\nEpoch 8 Batch 300 Loss 0.2081\nEpoch 8 Loss 0.1536 Validation Loss 0.7595\nTime taken for 1 epoch 41.56172251701355 sec\n\nEpoch 9 Batch 0 Loss 0.1397\nEpoch 9 Batch 100 Loss 0.0913\nEpoch 9 Batch 200 Loss 0.1051\nEpoch 9 Batch 300 Loss 0.1352\nEpoch 9 Loss 0.1152 Validation Loss 0.7651\nTime taken for 1 epoch 41.66077256202698 sec\n\nEpoch 10 Batch 0 Loss 0.0813\nEpoch 10 Batch 100 Loss 0.0750\nEpoch 10 Batch 200 Loss 0.1285\nEpoch 10 Batch 300 Loss 0.1169\nEpoch 10 Loss 0.0897 Validation Loss 0.7695\nTime taken for 1 epoch 42.043405294418335 sec\n\n"
],
[
"import matplotlib.pyplot as plt\n\nax = plt.subplot(111) \nt = np.arange(1, epochs+1)\n\nfor i in range(0, vloss.shape[0]):\n line, = plt.plot(t, vloss[i,:], lw=2)\n\nax.legend(('No attention', 'Dot product', 'Bahdanau'))\nax.set_title(\"Validation loss\")\n",
"_____no_output_____"
]
],
[
[
"# Translation",
"_____no_output_____"
]
],
[
[
"def translate(sentence, encoder, decoder):\n attention_plot = np.zeros((max_length_targ, max_length_inp))\n\n sentence = preprocess_sentence(sentence)\n\n inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]\n inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n maxlen=max_length_inp,\n padding='post')\n inputs = tf.convert_to_tensor(inputs)\n\n result = ''\n\n hidden = [tf.zeros((1, units))]\n enc_out, enc_hidden = encoder(inputs, hidden)\n\n dec_hidden = enc_hidden\n dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)\n\n for t in range(max_length_targ):\n predictions, dec_hidden, attention_weights = decoder(dec_input,\n dec_hidden,\n enc_out)\n\n predicted_id = tf.argmax(predictions[0]).numpy()\n\n result += targ_lang.index_word[predicted_id] + ' '\n\n # until the predicted word is <end>.\n if targ_lang.index_word[predicted_id] == '<end>':\n return result, sentence\n\n # the predicted ID is fed back into the model, no teacher forcing.\n dec_input = tf.expand_dims([predicted_id], 0)\n\n return result, sentence",
"_____no_output_____"
],
[
"result, sentence = translate(u'esta es mi vida.', encoder_bah, decoder_bah)\nprint('Input: %s' % (sentence))\nprint('Predicted translation: {}'.format(result))",
"Input: <start> esta es mi vida . <end>\nPredicted translation: this is my life . <end> \n"
],
[
"result, sentence = translate(u'esta es mi vida.', encoder_dp, decoder_dp)\nprint('Input: %s' % (sentence))\nprint('Predicted translation: {}'.format(result))",
"Input: <start> esta es mi vida . <end>\nPredicted translation: this is my life . <end> \n"
],
[
"result, sentence = translate(u'¿todavia estan en casa?', encoder_bah, decoder_bah)\nprint('Input: %s' % (sentence))\nprint('Predicted translation: {}'.format(result))",
"Input: <start> ¿ todavia estan en casa ? <end>\nPredicted translation: are you at home ? <end> \n"
]
],
[
[
"# Next Steps\n\n\n* Training on larger dataset\n* Model tuning\n* Try out other attention scores such as multiplicative\n* Train on other seq2seq tasks\n\n\n\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a0ef138f51eaa307ca4b002986b9677f6013f52
| 82,351 |
ipynb
|
Jupyter Notebook
|
docs/examples/driver_examples/Qcodes example with DynaCool PPMS.ipynb
|
pjramsey37/Qcodes
|
9feb65baee4576ece1a7b7f38cf1be320d6abcb5
|
[
"MIT"
] | 1 |
2019-12-07T01:25:49.000Z
|
2019-12-07T01:25:49.000Z
|
docs/examples/driver_examples/Qcodes example with DynaCool PPMS.ipynb
|
pjramsey37/Qcodes
|
9feb65baee4576ece1a7b7f38cf1be320d6abcb5
|
[
"MIT"
] | null | null | null |
docs/examples/driver_examples/Qcodes example with DynaCool PPMS.ipynb
|
pjramsey37/Qcodes
|
9feb65baee4576ece1a7b7f38cf1be320d6abcb5
|
[
"MIT"
] | 1 |
2020-05-03T22:47:40.000Z
|
2020-05-03T22:47:40.000Z
| 68.683069 | 36,333 | 0.700587 |
[
[
[
"# QCoDeS Example with DynaCool PPMS\n\nThis notebook explains how to control the DynaCool PPMS from QCoDeS.\n\nFor this setup to work, the proprietary `PPMS Dynacool` application (or, alternatively `Simulate PPMS Dynacool`) must be running on some PC. On that same PC, the `server.py` script (found in `qcodes/instrument_drivers/QuantumDesign/DynaCoolPPMS/private`) must be running. The script can be run from the command line with no arguments and will run under python 3.6+.\n\nThe architecture is as follows:\n\nThe QCoDeS driver sends strings via VISA to the server who passes those same strings on to the `CommandHandler` (found in `qcodes/instrument_drivers/QuantumDesign/DynaCoolPPMS/commandhandler`). The `CommandHandler` makes the calls into the proprietary API. The QCoDeS driver can thus be called on any machine that can communicate with the machine hosting the server.\n\nApart from that, the driver is really simple. For this notebook, we used the `Simulate PPMS Dynacool` application running on the same machine as QCoDeS.",
"_____no_output_____"
]
],
[
[
"%matplotlib notebook\nfrom qcodes.instrument_drivers.QuantumDesign.DynaCoolPPMS.DynaCool import DynaCool",
"_____no_output_____"
]
],
[
[
"To instantiate the driver, simply provide the address and port in the standard VISA format.\nThe connect message is not too pretty, but there does not seem to be a way to query serial and firmware versions.",
"_____no_output_____"
]
],
[
[
"dynacool = DynaCool('dynacool', address=\"TCPIP0::127.0.0.1::5000::SOCKET\")",
"Connected to: QuantumDesign dynacool (serial:N/A, firmware:N/A) in 0.13s\n"
]
],
[
[
"To get an overview over all available parameters, use `print_readable_snapshot`.\n\nA value of \"Not available\" means (for this driver) that the parameter has been deprecated.",
"_____no_output_____"
]
],
[
[
"dynacool.print_readable_snapshot(update=True)",
"dynacool:\n\tparameter value\n--------------------------------------------------------------------------------\nIDN :\t{'vendor': ' QuantumDesign', 'model': 'dynacool', 'ser...\nchamber_state :\tsealed \nchamber_temperature :\t298.7 (K)\nfield :\tNot available (T)\nfield_approach :\tlinear \nfield_measured :\t1 (T)\nfield_ramp :\tNone (T)\nfield_rate :\t0 (T/s)\nfield_setpoint :\tNot available (T)\nfield_target :\t1 (T)\nmagnet_state :\tholding \ntemperature :\t298.7 (K)\ntemperature_rate :\t0 (K/s)\ntemperature_setpoint :\t298.7 (K)\ntemperature_settling :\tfast settle \ntemperature_state :\tnear \ntimeout :\t5 (s)\n"
]
],
[
[
"## Temperature Control\n\nAs soon as ANY of the temperature rate, the temperature setpoint, or the temperature settling mode parameters has been set, the system will start moving to the given temperature setpoint at the given rate using the given settling mode.\n\nThe system can continuously be queried for its temperature.",
"_____no_output_____"
]
],
[
[
"from time import sleep\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# example 1\n\ndynacool.temperature_rate(0.1)\ndynacool.temperature_setpoint(dynacool.temperature() - 1.3)\n\ntemps = []\n\nwhile dynacool.temperature_state() == 'tracking':\n temp = dynacool.temperature()\n temps.append(temp)\n sleep(0.75)\n print(f'Temperature is now {temp} K')\n\n",
"Temperature is now 298.70001220703125 K\nTemperature is now 298.6000061035156 K\nTemperature is now 298.5 K\nTemperature is now 298.5 K\nTemperature is now 298.3999938964844 K\nTemperature is now 298.29998779296875 K\nTemperature is now 298.1999816894531 K\nTemperature is now 298.1999816894531 K\nTemperature is now 298.0999755859375 K\nTemperature is now 297.9999694824219 K\n"
],
[
"plt.figure()\ntimeax = np.linspace(0, len(temps)*0.2, len(temps))\nplt.plot(timeax, temps, '-o')\nplt.xlabel('Time (s)')\nplt.ylabel('Temperature (K)')",
"_____no_output_____"
]
],
[
[
"## Field Control\n\nThe field has **four** related parameters:\n\n- `field_measured`: (read-only) the field strength right now\n- `field_target`: the target field that the `ramp` method will ramp to when called. Setting this parameter does **not** trigger a ramp\n- `field_rate`: the field ramp rate. NB: setting this parameter **will** trigger a ramp\n- `field_approach`: the approach the system should use to ramp. NB: setting this parameter **will** trigger a ramp\n- `field_ramp`: this is a convenience parameter that sets the target and then triggers a blocking ramp.\n\nThe idea is that the user first sets the `field_target` and then ramps the field to that target using the `ramp` method. The ramp method takes a `mode` argument that controls whether the ramp is blocking or non-blocking. \n\nUsing the simulation software, the field change is instanteneous irrespective of rate. We nevertheless include two examples of ramping here.",
"_____no_output_____"
],
[
"### A blocking ramp",
"_____no_output_____"
],
[
"First, we set a field target:",
"_____no_output_____"
]
],
[
[
"field_now = dynacool.field_measured()\ntarget = field_now + 1\ndynacool.field_target(target)",
"_____no_output_____"
]
],
[
[
"Note that the field has not changed yet:",
"_____no_output_____"
]
],
[
[
"assert dynacool.field_measured() == field_now",
"_____no_output_____"
]
],
[
[
"And now we ramp:",
"_____no_output_____"
]
],
[
[
"dynacool.ramp(mode='blocking')",
"_____no_output_____"
]
],
[
[
"The ramping will take some finite time on a real instrument. The field value is now at the target field:",
"_____no_output_____"
]
],
[
[
"print(f'Field value: {dynacool.field_measured()} T')\nprint(f'Field target: {dynacool.field_target()} T')",
"Field value: 2.0 T\nField target: 2.0 T\n"
]
],
[
[
"### A non-blocking ramp\n\nThe non-blocking ramp is very similar to the the blocking ramp.",
"_____no_output_____"
]
],
[
[
"field_now = dynacool.field_measured()\ntarget = field_now - 0.5\ndynacool.field_target(target)\n\nassert dynacool.field_measured() == field_now\n\ndynacool.ramp(mode='non-blocking')\n# Here you can do stuff while the magnet ramps\n\nprint(f'Field value: {dynacool.field_measured()} T')\nprint(f'Field target: {dynacool.field_target()} T')",
"Field value: 1.5 T\nField target: 1.5 T\n"
]
],
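[
[
"# [Hedged sketch, added for illustration] One way to wait out a non-blocking ramp is\n# to poll the measured field until it is close to the target; the 1e-4 T tolerance\n# here is an arbitrary assumption, not a documented driver constant.\nfrom time import sleep\nwhile abs(dynacool.field_measured() - dynacool.field_target()) > 1e-4:\n sleep(0.5)\nprint(f'Ramp finished at {dynacool.field_measured()} T')",
"_____no_output_____"
]
],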
[
[
"### Using the `field_ramp` parameter\n\nThe `field_ramp` parameter sets the target field and ramp when being set.",
"_____no_output_____"
]
],
[
[
"print(f'Now the field is {dynacool.field_measured()} T...')\nprint(f'...and the field target is {dynacool.field_target()} T.')",
"Now the field is 1.5 T...\n...and the field target is 1.5 T.\n"
],
[
"dynacool.field_ramp(1)",
"_____no_output_____"
],
[
"print(f'Now the field is {dynacool.field_measured()} T...')\nprint(f'...and the field target is {dynacool.field_target()} T.')",
"Now the field is 1.0 T...\n...and the field target is 1 T.\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a0ef7c63622540f2cca2942ec76fe65eb01cc70
| 573,375 |
ipynb
|
Jupyter Notebook
|
SentimentAndKeyrordAnalysis.ipynb
|
NayLinDS/MLmodels
|
b4e461e52256c994c285657fe5d3b96126caabac
|
[
"MIT"
] | null | null | null |
SentimentAndKeyrordAnalysis.ipynb
|
NayLinDS/MLmodels
|
b4e461e52256c994c285657fe5d3b96126caabac
|
[
"MIT"
] | null | null | null |
SentimentAndKeyrordAnalysis.ipynb
|
NayLinDS/MLmodels
|
b4e461e52256c994c285657fe5d3b96126caabac
|
[
"MIT"
] | null | null | null | 171.566427 | 159,146 | 0.855233 |
[
[
[
"# Sentiment analysis",
"_____no_output_____"
]
],
[
[
"import pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer",
"_____no_output_____"
],
[
"df=pd.read_csv(\"train.csv\")\r\n",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.shape\r\n",
"_____no_output_____"
],
[
"df_clean=df.drop(['ID','Place','location','date','status','job_title','summary','advice_to_mgmt','score_1','score_2','score_3','score_4','score_5','score_6','overall'],axis=1)\r\ndf_postitives=df_clean.drop(['negatives'],axis=1)\r\n\r\ndf_postitives=df_postitives.head(n=1000)\r\n\r\ndf_negatives=df_clean.drop(['positives'],axis=1)\r\n\r\ndf_negatives=df_negatives.head(n=1000)\r\n\r\ndf_postitives['polarity']=1\r\n#dftive_postis['polarity']=1\r\ndf_negatives['polarity']=0\r\ndf_postitives.columns=['value','polarity']\r\ndf_negatives.columns=['value','polarity']\r\nframes=[df_postitives,df_negatives]\r\ndf=pd.concat(frames)",
"_____no_output_____"
],
[
"df_postitives\r\n",
"_____no_output_____"
],
[
"type(df_postitives)",
"_____no_output_____"
],
[
"type(df_postitives['value'][2])",
"_____no_output_____"
],
[
"df_to_list=df_postitives['value'].values.tolist()",
"_____no_output_____"
],
[
"df_to_list0=df_to_list #positives",
"_____no_output_____"
],
[
"df_to_list1=df_negatives['value'].values.tolist()",
"_____no_output_____"
],
[
"type(df_to_list)",
"_____no_output_____"
],
[
"vectorizer=TfidfVectorizer(max_features=1000)\r\nvectors=vectorizer.fit_transform(df.value)\r\nword_df=pd.DataFrame(vectors.toarray(),columns=vectorizer.get_feature_names())\r\nword_df.head()",
"_____no_output_____"
],
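[
"# [Hedged illustration] TF-IDF scores a term by its frequency within a review,\r\n# discounted by how many reviews contain it. Peek at the learned vocabulary:\r\nprint(len(vectorizer.get_feature_names()))\r\nprint(vectorizer.get_feature_names()[:10])",
"_____no_output_____"
],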
[
"X=word_df\r\ny=df.polarity",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"from sklearn.linear_model import LinearRegression\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.naive_bayes import MultinomialNB",
"_____no_output_____"
],
[
"# logistic regression model\r\nlogreg = LogisticRegression(C=1e9, solver='lbfgs', max_iter=1000)\r\nlogreg.fit(X, y)",
"_____no_output_____"
]
],
[
[
"# Testing\n",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test= train_test_split(X,y,test_size=0.33, random_state=12)\r\n",
"_____no_output_____"
],
[
"# logistic regression model\r\nlogreg1 = LogisticRegression(C=1e9, solver='lbfgs', max_iter=1000)\r\nlogreg1.fit(X_train, y_train)\r\nsvc1=LinearSVC()\r\nsvc1.fit(X_train,y_train)",
"_____no_output_____"
],
[
"y_pred= logreg1.predict(X_test)\r\ny_pred2=svc1.predict(X_test)",
"_____no_output_____"
],
[
"y_test",
"_____no_output_____"
],
[
"from sklearn.metrics import confusion_matrix\r\nimport matplotlib as plt\r\ncm= confusion_matrix(y_test,y_pred)\r\nprint(cm)\r\n",
"[[291 25]\n [ 42 302]]\n"
],
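[
"# [Hedged follow-up, added for illustration] Accuracy read off the confusion matrix\r\n# above: (291 + 302) / 660 = ~0.898 for logistic regression; accuracy_score gives\r\n# the same number directly, and likewise for the SVC predictions.\r\nfrom sklearn.metrics import accuracy_score\r\nprint(accuracy_score(y_test, y_pred))\r\nprint(accuracy_score(y_test, y_pred2))",
"_____no_output_____"
],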
[
"import pylab as pl \r\npl.matshow(cm)\r\npl.title('Confusion matrix of LogisticRegression')\r\npl.colorbar()\r\npl.show()\r\n",
"_____no_output_____"
],
[
"cm_svc= confusion_matrix(y_test,y_pred2)\r\nprint(cm_svc)",
"[[293 23]\n [ 40 304]]\n"
],
[
"import pylab as pl \r\npl.matshow(cm_svc)\r\npl.title('Confusion matrix of SVN')\r\npl.colorbar()\r\npl.show()\r\n\r\n",
"_____no_output_____"
]
],
[
[
"## End of testing",
"_____no_output_____"
]
],
[
[
"svc=LinearSVC()\r\nsvc.fit(X,y)",
"_____no_output_____"
],
[
"unknown = pd.DataFrame({'content': [\r\n \"office space is big\",\r\n\r\n \"excellent manager\",\r\n \"low salary\",\r\n \"good career opportunity\",\r\n \"office politics\",\r\n \"inefficient leadership\",\r\n \"canteen food is good but service is bad\",\r\n \"cantenn food is good\"\r\n \"food taste bad\"\r\n\r\n]})",
"_____no_output_____"
],
[
"unknown_vectors = vectorizer.transform(unknown.content)\r\nunknown_words_df = pd.DataFrame(unknown_vectors.toarray(), columns=vectorizer.get_feature_names())\r\nunknown_words_df.head()",
"_____no_output_____"
],
[
"unknown['pred_logreg'] = logreg.predict(unknown_words_df)",
"_____no_output_____"
],
[
"unknown",
"_____no_output_____"
],
[
"unknown.pred_logreg[0]\r\n",
"_____no_output_____"
],
[
"df_review = df_postitives",
"_____no_output_____"
],
[
"df_review=df_postitives[\"value\"]+df_negatives['value']",
"_____no_output_____"
],
[
"df_review",
"_____no_output_____"
],
[
"df1=pd.read_csv(\"train.csv\")",
"_____no_output_____"
],
[
"df1_overall=df1.drop(['ID','Place','location','date','status','job_title','summary','advice_to_mgmt','score_1','score_2','score_3','score_4','score_5','score_6'],axis=1)",
"_____no_output_____"
],
[
"df1_overall[\"review\"] = df1_overall[\"positives\"] + df1_overall[\"negatives\"]",
"_____no_output_____"
],
[
"df1_overall.drop(['positives','negatives'],axis=1)",
"_____no_output_____"
],
[
"vectorizer=TfidfVectorizer(max_features=1000)\r\nvectors=vectorizer.fit_transform(df1_overall.review)\r\nwordreview_df=pd.DataFrame(vectors.toarray(),columns=vectorizer.get_feature_names())\r\nwordreview_df.head()",
"_____no_output_____"
],
[
"XX=wordreview_df.head(n=1000)\r\nyy=df1_overall.overall.head(n=1000)",
"_____no_output_____"
],
[
"yy.head",
"_____no_output_____"
],
[
"logreg1 = LogisticRegression(C=1e9, solver='lbfgs', max_iter=1000)\r\nlogreg1.fit(XX, yy)",
"_____no_output_____"
],
[
"unknown = pd.DataFrame({'content': [\r\n \"office space is big\",\r\n\r\n \"excellent manager but lack of job security\",\r\n \"low salary however job security is high\",\r\n \"good career opportunity low salay\",\r\n \"office politics\",\r\n \"inefficient leadership\",\r\n \"canteen food is good but service is bad\",\r\n \"cantenn food is good\"\r\n \"food taste bad\"\r\n\r\n]})",
"_____no_output_____"
],
[
"unknown_vectors = vectorizer.transform(unknown.content)\r\nunknown_words_df = pd.DataFrame(unknown_vectors.toarray(), columns=vectorizer.get_feature_names())\r\nunknown_words_df.head()",
"_____no_output_____"
],
[
"unknown['pred_logreg'] = logreg1.predict(unknown_words_df)",
"_____no_output_____"
],
[
"unknown",
"_____no_output_____"
]
],
[
[
"# LDA\n",
"_____no_output_____"
]
],
[
[
"df_to_list=df_to_list0 # df_to_list1 == negatives comments\r\n#df_to_list0 == positives comments",
"_____no_output_____"
],
[
"from time import time\r\nimport matplotlib.pyplot as plt \r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\r\nfrom sklearn.datasets import fetch_20newsgroups\r\n\r\nn_samples = 2000\r\nn_features = 1000\r\nn_components = 10\r\nn_top_words = 10",
"_____no_output_____"
],
[
"def plot_top_words(model, feature_names, n_top_words, title):\r\n fig, axes = plt.subplots(2, 5, figsize=(30, 15), sharex=True)\r\n axes = axes.flatten()\r\n for topic_idx, topic in enumerate(model.components_):\r\n top_features_ind = topic.argsort()[:-n_top_words - 1:-1]\r\n top_features = [feature_names[i] for i in top_features_ind]\r\n print(top_features)\r\n weights = topic[top_features_ind]\r\n\r\n ax = axes[topic_idx]\r\n ax.barh(top_features, weights, height=0.7)\r\n ax.set_title(f'Topic {topic_idx +1}',\r\n fontdict={'fontsize': 30})\r\n ax.invert_yaxis()\r\n ax.tick_params(axis='both', which='major', labelsize=20)\r\n for i in 'top right left'.split():\r\n ax.spines[i].set_visible(False)\r\n fig.suptitle(title, fontsize=40)\r\n\r\n plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)\r\n plt.show()",
"_____no_output_____"
],
[
"tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\r\n max_features=n_features,\r\n stop_words='english')",
"_____no_output_____"
],
[
"tfidf = tfidf_vectorizer.fit_transform(df_to_list)",
"_____no_output_____"
],
[
"tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\r\n max_features=n_features,\r\n stop_words='english')",
"_____no_output_____"
],
[
"individual=[df_to_list[2],df_to_list[3],df_to_list[11]] #,df_to_list[3],df_to_list[4],df_to_list[5],df_to_list[6],df_to_list[7],df_to_list[8],df_to_list[9],df_to_list[10],df_to_list[11]\r\ntf = tf_vectorizer.fit_transform(individual) # df_to_list",
"_____no_output_____"
],
[
"for i in range(len(individual)):\r\n print(df_to_list[i])\r\n print(\"=============\")",
"People are smart and friendly\n=============\n1) Food, food, food. 15+ cafes on main campus (MTV) alone. Mini-kitchens, snacks, drinks, free breakfast/lunch/dinner, all day, errr'day. 2) Benefits/perks. Free 24:7 gym access (on MTV campus). Free (self service) laundry (washer/dryer) available. Bowling alley. Volley ball pit. Custom-built and exclusive employee use only outdoor sport park (MTV). Free health/fitness assessments. Dog-friendly. Etc. etc. etc. 3) Compensation. In ~2010 or 2011, Google updated its compensation packages so that they were more competitive. 4) For the size of the organization (30K+), it has remained relatively innovative, nimble, and fast-paced and open with communication but, that is definitely changing (for the worse). 5) With so many departments, focus areas, and products, *in theory*, you should have plenty of opportunity to grow your career (horizontally or vertically). In practice, not true. 6) You get to work with some of the brightest, most innovative and hard-working/diligent minds in the industry. There's a \"con\" to that, too (see below).\n=============\n* If you're a software engineer, you're among the kings of the hill at Google. It's an engineer-driven company without a doubt (that *is* changing, but it's still very engineer-focused). * The perks are amazing. Yes, free breakfast, lunch, an dinner every weekday. Aaaaaamazing holiday parties (at Waldorf Astoria, NY Public Library, MoMA, etc.), overnight ski trips to Vermont, overnight nature trips to the Poconos in the summer, summer picnics at Chelsea piers, and on and on and on. I don't see this going away unless the company starts hurting financially. * Speaking of which, the company is doing quite well, which reflects in bonuses and equity grants. * There a huge diversity of work ranging from defending independent journalism worldwide (Google Project Shield) to crisis response during disasters (see Maps during Hurricane Sandy or Tsunamis), to the best machine learning experts and projects in the world, to more mundane revenue-driving projects in advertising, there's really something for everybody. * It's easy to move around within the company as long as you're in good standing (the vast majority of engineers are). * The company is amazingly open: every week Larry Page and Sergey Brin host what's called TGIF where food, beer, wine, etc. is served, a new project is presented, and afterward there's an open forum to ask the executives anything you want. It's truly fair game to ask anything, no matter how controversial, and frequently the executives will be responsive. * No, nobody cares if you use an iPhone, Facebook, shop with Amazon, stream using Spotify, or refuse to use Google+. The company is amazingly open and flexible. Neither pro nor con, but general information on work-life balance, promotions, and advancement. * Work life balance can be what you want it to be on most teams. (Some teams are in more competitive sectors and require more crazy hours all the time - but very few of them). If you do what's expected, you'll be fine at least for a handful of years. Working a roughly 40 hour work week is possible, and many people do it. There are also people who are hyper-motived and work like crazy just because they love it, or because they're competitive, or they want to get a promotion. If you work 40 hour weeks without putting in anything extra, you'll fall behind them as they advance and you stand still - and maybe that doesn't matter, so it works out for everybody. 
But at least know where you would realistically stand. * If you excel and work your butt off, you'll be compensated and promoted. If you let yourself be a code monkey, and just sit coding with your head down all day, you'll be fine but won't advance. A big complaint from some Googlers is about not being able to advance \"even at Google\" with pure coding. Sure, if you're the uber genius who created MapReduce and Bigtable, you're going to advance like a rocket without having to do anything but coding, but if you're like most engineers at Google -- smarter than average, but just average compared to other Googlers -- you're just a good coder and not revolutionary. Code monkeys are important to actually get stuff done, and to be sure you absolutely need to be a good coder as a software engineer (it's the minimum requirement), but code monkeys won't advance because they're not leaders and they're easy to replace. To get promoted you need to lead and do more than just code. There are plenty of ways to lead other than being an official tech lead, so this isn't actually _that_ hard, so the real point is just that you can't just sit there coding what other people tell you to code all day and expect to advance.\n=============\n"
],
[
"type(individual)",
"_____no_output_____"
],
[
"tf\r\nprint(tf)",
" (0, 10)\t5\n (0, 2)\t6\n (0, 9)\t1\n (0, 5)\t1\n (0, 23)\t1\n (0, 21)\t2\n (0, 30)\t1\n (0, 6)\t2\n (0, 18)\t3\n (0, 13)\t1\n (0, 22)\t1\n (0, 25)\t1\n (0, 8)\t1\n (0, 1)\t1\n (0, 7)\t1\n (0, 3)\t2\n (0, 26)\t1\n (0, 16)\t4\n (0, 29)\t1\n (0, 19)\t3\n (0, 15)\t3\n (0, 4)\t2\n (0, 28)\t2\n (0, 24)\t2\n (0, 11)\t1\n :\t:\n (1, 30)\t1\n (1, 18)\t1\n (1, 13)\t1\n (1, 22)\t1\n (1, 25)\t1\n (1, 8)\t1\n (1, 1)\t1\n (1, 7)\t2\n (1, 3)\t1\n (1, 26)\t1\n (1, 16)\t9\n (1, 29)\t1\n (1, 19)\t2\n (1, 15)\t2\n (1, 4)\t1\n (1, 28)\t1\n (1, 24)\t1\n (1, 11)\t1\n (1, 12)\t1\n (1, 0)\t1\n (1, 17)\t1\n (1, 14)\t1\n (1, 20)\t1\n (1, 27)\t1\n (2, 6)\t1\n"
],
[
"nmf = NMF(n_components=n_components, random_state=1,\r\n alpha=.1, l1_ratio=.5).fit(tfidf)",
"_____no_output_____"
],
[
"tfidf_feature_names = tfidf_vectorizer.get_feature_names()\r\nplot_top_words(nmf, tfidf_feature_names, n_top_words,\r\n 'Topics in NMF model (Frobenius norm)')",
"['great', 'people', 'atmosphere', 'flexibility', 'flexible', 'fantastic', 'opportunities', 'food', 'care', 'workers']\n['good', 'pay', 'salary', 'people', 'pretty', 'food', 'job', 'money', 'experience', 'times']\n['place', 'work', 'google', 'nice', 'fun', 'best', 'amazing', 'terrific', 'grow', 'loved']\n['people', 'smart', 'work', 'amazing', 'working', 'awesome', 'nice', 'challenging', 'lots', 'really']\n['food', 'free', 'gym', 'nice', 'really', 'google', 'lot', 'snacks', '401k', 'wonderful']\n['company', 'work', 'best', 'google', 'world', 'large', 'culture', 'growth', 'big', 'need']\n['environment', 'work', 'fun', 'friendly', 'working', 'best', 'perks', 'flexible', 'awesome', 'excellent']\n['life', 'balance', 'work', 'interesting', 'projects', 'peers', 'opportunities', 'home', 'excellent', 'provides']\n['benefits', 'pay', 'lots', 'salary', 'excellent', 'fantastic', 'amazing', 'compensation', 'opportunities', 'interesting']\n['perks', 'culture', 'awesome', 'opportunities', 'colleagues', 'team', 'nice', 'offices', 'amazing', 'peers']\n"
],
[
"nmf",
"_____no_output_____"
],
[
"nmf = NMF(n_components=n_components, random_state=1,\r\n beta_loss='kullback-leibler', solver='mu', max_iter=1000, alpha=.1,\r\n l1_ratio=.5).fit(tfidf)",
"_____no_output_____"
],
[
"enumerate(nmf.components_)",
"_____no_output_____"
],
[
"n_top_words",
"_____no_output_____"
],
[
"tfidf_feature_names = tfidf_vectorizer.get_feature_names()\r\nplot_top_words(nmf, tfidf_feature_names, n_top_words,\r\n 'Topics in NMF model (generalized Kullback-Leibler divergence)')",
"['great', 'people', 'benefits', 'culture', 'perks', 'smart', 'fantastic', 'atmosphere', 'workers', 'team']\n['good', 'pay', 'salary', 'pretty', 'money', 'experience', 'pros', 'stock', 'perk', 'hard']\n['work', 'place', 'best', 'google', 'fun', 'job', 'places', 'dynamic', 'range', 'energy']\n['people', 'smart', 'nice', 'amazing', 'really', 'lot', 'learn', 'work', 'challenging', 'opportunity']\n['food', 'free', 'really', 'gym', 'like', 'nice', 'cool', 'campus', 'google', 'lunch']\n['company', 'world', 'best', 'problems', 'tech', 'big', 'people', 'working', 'things', 'google']\n['environment', 'friendly', 'working', 'great', 'fun', 'flexible', 'awesome', 'time', 'staff', 'technology']\n['work', 'life', 'balance', 'projects', 'interesting', 'opportunities', 'colleagues', 'peers', 'flexible', 'hours']\n['benefits', 'lots', 'excellent', 'pay', 'amazing', 'competitive', 'compensation', 'easy', 'love', 'time']\n['perks', 'culture', 'awesome', 'opportunities', 'google', 'employees', 'learning', 'way', 'development', 'engineering']\n"
],
[
"#tfidf_feature_names",
"_____no_output_____"
],
[
"lda = LatentDirichletAllocation(n_components=n_components, max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)",
"_____no_output_____"
],
[
"lda.fit(tf)",
"_____no_output_____"
],
[
"tf_feature_names = tf_vectorizer.get_feature_names()\r\nplot_top_words(lda, tf_feature_names, n_top_words, 'Topics in LDA model')",
"['ll', 'company', 'google', 'lead', 'like', 'people', 'projects', 'open', 'actually', 'need']\n['working', 'plenty', 'need', 'time', 'world', 'lead', 'flexible', 'sergey', 'like', 'engineers']\n['ways', 'free', 'projects', 'flexible', 'people', 'competitive', 'working', 'cares', 'larry', 'need']\n['need', 'open', 'projects', 'cares', 'ways', 'day', 'sergey', 'free', 'like', 'company']\n['actually', 'world', 'll', 'need', 'having', 'projects', 'plenty', 'larry', 'speaking', 'open']\n['engineers', 'having', 'day', 'food', 'sure', 'projects', 'lead', 'cares', 'actually', 'll']\n['food', 'competitive', 'sergey', 'won', 'day', 'world', 'projects', 'flexible', 'speaking', 'tgif']\n['people', 'sergey', 'don', 'projects', 'google', 'speaking', 'working', 'free', 'like', 'open']\n['like', 'time', 'plenty', 'world', 'speaking', 'day', 'sure', 'actually', 'sergey', 'ways']\n['cares', 'lead', 'google', 'actually', 'projects', 'working', 'don', 'free', 'plenty', 'larry']\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0f039459905de2d1526fdf9ebcc2e9bfe531e0
| 152,040 |
ipynb
|
Jupyter Notebook
|
entity-resolution-with-monetdb/notebooks/draft.ipynb
|
ilias-ant/entity-resolution-with-monetdb
|
da8231f6b9aaf4f95b8504e299b39015cd7f794b
|
[
"MIT"
] | 1 |
2021-10-04T20:09:21.000Z
|
2021-10-04T20:09:21.000Z
|
entity-resolution-with-monetdb/notebooks/draft.ipynb
|
ilias-ant/entity-resolution-with-monetdb
|
da8231f6b9aaf4f95b8504e299b39015cd7f794b
|
[
"MIT"
] | null | null | null |
entity-resolution-with-monetdb/notebooks/draft.ipynb
|
ilias-ant/entity-resolution-with-monetdb
|
da8231f6b9aaf4f95b8504e299b39015cd7f794b
|
[
"MIT"
] | null | null | null | 55.327511 | 19,224 | 0.611813 |
[
[
[
"import json\nimport os\nimport random\nimport re\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom more_itertools import distinct_combinations\nfrom plotnine import *\nfrom sklearn import feature_extraction, metrics\n\n\nROOT_PATH = os.path.dirname(os.path.abspath(os.getcwd()))",
"_____no_output_____"
],
[
"def inspect_df(df: pd.DataFrame, n : int=5) -> pd.DataFrame:\n \"\"\"Helper method to easily inspect DataFrames.\"\"\"\n \n print(f'shape: {df.shape}')\n\n return df.head(n)",
"_____no_output_____"
]
],
[
[
"# Table of Contents\n- [Exploratory Data Analysis](#Exploratory-Data-Analysis)\n- [A Baseline Model: random classifier](#A-Baseline-Model:-random-classifier)\n- [A Better Baseline Model: \\<page title\\> similarity](#A-Better-Baseline-Model:-<page-title>-similarity)\n- [Feature Extraction](#Feature-Extraction)",
"_____no_output_____"
],
[
"# Exploratory Data Analysis",
"_____no_output_____"
]
],
[
[
"def json_loader(dirpath: str) -> list:\n \"\"\"Discover all .json files and gather their respective data, given a `dirpath`.\n \"\"\"\n data = []\n for subdir in os.listdir(dirpath):\n \n temp = os.path.join(dirpath, subdir)\n\n for datafile in os.listdir(temp):\n\n with open(os.path.join(temp, datafile), 'r') as f:\n\n spec = json.loads(f.read())\n \n # keep global identifier, format it as in the labelled dataset\n spec['id'] = subdir + '//' + datafile.split('.json')[0]\n \n data.append(spec)\n\n return data",
"_____no_output_____"
],
[
"data = json_loader(dirpath=os.path.join(ROOT_PATH, 'data/2013_camera_specs'))",
"_____no_output_____"
],
[
"specs = pd.DataFrame(data)",
"_____no_output_____"
],
[
"inspect_df(specs)",
"shape: (29787, 4662)\n"
],
[
"specs.set_index('id', inplace=True)",
"_____no_output_____"
],
[
"labels = pd.read_csv(os.path.join(ROOT_PATH, 'data/sigmod_medium_labelled_dataset.csv'))",
"_____no_output_____"
],
[
"inspect_df(labels)",
"shape: (46665, 3)\n"
],
[
"matched_products = labels['label'] == 1",
"_____no_output_____"
],
[
"matched_products.value_counts()",
"_____no_output_____"
],
[
"ggplot() + \\\n geom_bar(mapping=aes(x=matched_products), colour='white') + \\\n labs(title='same products ?', x='') + \\\n coord_flip()",
"_____no_output_____"
],
[
"specs_info = specs.describe()\nspecs_info = specs_info.transpose()\n\ninspect_df(specs_info)",
"shape: (4661, 4)\n"
],
[
"specs_info['support'] = specs_info['count'] / len(specs.index)\nspecs_info = specs_info.sort_values(by='support', ascending=False)",
"_____no_output_____"
],
[
"specs_info.head(10)",
"_____no_output_____"
],
[
"top10 = list(specs_info.head(10).index)",
"_____no_output_____"
]
],
[
[
"These are the 10 camera specs (attributes) with the highest support.",
"_____no_output_____"
]
],
[
[
"specs[top10]",
"_____no_output_____"
]
],
[
[
"# A Baseline Model: random classifier",
"_____no_output_____"
]
],
[
[
"inspect_df(labels)",
"shape: (46665, 3)\n"
],
[
"def random_classifier(*args):\n \"\"\"A random classifier.\n \n Returns: True of False (i.e. if products are the same)\n \"\"\"\n return random.random() > 0.5",
"_____no_output_____"
],
[
"predictions = labels.apply(random_classifier, axis=1)",
"_____no_output_____"
],
[
"metrics.accuracy_score(predictions, labels['label'])",
"_____no_output_____"
],
[
"metrics.precision_score(predictions, labels['label'])",
"_____no_output_____"
],
[
"metrics.recall_score(predictions, labels['label'])",
"_____no_output_____"
],
[
"metrics.f1_score(predictions, labels['label'])",
"_____no_output_____"
]
],
[
[
"This is a good indication of the model performance: **f1 = 0.1337**",
"_____no_output_____"
]
],
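[
[
"(Recall that F1 is the harmonic mean of precision and recall, `F1 = 2 * P * R / (P + R)`, which is why it lies between the precision and recall scores computed above.)",
"_____no_output_____"
]
],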
[
[
"metrics.confusion_matrix(predictions, labels['label'])",
"_____no_output_____"
]
],
[
[
"This is the initial, baseline performance. Our model should easily outperform this random classifier.",
"_____no_output_____"
],
[
"# A Better Baseline Model: \\<page title\\> similarity",
"_____no_output_____"
]
],
[
[
"ggplot() + \\\n geom_histogram(mapping=aes(x=specs['<page title>'].map(len)), colour='white', bins=30) + \\\n xlab('<page title>: no. of characters ')",
"_____no_output_____"
],
[
"ggplot() + \\\n geom_histogram(mapping=aes(x=specs['<page title>'].map(lambda title: len(title.split()))), colour='white', bins=30) + \\\n xlab('<page title>: no. of words')",
"_____no_output_____"
]
],
[
[
"We will use a BoW model + a text similarity algorithm + a suitable threshold in order to assert whether two cameras are the same.",
"_____no_output_____"
]
],
[
[
"def get_corpus(data: pd.DataFrame) -> np.ndarray:\n \n return data['<page title>'].values",
"_____no_output_____"
],
[
"vectorizer = feature_extraction.text.CountVectorizer()",
"_____no_output_____"
],
[
"vectorizer.fit(get_corpus(specs))",
"_____no_output_____"
],
[
"def create_dataset(data: pd.DataFrame, labels: pd.DataFrame, features: list):\n \"\"\"Helper method that creates a dataset.\n \"\"\"\n left_part = pd.merge(labels, data[features], how='inner', left_on='left_spec_id', right_on='id')\n \n right_part = pd.merge(labels, data[features], how='inner', left_on='right_spec_id', right_on='id')\n \n dataset = pd.merge(left_part, right_part, how='inner', on=('left_spec_id', 'right_spec_id'), \n suffixes=('_left', '_right'))\n \n dataset['label'] = dataset['label_left']\n dataset.drop(['label_left', 'label_right'], axis=1, inplace=True)\n \n dataset.set_index(['left_spec_id', 'right_spec_id'], inplace=True)\n \n return dataset",
"_____no_output_____"
],
[
"X = create_dataset(data=specs, labels=labels, features=top10[0])\n\ninspect_df(X)",
"shape: (46665, 3)\n"
],
[
"def pagetitle_similarity(title1: str, title2: str) -> float:\n \n vec1 = vectorizer.transform([title1])\n vec2 = vectorizer.transform([title2])\n\n return metrics.pairwise.cosine_similarity(vec1, vec2).take(0)",
"_____no_output_____"
],
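[
"# A hedged micro-check of the similarity function on two made-up titles (these strings are\n# illustrative assumptions, not rows from the dataset): shared tokens should yield a high score\npagetitle_similarity('nikon d3100 dslr camera', 'nikon d3100 camera body')",
"_____no_output_____"
],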
[
"X['similarity'] = X[['<page title>_left', '<page title>_right']].apply(lambda x: pagetitle_similarity(x[0], x[1]), axis=1)",
"_____no_output_____"
],
[
"X[X['label'] == 1]['similarity'].mean()",
"_____no_output_____"
],
[
"X[X['label'] == 0]['similarity'].mean()",
"_____no_output_____"
],
[
"X['predictions'] = X['similarity'].map(lambda score: score > 0.5)",
"_____no_output_____"
],
[
"metrics.accuracy_score(X['predictions'], X['label'])",
"_____no_output_____"
],
[
"metrics.precision_score(X['predictions'], X['label'])",
"_____no_output_____"
],
[
"metrics.recall_score(X['predictions'], X['label'])",
"_____no_output_____"
],
[
"metrics.f1_score(X['predictions'], X['label'])",
"_____no_output_____"
]
],
[
[
"With this model we improved: **f1 = 0.3843**",
"_____no_output_____"
]
],
[
[
"metrics.confusion_matrix(X['predictions'], X['label'])",
"_____no_output_____"
]
],
[
[
"We can definitely improve over this by a better choice of text embeddings or similarity algorithm.\n\nBut, all things considered, any approach that relies on the notion of similarity between page titles could not drastically improve the 0.38 F1 score.\n\nIt is time to proceed with an ML approach.",
"_____no_output_____"
],
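[
"As a hedged sketch of one such improvement (an assumption, not something evaluated in this notebook), a `TfidfVectorizer` could replace the `CountVectorizer`, so that rare, discriminative title tokens outweigh ubiquitous ones:\n\n```python\ntitle_vectorizer = feature_extraction.text.TfidfVectorizer()\ntitle_vectorizer.fit(get_corpus(specs))\n# then reuse it inside pagetitle_similarity in place of `vectorizer`\n```",
"_____no_output_____"
],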
[
"# Feature Extraction",
"_____no_output_____"
]
],
[
[
"MAX_PRODUCTS = 1000",
"_____no_output_____"
],
[
"camera_pairs = list(distinct_combinations(specs.index[:MAX_PRODUCTS], 2))",
"_____no_output_____"
],
[
"inspect_df(specs[top10])",
"shape: (29787, 10)\n"
]
],
[
[
"### brand",
"_____no_output_____"
]
],
[
[
"for brand in specs[specs['brand'].notna()]['brand'].tolist():\n\n if not isinstance(brand, str):\n print(brand)",
"['Pentax', 'Pentax']\n['Kodak', 'Kodak']\n['Nikon Megapixels: 12.1 MP', 'Nikon\\nMegapixels:\\n12.1 MP']\n['Kodak Optical Zoom: 8x', 'Kodak\\nOptical Zoom:\\n8x']\n['Nikon\\nType:\\nDigital SLR', 'Nikon']\n['Canon', 'Canon']\n['Nikon', 'Nikon']\n['Pentax', 'Pentax']\n['Canon', 'Canon']\n['Pentax', 'Pentax']\n['Pentax', 'Pentax']\n['Pentax', 'Pentax']\n['Olympus', 'Olympus']\n['Nikon', 'Nikon']\n['Nikon', 'Nikon']\n['Canon', 'Canon']\n['Nikon', 'Nikon']\n['Canon', 'Canon']\n['Canon', 'Canon']\n['Nikon', 'Nikon']\n['Canon', 'Canon']\n['Canon', 'Canon']\n['Pentax', 'Pentax']\n['Pentax', 'Pentax']\n['Nikon', 'Nikon']\n['Sony', 'Sony']\n['Canon', 'Canon']\n['Olympus', 'Olympus']\n['Nikon', 'Nikon']\n['Nikon', 'Nikon']\n['Pentax', 'Pentax']\n['Samsung', 'Samsung']\n['Pentax', 'Pentax']\n['Pentax', 'Pentax']\n['Focus', 'Sony']\n['NikonEu', 'Nikon']\n"
],
[
"def get_brand(value: str) -> str:\n \n if isinstance(value, str):\n \n return value\n \n try:\n brands = sorted(value, key=len, reverse=True)\n \n return brands[0]\n \n except (KeyError, TypeError):\n return None",
"_____no_output_____"
],
[
"specs['brand'] = specs['brand'].map(get_brand)",
"_____no_output_____"
],
[
"specs['brand'].value_counts()[:40]",
"_____no_output_____"
]
],
[
[
"### model",
"_____no_output_____"
]
],
[
[
"for model in specs[specs['model'].notna()]['model'].tolist():\n\n if not isinstance(model, str):\n print(model)",
"['0001820825468', 'Nikon D7000']\n['Wingmaster® Standard Contour', 'Special Purpose Deer', 'Special Purpose', 'Premier Deer', 'Express® Deer', 'Express Deer', 'Cantilever w/Scope', 'Cantilever']\n['DMCGX7SBODY', 'Panasonic DMC-GX7']\n['Nikon J2', '0001820827572']\n['NEX7KB', 'Sony NEX-7']\n['Premier Deer', 'Special Purpose', 'Special Purpose Deer', 'Wingmaster® Standard Contour', 'Cantilever', 'Cantilever w/Scope', 'Express Deer', 'Express® Deer']\n['Nikon D3100', '0001820813284']\n['Sony NEX-6', 'NEX6LB']\n['NEX7B', 'Sony NEX-7']\n['25482', 'Nikon D4']\n['SLTA99V', 'Sony SLT-A99']\n['Pentax K-5', '0002707517668']\n['Cantilever w/Scope', 'Express Deer', 'Express® Deer', 'Premier Deer', 'Special Purpose', 'Special Purpose Deer', 'Wingmaster® Standard Contour', 'Cantilever']\n['NEX6B', 'Sony NEX-6']\n['Wingmaster® Standard Contour', 'Cantilever', 'Cantilever w/Scope', 'Express Deer', 'Express® Deer', 'Premier Deer', 'Special Purpose', 'Special Purpose Deer']\n['6596B002', 'Canon 60DA']\n['0002724286589', 'Sony SLT-A58']\n['5253B002', 'Canon 1DX']\n['Panasonic DMC-G6', 'DMCG6KK']\n['0001820825488', 'Nikon D600']\n['13290', 'Nikon D3100']\n['Sony SLT-A99', 'SLTA99V']\n['Canon T3i', 'Canon T3']\n['Nikon D600', '0001820813203']\n['Fuji X-PRO1', '16225391']\n['Pentax K-50', '0002707523311']\n['Nikon D5100', '25476']\n['0002724284758', 'Sony SLT-A37']\n['DSLRA560L', 'Sony A560']\n['Nikon D7000', '0001820813019']\n['Nikon D3100', '0001820813284']\n['DMCGF6KK', 'Panasonic DMC-GF6']\n['Q', 'W30']\n['CX7300', 'CX7300']\n['S8100 Optical Zoom: 10x', 'S8100\\nOptical Zoom:\\n10x']\n['M580 Screen Size: 3\"', 'M580\\nScreen Size:\\n3\"']\n"
],
[
"def get_model(value: str) -> str:\n \n if isinstance(value, str):\n \n return value\n \n try:\n models = sorted(value, key=len, reverse=True)\n \n return models[0]\n \n except (KeyError, TypeError):\n return None",
"_____no_output_____"
],
[
"specs['model'] = specs['model'].map(get_model)",
"_____no_output_____"
]
],
[
[
"### megapixels",
"_____no_output_____"
]
],
[
[
"for mp in specs[specs['megapixels'].notna()]['megapixels'].tolist():\n\n if not isinstance(mp, str):\n print(mp)",
"['12.1', '10.0']\n['12.0', '12.1']\n['8.0', '10.0']\n['12.1', '12.0']\n['12.1', '10.0']\n['12.1', '10.0']\n['14', '12.1']\n['24 Megapixels', '16 Megapixels']\n['8.0', '10.0']\n['12.0', '11.1']\n['12.1', '14']\n['14', '12.1']\n['12.0', '12.1']\n['12.1', '16']\n['14', '12.0']\n['14', '16']\n['12.0', '12.1', '10.0']\n['16', '10.0']\n['12.0', '12.1', '10.0']\n['16', '14']\n['10.0', '8.0']\n['10.0', '8.0']\n['16', '14']\n['10.0', '16']\n['14', '12.1']\n['16 Megapixels', '24 Megapixels']\n['10.2', '10.0']\n['18', '12.0']\n['14', '12.0']\n['24', '16']\n['12.0', '11.1']\n['10.1 MP\\nModel:\\nJ1', '10.1']\n['8.0 MP Model: E890', '8.0 MP\\nModel:\\nVPC-E890']\n['14.2 MP\\nModel:\\nD3100', '14.2']\n['16', '16.0 MP\\nModel:\\nS9600']\n['12.1', '12.1 MP\\nModel:\\nSX510 HS']\n['21.1', '18.0']\n['14.0 MP\\nModel:\\nTough 8010 / µ (mju) Tough 8010', '14.0 MP Model: Tough 8010 / µ (mju) Tough 8010']\n['14.0 MP', '14.0 MP']\n['12.1 MP\\nModel:\\nSX50 HS', '12.1']\n['10.1 MP\\nModel:\\nRebel XS / 1000D', '10.0 MP\\nBrand:\\nCanon']\n['14.2', '14.2 MP\\nBrand:\\nNikon']\n['8.1 MP\\nModel:\\nDSC-W90', '8.1 MP Model: DSC-W90']\n['6.3 MP\\nMPN:\\n8363A002AA', '6.3 MP Model: 10D']\n['18', '18.0 MP\\nBrand:\\nCanon']\n"
],
[
"def extract_number(value: str) -> int:\n \n match = re.search(r'\\d{0,2}(.\\d)?', value)\n \n try: \n return float(match.group(0)) if match else None\n except ValueError:\n return None",
"_____no_output_____"
],
[
"def get_megapixels(value: str) -> int:\n \n if isinstance(value, str):\n \n return extract_number(value)\n \n try:\n mps = sorted(value, key=len, reverse=True)\n \n return extract_number(mps[0])\n \n except (KeyError, TypeError):\n return None",
"_____no_output_____"
],
[
"specs['megapixels'] = specs['megapixels'].map(get_megapixels)",
"_____no_output_____"
],
[
"specs['megapixels'] = pd.to_numeric(specs['megapixels'])",
"_____no_output_____"
],
[
"specs['megapixels'].value_counts()",
"_____no_output_____"
]
],
[
[
"### type",
"_____no_output_____"
]
],
[
[
"for ctype in specs[specs['type'].notna()]['type'].tolist():\n\n if not isinstance(ctype, str):\n print(ctype)",
"['2 - 7 x 32 scope', 'Cantilever Scope Mount', 'Rifle Sight', 'Ventilated Rib']\n['2 - 7 x 32 scope', 'Cantilever Scope Mount', 'Rifle Sight', 'Ventilated Rib']\n['Telephoto', 'Step-Up Rings']\n['2 - 7 x 32 scope', 'Cantilever Scope Mount', 'Rifle Sight', 'Ventilated Rib']\n['2 - 7 x 32 scope', 'Cantilever Scope Mount', 'Rifle Sight', 'Ventilated Rib']\n['Compact digital camera', 'Electronically-controlled ND filter (â\\x80\\x932 AV) selection']\n['8.0 Megapixel, 1/2.5 inch type Charge Coupled Device (CCD)', 'Compact digital still camera with built-in flash, 3.8x Optical/4x Digital/15x Combined Zoom with Optical Image Stabilizer (IS) System']\n['Compact digital still camera with built-in flash, 10x Optical/4x Digital/40xCombined Zoom with Optical Image Stabilizer System', '9.0 Megapixel, 1/2.3 inch type Charge Coupled Device (CCD)']\n['Lens - 8.54 mm - f/3.0', '1.5\" LCD display']\n['Digital AF/AE SLR camera with built-in flash', 'TTL-CT-SIR AF-dedicated CMOS sensor\\n(TTL Secondary image-registration, phase detection)', 'CMOS sensor']\n['Point & Shoot Bundled Items: car charger, extra wall charger, Case or Bag, Extra Battery, Strap (Neck or Wrist)', 'Point & Shoot\\nBundled Items:\\nCase or Bag, Memory Card, Memory Reader, Strap (Neck or Wrist), Tripod']\n['SD, SDHC, SDXC (UHS Speed Class 1 compatible)', 'TTL', 'DIGIC 6 with iSAPS technology', '1/1.7\" type High Sensitivity CMOS']\n['Digital SLR', 'Digital SLR']\n['Point & Shoot\\nCountry/Region of Manufacture:\\nChina', 'Point & Shoot Country/Region of Manufacture: China']\n['High-sensitivity, high-resolution, large single-plate CMOS sensor', 'Digital, AF/AE single-lens reflex camera with built-in flash']\n['Intermittent flash firing', 'Infrared light remote controller', 'On camera, E-TTL II/E-TTL autoflash Speedlite']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 II Canon EF-S']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 IS II Canon EF-S', '3\" LCD display']\n['3\" LCD display', '3 x zoom lens - 16 - 50 mm - f/3.5-5.6 PZ OSS']\n['2.7\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 II Canon EF-S']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6']\n['3\" LCD display', '30 x zoom lens - 4.3 - 129 mm - f/3.4-5.8']\n['16 x zoom lens - 5 - 80 mm - f/3.5-5.9', '3\" LCD display']\n['7.5 x zoom lens - 18 - 135 mm - f/3.5-5.6 Canon EF-S', '3\" LCD display']\n['3\" LCD display', '18 x zoom lens - 4.5 - 81 mm - f/3.8-6.9']\n['2.5 x zoom lens - 20 - 50 mm - f/3.5-5.6 i-Function', '3.31\" AMOLED display']\n['Lens', '2.7\" LCD display']\n['12 x zoom lens - 4 - 48 mm - f/3.4-5.6', '3.2\" LCD display']\n['3\" LCD display', 'Carl Zeiss lens - 35 mm - f/2.0 Sonnar T*']\n['15 x zoom lens - 4.3 - 64.5 mm - f/3.3-5.9', '3\" LCD display']\n['2.5\" LCD display', 'Lens - 5.7 mm - f/3.2']\n['3.3\" OLED display', 'Carl Zeiss 5 x zoom lens - 4.7 - 23.5 mm - f/3.5-4.8']\n['4.3 x zoom lens - 24 - 105 mm - f/4.0 L IS USM Canon EF', '3\" LCD display']\n['3\" LCD display', 'Fujinon 8 x zoom lens - 4.5 - 36 mm - f/2.9-5.9']\n['3 x zoom lens - 14 - 42 mm - f/3.5-5.6 X', '3\" LCD display']\n['5 x zoom lens - 6.2 - 31 mm - f/3.0-5.6', '2.7\" LCD display']\n['2.7\" LCD display', '15 x zoom lens - 4.9 - 73.5 mm - f/3.0-5.2']\n['3\" LCD display', '18 x zoom lens - 4.5 - 81 mm - f/3.8-6.9']\n['3\" LCD display', '18 x zoom lens - 4.5 - 81 mm - f/3.8-6.9']\n['2.7\" LCD display', 'Lens']\n['5 x zoom lens - 4.3 - 21.5 mm - f/2.8-4.9', '3\" OLED display']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 DA L AL Pentax KAF']\n['2.5\" 
LCD display', 'Lens - 5.1 mm - f/3.2']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 II Canon EF-S', '2.7\" LCD display']\n['3 x zoom lens', '2.4\" LCD display']\n['3\" LCD display', '60 x zoom lens - 4.3 - 258 mm - f/3.3-6.5']\n['Lens - 5.7 mm - f/3.2', '2.5\" LCD display']\n['Lens - 18.5 mm - f/2.8', '3\" LCD display']\n['Lens - 5.7 mm - f/3.2', '2.5\" LCD display']\n['Fujinon 8 x zoom lens - 4.5 - 36 mm - f/2.9-5.9', '3\" LCD display']\n['3\" LCD display', '4 x zoom lens - 15.4 - 60.4 mm - f/2.8-5.8']\n['2.7\" LCD display', '5 x zoom lens - 5 - 25 mm - f/2.8-6.9']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 IS II Canon EF-S']\n['34 x zoom lens - 4 - 136 mm - f/3.0-5.9', '3\" LCD display']\n['2.7\" LCD display', 'Lens - 8.15 mm - f/3.0']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G Nikon AF-S DX VR']\n['5.8 x zoom lens - 18 - 105 mm - f/3.5-5.6 G Nikon AF-S DX ED VR', '3.2\" LCD display']\n['3 x zoom lens - 16 - 50 mm - f/3.5-5.6 PZ OSS', '3\" LCD display']\n['3.2\" LCD display', '3.5 x zoom lens - 24 - 85 mm - f/3.5-4.5 G Nikon AF-S ED VR']\n['3\" LCD display', '3 x zoom lens - 14 - 42 mm - f/3.5-5.6 X']\n['18 x zoom lens - 4 - 72 mm - f/3.2-5.8', '3\" LCD display']\n['3\" LCD display', '42 x zoom lens - 4.3 - 180 mm - f/3.0-5.9']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G Nikon AF-S DX VR', '3\" LCD display']\n['2.7\" LCD display', '5 x zoom lens - 4.5 - 22.5 mm - f/2.5-6.3']\n['2.5\" LCD display', '3 x zoom lens - 4.1 - 12.3 mm - f/3.3-5.9']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 II Canon EF-S', '2.7\" LCD display']\n['2.7\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 II Canon EF-S']\n['3\" LCD display', '3 x zoom lens - 10 - 30 mm - f/3.5-5.6']\n['7.1 x zoom lens - 6 - 42.8 mm - f/2.0-4.0', '3\" LCD display']\n['30 x zoom lens - 4.3 - 129 mm - f/3.4-5.8', '3\" LCD display']\n['8 x zoom lens - 5 - 40 mm - f/3.2-6.9', '2.7\" LCD display']\n['3\" LCD display', '7.5 x zoom lens - 18 - 135 mm - f/3.5-5.6 IS STM Canon EF-S']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6', '3\" LCD display']\n['2.7\" LCD display', 'Lens - 7.45 mm - f/3.0']\n['52 x zoom lens - 4.3 - 223.6 mm - f/2.8-5.6', '3\" LCD display']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6', '3\" LCD display']\n['3\" LCD display', '18 x zoom lens - 4 - 72 mm - f/3.2-5.8']\n['3\" LCD display', '5 x zoom lens - 4.6 - 23 mm - f/3.2-6.5']\n['3.2\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G Nikon AF-S DX VR II']\n['2.7\" LCD display', '5 x zoom lens - 6.2 - 31 mm - f/3.0-5.6']\n['2.7\" LCD display', 'Lens']\n['3\" LCD display', '5 x zoom lens - 4.6 - 23 mm - f/3.2-6.5']\n['5 x zoom lens - 4.5 - 22.5 mm - f/2.5-6.3', '2.7\" LCD display']\n['3\" LCD display', '14 x zoom lens - 4.5 - 63 mm - f/3.3-5.9']\n['3\" LCD display', '5 x zoom lens - 5 - 25 mm - f/3.9-4.8']\n['3\" OLED display', '30 x zoom lens - 4.5 - 135 mm - f/3.7-6.4']\n['2.7\" LCD display', '5 x zoom lens - 4.6 - 23 mm - f/3.2-6.5']\n['3.3\" OLED display', 'Carl Zeiss 5 x zoom lens - 4.7 - 23.5 mm - f/3.5-4.8']\n['3\" LCD display', '18 x zoom lens - 4.5 - 81 mm - f/3.8-6.9']\n['Lens', '2.7\" LCD display']\n['2.7\" LCD display', 'Fujinon 5 x zoom lens - 4.6 - 23 mm - f/3.5-6.3']\n['4.3 x zoom lens - 24 - 105 mm - f/4.0 L IS USM Canon EF', '3\" LCD display']\n['12 x zoom lens - 4 - 48 mm - f/3.4-5.6', '3.2\" LCD display']\n['52 x zoom lens - 4.3 - 223.6 mm - f/2.8-5.6', '3\" LCD display']\n['Lens - 18.5 mm - f/2.8', '3\" LCD display']\n['8 x zoom lens - 5 - 40 mm - f/3.2-6.9', '2.7\" LCD display']\n['3\" LCD display', '8 x zoom 
lens - 4.5 - 36 mm - f/3.7-6.6']\n['3 x zoom lens - 10 - 30 mm - f/3.5-5.6', '3\" LCD display']\n['3 x zoom lens - 4.1 - 12.3 mm - f/3.3-5.9', '2.5\" LCD display']\n['5 x zoom lens - 4.5 - 22.5 mm - f/2.5-6.3', '2.7\" LCD display']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 IS STM Canon EF-S']\n['2.8\" LCD display', '50 x zoom lens - 4.3 - 215 mm - f/3.4-6.5']\n['3\" LCD display', 'Lens']\n['15 x zoom lens - 4.9 - 73.5 mm - f/3.0-5.2', '2.7\" LCD display']\n['2.7\" LCD display', '5 x zoom lens - 6.2 - 31 mm - f/3.0-5.6']\n['Fujinon 5 x zoom lens - 4.6 - 23 mm - f/3.5-6.3', '2.7\" LCD display']\n['16 x zoom lens - 5 - 80 mm - f/3.5-5.9', '3\" LCD display']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G ED II Nikon AF-S DX']\n['3\" LCD display', '34 x zoom lens - 4 - 136 mm - f/3.0-5.9']\n['3\" LCD display', '3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G Nikon AF-S DX VR']\n['3\" LCD display', '5 x zoom lens - 4.5 - 22.5 mm - f/2.5-6.3']\n['2.7\" LCD display', '5 x zoom lens - 4.6 - 23 mm - f/3.2-6.5']\n['2.7\" LCD display', '25 x zoom lens - 4.3 - 107.5 mm - f/3.7-6.2']\n['3\" LCD display', 'Carl Zeiss 50 x zoom lens - 4.3 - 215 mm - f/2.8-6.3']\n['Carl Zeiss 3.6 x zoom lens - 10.4 - 37.1 mm - f/1.8-4.9', '3\" LCD display']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G Nikon AF-S DX VR', '3\" LCD display']\n['25 x zoom lens - 4.3 - 107.5 mm - f/3.7-6.2', '2.7\" LCD display']\n['3\" LCD display', '5 x zoom lens - 5.2 - 26 mm - f/1.8-5.7']\n['2.7\" LCD display', 'Lens']\n['3\" LCD display', '12 x zoom lens - 4.5 - 54 mm - f/3.3-6.3']\n['2.5\" LCD display', 'Lens - 5.7 mm - f/3.2']\n['4 x zoom lens - 4.9 - 19.6 mm - f/3.0-6.6', '2.7\" LCD display']\n['3\" LCD display', '60 x zoom lens - 4.3 - 258 mm - f/3.3-6.5']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 IS STM Canon EF-S', '3\" LCD display']\n['3\" LCD display', '7.1 x zoom lens - 6 - 42.8 mm - f/2.0-4.0']\n['7.8 x zoom lens - 18 - 140 mm - f/3.5-5.6 G Nikon AF-S DX ED VR', '3.2\" LCD display']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6 G ED II Nikon AF-S DX', '3\" LCD display']\n['12 x zoom lens - 4.5 - 54 mm - f/3.3-6.3', '3\" LCD display']\n['3 x zoom lens - 18 - 55 mm - f/3.5-5.6', '3\" LCD display']\n['4.8\" LCD display', '21 x zoom lens - 4.1 - 86.1 mm - f/2.8-5.9']\n['2.7\" LCD display', 'Lens']\n['Image Stabilization', 'Standard\\nCamera Technology', 'High Quality', 'Portrait']\n['Fisheye\\nCamera Technology', 'Fisheye\\nCamera Technology']\n['Film\\nImage Stabilization\\nCamera Technology', 'Image Stabilization\\nCamera Technology', 'Image Stabilization\\nCamera Technology', 'Telephoto', 'Telephoto', 'Telephoto']\n['Vari angle 7.7cm (3.0\") 3:2 Clear View II TFT, approx. 1040K dots', 'TTL-CT-SIR with a CMOS sensor', 'Electronically-controlled focal-plane shutter', 'Electronic viewfinder with image sensor', '22.5mm x 15.0mm CMOS', 'Pentaprism']\n['TTL-CT-SIR with a CMOS sensor', '22.5mm x 15.0mm CMOS', 'Electronic viewfinder with image sensor', 'Electronically-controlled focal-plane shutter', 'Pentaprism', 'Vari angle 7.7cm (3.0\") 3:2 Clear View II TFT, approx. 1040K dots']\n['AMOLED with Touch (C-type Touch Control Enabled) and Tilt (Up 90, Down 45)', 'External Flash only (Bundle with SEF8A)', 'Single image, Thumbnails (3 / 15 / 40 images), Slide show, Movie', 'Phase Detection & Contrast AF']\n['Pentaprism', 'Electronically-controlled focal-plane shutter', 'Vari angle 7.7cm (3.0\") 3:2 Clear View II TFT, approx. 
1040K dots', 'Electronic viewfinder with image sensor', '22.5mm x 15.0mm CMOS', 'TTL-CT-SIR with a CMOS sensor']\n['Phase Detection & Contrast AF', 'AMOLED with Touch (C-type Touch Control Enabled) and Tilt (Up 90, Down 45)', 'Single image, Thumbnails (3 / 15 / 40 images), Slide show, Movie', 'External Flash only (Bundle with SEF8A)']\n['Electronically-controlled focal-plane shutter', 'Vari angle 7.7cm (3.0\") 3:2 Clear View II TFT, approx. 1040K dots', 'Pentaprism', '22.5mm x 15.0mm CMOS', 'Electronic viewfinder with image sensor', 'TTL-CT-SIR with a CMOS sensor']\n['Single image, Thumbnails (3 / 15 / 40 images), Slide show, Movie', 'Phase Detection & Contrast AF', 'Electronically controlled vertical-run focal plane shutter', 'AMOLED with Touch (C-type Touch Control Enabled) and Tilt (Up 90, Down 45)', 'External Flash only (Bundle with SEF8A)']\n['AMOLED with Touch (C-type Touch Control Enabled) and Tilt (Up 90, Down 45)', 'External Flash only (Bundle with SEF8A)', 'Phase Detection & Contrast AF', 'Single image, Thumbnails (3 / 15 / 40 images), Slide show, Movie']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['DIGIC 6 with iSAPS technology', 'TTL', 'sRGB', 'SD, SDHC, SDXC (UHS Speed Class 1 compatible)', '1.5 type (18.7 mm x 14.0 mm) Canon high-sensitivity CMOS', 'TTL']\n['Single-lens reflex digital camera', 'Electronically-controlled vertical-travel focal-plane shutter']\n['CompactFlash Type I (UDMA compatible), SD card, SDHC card or SDXC card. High-speed writing with UHS-I type SD cards is supported', 'Dual \"DIGIC 6\"', '22.4 x 15.0 mm CMOS', 'Electronically-controlled focal-plane shutter', 'TTL-CT-SIR with a dedicated CMOS sensor', '7.7cm (3.0\") Clear View II TFT, approx. 1040K dots', 'Pentaprism', 'Auto white balance with the imaging sensor', 'Electronic viewfinder with image sensor']\n['sRGB', 'TTL', 'SD, SDHC, SDXC', 'TTL', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD']\n['Contrast AF system', 'Live MOS Sensor', 'LCD Live View Finder (2,764,800 dots equivalent)', 'Digital Single Lens Mirrorless camera', 'Tilt Static LCD with Touch Monitor', 'TTL Built-in Flash, GN7.0 equivalent (ISO 200 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up', 'Focal-plane shutter']\n['Tilt Static LCD with Touch Monitor', 'TTL Built-in Flash, GN7.0 equivalent (ISO 200 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up', 'Live MOS Sensor', 'Focal-plane shutter', 'Contrast AF system', 'LCD Live View Finder (2,764,800 dots equivalent)', 'Digital Single Lens Mirrorless camera']\n['Single-lens reflex digital camera', 'Electronically-controlled vertical-travel focal-plane shutter']\n['Digital Single Lens Mirrorless camera', 'TTL Built-in Flash, GN7.0 equivalent (ISO 200 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up', 'Focal-plane shutter', 'Tilt Static LCD with Touch Monitor', 'Contrast AF system', 'Live MOS Sensor', 'LCD Live View Finder (2,764,800 dots equivalent)']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['Focal-plane shutter', 'Live MOS Sensor', 'Digital Single Lens Mirrorless camera', 'TFT LCD with Touch panel, Tiltable monitor', 'Contrast AF system', 'TTL Built-in-Flash, GN6.3 equivalent (ISO 160 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['LCD Live View Finder (2,764,800 dots equivalent)', 'Digital Single Lens Mirrorless camera', 'Focal-plane shutter', 
'Contrast AF system', 'Live MOS Sensor', 'TTL Built-in Flash, GN7.0 equivalent (ISO 200 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up', 'Tilt Static LCD with Touch Monitor']\n['TTL', '1/2.3 type back-illuminated CMOS', 'sRGB', 'TTL', 'DIGIC 6 with iSAPS technology', 'SD, SDHC, SDXC']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['1/2.3 type CCD', 'SD, SDHC, SDXC', 'TTL', 'DIGIC 4+ with iSAPS technology', 'sRGB', 'TTL']\n['Electronically-controlled vertical-travel focal-plane mechanical shutter; electronic front-curtain shutter available in mirror up release mode', 'Single-lens reflex digital camera']\n['SD, SDHC, SDXC', 'TTL', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD', 'sRGB', 'TTL']\n['Tilt Static LCD with Touch Monitor', 'Live MOS Sensor', 'Digital Single Lens Mirrorless camera', 'LCD Live View Finder (2,764,800 dots equivalent)', 'Contrast AF system', 'Focal-plane shutter', 'TTL Built-in Flash, GN7.0 equivalent (ISO 200 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up']\n['SD card, SDHC card or SDXC card', 'Electronic viewfinder with image sensor', 'Auto white balance with the imaging sensor', 'Electronically-controlled focal-plane shutter, with electronic first curtain', '7.5cm (3.0\") TFT, approx. 460k dots', 'DIGIC 4', 'Pentamirror', 'Approx. 22.3 mm x 14.9 mm', 'TTL-CT-SIR with a CMOS sensor']\n['SD, SDHC, SDXC', 'sRGB', 'TTL', 'TTL', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD']\n['sRGB', 'TTL', '1/2.3 type back-illuminated CMOS', 'TTL', 'DIGIC 6 with iSAPS technology', 'SD, SDHC, SDXC (UHS Speed Class 1 compatible)']\n['1.0 cm (0.39 type) OLED Electronic viewfinder (color)', 'Electronically-controlled, vertical-traverse, focal-plane type', 'Built-in flash', 'Fast Hybrid AF(phase-detection AF/contrast-detection AF)', 'APS-C type (23.5 x 15.6mm) \"Exmor\" APS HD CMOS sensor']\n['Auto white balance with the imaging sensor', 'TTL-CT-SIR with a dedicated CMOS sensor', 'CompactFlash Type I (UDMA compatible), SD card, SDHC card or SDXC card. High-speed writing with UHS-I type SD cards is supported', 'Dual \"DIGIC 6\"', 'Pentaprism', 'Electronically-controlled focal-plane shutter', '22.4 x 15.0 mm CMOS', 'Electronic viewfinder with image sensor', '7.7cm (3.0\") Clear View II TFT, approx. 1040K dots']\n['Electronically-controlled focal-plane shutter', '7.7cm (3.0\") Clear View II TFT, approx. 1040K dots', 'Pentaprism', 'Electronic viewfinder with image sensor', 'Auto white balance with the imaging sensor', 'CompactFlash Type I (UDMA compatible), SD card, SDHC card or SDXC card. 
High-speed writing with UHS-I type SD cards is supported', 'TTL-CT-SIR with a dedicated CMOS sensor', 'Dual \"DIGIC 6\"', '22.4 x 15.0 mm CMOS']\n['TTL', '1/2.3 type back-illuminated CMOS', 'DIGIC 6 with iSAPS technology', 'sRGB', 'TTL', 'SD, SDHC, SDXC']\n['Contrast AF system', 'Digital Single Lens Mirrorless camera', 'OLED Live View Finder (1,440,000 dots)', 'Live MOS Sensor', 'Focal-plane shutter', 'TFT LCD with Touch monitor', 'TTL Built-in-Flash, GN10.5 equivalent (ISO 160 ・m), GN8.3 equivalent (ISO 100 ・m), Built-in Pop-up']\n['sRGB', 'SD, SDHC, SDXC', 'TTL', 'DIGIC 4 with iSAPS technology', '1/2.3 type back-illuminated CMOS', 'TTL']\n['Single-lens reflex digital camera', 'Electronically-controlled vertical-travel focal-plane shutter']\n['TTL', '1/2.3 type back-illuminated CMOS', 'DIGIC 4+ with iSAPS technology', 'sRGB', 'SD, SDHC, SDXC', 'TTL']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['3.0’’ wide-TFT color LCD, Wide viewing 170°, AR Coating (LCD Cover only), with protection acrylic cover LCD frame rate: approx. 60', 'Integrated auto flash control', '1/2.3\" CMOS', 'TTL contrast detection auto focus system']\n['SD, SDHC, SDXC', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD', 'TTL', 'sRGB', 'TTL']\n['sRGB', 'TTL', 'DIGIC 6 with iSAPS technology', 'SD, SDHC, SDXC (UHS Speed Class 1 compatible)', '1/1.7 type back-illuminated CMOS', 'TTL']\n['SD, SDHC, SDXC (UHS Speed Class 1 compatible)', 'DIGIC 6 with iSAPS technology', '1.0 type back-illuminated CMOS', 'TTL', 'TTL', 'sRGB']\n['7.5cm (3.0\") TFT, approx. 460k dots', 'Pentamirror', 'Auto white balance with the imaging sensor', 'Electronically-controlled focal-plane shutter, with electronic first curtain', 'SD card, SDHC card or SDXC card', 'DIGIC 4', 'Approx. 
22.3 mm x 14.9 mm', 'TTL-CT-SIR with a CMOS sensor', 'Electronic viewfinder with image sensor']\n['Focal-plane shutter', 'OLED Live View Finder (1,440,000 dots)', 'Live MOS Sensor', 'Contrast AF system', 'TTL Built-in-Flash, GN10.5 equivalent (ISO 160 ・m), GN8.3 equivalent (ISO 100 ・m), Built-in Pop-up', 'TFT LCD with Touch monitor', 'Digital Single Lens Mirrorless camera']\n['SD, SDHC, SDXC', 'TTL', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD', 'TTL', 'sRGB']\n['SD, SDHC, SDXC (UHS Speed Class 1 compatible)', 'TTL', 'DIGIC 6 with iSAPS technology', 'sRGB', '1/1.7 type back-illuminated CMOS', 'TTL']\n['TTL', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD', 'sRGB', 'SD, SDHC, SDXC', 'TTL']\n['TFT LCD with Touch panel, Tiltable monitor', 'TTL Built-in-Flash, GN6.3 equivalent (ISO 160 ・m), GN5.0 equivalent (ISO 100 ・m), Built-in Pop-up', 'Digital Single Lens Mirrorless camera', 'Focal-plane shutter', 'Contrast AF system', 'Live MOS Sensor']\n['TTL', 'sRGB', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD', 'TTL', 'SD, SDHC, SDXC']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['Electronically-controlled vertical-travel focal-plane shutter', 'Single-lens reflex digital camera']\n['Digital Single Lens Mirrorless camera', 'Live MOS Sensor', 'Contrast AF system', 'OLED Live View Finder (1,440,000 dots)', 'TFT LCD with Touch monitor', 'TTL Built-in-Flash, GN10.5 equivalent (ISO 160 ・m), GN8.3 equivalent (ISO 100 ・m), Built-in Pop-up', 'Focal-plane shutter']\n['SD, SDHC, SDXC', 'DIGIC 4+ with iSAPS technology', '1/2.3 type CCD', 'sRGB', 'TTL', 'TTL']\n['3.0’’ wide-TFT color LCD, Wide viewing 170°, AR Coating (LCD Cover only), with protection acrylic cover LCD frame rate: approx. 60', 'Integrated auto flash control', 'TTL contrast detection auto focus system', '1/2.3\" CMOS']\n['1.5 type (18.7 mm x 14.0 mm) Canon high-sensitivity CMOS', 'TTL']\n['1/2.3 type back-illuminated CMOS', 'TTL', 'TTL']\n['Hybrid AF (TTL and external systems)', 'Mechanical shutter & electronic shutter']\n['TTL', 'TTL']\n['TTL', 'Mechanical shutter & Electronic shutter']\n['18.7 mm x 14.0 mm Canon high-sensitivity CMOS', 'TTL', 'TTL']\n['TTL', 'Mechanical shutter & electronic shutter']\n['TTL', 'TTL']\n['TTL', 'TTL']\n['TTL', 'TTL']\n['TTL', 'TTL']\n['1/2.3 type back-illuminated CMOS', 'TTL']\n['TTL', 'Mechanical shutter & electronic shutter']\n"
],
[
"def get_type(value: str) -> str:\n \n if isinstance(value, str):\n \n return value\n \n try:\n types = sorted(value, key=len, reverse=True)\n \n return types[0]\n \n except (KeyError, TypeError):\n return None",
"_____no_output_____"
],
[
"specs['type'] = specs['type'].map(get_type)",
"_____no_output_____"
],
[
"specs['type'].value_counts()[0:40]",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a0f094419da6cf2d1bb05118fda18b51123cf6e
| 64,901 |
ipynb
|
Jupyter Notebook
|
docs/source/dowhy_causal_api.ipynb
|
ktmud/dowhy
|
67e6588b1ca07f788e97f32ba9500d3b14a73564
|
[
"MIT"
] | null | null | null |
docs/source/dowhy_causal_api.ipynb
|
ktmud/dowhy
|
67e6588b1ca07f788e97f32ba9500d3b14a73564
|
[
"MIT"
] | null | null | null |
docs/source/dowhy_causal_api.ipynb
|
ktmud/dowhy
|
67e6588b1ca07f788e97f32ba9500d3b14a73564
|
[
"MIT"
] | null | null | null | 40.537789 | 3,904 | 0.440533 |
[
[
[
"# Demo for the DoWhy causal API\nWe show a simple example of adding a causal extension to any dataframe. ",
"_____no_output_____"
]
],
[
[
"import os, sys\nsys.path.append(os.path.abspath(\"../../\"))",
"_____no_output_____"
],
[
"import dowhy.datasets\nimport dowhy.api\n\nimport numpy as np\nimport pandas as pd\n\nfrom statsmodels.api import OLS",
"_____no_output_____"
],
[
"data = dowhy.datasets.linear_dataset(beta=5,\n num_common_causes=1,\n num_instruments = 0,\n num_samples=1000,\n treatment_is_binary=True)\ndf = data['df']\ndf['y'] = df['y'] + np.random.normal(size=len(df)) # Adding noise to data. Without noise, the variance in Y|X, Z is zero, and mcmc fails.\ndata['dot_graph'] = 'digraph { v ->y;X0-> v;X0-> y;}'",
"_____no_output_____"
],
[
"# data['df'] is just a regular pandas.DataFrame\ndf.causal.do(x='v',\n variable_types={'v': 'b', 'y': 'c', 'X0': 'c'},\n outcome='y',\n common_causes=['X0']).groupby('v').mean().plot(y='y', kind='bar')",
"WARNING:dowhy.do_why:Causal Graph not provided. DoWhy will construct a graph based on data inputs.\nINFO:dowhy.do_why:Model to find the causal effect of treatment ['v'] on outcome ['y']\n/home/amit/python-virtual-envs/env/lib/python3.5/site-packages/sklearn/ensemble/weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.\n from numpy.core.umath_tests import inner1d\nINFO:dowhy.causal_identifier:Common causes of treatment and outcome:['U', 'X0']\nWARNING:dowhy.causal_identifier:There are unobserved common causes. Causal effect cannot be identified.\n"
],
[
"df.causal.do(x={'v': 1}, \n variable_types={'v': 'b', 'y': 'c', 'X0': 'c'}, \n outcome='y',\n method='weighting', \n common_causes=['X0'],\n proceed_when_unidentifiable=True).groupby('v').mean().plot(y='y', kind='bar')",
"WARNING:dowhy.do_why:Causal Graph not provided. DoWhy will construct a graph based on data inputs.\nINFO:dowhy.do_why:Model to find the causal effect of treatment ['v'] on outcome ['y']\nINFO:dowhy.causal_identifier:Common causes of treatment and outcome:['U', 'X0']\nWARNING:dowhy.causal_identifier:There are unobserved common causes. Causal effect cannot be identified.\nINFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.\nINFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]\nINFO:dowhy.do_sampler:Using WeightingSampler for do sampling.\nINFO:dowhy.do_sampler:Caution: do samplers assume iid data.\n"
],
[
"cdf_1 = df.causal.do(x={'v': 1}, \n variable_types={'v': 'b', 'y': 'c', 'X0': 'c'}, \n outcome='y', \n dot_graph=data['dot_graph'],\n proceed_when_unidentifiable=True)\n\ncdf_0 = df.causal.do(x={'v': 0}, \n variable_types={'v': 'b', 'y': 'c', 'X0': 'c'}, \n outcome='y', \n dot_graph=data['dot_graph'],\n proceed_when_unidentifiable=True)",
"INFO:dowhy.do_why:Model to find the causal effect of treatment ['v'] on outcome ['y']\nINFO:dowhy.causal_identifier:Common causes of treatment and outcome:['U', 'X0']\nWARNING:dowhy.causal_identifier:There are unobserved common causes. Causal effect cannot be identified.\nINFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.\nINFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]\nINFO:dowhy.do_sampler:Using WeightingSampler for do sampling.\nINFO:dowhy.do_sampler:Caution: do samplers assume iid data.\nINFO:dowhy.do_why:Model to find the causal effect of treatment ['v'] on outcome ['y']\nINFO:dowhy.causal_identifier:Common causes of treatment and outcome:['U', 'X0']\nWARNING:dowhy.causal_identifier:There are unobserved common causes. Causal effect cannot be identified.\nINFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.\nINFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]\nINFO:dowhy.do_sampler:Using WeightingSampler for do sampling.\nINFO:dowhy.do_sampler:Caution: do samplers assume iid data.\n"
],
[
"cdf_0",
"_____no_output_____"
],
[
"cdf_1",
"_____no_output_____"
]
],
[
[
"## Comparing the estimate to Linear Regression\nFirst, estimating the effect using the causal data frame, and the 95% confidence interval.",
"_____no_output_____"
]
],
[
[
"(cdf_1['y'] - cdf_0['y']).mean()",
"_____no_output_____"
],
[
"1.96*(cdf_1['y'] - cdf_0['y']).std() / np.sqrt(len(df))",
"_____no_output_____"
]
],
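[
[
"As a brief note on the cell above: it computes the half-width of a normal-approximation 95% confidence interval, `1.96 * s / sqrt(n)`, so the estimated effect is the mean difference plus or minus this quantity, with `s` the sample standard deviation of the per-row differences and `n` the number of rows.",
"_____no_output_____"
]
],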
[
[
"Comparing to the estimate from OLS.",
"_____no_output_____"
]
],
[
[
"model = OLS(df['y'], df[['X0', 'v']])\nresult = model.fit()\nresult.summary()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a0f196c74df0e3e30f211dad2e7a96f126263b9
| 421,846 |
ipynb
|
Jupyter Notebook
|
day2_visualisation.ipynb
|
pawelbura/dw_matrix_car
|
8e102eb1218de82323521fbf2928e7a820857486
|
[
"MIT"
] | null | null | null |
day2_visualisation.ipynb
|
pawelbura/dw_matrix_car
|
8e102eb1218de82323521fbf2928e7a820857486
|
[
"MIT"
] | null | null | null |
day2_visualisation.ipynb
|
pawelbura/dw_matrix_car
|
8e102eb1218de82323521fbf2928e7a820857486
|
[
"MIT"
] | null | null | null | 421,846 | 421,846 | 0.918065 |
[
[
[
"# upgrade tables bo read_hdf wymaga zaraz\n!pip install --upgrade tables",
"Requirement already up-to-date: tables in /usr/local/lib/python3.6/dist-packages (3.6.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\n"
],
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"ls \"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car\"",
"\u001b[0m\u001b[01;34mdata\u001b[0m/ LICENSE README.md\n"
],
[
"cd \"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car/data\"",
"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car/data\n"
],
[
"ls",
"car.h5\n"
],
[
"df = pd.read_hdf('car.h5')\ndf.shape",
"_____no_output_____"
],
[
"# jakie mamy kolumny\ndf.columns.values",
"_____no_output_____"
],
[
"df['price_value'].hist(bins=100)",
"_____no_output_____"
],
[
"df['price_value'].max()",
"_____no_output_____"
],
[
"df['price_value'].describe()",
"_____no_output_____"
],
[
"df['price_value']",
"_____no_output_____"
],
[
"df['param_marka-pojazdu'].unique()",
"_____no_output_____"
],
[
"df['param_marka-pojazdu'].value_counts()",
"_____no_output_____"
],
[
"df['param_marka-pojazdu'].describe()",
"_____no_output_____"
],
[
"\ndf.groupby('param_marka-pojazdu')['price_value'].mean()",
"_____no_output_____"
],
[
"(\n df\n .groupby('param_marka-pojazdu')['price_value']\n #.agg(np.mean)\n #.agg(np.median)\n .agg([np.mean, np.median, np.size])\n .sort_values(by='size', ascending=False)\n).plot(kind='bar', figsize=(20,5), subplots=True)\n",
"_____no_output_____"
]
],
[
[
"# funkcja rysująca 3 wykresy",
"_____no_output_____"
]
],
[
[
"def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=[np.mean, np.median, np.size], feat_sort='mean', ascending=False, top=50, subplots=True, ):\n return(\n df\n .groupby(feat_groupby)[feat_agg]\n .agg(agg_funcs)\n .sort_values(by=feat_sort, ascending=ascending)\n .head(top)\n ).plot(kind='bar', figsize=(20,5), subplots=subplots)\n",
"_____no_output_____"
],
[
"group_and_barplot('param_marka-pojazdu');",
"_____no_output_____"
],
[
"group_and_barplot('param_make');",
"_____no_output_____"
],
[
"group_and_barplot('param_kolor');",
"_____no_output_____"
],
[
"group_and_barplot('feature_bluetooth');",
"_____no_output_____"
],
[
"group_and_barplot('param_rodzaj-paliwa');",
"_____no_output_____"
],
[
"group_and_barplot('param_kraj-pochodzenia');",
"_____no_output_____"
],
[
"group_and_barplot('param_kraj-pochodzenia',feat_sort='size');",
"_____no_output_____"
],
[
"df['param_kraj-pochodzenia'].count()",
"_____no_output_____"
],
[
"df[df['param_kraj-pochodzenia'].isna()].head()",
"_____no_output_____"
],
[
"df.count()",
"_____no_output_____"
],
[
"group_and_barplot('param_przebieg', feat_sort='size');",
"_____no_output_____"
],
[
"df.param_przebieg.count()",
"_____no_output_____"
],
[
"df['param_przebieg_int'] = df['param_przebieg'].map(lambda x: int(str(x).replace(\"km\",\"\").replace(\" \", \"\")) if x else 0).astype(int)",
"_____no_output_____"
],
[
"df.param_przebieg_int.value_counts()",
"_____no_output_____"
],
[
"group_and_barplot('param_przebieg_int');",
"_____no_output_____"
],
[
"df.param_przebieg_int.describe()",
"_____no_output_____"
],
[
"df.param_przebieg_int.hist(bins=1000, figsize=(20,5));",
"_____no_output_____"
],
[
"df['param_przebieg_bins'] = pd.qcut(df['param_przebieg_int'],20).map(lambda x: x.right)",
"_____no_output_____"
],
[
"df['param_przebieg_bins'].value_counts()",
"_____no_output_____"
],
[
"group_and_barplot('param_przebieg_bins', feat_sort='param_przebieg_bins', ascending=True);",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0f20035811f89f3e9fda86f3f32a4c9221806d
| 6,608 |
ipynb
|
Jupyter Notebook
|
gan_notebook.ipynb
|
George3d6/GAN-70-Lines-of-Julia
|
70c515fe343ef02dc0fd3529ce647c201cc1266a
|
[
"MIT"
] | null | null | null |
gan_notebook.ipynb
|
George3d6/GAN-70-Lines-of-Julia
|
70c515fe343ef02dc0fd3529ce647c201cc1266a
|
[
"MIT"
] | null | null | null |
gan_notebook.ipynb
|
George3d6/GAN-70-Lines-of-Julia
|
70c515fe343ef02dc0fd3529ce647c201cc1266a
|
[
"MIT"
] | null | null | null | 31.617225 | 121 | 0.479873 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a0f3215e4df6baa55c8c3305a4d06aab3105f38
| 2,580 |
ipynb
|
Jupyter Notebook
|
Data Type.ipynb
|
sounak9/python-projects
|
0043dec6b87abda63dfe7cbae793638ab6634886
|
[
"Apache-2.0"
] | 267 |
2020-02-10T05:29:53.000Z
|
2021-12-11T20:14:38.000Z
|
Data Type.ipynb
|
nikhilgubbi/Introduction-to-Python
|
5a30eb483f1da25bb6d425ba69219e474fd8d1d3
|
[
"Apache-2.0"
] | null | null | null |
Data Type.ipynb
|
nikhilgubbi/Introduction-to-Python
|
5a30eb483f1da25bb6d425ba69219e474fd8d1d3
|
[
"Apache-2.0"
] | 247 |
2020-02-07T15:46:32.000Z
|
2021-01-17T13:30:45.000Z
| 14.913295 | 36 | 0.421705 |
[
[
[
"### Data Type",
"_____no_output_____"
]
],
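[
[
"# Note for the cells below: `/` always returns a float in Python 3, `**` is exponentiation\n# and `%` is the remainder; type() makes the resulting data types explicit\ntype(1 + 1), type(1 / 2), type(2 ** 4)",
"_____no_output_____"
]
],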
[
[
"1+1",
"_____no_output_____"
],
[
"1*3",
"_____no_output_____"
],
[
"1/2",
"_____no_output_____"
],
[
"2**4",
"_____no_output_____"
],
[
"4%2",
"_____no_output_____"
],
[
"5%2",
"_____no_output_____"
],
[
"(2+3)*(5+5)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0f39c6e61c1321199084f66723058d5cd5f798
| 7,451 |
ipynb
|
Jupyter Notebook
|
00_checker.ipynb
|
muellerzr/dependency_verifier
|
29e214ac0d44248a248ecfa9499b18c2183a5144
|
[
"Apache-2.0"
] | 4 |
2021-09-26T21:36:39.000Z
|
2022-01-06T16:52:00.000Z
|
00_checker.ipynb
|
muellerzr/dependency_verifier
|
29e214ac0d44248a248ecfa9499b18c2183a5144
|
[
"Apache-2.0"
] | 7 |
2021-09-27T21:25:52.000Z
|
2021-10-02T00:16:40.000Z
|
00_checker.ipynb
|
muellerzr/dependency_verifier
|
29e214ac0d44248a248ecfa9499b18c2183a5144
|
[
"Apache-2.0"
] | 1 |
2021-09-30T17:03:22.000Z
|
2021-09-30T17:03:22.000Z
| 28.76834 | 250 | 0.57603 |
[
[
[
"# default_exp checker",
"_____no_output_____"
]
],
[
[
"# Dependency Checker\n\n> A pragmatic way to talk with pypi and find out what dependencies are out of date",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbverbose.showdoc import *",
"_____no_output_____"
]
],
[
[
"## Dependency Traversing\n\nSometimes, we may want to check the current installed versions of a project's basic dependencies, and further check if those dependencies are out of date. `dependency_checker` is designed around this concept, utilizing the `pipdeptree` library.",
"_____no_output_____"
]
],
[
[
"#export\nimport json, ast, pipdeptree, sys, subprocess",
"_____no_output_____"
],
[
"#export\ndef get_installed_dependencies(\n package_name:str, # The name of a python package\n depth_limit:int=1, # How deep to follow nested dependencies\n include_self:bool=False, # Whether to include the original library in the results\n) -> dict: # A dictionary of {package:version}\n \"Recursively grabs dependencies of python package\"\n pkgs = pipdeptree.get_installed_distributions(local_only=False, user_only=False)\n tree = pipdeptree.PackageDAG.from_pkgs(pkgs)\n tree = tree.filter([package_name], None)\n curr_depth=0\n def _get_deps(j, dep_dict={}, curr_depth=0):\n if curr_depth > depth_limit: return dep_dict\n if isinstance(j, list):\n for a in j:\n _get_deps(a, dep_dict, curr_depth)\n elif isinstance(j, dict):\n if 'package_name' in j.keys():\n if j['package_name'] not in dep_dict.keys():\n dep_dict[j['package_name']] = j['installed_version']\n if 'dependencies' in j.keys():\n curr_depth += 1\n return _get_deps(j['dependencies'], dep_dict, curr_depth)\n return dep_dict\n deps = _get_deps(ast.literal_eval(pipdeptree.render_json_tree(tree, 4)), {})\n if not include_self: deps.pop(package_name, None)\n return deps",
"_____no_output_____"
]
],
[
[
"This function operates by traversing a DAG and grabbing dependencies of projects found from it. Generally a depth of 1 is recommended, below is a quick guide to what will be returned at each depth.\n\n\n**0**: A depth of zero will an empty dictionary unless `include_self` is `True`. If so, it will include only the library name:",
"_____no_output_____"
]
],
[
[
"deps = get_installed_dependencies('pipdeptree', depth_limit=0)",
"_____no_output_____"
],
[
"assert deps == {}",
"_____no_output_____"
],
[
"deps = get_installed_dependencies('pipdeptree', depth_limit=0, include_self=True)",
"_____no_output_____"
],
[
"assert deps == {'pipdeptree':'2.1.0'}",
"_____no_output_____"
]
],
[
[
"**1**: A depth of one will return the project and its main dependencies (if `include_self` is `True`), such as those stated in the `requirements.txt` as well as packages such as `pip` ",
"_____no_output_____"
]
],
[
[
"deps = get_installed_dependencies('pipdeptree', depth_limit=1, include_self=True)",
"_____no_output_____"
],
[
"assert len(deps.keys()) == 2",
"_____no_output_____"
],
[
"assert all(package in deps.keys() for package in ('pipdeptree', 'pip'))",
"_____no_output_____"
],
[
"deps = get_installed_dependencies('pipdeptree', depth_limit=1, include_self=False)",
"_____no_output_____"
],
[
"assert len(deps.keys()) == 1",
"_____no_output_____"
],
[
"assert 'pip' in deps.keys()",
"_____no_output_____"
]
],
[
[
"**2+**: A depth of two or greater will return the dependencies for each of the dependencies above that layer. These allow for more fine-grained requirements",
"_____no_output_____"
],
[
"## Checking for New Versions\n\nGiven these dependencies, we can also then check for a new version to see if an upgrade is available. This is what the `is_latest_version` function is designed for:",
"_____no_output_____"
]
],
[
[
"#export\ndef is_latest_version(\n package_name:str, # The name of a pip python package \n current_version:str, # The installed version of a package, such as \"1.2.3\"\n) -> bool: # Whether the versions are the same\n \"Compares the current version with the latest version, and returns if they are different\"\n latest_version = str(subprocess.run([sys.executable, '-m', 'pip', 'install', '{}==random'.format(package_name)], capture_output=True, text=True))\n latest_version = latest_version[latest_version.find('(from versions:')+15:]\n latest_version = latest_version[:latest_version.find(')')]\n latest_version = latest_version.replace(' ','').split(',')[-1]\n\n if latest_version == current_version:\n return True\n else:\n return False",
"_____no_output_____"
],
[
"using_latest_version = is_latest_version('pipdeptree', '2.0.9')\nassert using_latest_version == False",
"_____no_output_____"
]
],
[
[
"Here we tested if `pipdeptree` is the latest version. The version we specified is one less than that of the latest release at the time of development. We got `False`, meaning a newer version is available.",
"_____no_output_____"
]
],
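[
[
"Putting the two helpers together: a minimal sketch (using `pipdeptree` itself as the example package) that walks a package's direct dependencies and flags any with a newer release on PyPI:",
"_____no_output_____"
]
],
[
[
"deps = get_installed_dependencies('pipdeptree', depth_limit=1, include_self=True)\nfor package, version in deps.items():\n    status = 'up to date' if is_latest_version(package, version) else 'outdated'\n    print(f'{package}=={version}: {status}')",
"_____no_output_____"
]
]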
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a0f5651b62186b8f25ee0d00393274040373dea
| 3,231 |
ipynb
|
Jupyter Notebook
|
Gardenkiak/Programazioa/del sententzia.ipynb
|
mpenagar/Konputaziorako-Sarrera
|
1f276cbda42e9d3d0beb716249fadbad348533d7
|
[
"MIT"
] | null | null | null |
Gardenkiak/Programazioa/del sententzia.ipynb
|
mpenagar/Konputaziorako-Sarrera
|
1f276cbda42e9d3d0beb716249fadbad348533d7
|
[
"MIT"
] | null | null | null |
Gardenkiak/Programazioa/del sententzia.ipynb
|
mpenagar/Konputaziorako-Sarrera
|
1f276cbda42e9d3d0beb716249fadbad348533d7
|
[
"MIT"
] | null | null | null | 18.568966 | 125 | 0.443516 |
[
[
[
"# `del` sententzia\n\n**Indexagarriak** diren objektu **aldakorretan**, balio bat (edo balio sorta bat) bere indizearen bidez ezabatu daiteke",
"_____no_output_____"
]
],
[
[
"z = list(enumerate(\"aeiou\"))\nprint(z)",
"[(0, 'a'), (1, 'e'), (2, 'i'), (3, 'o'), (4, 'u')]\n"
],
[
"del z[3]\nprint(z)",
"[(0, 'a'), (1, 'e'), (2, 'i'), (4, 'u')]\n"
],
[
"z = list(enumerate(\"aeiou\"))\ndel z[:3]\nprint(z)",
"[(3, 'o'), (4, 'u')]\n"
],
[
"z = list(enumerate(\"aeiou\"))\ndel z[3:]\nprint(z)",
"[(0, 'a'), (1, 'e'), (2, 'i')]\n"
],
[
"h = {1:\"bat\",2:\"bi\",3:\"hiru\",4:\"lau\"}\ndel h[3]\nprint(h)",
"{1: 'bat', 2: 'bi', 4: 'lau'}\n"
]
],
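[
[
"`del` also accepts extended (stepped) slices, removing every selected position in a single statement:",
"_____no_output_____"
]
],
[
[
"z = list(enumerate(\"aeiou\"))\ndel z[::2]\nprint(z)",
"[(1, 'e'), (3, 'o')]\n"
]
],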
[
[
"<table border=\"0\" width=\"100%\" style=\"margin: 0px;\">\n<tr> \n <td style=\"text-align:left\"><a href=\"Multzoak.ipynb\">< < Multzoak < <</a></td>\n <td style=\"text-align:right\"><a href=\"with%20sententzia.ipynb\">> > with sententzia > ></a></td>\n</tr>\n</table>",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a0f66fd258db1ca95208f1ff0199a0df06916e5
| 373,216 |
ipynb
|
Jupyter Notebook
|
Introduction to Computer Vision/Feature Vectors/Image Pyramids.ipynb
|
brand909/Computer-Vision
|
18e5bda880e40f0a355d1df8520770df5bb1ed6b
|
[
"MIT"
] | null | null | null |
Introduction to Computer Vision/Feature Vectors/Image Pyramids.ipynb
|
brand909/Computer-Vision
|
18e5bda880e40f0a355d1df8520770df5bb1ed6b
|
[
"MIT"
] | 4 |
2021-03-19T02:34:33.000Z
|
2022-03-11T23:56:20.000Z
|
Introduction to Computer Vision/Feature Vectors/Image Pyramids.ipynb
|
brand909/Computer-Vision
|
18e5bda880e40f0a355d1df8520770df5bb1ed6b
|
[
"MIT"
] | null | null | null | 2,665.828571 | 199,232 | 0.964155 |
[
[
[
"## Image pyramids\n\nTake a look at how downsampling with image pyramids works.\n\nFirst, we'll read in an image then construct and display a few layers of an image pyramid.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n%matplotlib inline\n\n# Read in the image\nimage = cv2.imread('images/rainbow_flag.jpg')\n\n# Change color to RGB (from BGR)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\nplt.imshow(image)",
"_____no_output_____"
],
[
"level_1 = cv2.pyrDown(image)\nlevel_2 = cv2.pyrDown(level_1)\nlevel_3 = cv2.pyrDown(level_2)\n\n# Display the images\nf, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10))\n\nax1.set_title('original')\nax1.imshow(image)\n\nax2.imshow(level_1)\nax2.set_xlim([0, image.shape[1]])\nax2.set_ylim([image.shape[0], 0])\n\nax3.imshow(level_2)\nax3.set_xlim([0, image.shape[1]])\nax3.set_ylim([image.shape[0], 0])\n\nax4.imshow(level_3)\nax4.set_xlim([0, image.shape[1]])\nax4.set_ylim([image.shape[0], 0])\n",
"_____no_output_____"
]
],
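[
[
"Each `pyrDown` call blurs and downsamples the image, roughly halving each spatial dimension. Printing the shapes is a quick, illustrative way to confirm this:",
"_____no_output_____"
]
],
[
[
"# Each level should have roughly half the height and width of the previous one.\nfor name, img in [('original', image), ('level_1', level_1),\n                  ('level_2', level_2), ('level_3', level_3)]:\n    print(name, img.shape)",
"_____no_output_____"
]
]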
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4a0f7bd60f671a39c9285f7a8d0fcec573139bb2
| 42,796 |
ipynb
|
Jupyter Notebook
|
site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb
|
Pandinosaurus/docs-2
|
3550667e06ea24580b6d907aaf09f0c8e0dfca23
|
[
"Apache-2.0"
] | null | null | null |
site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb
|
Pandinosaurus/docs-2
|
3550667e06ea24580b6d907aaf09f0c8e0dfca23
|
[
"Apache-2.0"
] | null | null | null |
site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb
|
Pandinosaurus/docs-2
|
3550667e06ea24580b6d907aaf09f0c8e0dfca23
|
[
"Apache-2.0"
] | null | null | null | 39.8473 | 434 | 0.564819 |
[
[
[
"##### Copyright 2019 The TensorFlow Authors.\n",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Distributed Training with DTensors\n",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/distribute/dtensor_ml_tutorial\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/dtensor_ml_tutorial.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"## Overview\n\nDTensor provides a way for you to distribute the training of your model across devices to improve efficiency, reliability and scalability. For more details on DTensor concepts, see [The DTensor Programming Guide](https://www.tensorflow.org/guide/dtensor_overview).\n\nIn this tutorial, you will train a Sentiment Analysis model with DTensor. Three distributed training schemes are demonstrated with this example:\n\n - Data Parallel training, where the training samples are sharded (partitioned) to devices.\n - Model Parallel training, where the model variables are sharded to devices.\n - Spatial Parallel training, where the features of input data are sharded to devices. (Also known as [Spatial Partitioning](https://cloud.google.com/blog/products/ai-machine-learning/train-ml-models-on-large-images-and-3d-volumes-with-spatial-partitioning-on-cloud-tpus))\n\nThe training portion of this tutorial is inspired [A Kaggle guide on Sentiment Analysis](https://www.kaggle.com/code/anasofiauzsoy/yelp-review-sentiment-analysis-tensorflow-tfds/notebook) notebook. To learn about the complete training and evaluation workflow (without DTensor), refer to that notebook.\n\nThis tutorial will walk through the following steps:\n\n- First start with some data cleaning to obtain a `tf.data.Dataset` of tokenized sentences and their polarity.\n\n- Next build an MLP model with custom Dense and BatchNorm layers. Use a `tf.Module` to track the inference variables. The model constructor takes additional `Layout` arguments to control the sharding of variables.\n\n- For training, you will first use data parallel training together with `tf.experimental.dtensor`'s checkpoint feature. Then continue with Model Parallel Training and Spatial Parallel Training.\n\n- The final section briefly describes the interaction between `tf.saved_model` and `tf.experimental.dtensor` as of TensorFlow 2.9.\n",
"_____no_output_____"
],
[
"## Setup\n\nDTensor is part of TensorFlow 2.9.0 release.",
"_____no_output_____"
]
],
[
[
"!pip install --quiet --upgrade --pre tensorflow tensorflow-datasets",
"_____no_output_____"
]
],
[
[
"Next, import `tensorflow` and `tensorflow.experimental.dtensor`. Then configure TensorFlow to use 8 virtual CPUs.\n\nEven though this example uses CPUs, DTensor works the same way on CPU, GPU or TPU devices.",
"_____no_output_____"
]
],
[
[
"import tempfile\nimport numpy as np\nimport tensorflow_datasets as tfds\n\nimport tensorflow as tf\n\nfrom tensorflow.experimental import dtensor\nprint('TensorFlow version:', tf.__version__)",
"_____no_output_____"
],
[
"def configure_virtual_cpus(ncpu):\n phy_devices = tf.config.list_physical_devices('CPU')\n tf.config.set_logical_device_configuration(phy_devices[0], [\n tf.config.LogicalDeviceConfiguration(),\n ] * ncpu)\n\nconfigure_virtual_cpus(8)\nDEVICES = [f'CPU:{i}' for i in range(8)]\n\ntf.config.list_logical_devices('CPU')",
"_____no_output_____"
]
],
[
[
"## Download the dataset\n\nDownload the IMDB reviews data set to train the sentiment analysis model.",
"_____no_output_____"
]
],
[
[
"train_data = tfds.load('imdb_reviews', split='train', shuffle_files=True, batch_size=64)\ntrain_data",
"_____no_output_____"
]
],
[
[
"## Prepare the data\n\nFirst tokenize the text. Here use an extension of one-hot encoding, the `'tf_idf'` mode of `tf.keras.layers.TextVectorization`.\n\n- For the sake of speed, limit the number of tokens to 1200.\n- To keep the `tf.Module` simple, run `TextVectorization` as a preprocessing step before the training.\n\nThe final result of the data cleaning section is a `Dataset` with the tokenized text as `x` and label as `y`.\n\n**Note**: Running `TextVectorization` as a preprocessing step is **neither a usual practice nor a recommended one** as doing so assumes the training data fits into the client memory, which is not always the case.\n",
"_____no_output_____"
]
],
[
[
"text_vectorization = tf.keras.layers.TextVectorization(output_mode='tf_idf', max_tokens=1200, output_sequence_length=None)\ntext_vectorization.adapt(data=train_data.map(lambda x: x['text']))",
"_____no_output_____"
],
[
"def vectorize(features):\n return text_vectorization(features['text']), features['label']\n\ntrain_data_vec = train_data.map(vectorize)\ntrain_data_vec",
"_____no_output_____"
]
],
[
[
"## Build a neural network with DTensor\n\nNow build a Multi-Layer Perceptron (MLP) network with `DTensor`. The network will use fully connected Dense and BatchNorm layers.\n\n`DTensor` expands TensorFlow through single-program multi-data (SPMD) expansion of regular TensorFlow Ops according to the `dtensor.Layout` attributes of their input `Tensor` and variables.\n\nVariables of `DTensor` aware layers are `dtensor.DVariable`, and the constructors of `DTensor` aware layer objects take additional `Layout` inputs in addition to the usual layer parameters.\n\nNote: As of TensorFlow 2.9, Keras layers such as `tf.keras.layer.Dense`, and `tf.keras.layer.BatchNormalization` accepts `dtensor.Layout` arguments. Refer to the [DTensor Keras Integration Tutorial](/tutorials/distribute/dtensor_keras_tutorial) for more information using Keras with DTensor.",
"_____no_output_____"
],
[
"### Dense Layer\n\nThe following custom Dense layer defines 2 layer variables: $W_{ij}$ is the variable for weights, and $b_i$ is the variable for the biases.\n\n$$\ny_j = \\sigma(\\sum_i x_i W_{ij} + b_j)\n$$\n",
"_____no_output_____"
],
[
"### Layout deduction\n\nThis result comes from the following observations:\n\n- The preferred DTensor sharding for operands to a matrix dot product $t_j = \\sum_i x_i W_{ij}$ is to shard $\\mathbf{W}$ and $\\mathbf{x}$ the same way along the $i$-axis.\n\n- The preferred DTensor sharding for operands to a matrix sum $t_j + b_j$, is to shard $\\mathbf{t}$ and $\\mathbf{b}$ the same way along the $j$-axis.\n",
"_____no_output_____"
]
],
[
[
"class Dense(tf.Module):\n\n def __init__(self, input_size, output_size,\n init_seed, weight_layout, activation=None):\n super().__init__()\n\n random_normal_initializer = tf.function(tf.random.stateless_normal)\n\n self.weight = dtensor.DVariable(\n dtensor.call_with_layout(\n random_normal_initializer, weight_layout,\n shape=[input_size, output_size],\n seed=init_seed\n ))\n if activation is None:\n activation = lambda x:x\n self.activation = activation\n \n # bias is sharded the same way as the last axis of weight.\n bias_layout = weight_layout.delete([0])\n\n self.bias = dtensor.DVariable(\n dtensor.call_with_layout(tf.zeros, bias_layout, [output_size]))\n\n def __call__(self, x):\n y = tf.matmul(x, self.weight) + self.bias\n y = self.activation(y)\n\n return y",
"_____no_output_____"
]
],
[
[
"### BatchNorm\n\nA batch normalization layer helps avoid collapsing modes while training. In this case, adding batch normalization layers helps model training avoid producing a model that only produces zeros.\n\nThe constructor of the custom `BatchNorm` layer below does not take a `Layout` argument. This is because `BatchNorm` has no layer variables. This still works with DTensor because 'x', the only input to the layer, is already a DTensor that represents the global batch.\n\nNote: With DTensor, the input Tensor 'x' always represents the global batch. Therefore `tf.nn.batch_normalization` is applied to the global batch. This differs from training with `tf.distribute.MirroredStrategy`, where Tensor 'x' only represents the per-replica shard of the batch (the local batch).",
"_____no_output_____"
]
],
[
[
"class BatchNorm(tf.Module):\n\n def __init__(self):\n super().__init__()\n\n def __call__(self, x, training=True):\n if not training:\n # This branch is not used in the Tutorial.\n pass\n mean, variance = tf.nn.moments(x, axes=[0])\n return tf.nn.batch_normalization(x, mean, variance, 0.0, 1.0, 1e-5)",
"_____no_output_____"
]
],
[
[
"A full featured batch normalization layer (such as `tf.keras.layers.BatchNormalization`) will need Layout arguments for its variables.",
"_____no_output_____"
]
],
[
[
"def make_keras_bn(bn_layout):\n return tf.keras.layers.BatchNormalization(gamma_layout=bn_layout,\n beta_layout=bn_layout,\n moving_mean_layout=bn_layout,\n moving_variance_layout=bn_layout,\n fused=False)",
"_____no_output_____"
]
],
[
[
"### Putting Layers Together\n\nNext, build a Multi-layer perceptron (MLP) network with the building blocks above. The diagram below shows the axis relationships between the input `x` and the weight matrices for the two `Dense` layers without any DTensor sharding or replication applied.",
"_____no_output_____"
],
[
"<img src=\"https://www.tensorflow.org/images/dtensor/no_dtensor.png\" alt=\"The input and weight matrices for a non distributed model.\" class=\"no-filter\">\n",
"_____no_output_____"
],
[
"The output of the first `Dense` layer is passed into the input of the second `Dense` layer (after the `BatchNorm`). Therefore, the preferred DTensor sharding for the output of first `Dense` layer ($\\mathbf{W_1}$) and the input of second `Dense` layer ($\\mathbf{W_2}$) is to shard $\\mathbf{W_1}$ and $\\mathbf{W_2}$ the same way along the common axis $\\hat{j}$,\n\n$$\n\\mathsf{Layout}[{W_{1,ij}}; i, j] = \\left[\\hat{i}, \\hat{j}\\right] \\\\\n\\mathsf{Layout}[{W_{2,jk}}; j, k] = \\left[\\hat{j}, \\hat{k} \\right]\n$$\n\nEven though the layout deduction shows that the 2 layouts are not independent, for the sake of simplicity of the model interface, `MLP` will take 2 `Layout` arguments, one per Dense layer.",
"_____no_output_____"
]
],
[
[
"from typing import Tuple\n\nclass MLP(tf.Module):\n\n def __init__(self, dense_layouts: Tuple[dtensor.Layout, dtensor.Layout]):\n super().__init__()\n\n self.dense1 = Dense(\n 1200, 48, (1, 2), dense_layouts[0], activation=tf.nn.relu)\n self.bn = BatchNorm()\n self.dense2 = Dense(48, 2, (3, 4), dense_layouts[1])\n\n def __call__(self, x):\n y = x\n y = self.dense1(y)\n y = self.bn(y)\n y = self.dense2(y)\n return y\n",
"_____no_output_____"
]
],
[
[
"The trade-off between correctness in layout deduction constraints and simplicity of API is a common design point of APIs that uses DTensor.\nIt is also possible to capture the dependency between `Layout`'s with a different API. For example, the `MLPStricter` class creates the `Layout` objects in the constructor.",
"_____no_output_____"
]
],
[
[
"class MLPStricter(tf.Module):\n\n def __init__(self, mesh, input_mesh_dim, inner_mesh_dim1, output_mesh_dim):\n super().__init__()\n\n self.dense1 = Dense(\n 1200, 48, (1, 2), dtensor.Layout([input_mesh_dim, inner_mesh_dim1], mesh),\n activation=tf.nn.relu)\n self.bn = BatchNorm()\n self.dense2 = Dense(48, 2, (3, 4), dtensor.Layout([inner_mesh_dim1, output_mesh_dim], mesh))\n\n\n def __call__(self, x):\n y = x\n y = self.dense1(y)\n y = self.bn(y)\n y = self.dense2(y)\n return y",
"_____no_output_____"
]
],
[
[
"To make sure the model runs, probe your model with fully replicated layouts and a fully replicated batch of `'x'` input.",
"_____no_output_____"
]
],
[
[
"WORLD = dtensor.create_mesh([(\"world\", 8)], devices=DEVICES)\n\nmodel = MLP([dtensor.Layout.replicated(WORLD, rank=2),\n dtensor.Layout.replicated(WORLD, rank=2)])\n\nsample_x, sample_y = train_data_vec.take(1).get_single_element()\nsample_x = dtensor.copy_to_mesh(sample_x, dtensor.Layout.replicated(WORLD, rank=2))\nprint(model(sample_x))",
"_____no_output_____"
]
],
[
[
"## Moving data to the device\n\nUsually, `tf.data` iterators (and other data fetching methods) yield tensor objects backed by the local host device memory. This data must be transferred to the accelerator device memory that backs DTensor's component tensors.\n\n`dtensor.copy_to_mesh` is unsuitable for this situation because it replicates input tensors to all devices due to DTensor's global perspective. So in this tutorial, you will use a helper function `repack_local_tensor`, to facilitate the transfer of data. This helper function uses `dtensor.pack` to send (and only send) the shard of the global batch that is intended for a replica to the device backing the replica.\n\nThis simplified function assumes single-client. Determining the correct way to split the local tensor and the mapping between the pieces of the split and the local devices can be laboring in a multi-client application.\n\nAdditional DTensor API to simplify `tf.data` integration is planned, supporting both single-client and multi-client applications. Please stay tuned.",
"_____no_output_____"
]
],
[
[
"def repack_local_tensor(x, layout):\n \"\"\"Repacks a local Tensor-like to a DTensor with layout.\n\n This function assumes a single-client application.\n \"\"\"\n x = tf.convert_to_tensor(x)\n sharded_dims = []\n\n # For every sharded dimension, use tf.split to split the along the dimension.\n # The result is a nested list of split-tensors in queue[0].\n queue = [x]\n for axis, dim in enumerate(layout.sharding_specs):\n if dim == dtensor.UNSHARDED:\n continue\n num_splits = layout.shape[axis]\n queue = tf.nest.map_structure(lambda x: tf.split(x, num_splits, axis=axis), queue)\n sharded_dims.append(dim)\n\n # Now we can build the list of component tensors by looking up the location in\n # the nested list of split-tensors created in queue[0].\n components = []\n for locations in layout.mesh.local_device_locations():\n t = queue[0]\n for dim in sharded_dims:\n split_index = locations[dim] # Only valid on single-client mesh.\n t = t[split_index]\n components.append(t)\n\n return dtensor.pack(components, layout)",
"_____no_output_____"
]
],
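[
[
"As a quick illustrative check (reusing the `WORLD` mesh created above), you can pack a small host tensor so that its first axis is sharded over the mesh, and confirm that the resulting DTensor carries the requested layout:",
"_____no_output_____"
]
],
[
[
"# Illustrative: shard the first axis of a small host tensor across the 8 devices.\nlocal = tf.reshape(tf.range(16, dtype=tf.float32), [8, 2])\nsharded = repack_local_tensor(\n    local, layout=dtensor.Layout(['world', dtensor.UNSHARDED], WORLD))\nprint(dtensor.fetch_layout(sharded))",
"_____no_output_____"
]
],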
[
[
"## Data parallel training\n\nIn this section, you will train your MLP model with data parallel training. The following sections will demonstrate model parallel training and spatial parallel training.\n\nData parallel training is a commonly used scheme for distributed machine learning:\n\n - Model variables are replicated on N devices each.\n - A global batch is split into N per-replica batches.\n - Each per-replica batch is trained on the replica device.\n - The gradient is reduced before weight up data is collectively performed on all replicas.\n\nData parallel training provides nearly linear speedup regarding the number of devices.",
"_____no_output_____"
],
[
"### Creating a data parallel mesh\n\nA typical data parallelism training loop uses a DTensor `Mesh` that consists of a single `batch` dimension, where each device becomes a replica that receives a shard from the global batch.\n\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_data_para.png\" alt=\"Data parallel mesh\" class=\"no-filter\">\n\n\nThe replicated model runs on the replica, therefore the model variables are fully replicated (unsharded).",
"_____no_output_____"
]
],
[
[
"mesh = dtensor.create_mesh([(\"batch\", 8)], devices=DEVICES)\n\nmodel = MLP([dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh),\n dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh),])\n",
"_____no_output_____"
]
],
[
[
"### Packing training data to DTensors\n\nThe training data batch should be packed into DTensors sharded along the `'batch'`(first) axis, such that DTensor will evenly distribute the training data to the `'batch'` mesh dimension.\n\n**Note**: In DTensor, the `batch size` always refers to the global batch size. The batch size should be chosen such that it can be divided evenly by the size of the `batch` mesh dimension.",
"_____no_output_____"
]
],
[
[
"def repack_batch(x, y, mesh):\n x = repack_local_tensor(x, layout=dtensor.Layout(['batch', dtensor.UNSHARDED], mesh))\n y = repack_local_tensor(y, layout=dtensor.Layout(['batch'], mesh))\n return x, y\n\nsample_x, sample_y = train_data_vec.take(1).get_single_element()\nsample_x, sample_y = repack_batch(sample_x, sample_y, mesh)\n\nprint('x', sample_x[:, 0])\nprint('y', sample_y)",
"_____no_output_____"
]
],
[
[
"### Training step\n\nThis example uses a Stochastic Gradient Descent optimizer with the Custom Training Loop (CTL). Consult the [Custom Training Loop guide](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch) and [Walk through](https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough) for more information on those topics.\n\nThe `train_step` is encapsulated as a `tf.function` to indicate this body is to be traced as a TensorFlow Graph. The body of `train_step` consists of a forward inference pass, a backward gradient pass, and the variable update.\n\nNote that the body of `train_step` does not contain any special DTensor annotations. Instead, `train_step` only contains high-level TensorFlow operations that process the input `x` and `y` from the global view of the input batch and the model. All of the DTensor annotations (`Mesh`, `Layout`) are factored out of the train step.",
"_____no_output_____"
]
],
[
[
"# Refer to the CTL (custom training loop guide)\[email protected]\ndef train_step(model, x, y, learning_rate=tf.constant(1e-4)):\n with tf.GradientTape() as tape:\n logits = model(x)\n # tf.reduce_sum sums the batch sharded per-example loss to a replicated\n # global loss (scalar).\n loss = tf.reduce_sum(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=y))\n parameters = model.trainable_variables\n gradients = tape.gradient(loss, parameters)\n for parameter, parameter_gradient in zip(parameters, gradients):\n parameter.assign_sub(learning_rate * parameter_gradient)\n\n # Define some metrics\n accuracy = 1.0 - tf.reduce_sum(tf.cast(tf.argmax(logits, axis=-1, output_type=tf.int64) != y, tf.float32)) / x.shape[0]\n loss_per_sample = loss / len(x)\n return {'loss': loss_per_sample, 'accuracy': accuracy}",
"_____no_output_____"
]
],
[
[
"### Checkpointing\n\nYou can checkpoint a DTensor model using `dtensor.DTensorCheckpoint`. The format of a DTensor checkpoint is fully compatible with a Standard TensorFlow Checkpoint. There is ongoing work to consolidate `dtensor.DTensorCheckpoint` into `tf.train.Checkpoint`.\n\nWhen a DTensor checkpoint is restored, `Layout`s of variables can be different from when the checkpoint is saved. This tutorial makes use of this feature to continue the training in the Model Parallel training and Spatial Parallel training sections.\n",
"_____no_output_____"
]
],
[
[
"CHECKPOINT_DIR = tempfile.mkdtemp()\n\ndef start_checkpoint_manager(mesh, model):\n ckpt = dtensor.DTensorCheckpoint(mesh, root=model)\n manager = tf.train.CheckpointManager(ckpt, CHECKPOINT_DIR, max_to_keep=3)\n\n if manager.latest_checkpoint:\n print(\"Restoring a checkpoint\")\n ckpt.restore(manager.latest_checkpoint).assert_consumed()\n else:\n print(\"new training\")\n return manager\n",
"_____no_output_____"
]
],
[
[
"### Training loop\n\nFor the data parallel training scheme, train for epochs and report the progress. 3 epochs is insufficient for training the model -- an accuracy of 50% is as good as randomly guessing.\n\nEnable checkpointing so that you can pick up the training later. In the following section, you will load the checkpoint and train with a different parallel scheme.",
"_____no_output_____"
]
],
[
[
"num_epochs = 2\nmanager = start_checkpoint_manager(mesh, model)\n\nfor epoch in range(num_epochs):\n step = 0\n pbar = tf.keras.utils.Progbar(target=int(train_data_vec.cardinality()), stateful_metrics=[])\n metrics = {'epoch': epoch}\n for x,y in train_data_vec:\n\n x, y = repack_batch(x, y, mesh)\n\n metrics.update(train_step(model, x, y, 1e-2))\n\n pbar.update(step, values=metrics.items(), finalize=False)\n step += 1\n manager.save()\n pbar.update(step, values=metrics.items(), finalize=True)",
"_____no_output_____"
]
],
[
[
"## Model Parallel Training\n\nIf you switch to a 2 dimensional `Mesh`, and shard the model variables along the second mesh dimension, then the training becomes Model Parallel.\n\nIn Model Parallel training, each model replica spans multiple devices (2 in this case):\n\n- There are 4 model replicas, and the training data batch is distributed to the 4 replicas.\n- The 2 devices within a single model replica receive replicated training data.\n\n\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_model_para.png\" alt=\"Model parallel mesh\" class=\"no-filter\">\n",
"_____no_output_____"
]
],
[
[
"mesh = dtensor.create_mesh([(\"batch\", 4), (\"model\", 2)], devices=DEVICES)\nmodel = MLP([dtensor.Layout([dtensor.UNSHARDED, \"model\"], mesh), \n dtensor.Layout([\"model\", dtensor.UNSHARDED], mesh)])",
"_____no_output_____"
]
],
[
[
"As the training data is still sharded along the batch dimension, you can reuse the same `repack_batch` function as the Data Parallel training case. DTensor will automatically replicate the per-replica batch to all devices inside the replica along the `\"model\"` mesh dimension.",
"_____no_output_____"
]
],
[
[
"def repack_batch(x, y, mesh):\n x = repack_local_tensor(x, layout=dtensor.Layout(['batch', dtensor.UNSHARDED], mesh))\n y = repack_local_tensor(y, layout=dtensor.Layout(['batch'], mesh))\n return x, y",
"_____no_output_____"
]
],
[
[
"Next run the training loop. The training loop reuses the same checkpoint manager as the Data Parallel training example, and the code looks identical.\n\nYou can continue training the data parallel trained model under model parallel training.",
"_____no_output_____"
]
],
[
[
"num_epochs = 2\nmanager = start_checkpoint_manager(mesh, model)\n\nfor epoch in range(num_epochs):\n step = 0\n pbar = tf.keras.utils.Progbar(target=int(train_data_vec.cardinality()))\n metrics = {'epoch': epoch}\n for x,y in train_data_vec:\n x, y = repack_batch(x, y, mesh)\n metrics.update(train_step(model, x, y, 1e-2))\n pbar.update(step, values=metrics.items(), finalize=False)\n step += 1\n manager.save()\n pbar.update(step, values=metrics.items(), finalize=True)",
"_____no_output_____"
]
],
[
[
"## Spatial Parallel Training",
"_____no_output_____"
],
[
"When training data of very high dimensionality (e.g. a very large image or a video), it may be desirable to shard along the feature dimension. This is called [Spatial Partitioning](https://cloud.google.com/blog/products/ai-machine-learning/train-ml-models-on-large-images-and-3d-volumes-with-spatial-partitioning-on-cloud-tpus), which was first introduced into TensorFlow for training models with large 3-d input samples.\n\n<img src=\"https://www.tensorflow.org/images/dtensor/dtensor_spatial_para.png\" alt=\"Spatial parallel mesh\" class=\"no-filter\">\n\nDTensor also supports this case. The only change you need to do is to create a Mesh that includes a `feature` dimension, and apply the corresponding `Layout`.\n",
"_____no_output_____"
]
],
[
[
"mesh = dtensor.create_mesh([(\"batch\", 2), (\"feature\", 2), (\"model\", 2)], devices=DEVICES)\nmodel = MLP([dtensor.Layout([\"feature\", \"model\"], mesh), \n dtensor.Layout([\"model\", dtensor.UNSHARDED], mesh)])\n",
"_____no_output_____"
]
],
[
[
"Shard the input data along the `feature` dimension when packing the input tensors to DTensors. You do this with a slightly different repack function, `repack_batch_for_spt`, where `spt` stands for Spatial Parallel Training.",
"_____no_output_____"
]
],
[
[
"def repack_batch_for_spt(x, y, mesh):\n # Shard data on feature dimension, too\n x = repack_local_tensor(x, layout=dtensor.Layout([\"batch\", 'feature'], mesh))\n y = repack_local_tensor(y, layout=dtensor.Layout([\"batch\"], mesh))\n return x, y",
"_____no_output_____"
]
],
[
[
"The Spatial parallel training can also continue from a checkpoint created with other parallell training schemes.",
"_____no_output_____"
]
],
[
[
"num_epochs = 2\n\nmanager = start_checkpoint_manager(mesh, model)\nfor epoch in range(num_epochs):\n step = 0\n metrics = {'epoch': epoch}\n pbar = tf.keras.utils.Progbar(target=int(train_data_vec.cardinality()))\n\n for x, y in train_data_vec:\n x, y = repack_batch_for_spt(x, y, mesh)\n metrics.update(train_step(model, x, y, 1e-2))\n\n pbar.update(step, values=metrics.items(), finalize=False)\n step += 1\n manager.save()\n pbar.update(step, values=metrics.items(), finalize=True)",
"_____no_output_____"
]
],
[
[
"## SavedModel and DTensor\n\nThe integration of DTensor and SavedModel is still under development. This section only describes the current status quo for TensorFlow 2.9.0.\n\nAs of TensorFlow 2.9.0, `tf.saved_model` only accepts DTensor models with fully replicated variables.\n\nAs a workaround, you can convert a DTensor model to a fully replicated one by reloading a checkpoint. However, after a model is saved, all DTensor annotations are lost and the saved signatures can only be used with regular Tensors, not DTensors.",
"_____no_output_____"
]
],
[
[
"mesh = dtensor.create_mesh([(\"world\", 1)], devices=DEVICES[:1])\nmlp = MLP([dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh), \n dtensor.Layout([dtensor.UNSHARDED, dtensor.UNSHARDED], mesh)])\n\nmanager = start_checkpoint_manager(mesh, mlp)\n\nmodel_for_saving = tf.keras.Sequential([\n text_vectorization,\n mlp\n])\n\[email protected](input_signature=[tf.TensorSpec([None], tf.string)])\ndef run(inputs):\n return {'result': model_for_saving(inputs)}\n\ntf.saved_model.save(\n model_for_saving, \"/tmp/saved_model\",\n signatures=run)",
"_____no_output_____"
]
],
[
[
"As of TensorFlow 2.9.0, you can only call a loaded signature with a regular Tensor, or a fully replicated DTensor (which will be converted to a regular Tensor).",
"_____no_output_____"
]
],
[
[
"sample_batch = train_data.take(1).get_single_element()\nsample_batch",
"_____no_output_____"
],
[
"loaded = tf.saved_model.load(\"/tmp/saved_model\")\n\nrun_sig = loaded.signatures[\"serving_default\"]\nresult = run_sig(sample_batch['text'])['result']",
"_____no_output_____"
],
[
"np.mean(tf.argmax(result, axis=-1) == sample_batch['label'])",
"_____no_output_____"
]
],
[
[
"## What's next?\n\nThis tutorial demonstrated building and training an MLP sentiment analysis model with DTensor.\n\nThrough `Mesh` and `Layout` primitives, DTensor can transform a TensorFlow `tf.function` to a distributed program suitable for a variety of training schemes.\n\nIn a real-world machine learning application, evaluation and cross-validation should be applied to avoid producing an over-fitted model. The techniques introduced in this tutorial can also be applied to introduce parallelism to evaluation.\n\nComposing a model with `tf.Module` from scratch is a lot of work, and reusing existing building blocks such as layers and helper functions can drastically speed up model development.\nAs of TensorFlow 2.9, all Keras Layers under `tf.keras.layers` accepts DTensor layouts as their arguments, and can be used to build DTensor models. You can even directly reuse a Keras model with DTensor without modifying the model implementation. Refer to the [DTensor Keras Integration Tutorial](https://www.tensorflow.org/tutorials/distribute/dtensor_keras_tutorial) for information on using DTensor Keras. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a0f82235b13cb3c40053574a77a7660353a6374
| 74,119 |
ipynb
|
Jupyter Notebook
|
evaluation/test.ipynb
|
D2KLab/py-sparql-transformer
|
885000e36872a3214b45bb1b64cf9ec4d1008be1
|
[
"Apache-2.0"
] | 7 |
2019-02-21T09:29:08.000Z
|
2021-06-06T14:04:40.000Z
|
evaluation/test.ipynb
|
D2KLab/py-sparql-transformer
|
885000e36872a3214b45bb1b64cf9ec4d1008be1
|
[
"Apache-2.0"
] | 4 |
2019-06-13T07:27:27.000Z
|
2021-04-07T19:23:24.000Z
|
evaluation/test.ipynb
|
D2KLab/py-sparql-transformer
|
885000e36872a3214b45bb1b64cf9ec4d1008be1
|
[
"Apache-2.0"
] | null | null | null | 84.998853 | 9,812 | 0.79398 |
[
[
[
"SPARQL Transformer evaluation\n=========================\n\nThis notebook contains some quantitative measures for the evaluation of SPARQL Transformer.",
"_____no_output_____"
]
],
[
[
"import json\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom ipywidgets import FloatProgress\nfrom IPython.display import display\n\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom SPARQLTransformer import sparqlTransformer",
"_____no_output_____"
],
[
"input_folder = './sparql'\nENDPOINT = 'http://0.0.0.0:7790/sparql'\n# ENDPOINT = 'http://dbpedia.org/sparql'",
"_____no_output_____"
],
[
"json_queries_files = list(filter(lambda x: x.endswith('.json'), os.listdir(input_folder)))\njson_queries_files.sort()\nrq_queries_files = [f.replace('.json', '.rq') for f in json_queries_files]\n\njson_queries = [json.load(open('%s/%s' % (input_folder, f), 'r')) for f in json_queries_files]\nrq_queries = [open('%s/%s' % (input_folder, f), 'r').read() for f in rq_queries_files]\n\njson_queries_files",
"_____no_output_____"
]
],
[
[
"The test queries have been taken from the __[DBpedia wiki](https://wiki.dbpedia.org/OnlineAccess)__.\n\nThose SELECT queries have been manually converted in json query, making sure that the transformed query was equal to the original one (variable names apart).\n\nThe following table shows, for each query:\n- `n vars`, how many variable are selected\n- `levels`, how many levels are present in the json prototype, considered that `1` refers to a flat object (all properties attached to the root) and `2` at one level of nested object\n- `features` included in the query\n \n| name | n vars | levels | features |\n|--------------------------|--------|--------|----------------------|\n|1.Born_in_Berlin | 4 | 1 | filter, orderby |\n|2.German_musicians | 4 | 1 | lang filter, optional|\n|3.Musicians_born_in_Berlin| 4 | 1 | lang filter |\n|4.Soccer_players | 5 | 2 | filter, orderby |\n|5.Games | 2 | 1 | orderby |",
"_____no_output_____"
],
[
"Functions for executing the query and returning the bindings.\n\n- For JSON queries, we use **SPARQLTransformer**.\n- For SPARQL queries, we use **SPARQLWrapper** (which is also internally used by SPARQLTransformer).",
"_____no_output_____"
]
],
[
[
"def sparql_exec(query):\n sparql = SPARQLWrapper(ENDPOINT)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query().convert()\n return result[\"results\"][\"bindings\"]\n\ndef json_exec(query, debug=False):\n return sparqlTransformer(query, {'endpoint': ENDPOINT, 'debug': debug})",
"_____no_output_____"
]
],
[
[
"Functions for running the test for a particular query (sparql or json).\n\nThe test measure the **execution time** of the query (including any parsing task) and the **number of results**.",
"_____no_output_____"
]
],
[
[
"def test_atom(query, typ='sparql'):\n start = time.time()\n if typ == 'sparql':\n r = sparql_exec(query)\n else:\n r = json_exec(query)\n \n end = time.time()\n timing = end - start\n \n return len(r), timing",
"_____no_output_____"
]
],
[
[
"We will execute the test multiple times for each query, to obtain an average result as much as possible not correlated to the network/server workload.\n\nIn particular, each test would be executed `num_iteration` times. Each couple of consecutive iteration will be separated by `sleep_time` seconds.",
"_____no_output_____"
]
],
[
[
"num_iteration = 100\nsleep_time = 5",
"_____no_output_____"
],
[
"def mean_without_outliers(x):\n df = pd.DataFrame(x)\n Q1 = df.quantile(0.25)\n Q3 = df.quantile(0.75)\n IQR = Q3 - Q1\n\n return float(df[(df >= Q1-1.5*IQR ) | (df <= Q3+1.5*IQR)].mean())",
"_____no_output_____"
],
[
"test_results = []\nall_timings = []\n\nfor i, json_query in enumerate(json_queries):\n # queries\n json_query = json_queries[i]\n rq_query = rq_queries[i]\n title = rq_queries_files[i].replace('.rq', '')\n print(title)\n \n # progress bars\n fs = FloatProgress(min=0, max=num_iteration, description='SPARQL test:')\n display(fs)\n fj = FloatProgress(min=0, max=num_iteration, description='JSON test:')\n display(fj)\n\n sparql_time = []\n sparql_results = 0\n json_time = []\n json_results = 0\n \n for j in np.arange(num_iteration):\n if (i + j) > 0 :\n time.sleep(sleep_time)\n sparql_results, t = test_atom(rq_query, typ='sparql')\n sparql_time.append(t)\n fs.value += 1\n\n for j in np.arange(num_iteration):\n time.sleep(sleep_time)\n json_results, t = test_atom(json_query, typ='json')\n json_time.append(t)\n fj.value += 1\n \n ts = np.mean(sparql_time) \n tj = np.mean(json_time)\n time_diff = (tj - ts)\n time_diff_percent = 100 * time_diff / np.mean([ts,tj])\n \n test_results.append({\n 'name': title,\n 'time_sparql': ts, \n 'result_sparql': sparql_results,\n 'time_json': tj , \n 'result_json': json_results,\n 'time_diff': '{0:.2g}'.format(time_diff),\n 'time_diff_percent': '{0:.2g}%'.format(time_diff_percent)\n });\n\n all_timings.append({\n 'name': title,\n 'json': json_time,\n 'sparql': sparql_time\n })",
"1.Born_in_Berlin\n"
]
],
[
[
"Those plots show that over the whole test, some query tooks much longer to be executed. The **outliers** are clearly visible as dots. \n\nWhen computing the mean, we excluded all the outliers, where an outlier stands outside the IQR (see [definition](https://www.purplemath.com/modules/boxwhisk3.htm)).",
"_____no_output_____"
]
],
[
[
"for i, json_query in enumerate(json_queries):\n tim = all_timings[i]\n \n a = np.array([np.hstack(tim['sparql']), np.hstack(tim['json'])]).transpose()\n \n df = pd.DataFrame(a, columns=['SPARQL', 'JSON'])\n bp = df.boxplot(vert=False, figsize=(16,4))\n fig = np.asarray(bp).reshape(-1)[0].get_figure()\n fig.suptitle(tim['name'])\n plt.show()",
"_____no_output_____"
],
[
"pd.DataFrame.from_dict(test_results)",
"_____no_output_____"
]
],
[
[
"The table give us two different informations.\n\n#### Time difference\n\nThe execution time of JSON queries (`time_json`) is quite close to the one of SPARQL ones (`time_sparql`). The difference in percentage (`time_diff`) never overcomes few hundredths of a second.\n\n#### Result difference\n\nThe number of results (bindings) returned by SPARQL Transformer (`result_json`) is always lower than the ones returned by the endpoint (`result_json`). This is due to the fact that the latter represents all the combination of values as distinct bindings, while the former aggregates the results with the same id.",
"_____no_output_____"
],
[
"### Example of result for `1.Born_in_Berlin`.\n\nAn interest case is the 2nd result about [Prince Adalbert of Prussia](http://dbpedia.org/resource/Prince_Adalbert_of_Prussia_(1811–1873)), which has 4 names and 2 differently formatted death date. This is represented with 4 * 2 = 8 bindings, then merged with SPARQL Transformer",
"_____no_output_____"
]
],
[
[
"# SPARQL query\nsparql_exec(rq_queries[0])[1:9]",
"_____no_output_____"
],
[
"# SPARQL query\njson_exec(json_queries[0])[1]",
"_____no_output_____"
],
[
"test_results",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a0f838826888bba51d1443b2ba896ab43549730
| 461,308 |
ipynb
|
Jupyter Notebook
|
WeatherPy/WeatherPy.ipynb
|
theaddies/API-challenge
|
e2c3f5ad33329f52465c8edf5b906ed01c269753
|
[
"ADSL"
] | null | null | null |
WeatherPy/WeatherPy.ipynb
|
theaddies/API-challenge
|
e2c3f5ad33329f52465c8edf5b906ed01c269753
|
[
"ADSL"
] | null | null | null |
WeatherPy/WeatherPy.ipynb
|
theaddies/API-challenge
|
e2c3f5ad33329f52465c8edf5b906ed01c269753
|
[
"ADSL"
] | null | null | null | 157.820048 | 34,988 | 0.860568 |
[
[
[
"# WeatherPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\nfrom scipy.stats import linregress\nfrom datetime import datetime\n\n# Import API key\nfrom api_keys import weather_api_key\n\n# Incorporated citipy to determine city based on latitude and longitude\nfrom citipy import citipy\n\n# Output File (CSV)\noutput_data_file = \"../output_data/cities.csv\"\noutput_data_file_2 = \"../output_data/cities_clean.csv\"\n\n# Range of latitudes and longitudes\nlat_range = (-90, 90)\nlng_range = (-180, 180)\nplt.ioff() ",
"_____no_output_____"
],
[
" # Save config information.\nurl = \"http://api.openweathermap.org/data/2.5/weather?\"\nunits = \"imperial\"\n\n# Build partial query URL\nquery_url = f\"{url}appid={weather_api_key}&units={units}&q=\"",
"_____no_output_____"
]
],
[
[
"## Generate Cities List",
"_____no_output_____"
]
],
[
[
"# List for holding lat_lngs and cities\nlat_lngs = []\ncities = []\n\n# Create a set of random lat and lng combinations\nlats = np.random.uniform(lat_range[0], lat_range[1], size=1500)\nlngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)\nlat_lngs = zip(lats, lngs)\n\n# Identify nearest city for each lat, lng combination\nfor lat_lng in lat_lngs:\n city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name\n \n # If the city is unique, then add it to a our cities list\n if city not in cities:\n cities.append(city)\n\n# Print the city count to confirm sufficient count\nlen(cities)",
"_____no_output_____"
],
[
"#assign list variables to build data table.\nCity = []\nLat = []\nLng = []\nMax_Temp= []\nHumidity= []\nCloudiness= []\nWind_Speed= []\nCountry = []\nDate = []",
"_____no_output_____"
],
[
"cities",
"_____no_output_____"
],
[
"#cities = ['new norfolk', 'barrow', 'barentsburg', 'staromaryevka', 'thompson', 'yumen', 'bathsheba',\\\n# 'ushuaia', 'yar-sale', 'nishihara', 'leningradskiy', 'iqaluit', 'severo-kurilsk']",
"_____no_output_____"
]
],
[
[
"### Perform API Calls\n* Perform a weather check on each city using a series of successive API calls.\n* Include a print log of each city as it'sbeing processed (with the city number and city name).\n",
"_____no_output_____"
]
],
[
[
"url = \"http://api.openweathermap.org/data/2.5/weather?\"\ngroup_item_number = np.uint8(5)\ni = np.uint8(0)\nj = np.uint8(0)\nitem = np.uint16(0)\nif len(cities)%group_item_number:\n group_number = int(len(cities) / group_item_number) + 1\nelse:\n group_number = len(cities) / group_item_number\n\nfor i in range(0, group_number):\n j=0\n while ((j < group_item_number) & (item < (len(cities)-1))):\n\n item = i * (group_item_number) + j\n city = cities[item]\n print(f'Processing record {item} of group {i} item {j} | {city}')\n try:\n response = requests.get(query_url + city).json()\n #City.append()\n Lat.append(response['coord']['lat'])\n Lng.append(response['coord']['lon'])\n Max_Temp.append(response['main']['temp_max'])\n Humidity.append(response['main']['humidity'])\n Cloudiness.append(response['clouds']['all'])\n Wind_Speed.append(response['wind']['speed'])\n Country.append(response['sys']['country'])\n Date.append(response['dt'])\n except KeyError:\n print('Data for ',city,' not available. Skipping......')\n Lat.append(np.nan)\n Lng.append(np.nan)\n Max_Temp.append(np.nan)\n Humidity.append(np.nan)\n Cloudiness.append(np.nan)\n Wind_Speed.append(np.nan)\n Country.append(np.nan)\n Date.append(np.nan)\n j = j + 1",
"Processing record 0 of group 0 item 0 | bredasdorp\nProcessing record 1 of group 0 item 1 | san patricio\nProcessing record 2 of group 0 item 2 | itaituba\nProcessing record 3 of group 0 item 3 | qaanaaq\nProcessing record 4 of group 0 item 4 | leningradskiy\nProcessing record 5 of group 1 item 0 | arraial do cabo\nProcessing record 6 of group 1 item 1 | changtu\nProcessing record 7 of group 1 item 2 | hilo\nProcessing record 8 of group 1 item 3 | illoqqortoormiut\nData for illoqqortoormiut not available. Skipping......\nProcessing record 9 of group 1 item 4 | butaritari\nProcessing record 10 of group 2 item 0 | altamira\nProcessing record 11 of group 2 item 1 | sergach\nProcessing record 12 of group 2 item 2 | saint-philippe\nProcessing record 13 of group 2 item 3 | ilam\nProcessing record 14 of group 2 item 4 | itea\nProcessing record 15 of group 3 item 0 | camacha\nProcessing record 16 of group 3 item 1 | waddan\nProcessing record 17 of group 3 item 2 | ushuaia\nProcessing record 18 of group 3 item 3 | zeya\nProcessing record 19 of group 3 item 4 | shingu\nProcessing record 20 of group 4 item 0 | tazovskiy\nProcessing record 21 of group 4 item 1 | busselton\nProcessing record 22 of group 4 item 2 | yellowknife\nProcessing record 23 of group 4 item 3 | chuy\nProcessing record 24 of group 4 item 4 | vaini\nProcessing record 25 of group 5 item 0 | sentyabrskiy\nData for sentyabrskiy not available. Skipping......\nProcessing record 26 of group 5 item 1 | baykit\nProcessing record 27 of group 5 item 2 | clyde river\nProcessing record 28 of group 5 item 3 | albany\nProcessing record 29 of group 5 item 4 | ribeira grande\nProcessing record 30 of group 6 item 0 | east london\nProcessing record 31 of group 6 item 1 | gazli\nProcessing record 32 of group 6 item 2 | avarua\nProcessing record 33 of group 6 item 3 | nouakchott\nProcessing record 34 of group 6 item 4 | thompson\nProcessing record 35 of group 7 item 0 | carnarvon\nProcessing record 36 of group 7 item 1 | port macquarie\nProcessing record 37 of group 7 item 2 | lucapa\nProcessing record 38 of group 7 item 3 | beaumont\nProcessing record 39 of group 7 item 4 | cape town\nProcessing record 40 of group 8 item 0 | uribia\nProcessing record 41 of group 8 item 1 | murdochville\nProcessing record 42 of group 8 item 2 | grootfontein\nProcessing record 43 of group 8 item 3 | salta\nProcessing record 44 of group 8 item 4 | powell\nProcessing record 45 of group 9 item 0 | mataura\nProcessing record 46 of group 9 item 1 | punta arenas\nProcessing record 47 of group 9 item 2 | monze\nProcessing record 48 of group 9 item 3 | iqaluit\nProcessing record 49 of group 9 item 4 | tasiilaq\nProcessing record 50 of group 10 item 0 | kruisfontein\nProcessing record 51 of group 10 item 1 | mahebourg\nProcessing record 52 of group 10 item 2 | jamestown\nProcessing record 53 of group 10 item 3 | kapaa\nProcessing record 54 of group 10 item 4 | halifax\nProcessing record 55 of group 11 item 0 | saint anthony\nProcessing record 56 of group 11 item 1 | xuanzhou\nProcessing record 57 of group 11 item 2 | barrow\nProcessing record 58 of group 11 item 3 | kastamonu\nProcessing record 59 of group 11 item 4 | selfoss\nProcessing record 60 of group 12 item 0 | bilma\nProcessing record 61 of group 12 item 1 | clarence town\nProcessing record 62 of group 12 item 2 | belushya guba\nData for belushya guba not available. 
Skipping......\nProcessing record 63 of group 12 item 3 | victoria\nProcessing record 64 of group 12 item 4 | akyab\nProcessing record 65 of group 13 item 0 | san jose de guanipa\nProcessing record 66 of group 13 item 1 | rikitea\nProcessing record 67 of group 13 item 2 | esperance\nProcessing record 68 of group 13 item 3 | krasnoselkup\nProcessing record 69 of group 13 item 4 | codrington\nProcessing record 70 of group 14 item 0 | puerto ayora\nProcessing record 71 of group 14 item 1 | ayan\nProcessing record 72 of group 14 item 2 | hasaki\nProcessing record 73 of group 14 item 3 | atar\nProcessing record 74 of group 14 item 4 | kuanshan\nData for kuanshan not available. Skipping......\nProcessing record 75 of group 15 item 0 | araxa\nProcessing record 76 of group 15 item 1 | oneonta\nProcessing record 77 of group 15 item 2 | urdzhar\nData for urdzhar not available. Skipping......\nProcessing record 78 of group 15 item 3 | comodoro rivadavia\nProcessing record 79 of group 15 item 4 | the valley\nProcessing record 80 of group 16 item 0 | kozhva\nProcessing record 81 of group 16 item 1 | pizarro\nProcessing record 82 of group 16 item 2 | atuona\nProcessing record 83 of group 16 item 3 | deer lake\nProcessing record 84 of group 16 item 4 | mount isa\nProcessing record 85 of group 17 item 0 | zalantun\nProcessing record 86 of group 17 item 1 | dingle\nProcessing record 87 of group 17 item 2 | hermanus\nProcessing record 88 of group 17 item 3 | madarounfa\nProcessing record 89 of group 17 item 4 | grindavik\nProcessing record 90 of group 18 item 0 | castro\nProcessing record 91 of group 18 item 1 | narsaq\nProcessing record 92 of group 18 item 2 | mayo\nProcessing record 93 of group 18 item 3 | belawan\nProcessing record 94 of group 18 item 4 | ryotsu\nProcessing record 95 of group 19 item 0 | whitehorse\nProcessing record 96 of group 19 item 1 | simao\nProcessing record 97 of group 19 item 2 | eyl\nProcessing record 98 of group 19 item 3 | hithadhoo\nProcessing record 99 of group 19 item 4 | yulara\nProcessing record 100 of group 20 item 0 | chor\nProcessing record 101 of group 20 item 1 | katobu\nProcessing record 102 of group 20 item 2 | mar del plata\nProcessing record 103 of group 20 item 3 | cogua\nProcessing record 104 of group 20 item 4 | saskylakh\nProcessing record 105 of group 21 item 0 | olafsvik\nProcessing record 106 of group 21 item 1 | navalmoral de la mata\nProcessing record 107 of group 21 item 2 | wegorzewo\nProcessing record 108 of group 21 item 3 | severnyy\nProcessing record 109 of group 21 item 4 | mys shmidta\nData for mys shmidta not available. 
Skipping......\nProcessing record 110 of group 22 item 0 | saint george\nProcessing record 111 of group 22 item 1 | isangel\nProcessing record 112 of group 22 item 2 | esil\nProcessing record 113 of group 22 item 3 | alofi\nProcessing record 114 of group 22 item 4 | siderno\nProcessing record 115 of group 23 item 0 | okhotsk\nProcessing record 116 of group 23 item 1 | port lincoln\nProcessing record 117 of group 23 item 2 | tiksi\nProcessing record 118 of group 23 item 3 | airai\nProcessing record 119 of group 23 item 4 | inirida\nProcessing record 120 of group 24 item 0 | apiai\nProcessing record 121 of group 24 item 1 | mogadishu\nProcessing record 122 of group 24 item 2 | koungou\nProcessing record 123 of group 24 item 3 | araouane\nProcessing record 124 of group 24 item 4 | khatanga\nProcessing record 125 of group 25 item 0 | bethel\nProcessing record 126 of group 25 item 1 | bagn\nProcessing record 127 of group 25 item 2 | sibolga\nProcessing record 128 of group 25 item 3 | geraldton\nProcessing record 129 of group 25 item 4 | nishihara\nProcessing record 130 of group 26 item 0 | teluk nibung\nProcessing record 131 of group 26 item 1 | ahipara\nProcessing record 132 of group 26 item 2 | turukhansk\nProcessing record 133 of group 26 item 3 | ust-ilimsk\nProcessing record 134 of group 26 item 4 | taolanaro\nData for taolanaro not available. Skipping......\nProcessing record 135 of group 27 item 0 | sola\nProcessing record 136 of group 27 item 1 | kaitangata\nProcessing record 137 of group 27 item 2 | luderitz\nProcessing record 138 of group 27 item 3 | kyabe\nProcessing record 139 of group 27 item 4 | machado\nProcessing record 140 of group 28 item 0 | praia da vitoria\nProcessing record 141 of group 28 item 1 | necochea\nProcessing record 142 of group 28 item 2 | hobart\nProcessing record 143 of group 28 item 3 | batagay-alyta\nProcessing record 144 of group 28 item 4 | kalanguy\nProcessing record 145 of group 29 item 0 | waldoboro\nProcessing record 146 of group 29 item 1 | sovetskiy\nProcessing record 147 of group 29 item 2 | ewa beach\nProcessing record 148 of group 29 item 3 | torbay\nProcessing record 149 of group 29 item 4 | new norfolk\nProcessing record 150 of group 30 item 0 | port alfred\nProcessing record 151 of group 30 item 1 | bargal\nData for bargal not available. Skipping......\nProcessing record 152 of group 30 item 2 | grand gaube\nProcessing record 153 of group 30 item 3 | roebourne\n"
]
],
[
[
"### Convert Raw Data to DataFrame\n* Export the city data into a .csv.\n* Display the DataFrame",
"_____no_output_____"
]
],
[
[
"main_df = pd.DataFrame({'city' : cities, 'Latitude' : Lat, 'Longitude': Lng, \"Max temp, F\": Max_Temp, \"Humidity %\" : Humidity, \\\n \"Cloudiness\" : Cloudiness, 'Wind Speed, mph' : Wind_Speed, 'Country' : Country , 'Date' : Date})\nlen(main_df)\nmain_df",
"_____no_output_____"
],
[
"#clean up data by removing rows with NaN\nmain_df = main_df.drop(labels = main_df[pd.isna(main_df['Latitude'])]['Latitude'].index)\nmain_df.reset_index(drop = True)",
"_____no_output_____"
],
[
"main_df.to_csv(output_data_file)",
"_____no_output_____"
]
],
[
[
"## Inspect the data and remove the cities where the humidity > 100%.\n----\nSkip this step if there are no cities that have humidity > 100%. ",
"_____no_output_____"
]
],
[
[
"#there are no cities with humidity over 100%\nmain_df.loc[(main_df['Humidity %'] > 100)][:]",
"_____no_output_____"
],
[
"# Get the indices of cities that have humidity over 100%.\nhumidity_index = main_df.loc[(main_df['Humidity %'] > 100)].index\nhumidity_index",
"_____no_output_____"
],
[
"# Make a new DataFrame equal to the city data to drop all humidity outliers by index.\n# Passing \"inplace=False\" will make a copy of the city_data DataFrame, which we call \"clean_city_data\".\nclean_city_data = main_df.drop(index = humidity_index)\nclean_city_data",
"_____no_output_____"
],
[
"#This line is here because I had one datapoint with a 800 mph wind speed.\nclean_city_data.loc[clean_city_data['Wind Speed, mph'] > 100,'Wind Speed, mph'] = 0",
"_____no_output_____"
],
[
"# Extract relevant fields from the data frame\n\n\n# Export the City_Data into a csv\nclean_city_data.to_csv(output_data_file_2, index = False)",
"_____no_output_____"
]
],
[
[
"## Plotting the Data\n* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.\n* Save the plotted figures as .pngs.",
"_____no_output_____"
],
[
"## Latitude vs. Temperature Plot",
"_____no_output_____"
]
],
[
[
"now = datetime.now()\ndate = now.strftime(\"%m/%d/%Y\")\ntemperature_title = \"Latitude versus Temperature\\n\"+date\nclean_city_data.plot(x = 'Latitude', y = 'Max temp, F', kind='scatter', title = temperature_title)\nplt.show()",
"_____no_output_____"
]
],
[
[
"pretty clear correlation between latitude and temperature.",
"_____no_output_____"
],
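[
"A quick numeric check (an added sketch, not part of the original homework): back the observation above with a Pearson correlation coefficient, assuming `clean_city_data` from the cells above.\n\n```python\nr = clean_city_data['Latitude'].corr(clean_city_data['Max temp, F'])\nprint(f'Latitude vs. max temperature: r = {r:.3f}')\n```",
"_____no_output_____"
],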
[
"## Latitude vs. Humidity Plot",
"_____no_output_____"
]
],
[
[
"humidity_title = \"Latitude versus Humidity\\n\"+date\nclean_city_data.plot(x = 'Latitude', y = 'Humidity %', kind='scatter', title = humidity_title)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Weak correlation between Humidity and latitude",
"_____no_output_____"
],
[
"## Latitude vs. Cloudiness Plot",
"_____no_output_____"
]
],
[
[
"cloudiness_title = \"Latitude versus Cloudiness\\n\"+date\nclean_city_data.plot(x = 'Latitude', y = 'Cloudiness', kind='scatter', title = cloudiness_title)\nplt.show()",
"_____no_output_____"
]
],
[
[
"No correlation between latitude and Cloudiness",
"_____no_output_____"
],
[
"## Latitude vs. Wind Speed Plot",
"_____no_output_____"
]
],
[
[
"wind_speed_title = \"Latitude versus Wind Speed\\n\"+date\nclean_city_data.plot(x = 'Latitude', y = 'Wind Speed, mph', kind='scatter', title = wind_speed_title)\nplt.show()",
"_____no_output_____"
]
],
[
[
"No correlation between Wind Speed and latitude",
"_____no_output_____"
],
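[
"The same quick check for the remaining three variables (an added sketch, not part of the original homework), again assuming `clean_city_data` from the cells above.\n\n```python\nfor col in ['Humidity %', 'Cloudiness', 'Wind Speed, mph']:\n    r = clean_city_data['Latitude'].corr(clean_city_data[col])\n    print(f'Latitude vs. {col}: r = {r:.3f}')\n```",
"_____no_output_____"
],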
[
"## Linear Regression",
"_____no_output_____"
]
],
[
[
"# OPTIONAL: Create a function to create Linear Regression plots\ndef linear_regression(x_values, y_values, graph_title):\n slope, intercept, r_value, p_value, std_err = linregress(x_values, y_values)\n plt.scatter(x = x_values, y = y_values)\n return",
"_____no_output_____"
],
[
"# Create Northern and Southern Hemisphere DataFrames\n#I created a dataframe of dataframes. One is 'north' and the other 'south'\ncity_north_df = clean_city_data.loc[clean_city_data['Latitude'] >= 0][:]\ncity_south_df = clean_city_data.loc[clean_city_data['Latitude'] <= 0][:]\ncity_north_df.head()\ncity_south_df.head()\ndata_df = {'North' : city_north_df, 'South' : city_south_df}",
"_____no_output_____"
]
],
[
[
"#### Generate all graphs",
"_____no_output_____"
]
],
[
[
"#these 2 lists are used in for loops to go through each graph possibility and plot it.\nhemisphere_list = ['North', 'South']\ny_axis_data_list = ['Max temp, F', 'Humidity %', 'Cloudiness', 'Wind Speed, mph']",
"_____no_output_____"
],
[
"#this function simply takes in the slope, intercept and max and min latitudes\n#it returns the corresponding y_values for plotting\ndef y_values(slope, intercept, x_max, x_min):\n y_max = slope * x_max + intercept\n y_min = slope * x_min + intercept\n return (y_max, y_min)",
"_____no_output_____"
],
[
"#main graphing function to complete all graphs.\ndef graph_function(hemisphere, y_axis_data):\n plt.figure()\n plt.scatter(x = data_df[hemisphere]['Latitude'], y = data_df[hemisphere][y_axis_data])\n plt.title('{}ern Hemisphere - {} vs. Latitude\\n Linear Regression'.format(hemisphere, y_axis_data))\n slope, intercept, r_value, p_value, std_err = linregress(x = data_df[hemisphere]['Latitude'], y = data_df[hemisphere][y_axis_data])\n r_squared = r_value ** 2\n x_max = data_df[hemisphere]['Latitude'].max()\n x_min = data_df[hemisphere]['Latitude'].min()\n y_max, y_min = y_values(slope, intercept, x_max, x_min)\n x_line = [x_max, x_min]\n y_line = [y_max, y_min]\n #print(y_max)\n plt.plot(x_line, y_line, color = 'red')\n text_equation = 'y = {:.2f} * x + {:.2f}\\nr**2 = {:.4f}'.format(slope, intercept, r_squared)\n plt.text(x_min, y_max,text_equation, color = 'r', bbox=dict(facecolor='w', alpha=0.5))\n plt.legend(['correlation', 'raw data'] , bbox_to_anchor=(1.05, 1), loc = 'upper left')\n filename = \"./plots/graph%sern_hemisphere_and_%s.png\" % (hemisphere, y_axis_data)\n plt.savefig(filename, bbox_inches='tight')\n #this line is key to making the graphs plot one after another\n plt.show()\n",
"_____no_output_____"
],
[
"#This loop goes through both hemispheres and then all of the y-axis data that is required.\n#values are defined in lists that that then reference the dataframe of dataframes. One for nothern\n#and one for southern hemispheres.\nfor hemisphere in hemisphere_list:\n for y_axis_data in y_axis_data_list:\n graph_function(hemisphere, y_axis_data)\n\n",
"_____no_output_____"
]
],
[
[
"The graphs pretty much speak for themselves. Only observed correlation with Max Temperature. All other variables seem to not correlate.",
"_____no_output_____"
],
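[
"#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression",
"_____no_output_____"
],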
[
"#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression",
"_____no_output_____"
],
[
"#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression",
"_____no_output_____"
],
[
"#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression",
"_____no_output_____"
],
[
"#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression",
"_____no_output_____"
],
[
"#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression",
"_____no_output_____"
],
[
"#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression",
"_____no_output_____"
],
[
"#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a0f8502e231286229afc670ac345fd2f24a9a78
| 3,806 |
ipynb
|
Jupyter Notebook
|
Looping_over_lists.ipynb
|
MaiaNgo/python-best-practices
|
dc0ab2d309279ac83f3529326ada15f6215ca9c0
|
[
"Apache-2.0"
] | null | null | null |
Looping_over_lists.ipynb
|
MaiaNgo/python-best-practices
|
dc0ab2d309279ac83f3529326ada15f6215ca9c0
|
[
"Apache-2.0"
] | null | null | null |
Looping_over_lists.ipynb
|
MaiaNgo/python-best-practices
|
dc0ab2d309279ac83f3529326ada15f6215ca9c0
|
[
"Apache-2.0"
] | null | null | null | 31.983193 | 711 | 0.533631 |
[
[
[
"# LOOPING OVER LISTS",
"_____no_output_____"
],
[
"### Create 3 lists:",
"_____no_output_____"
]
],
[
[
"products = ['Avocado', 'Sweet Potato', 'Banana', 'Pumpkin', 'Butter Squash', 'Plantain', 'Mango', 'Pineapple']; products",
"_____no_output_____"
],
[
"units = ['315', '294', '1423', '543', '120', '2314', '673', '872', '3278', '9432', '1673']; units",
"_____no_output_____"
],
[
"sales = ['209', '152', '1017', '274', '95', '1425', '512', '461']; sales",
"_____no_output_____"
],
[
"# Look up value for each element in the list of products by the corresponding index element in the list of units:\nfor product, unit, sale in zip(products, units, sales):\n #print(f\"[{format(product, '<20')}], 'has the total number is: ', unit, 'with the number of sale is', sale)\n print(format(product, '>20'), 'has the total number is: ', format(unit, '>4'), 'with the number of sale is: ', format(sale, '>4'))\n\n",
" Avocado has the total number is: 315 with the number of sale is: 209\n Sweet Potato has the total number is: 294 with the number of sale is: 152\n Banana has the total number is: 1423 with the number of sale is: 1017\n Pumpkin has the total number is: 543 with the number of sale is: 274\n Butter Squash has the total number is: 120 with the number of sale is: 95\n Plantain has the total number is: 2314 with the number of sale is: 1425\n Mango has the total number is: 673 with the number of sale is: 512\n Pineapple has the total number is: 872 with the number of sale is: 461\n"
]
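,
[
"# Added sketch (not in the original notebook): zip() stops at the shortest input,\n# so only the first 8 of the 11 entries in units are paired above.\n# itertools.zip_longest makes the leftover entries visible.\nfrom itertools import zip_longest\n\nfor product, unit in zip_longest(products, units, fillvalue='(no product)'):\n    print(format(str(product), '>20'), unit)",
"_____no_output_____"
]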
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a0f904914023caf5850ce487147020d5064314e
| 125,673 |
ipynb
|
Jupyter Notebook
|
word_cloud/Mining_Cancer_Tweets.ipynb
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | 2 |
2021-02-13T05:52:05.000Z
|
2022-02-08T09:52:35.000Z
|
word_cloud/Mining_Cancer_Tweets.ipynb
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | null | null | null |
word_cloud/Mining_Cancer_Tweets.ipynb
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | null | null | null | 68.899671 | 176 | 0.763314 |
[
[
[
"#Import the necessary methods from tweepy library\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\n\n#Variables that contains the user credentials to access Twitter API \naccess_token = \"your_access_token\"\naccess_token_secret = \"your_access_secret_token\"\nconsumer_key = \"your_consumer_key\"\nconsumer_secret = \"your_consumer_secret\"\n\n\n#This is a basic listener that just prints received tweets to stdout.\nclass StdOutListener(StreamListener):\n\n def on_data(self, data):\n print(data)\n return True\n\n def on_error(self, status):\n print(status)\n\n\nif __name__ == '__main__':\n\n #This handles Twitter authetification and the connection to Twitter Streaming API\n l = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, l)\n\n #This line filter Twitter Streams to capture data by the keywords: 'honda'\n stream.filter(track=['honda','Honda','HONDA'])",
"_____no_output_____"
]
],
[
[
"### Then from your terminal, execute this script with output piped to a text file: your_script.py > tweets_data.txt",
"_____no_output_____"
],
[
"# Then run this script below to create a Python dataframe of the tweets data",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport json\nimport string\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom os import path\npd.set_option(\"display.max_rows\",1000)\npd.set_option(\"display.max_columns\",20)\npd.set_option(\"display.max_colwidth\",150)\n\nd = path.dirname('/home/pybokeh/temp/')\n\ntweets_data = []\ntweets_file = open(path.join(d, 'cancer_tweets_data.txt'),'r')\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n tweets_data.append(tweet)\n except:\n continue\n\nprint(len(tweets_data))",
"9208\n"
],
[
"tweets = pd.DataFrame()\ntweets['text'] = [tweet['text'] for tweet in tweets_data]\ntweets['lang'] = [tweet['lang'] for tweet in tweets_data]\ntweets['retweeted'] = [tweet['retweeted'] for tweet in tweets_data]",
"_____no_output_____"
],
[
"tweets.head()",
"_____no_output_____"
],
[
"english_tweets = tweets[(tweets['lang']=='en') & (tweets['retweeted']==False)]\nenglish_tweets.drop_duplicates(subset='text');",
"_____no_output_____"
],
[
"text = ''\nfor line in english_tweets['text']:\n text = text + ' ' + line\n\ntext = text.replace(\"'s\",'')",
"_____no_output_____"
],
[
"%matplotlib inline\nfrom os import path\nfrom scipy.misc import imread\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\nd = path.dirname('/home/pybokeh/Downloads/')\n# Read the whole text.\n#text = strWords\n#text = open(path.join(d, 'alice.txt')).read()\n\nadditional_words = [\n 'rt',\n 'ebay',\n 'co',\n 't',\n 'amp',\n 'https'\n]\n\nfor word in additional_words:\n STOPWORDS.add(word)\n\n# read the mask image\n# taken from\n# http://www.stencilry.org/stencils/movies/alice%20in%20wonderland/255fk.jpg\n#honda_mask = imread(path.join(d, \"honda_logo_mask.png\"), flatten=True)\n#wc = WordCloud(background_color=\"black\", max_words=2000, mask=honda_mask, stopwords=STOPWORDS)\n\n# generate word cloud\nwc = WordCloud(width=800, height=600).generate(text)\n\n# store to file\nwc.to_file(path.join(d, \"cancer_word_cloud.png\"))\n\n# show\nplt.imshow(wc)\nplt.axis(\"off\")\n#plt.figure()\n#plt.imshow(honda_mask, cmap=plt.cm.gray)\n#plt.axis(\"off\")\nplt.show()",
"_____no_output_____"
],
[
"prevent = tweets[(tweets['text'].str.contains('food')) | (tweets['text'].str.contains('nutrient'))]",
"_____no_output_____"
],
[
"prevent['text']",
"_____no_output_____"
],
[
"wc.process_text(text)[:50]",
"_____no_output_____"
],
[
"STOPWORDS",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0f9542e1f7ee03b263209e5cbc3950c488051f
| 5,048 |
ipynb
|
Jupyter Notebook
|
ipynb/Germany-Bayern-LK-Erding.ipynb
|
oscovida/oscovida.github.io
|
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
|
[
"CC-BY-4.0"
] | 2 |
2020-06-19T09:16:14.000Z
|
2021-01-24T17:47:56.000Z
|
ipynb/Germany-Bayern-LK-Erding.ipynb
|
oscovida/oscovida.github.io
|
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
|
[
"CC-BY-4.0"
] | 8 |
2020-04-20T16:49:49.000Z
|
2021-12-25T16:54:19.000Z
|
ipynb/Germany-Bayern-LK-Erding.ipynb
|
oscovida/oscovida.github.io
|
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
|
[
"CC-BY-4.0"
] | 4 |
2020-04-20T13:24:45.000Z
|
2021-01-29T11:12:12.000Z
| 30.227545 | 178 | 0.520602 |
[
[
[
"# Germany: LK Erding (Bayern)\n\n* Homepage of project: https://oscovida.github.io\n* Plots are explained at http://oscovida.github.io/plots.html\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-LK-Erding.ipynb)",
"_____no_output_____"
]
],
[
[
"import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")",
"_____no_output_____"
],
[
"%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *",
"_____no_output_____"
],
[
"overview(country=\"Germany\", subregion=\"LK Erding\", weeks=5);",
"_____no_output_____"
],
[
"overview(country=\"Germany\", subregion=\"LK Erding\");",
"_____no_output_____"
],
[
"compare_plot(country=\"Germany\", subregion=\"LK Erding\", dates=\"2020-03-15:\");\n",
"_____no_output_____"
],
[
"# load the data\ncases, deaths = germany_get_region(landkreis=\"LK Erding\")\n\n# get population of the region for future normalisation:\ninhabitants = population(country=\"Germany\", subregion=\"LK Erding\")\nprint(f'Population of country=\"Germany\", subregion=\"LK Erding\": {inhabitants} people')\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 1000 rows\npd.set_option(\"max_rows\", 1000)\n\n# display the table\ntable",
"_____no_output_____"
]
],
[
[
"# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Bayern-LK-Erding.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook",
"_____no_output_____"
],
[
"# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------",
"_____no_output_____"
]
],
[
[
"print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")",
"_____no_output_____"
],
[
"# to force a fresh download of data, run \"clear_cache()\"",
"_____no_output_____"
],
[
"print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a0faa4f9f221a69a310c9a81b5913ba1cadd703
| 59,581 |
ipynb
|
Jupyter Notebook
|
d2l-en/mxnet/chapter_multilayer-perceptrons/numerical-stability-and-init.ipynb
|
gr8khan/d2lai
|
7c10432f38c80e86978cd075d0024902b47842a0
|
[
"MIT"
] | null | null | null |
d2l-en/mxnet/chapter_multilayer-perceptrons/numerical-stability-and-init.ipynb
|
gr8khan/d2lai
|
7c10432f38c80e86978cd075d0024902b47842a0
|
[
"MIT"
] | null | null | null |
d2l-en/mxnet/chapter_multilayer-perceptrons/numerical-stability-and-init.ipynb
|
gr8khan/d2lai
|
7c10432f38c80e86978cd075d0024902b47842a0
|
[
"MIT"
] | null | null | null | 43.2373 | 421 | 0.523993 |
[
[
[
"# Numerical Stability and Initialization\n:label:`sec_numerical_stability`\n\n\nThus far, every model that we have implemented\nrequired that we initialize its parameters\naccording to some pre-specified distribution.\nUntil now, we took the initialization scheme for granted,\nglossing over the details of how these choices are made.\nYou might have even gotten the impression that these choices\nare not especially important.\nTo the contrary, the choice of initialization scheme\nplays a significant role in neural network learning,\nand it can be crucial for maintaining numerical stability.\nMoreover, these choices can be tied up in interesting ways\nwith the choice of the nonlinear activation function.\nWhich function we choose and how we initialize parameters\ncan determine how quickly our optimization algorithm converges.\nPoor choices here can cause us to encounter\nexploding or vanishing gradients while training.\nIn this section, we delve into these topics with greater detail\nand discuss some useful heuristics\nthat you will find useful\nthroughout your career in deep learning.\n\n\n## Vanishing and Exploding Gradients\n\nConsider a deep network with $L$ layers,\ninput $\\mathbf{x}$ and output $\\mathbf{o}$.\nWith each layer $l$ defined by a transformation $f_l$\nparameterized by weights $\\mathbf{W}^{(l)}$,\nwhose hidden variable is $\\mathbf{h}^{(l)}$ (let $\\mathbf{h}^{(0)} = \\mathbf{x}$),\nour network can be expressed as:\n\n$$\\mathbf{h}^{(l)} = f_l (\\mathbf{h}^{(l-1)}) \\text{ and thus } \\mathbf{o} = f_L \\circ \\ldots \\circ f_1(\\mathbf{x}).$$\n\nIf all the hidden variables and the input are vectors,\nwe can write the gradient of $\\mathbf{o}$ with respect to\nany set of parameters $\\mathbf{W}^{(l)}$ as follows:\n\n$$\\partial_{\\mathbf{W}^{(l)}} \\mathbf{o} = \\underbrace{\\partial_{\\mathbf{h}^{(L-1)}} \\mathbf{h}^{(L)}}_{ \\mathbf{M}^{(L)} \\stackrel{\\mathrm{def}}{=}} \\cdot \\ldots \\cdot \\underbrace{\\partial_{\\mathbf{h}^{(l)}} \\mathbf{h}^{(l+1)}}_{ \\mathbf{M}^{(l+1)} \\stackrel{\\mathrm{def}}{=}} \\underbrace{\\partial_{\\mathbf{W}^{(l)}} \\mathbf{h}^{(l)}}_{ \\mathbf{v}^{(l)} \\stackrel{\\mathrm{def}}{=}}.$$\n\nIn other words, this gradient is\nthe product of $L-l$ matrices\n$\\mathbf{M}^{(L)} \\cdot \\ldots \\cdot \\mathbf{M}^{(l+1)}$\nand the gradient vector $\\mathbf{v}^{(l)}$.\nThus we are susceptible to the same\nproblems of numerical underflow that often crop up\nwhen multiplying together too many probabilities.\nWhen dealing with probabilities, a common trick is to\nswitch into log-space, i.e., shifting\npressure from the mantissa to the exponent\nof the numerical representation.\nUnfortunately, our problem above is more serious:\ninitially the matrices $\\mathbf{M}^{(l)}$ may have a wide variety of eigenvalues.\nThey might be small or large, and\ntheir product might be *very large* or *very small*.\n\nThe risks posed by unstable gradients\ngo beyond numerical representation.\nGradients of unpredictable magnitude\nalso threaten the stability of our optimization algorithms.\nWe may be facing parameter updates that are either\n(i) excessively large, destroying our model\n(the *exploding gradient* problem);\nor (ii) excessively small\n(the *vanishing gradient* problem),\nrendering learning impossible as parameters\nhardly move on each update.\n\n\n### Vanishing Gradients\n\nOne frequent culprit causing the vanishing gradient problem\nis the choice of the activation function $\\sigma$\nthat is appended following each layer's linear 
operations.\nHistorically, the sigmoid function\n$1/(1 + \\exp(-x))$ (introduced in :numref:`sec_mlp`)\nwas popular because it resembles a thresholding function.\nSince early artificial neural networks were inspired\nby biological neural networks,\nthe idea of neurons that fire either *fully* or *not at all*\n(like biological neurons) seemed appealing.\nLet us take a closer look at the sigmoid\nto see why it can cause vanishing gradients.\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom d2l import mxnet as d2l\nfrom mxnet import autograd, np, npx\nnpx.set_np()\n\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\n\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))",
"_____no_output_____"
]
],
[
[
"As you can see, the sigmoid's gradient vanishes\nboth when its inputs are large and when they are small.\nMoreover, when backpropagating through many layers,\nunless we are in the Goldilocks zone, where\nthe inputs to many of the sigmoids are close to zero,\nthe gradients of the overall product may vanish.\nWhen our network boasts many layers,\nunless we are careful, the gradient\nwill likely be cut off at some layer.\nIndeed, this problem used to plague deep network training.\nConsequently, ReLUs, which are more stable\n(but less neurally plausible),\nhave emerged as the default choice for practitioners.\n\n\n### Exploding Gradients\n\nThe opposite problem, when gradients explode,\ncan be similarly vexing.\nTo illustrate this a bit better,\nwe draw 100 Gaussian random matrices\nand multiply them with some initial matrix.\nFor the scale that we picked\n(the choice of the variance $\\sigma^2=1$),\nthe matrix product explodes.\nWhen this happens due to the initialization\nof a deep network, we have no chance of getting\na gradient descent optimizer to converge.\n",
"_____no_output_____"
]
],
[
[
"M = np.random.normal(size=(4, 4))\nprint('a single matrix', M)\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))\n\nprint('after multiplying 100 matrices', M)",
"a single matrix [[ 2.2122064 1.1630787 0.7740038 0.4838046 ]\n [ 1.0434405 0.29956347 1.1839255 0.15302546]\n [ 1.8917114 -1.1688148 -1.2347414 1.5580711 ]\n [-1.771029 -0.5459446 -0.45138445 -2.3556297 ]]\nafter multiplying 100 matrices [[ 3.4459714e+23 -7.8040680e+23 5.9973287e+23 4.5229990e+23]\n [ 2.5275089e+23 -5.7240326e+23 4.3988473e+23 3.3174740e+23]\n [ 1.3731286e+24 -3.1097155e+24 2.3897773e+24 1.8022959e+24]\n [-4.4951040e+23 1.0180033e+24 -7.8232281e+23 -5.9000354e+23]]\n"
]
],
[
[
"### Breaking the Symmetry\n\nAnother problem in neural network design\nis the symmetry inherent in their parametrization.\nAssume that we have a simple MLP\nwith one hidden layer and two units.\nIn this case, we could permute the weights $\\mathbf{W}^{(1)}$\nof the first layer and likewise permute\nthe weights of the output layer\nto obtain the same function.\nThere is nothing special differentiating\nthe first hidden unit vs. the second hidden unit.\nIn other words, we have permutation symmetry\namong the hidden units of each layer.\n\nThis is more than just a theoretical nuisance.\nConsider the aforementioned one-hidden-layer MLP\nwith two hidden units.\nFor illustration,\nsuppose that the output layer transforms the two hidden units into only one output unit.\nImagine what would happen if we initialized\nall of the parameters of the hidden layer\nas $\\mathbf{W}^{(1)} = c$ for some constant $c$.\nIn this case, during forward propagation\neither hidden unit takes the same inputs and parameters,\nproducing the same activation,\nwhich is fed to the output unit.\nDuring backpropagation,\ndifferentiating the output unit with respect to parameters $\\mathbf{W}^{(1)}$ gives a gradient whose elements all take the same value.\nThus, after gradient-based iteration (e.g., minibatch stochastic gradient descent),\nall the elements of $\\mathbf{W}^{(1)}$ still take the same value.\nSuch iterations would\nnever *break the symmetry* on its own\nand we might never be able to realize\nthe network's expressive power.\nThe hidden layer would behave\nas if it had only a single unit.\nNote that while minibatch stochastic gradient descent would not break this symmetry,\ndropout regularization would!\n\n\n## Parameter Initialization\n\nOne way of addressing---or at least mitigating---the\nissues raised above is through careful initialization.\nAdditional care during optimization\nand suitable regularization can further enhance stability.\n\n\n### Default Initialization\n\nIn the previous sections, e.g., in :numref:`sec_linear_concise`,\nwe used a normal distribution \nto initialize the values of our weights.\nIf we do not specify the initialization method, the framework will\nuse a default random initialization method, which often works well in practice\nfor moderate problem sizes.\n\n\n\n\n\n\n### Xavier Initialization\n:label:`subsec_xavier`\n\nLet us look at the scale distribution of\nan output (e.g., a hidden variable) $o_{i}$ for some fully-connected layer\n*without nonlinearities*.\nWith $n_\\mathrm{in}$ inputs $x_j$\nand their associated weights $w_{ij}$ for this layer,\nan output is given by\n\n$$o_{i} = \\sum_{j=1}^{n_\\mathrm{in}} w_{ij} x_j.$$\n\nThe weights $w_{ij}$ are all drawn\nindependently from the same distribution.\nFurthermore, let us assume that this distribution\nhas zero mean and variance $\\sigma^2$.\nNote that this does not mean that the distribution has to be Gaussian,\njust that the mean and variance need to exist.\nFor now, let us assume that the inputs to the layer $x_j$\nalso have zero mean and variance $\\gamma^2$\nand that they are independent of $w_{ij}$ and independent of each other.\nIn this case, we can compute the mean and variance of $o_i$ as follows:\n\n$$\n\\begin{aligned}\n E[o_i] & = \\sum_{j=1}^{n_\\mathrm{in}} E[w_{ij} x_j] \\\\&= \\sum_{j=1}^{n_\\mathrm{in}} E[w_{ij}] E[x_j] \\\\&= 0, \\\\\n \\mathrm{Var}[o_i] & = E[o_i^2] - (E[o_i])^2 \\\\\n & = \\sum_{j=1}^{n_\\mathrm{in}} E[w^2_{ij} x^2_j] - 0 \\\\\n & = \\sum_{j=1}^{n_\\mathrm{in}} E[w^2_{ij}] E[x^2_j] \\\\\n & 
= n_\\mathrm{in} \\sigma^2 \\gamma^2.\n\\end{aligned}\n$$\n\nOne way to keep the variance fixed\nis to set $n_\\mathrm{in} \\sigma^2 = 1$.\nNow consider backpropagation.\nThere we face a similar problem,\nalbeit with gradients being propagated from the layers closer to the output.\nUsing the same reasoning as for forward propagation,\nwe see that the gradients' variance can blow up\nunless $n_\\mathrm{out} \\sigma^2 = 1$,\nwhere $n_\\mathrm{out}$ is the number of outputs of this layer.\nThis leaves us in a dilemma:\nwe cannot possibly satisfy both conditions simultaneously.\nInstead, we simply try to satisfy:\n\n$$\n\\begin{aligned}\n\\frac{1}{2} (n_\\mathrm{in} + n_\\mathrm{out}) \\sigma^2 = 1 \\text{ or equivalently }\n\\sigma = \\sqrt{\\frac{2}{n_\\mathrm{in} + n_\\mathrm{out}}}.\n\\end{aligned}\n$$\n\nThis is the reasoning underlying the now-standard\nand practically beneficial *Xavier initialization*,\nnamed after the first author of its creators :cite:`Glorot.Bengio.2010`.\nTypically, the Xavier initialization\nsamples weights from a Gaussian distribution\nwith zero mean and variance\n$\\sigma^2 = \\frac{2}{n_\\mathrm{in} + n_\\mathrm{out}}$.\nWe can also adapt Xavier's intuition to\nchoose the variance when sampling weights\nfrom a uniform distribution.\nNote that the uniform distribution $U(-a, a)$ has variance $\\frac{a^2}{3}$.\nPlugging $\\frac{a^2}{3}$ into our condition on $\\sigma^2$\nyields the suggestion to initialize according to\n\n$$U\\left(-\\sqrt{\\frac{6}{n_\\mathrm{in} + n_\\mathrm{out}}}, \\sqrt{\\frac{6}{n_\\mathrm{in} + n_\\mathrm{out}}}\\right).$$\n\nThough the assumption for nonexistence of nonlinearities\nin the above mathematical reasoning \ncan be easily violated in neural networks,\nthe Xavier initialization method\nturns out to work well in practice.\n\n\n### Beyond\n\nThe reasoning above barely scratches the surface\nof modern approaches to parameter initialization.\nA deep learning framework often implements over a dozen different heuristics.\nMoreover, parameter initialization continues to be\na hot area of fundamental research in deep learning.\nAmong these are heuristics specialized for\ntied (shared) parameters, super-resolution,\nsequence models, and other situations.\nFor instance,\nXiao et al. demonstrated the possibility of training\n10000-layer neural networks without architectural tricks\nby using a carefully-designed initialization method :cite:`Xiao.Bahri.Sohl-Dickstein.ea.2018`.\n\nIf the topic interests you we suggest\na deep dive into this module's offerings,\nreading the papers that proposed and analyzed each heuristic,\nand then exploring the latest publications on the topic.\nPerhaps you will stumble across or even invent\na clever idea and contribute an implementation to deep learning frameworks.\n\n\n## Summary\n\n* Vanishing and exploding gradients are common issues in deep networks. Great care in parameter initialization is required to ensure that gradients and parameters remain well controlled.\n* Initialization heuristics are needed to ensure that the initial gradients are neither too large nor too small.\n* ReLU activation functions mitigate the vanishing gradient problem. This can accelerate convergence.\n* Random initialization is key to ensure that symmetry is broken before optimization.\n* Xavier initialization suggests that, for each layer, variance of any output is not affected by the number of inputs, and variance of any gradient is not affected by the number of outputs.\n\n## Exercises\n\n1. 
Can you design other cases where a neural network might exhibit symmetry requiring breaking besides the permutation symmetry in an MLP's layers?\n1. Can we initialize all weight parameters in linear regression or in softmax regression to the same value?\n1. Look up analytic bounds on the eigenvalues of the product of two matrices. What does this tell you about ensuring that gradients are well conditioned?\n1. If we know that some terms diverge, can we fix this after the fact? Look at the paper on layer-wise adaptive rate scaling for inspiration :cite:`You.Gitman.Ginsburg.2017`.\n",
"_____no_output_____"
],
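[
"A minimal sketch (not part of the original text): Xavier initialization from the formulas above in plain NumPy, assuming only `numpy` is available; the framework's built-in initializers are untouched.\n\n```python\nimport numpy as np\n\nn_in, n_out = 64, 32\nsigma = (2 / (n_in + n_out)) ** 0.5           # Gaussian variant\nW_gauss = np.random.normal(0, sigma, (n_in, n_out))\na = (6 / (n_in + n_out)) ** 0.5               # uniform variant U(-a, a)\nW_unif = np.random.uniform(-a, a, (n_in, n_out))\nprint(W_gauss.std(), W_unif.std())            # both should be close to sigma\n```",
"_____no_output_____"
],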
[
"[Discussions](https://discuss.d2l.ai/t/103)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a0fb20b94132b82e83d4257858b42622444b61e
| 9,275 |
ipynb
|
Jupyter Notebook
|
docs/practices/quick_start/hello_paddle.ipynb
|
luotao1/docs
|
2ebc0351fa1060426253fbea3559e84a55c7cb7c
|
[
"Apache-2.0"
] | 37 |
2021-05-28T08:59:49.000Z
|
2022-03-16T12:41:43.000Z
|
docs/practices/quick_start/hello_paddle.ipynb
|
luotao1/docs
|
2ebc0351fa1060426253fbea3559e84a55c7cb7c
|
[
"Apache-2.0"
] | 896 |
2021-05-14T16:05:54.000Z
|
2022-03-31T08:58:33.000Z
|
docs/practices/quick_start/hello_paddle.ipynb
|
luotao1/docs
|
2ebc0351fa1060426253fbea3559e84a55c7cb7c
|
[
"Apache-2.0"
] | 138 |
2021-05-17T02:57:09.000Z
|
2022-03-30T08:23:54.000Z
| 24.667553 | 281 | 0.565067 |
[
[
[
"# hello paddle: 从普通程序走向机器学习程序\n\n**作者:** [PaddlePaddle](https://github.com/PaddlePaddle) <br>\n**日期:** 2021.12 <br>\n**摘要:** 这篇示例向你介绍普通程序跟机器学习程序的区别,并带着你用飞桨框架,实现第一个机器学习程序。",
"_____no_output_____"
],
[
"## 一、普通程序跟机器学习程序的逻辑区别\n\n作为一名开发者,你最熟悉的开始学习一门编程语言,或者一个深度学习框架的方式,可能是通过一个hello world程序。\n\n学习飞桨也可以这样,这篇小示例教程将会通过一个非常简单的示例来向你展示如何开始使用飞桨。\n\n机器学习程序跟通常的程序最大的不同是,通常的程序是在给定输入的情况下,通过告诉计算机处理数据的规则,然后得到处理后的结果。而机器学习程序则是在并不知道这些规则的情况下,让机器来从数据当中**学习**出来规则。\n\n作为热身,先来看看通常的程序所做的事情。\n\n现在面临这样一个任务:\n\n乘坐出租车的时候,会有一个10元的起步价,只要上车就需要收取。出租车每行驶1公里,需要再支付每公里2元的行驶费用。当一个乘客坐完出租车之后,车上的计价器需要算出来该乘客需要支付的乘车费用。\n\n如果用python来实现该功能,会如下所示:",
"_____no_output_____"
]
],
[
[
"def calculate_fee(distance_travelled):\n return 10 + 2 * distance_travelled\n\nfor x in [1.0, 3.0, 5.0, 9.0, 10.0, 20.0]:\n print(calculate_fee(x))",
"12.0\n16.0\n20.0\n28.0\n30.0\n50.0\n"
]
],
[
[
"接下来,把问题稍微变换一下,现在知道乘客每次乘坐出租车的公里数,也知道乘客每次下车的时候支付给出租车司机的总费用。但是并不知道乘车的起步价,以及每公里行驶费用是多少。希望让机器从这些数据当中学习出来计算总费用的规则。\n\n更具体的,想要让机器学习程序通过数据学习出来下面的公式当中的参数 `w` 和参数 `b`(这是一个非常简单的示例,所以`w`和`b`都是浮点数,随着对深度学习了解的深入,你将会知道`w`和`b`通常情况下会是矩阵和向量)。这样,当下次乘车的时候,知道了行驶里程`distance_travelled`的时候,就可以估算出来用户的总费用`total_fee`了。\n\n```\ntotal_fee = w * distance_travelled + b\n```\n\n接下来,看看用飞桨如何实现这个hello, world级别的机器学习程序。",
"_____no_output_____"
],
[
"## 二、导入飞桨\n\n为了能够使用飞桨,需要先用python的`import`语句导入飞桨`paddle`。\n\n同时,为了能够更好的对数组进行计算和处理,还需要导入`numpy`。\n\n如果你是在本机运行这个notebook,而且还没有安装飞桨,请先参考官网[安装](https://www.paddlepaddle.org.cn/install/quick) Paddle 2.2.0。",
"_____no_output_____"
]
],
[
[
"import paddle\nprint(\"paddle \" + paddle.__version__)",
"paddle 2.2.0\n"
]
],
[
[
"## 三、准备数据\n\n在这个机器学习任务中,已经知道了乘客的行驶里程`distance_travelled`,和对应的,这些乘客的总费用`total_fee`。\n\n通常情况下,在机器学习任务中,像`distance_travelled`这样的输入值,一般被称为`x`(或者特征`feature`),像`total_fee`这样的输出值,一般被称为`y`(或者标签`label`)。\n\n可以用`paddle.to_tensor`把示例数据转换为paddle的Tensor数据。",
"_____no_output_____"
]
],
[
[
"x_data = paddle.to_tensor([[1.], [3.0], [5.0], [9.0], [10.0], [20.0]])\ny_data = paddle.to_tensor([[12.], [16.0], [20.0], [28.0], [30.0], [50.0]])",
"_____no_output_____"
]
],
[
[
"## 四、用飞桨定义模型的计算\n\n使用飞桨定义模型的计算的过程,本质上,是用python,通过飞桨提供的API,来告诉飞桨计算规则的过程。回顾一下,想要通过飞桨用机器学习方法,从数据当中学习出来如下公式当中的`w`和`b`。这样在未来,给定`x`时就可以估算出来`y`值(估算出来的`y`记为`y_predict`)\n\n```\ny_predict = w * x + b\n```\n\n将会用飞桨的线性变换层:`paddle.nn.Linear`来实现这个计算过程,这个公式里的变量`x, y, w, b, y_predict`,对应着飞桨里面的[Tensor概念](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/tensor.html)。\n\n**稍微补充一下**\n\n在这里的示例中,根据经验,已经事先知道了`distance_travelled`和`total_fee`之间是线性的关系,而在更实际的问题当中,`x`和`y`的关系通常是非线性的,因此也就需要使用更多类型,也更复杂的神经网络。(比如,BMI指数跟你的身高就不是线性关系,一张图片里的某个像素值跟这个图片是猫还是狗也不是线性关系。)\n",
"_____no_output_____"
]
],
[
[
"linear = paddle.nn.Linear(in_features=1, out_features=1)",
"_____no_output_____"
]
],
[
[
"## 五、准备好运行飞桨\n\n机器(计算机)在一开始的时候会随便猜`w`和`b`,先看看机器猜的怎么样。你应该可以看到,这时候的`w`是一个随机值,`b`是0.0,这是飞桨的初始化策略,也是这个领域常用的初始化策略。(如果你愿意,也可以采用其他的初始化的方式,今后你也会看到,选择不同的初始化策略也是对于做好深度学习任务来说很重要的一点)。",
"_____no_output_____"
]
],
[
[
"w_before_opt = linear.weight.numpy().item()\nb_before_opt = linear.bias.numpy().item()\n\nprint(\"w before optimize: {}\".format(w_before_opt))\nprint(\"b before optimize: {}\".format(b_before_opt))",
"w before optimize: 1.0952061414718628\nb before optimize: 0.0\n"
]
],
[
[
"## 六、告诉飞桨怎么样学习\n\n前面定义好了神经网络(尽管是一个最简单的神经网络),还需要告诉飞桨,怎么样去**学习**,从而能得到参数`w`和`b`。\n\n这个过程简单的来陈述一下,你应该就会大致明白了(尽管背后的理论和知识还需要逐步的去学习)。在机器学习/深度学习当中,机器(计算机)在最开始的时候,得到参数`w`和`b`的方式是随便猜一下,用这种随便猜测得到的参数值,去进行计算(预测)的时候,得到的`y_predict`,跟实际的`y`值一定是有**差距**的。接下来,机器会根据这个差距来**调整`w`和`b`**,随着这样的逐步的调整,`w`和`b`会越来越正确,`y_predict`跟`y`之间的差距也会越来越小,从而最终能得到好用的`w`和`b`。这个过程就是机器**学习**的过程。\n\n用更加技术的语言来说,衡量**差距**的函数(一个公式)就是损失函数,用来**调整**参数的方法就是优化算法。\n\n在本示例当中,用最简单的均方误差(mean square error)作为损失函数(`paddle.nn.MSELoss`);和最常见的优化算法SGD(stocastic gradient descent)作为优化算法(传给`paddle.optimizer.SGD`的参数`learning_rate`,你可以理解为控制每次调整的步子大小的参数)。",
"_____no_output_____"
]
],
[
[
"mse_loss = paddle.nn.MSELoss()\nsgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001, parameters = linear.parameters())",
"_____no_output_____"
]
],
[
[
"## 七、运行优化算法\n\n接下来,让飞桨运行一下这个优化算法,这会是一个前面介绍过的逐步调整参数的过程,你应该可以看到loss值(衡量`y`和`y_predict`的差距的`loss`)在不断的降低。",
"_____no_output_____"
]
],
[
[
"total_epoch = 5000\nfor i in range(total_epoch):\n y_predict = linear(x_data)\n loss = mse_loss(y_predict, y_data)\n loss.backward()\n sgd_optimizer.step()\n sgd_optimizer.clear_grad()\n \n if i%1000 == 0:\n print(\"epoch {} loss {}\".format(i, loss.numpy()))\n \nprint(\"finished training, loss {}\".format(loss.numpy()))",
"epoch 0 loss [328.8153]\nepoch 1000 loss [8.201832]\nepoch 2000 loss [1.8338685]\nepoch 3000 loss [0.41004065]\nepoch 4000 loss [0.09168116]\nfinished training, loss [0.0205305]\n"
]
],
[
[
"## 八、机器学习出来的参数\n\n经过了这样的对参数`w`和`b`的调整(**学习**),再通过下面的程序,来看看现在的参数变成了多少。你应该会发现`w`变成了很接近2.0的一个值,`b`变成了接近10.0的一个值。虽然并不是正好的2和10,但却是从数据当中学习出来的还不错的模型的参数,可以在未来的时候,用从这批数据当中学习到的参数来预估了。(如果你愿意,也可以通过让机器多学习一段时间,从而得到更加接近2.0和10.0的参数值。)",
"_____no_output_____"
]
],
[
[
"w_after_opt = linear.weight.numpy().item()\nb_after_opt = linear.bias.numpy().item()\n\nprint(\"w after optimize: {}\".format(w_after_opt))\nprint(\"b after optimize: {}\".format(b_after_opt))\n",
"w after optimize: 2.0178842544555664\nb after optimize: 9.771327018737793\n"
]
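,
[
"# Added sketch (not in the original tutorial): use the trained layer to estimate\n# the fare for a new 7 km trip; the true rule gives 2 * 7 + 10 = 24 as a check\nprint(linear(paddle.to_tensor([[7.0]])))",
"_____no_output_____"
]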
],
[
[
"## 九、hello paddle\n\n通过这个小示例,希望你已经初步了解了飞桨,能在接下来随着对飞桨的更多学习,来解决实际遇到的问题。",
"_____no_output_____"
]
],
[
[
"print(\"hello paddle\")",
"hello paddle\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a0fb6eaa76e2b746406be5bc0d2105b8d72639d
| 14,594 |
ipynb
|
Jupyter Notebook
|
Sklearn/SVR & SVC/.ipynb_checkpoints/2.5.5 SVC-checkpoint.ipynb
|
120180163/Sklearn-Library
|
96f3dbcac9797066ce4e8938fe39b7ce00b89596
|
[
"Apache-2.0"
] | 1 |
2021-08-28T11:03:28.000Z
|
2021-08-28T11:03:28.000Z
|
Sklearn/SVR & SVC/2.5.5 SVC.ipynb
|
120180163/Sklearn-Library
|
96f3dbcac9797066ce4e8938fe39b7ce00b89596
|
[
"Apache-2.0"
] | null | null | null |
Sklearn/SVR & SVC/2.5.5 SVC.ipynb
|
120180163/Sklearn-Library
|
96f3dbcac9797066ce4e8938fe39b7ce00b89596
|
[
"Apache-2.0"
] | null | null | null | 55.280303 | 4,540 | 0.802179 |
[
[
[
"from sklearn.datasets import load_breast_cancer\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"data = load_breast_cancer()\nX = data.data\ny = data.target\n",
"_____no_output_____"
],
[
"poly = PolynomialFeatures( degree = 3 , include_bias = False)\npoly.fit(X)\nx_poly = poly.transform(X)",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test = train_test_split(x_poly, y, test_size = 0.2)",
"_____no_output_____"
],
[
"# apply LR\n\nfrom sklearn.linear_model import LogisticRegression\nlogreg = LogisticRegression(penalty = 'l2')\nlogreg\nlogreg.fit(x_train , y_train)",
"/Users/khalednada/opt/anaconda3/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n"
],
[
"result= logreg.predict(x_test)\nprint('accuracy =',accuracy_score(y_test , result))",
"accuracy = 0.9473684210526315\n"
],
[
"cm = confusion_matrix(y_test , result)\nprint('confusion matrix \\n', cm)",
"confusion matrix \n [[43 3]\n [ 3 65]]\n"
],
[
"import seaborn as sns\nsns.heatmap(cm, center=True)\nplt.show()",
"_____no_output_____"
],
[
"# apply SVC\n\nfrom sklearn.svm import SVC\nclassifier = SVC(C = 0.9 , degree = 5 , tol = 0.0000001)\nclassifier\nclassifier.fit(x_train , y_train)",
"_____no_output_____"
],
[
"result= logreg.predict(x_test)\nprint('accuracy =',accuracy_score(y_test , result))",
"accuracy = 0.9473684210526315\n"
],
[
"cm = confusion_matrix(y_test , result)\nprint('confusion matrix \\n', cm)",
"confusion matrix \n [[43 3]\n [ 3 65]]\n"
],
[
"sns.heatmap(cm, center=True)\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0fc20571dad13afa788d01e2a7bbd68e51a24b
| 39,687 |
ipynb
|
Jupyter Notebook
|
test/ipynb/groovy/XChartingTest.ipynb
|
ssadedin/beakerx
|
34479b07d2dfdf1404692692f483faf0251632c3
|
[
"Apache-2.0"
] | 1,491 |
2017-03-30T03:05:05.000Z
|
2022-03-27T04:26:02.000Z
|
test/ipynb/groovy/XChartingTest.ipynb
|
ssadedin/beakerx
|
34479b07d2dfdf1404692692f483faf0251632c3
|
[
"Apache-2.0"
] | 3,268 |
2015-01-01T00:10:26.000Z
|
2017-05-05T18:59:41.000Z
|
test/ipynb/groovy/XChartingTest.ipynb
|
ssadedin/beakerx
|
34479b07d2dfdf1404692692f483faf0251632c3
|
[
"Apache-2.0"
] | 287 |
2017-04-03T01:30:06.000Z
|
2022-03-17T06:09:15.000Z
| 78.588119 | 21,072 | 0.665004 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a0fc23339d58e1d8f90eca8748f379bfbd90ad4
| 44,201 |
ipynb
|
Jupyter Notebook
|
LearningCurves.ipynb
|
fsqiu/TF-Test-Project
|
41131dcaf67426980b1389c94c5af19605f42ffc
|
[
"MIT"
] | null | null | null |
LearningCurves.ipynb
|
fsqiu/TF-Test-Project
|
41131dcaf67426980b1389c94c5af19605f42ffc
|
[
"MIT"
] | 1 |
2020-03-26T19:30:31.000Z
|
2020-03-26T19:30:31.000Z
|
LearningCurves.ipynb
|
fsqiu/TF-Test-Project
|
41131dcaf67426980b1389c94c5af19605f42ffc
|
[
"MIT"
] | null | null | null | 261.544379 | 15,036 | 0.92473 |
[
[
[
"#looking at the learning curves can help prevent overfitting the data",
"_____no_output_____"
],
[
"from sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split #used for splitting the data into multiple test groups\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import PolynomialFeatures",
"_____no_output_____"
],
[
"def plot_learning_curves(model,X, y):\n \"\"\"\n Plots a learning curve\n \"\"\"\n X_train, X_val, y_train, y_val = train_test_split(X,y,test_size=0.2)\n train_errors, val_errors = [],[]\n for m in range(1,len(X_train)):\n model.fit(X_train[:m],y_train[:m])\n y_train_predict = model.predict(X_train[:m])\n y_val_predict = model.predict(X_val)\n train_errors.append(mean_squared_error(y_train[:m],y_train_predict))\n val_errors.append(mean_squared_error(y_val,y_val_predict))\n \n plt.plot(np.sqrt(train_errors), \"r-+\", linewidth = 2, label = \"train\")\n plt.plot(np.sqrt(val_errors), \"b-\", linewidth = 3, label = \"val\")\n plt.axis([0, len(X_train), 0, 3])",
"_____no_output_____"
],
[
"#testing the plot of a learning curve\nnp.random.seed(50)\nX = 6*np.random.rand(100,1)- 3\ny = 0.5* X**2 + X + 2 + np.random.randn(100,1)",
"_____no_output_____"
],
[
"lin_reg = LinearRegression()\nplot_learning_curves(lin_reg,X,y)\n#the error rate that goes up very high and reaches a plateau\n#a validation curve that plateaus and ends very close to error curve\n#is a sign of underfitting",
"_____no_output_____"
],
[
"poly_reg = Pipeline([(\"poly_features\",PolynomialFeatures(degree=10,include_bias=False)),(\"lin_reg\",LinearRegression()),])\nplot_learning_curves(poly_reg,X,y)\n#error in training data is much lower\n#there is a gap in the curves\n#it means it is significantly better on the training data than the validation data\n#overfitting",
"_____no_output_____"
],
[
"poly_reg = Pipeline([(\"poly_features\",PolynomialFeatures(degree=2,include_bias=False)),(\"lin_reg\",LinearRegression()),])\nplot_learning_curves(poly_reg,X,y)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0fc5085dbd8fcd94b502df1df42efefe4988cd
| 13,539 |
ipynb
|
Jupyter Notebook
|
3_TNCTrips_Purpose.ipynb
|
sfcta/summ_notebooks_SB1
|
1a35ac013f2b96306d320318c4c090f353e503a6
|
[
"Apache-2.0"
] | null | null | null |
3_TNCTrips_Purpose.ipynb
|
sfcta/summ_notebooks_SB1
|
1a35ac013f2b96306d320318c4c090f353e503a6
|
[
"Apache-2.0"
] | null | null | null |
3_TNCTrips_Purpose.ipynb
|
sfcta/summ_notebooks_SB1
|
1a35ac013f2b96306d320318c4c090f353e503a6
|
[
"Apache-2.0"
] | null | null | null | 37.504155 | 149 | 0.524337 |
[
[
[
"%run ..\\notebooks\\Util_func.ipynb",
"_____no_output_____"
],
[
"# # Bay Area\n# REG = 'BayArea'\n# base_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\Processing_20200228\\2_tour_extract\\wt_wkday'\n# allwk_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\Processing_20200228\\2_tour_extract\\wt_7day'\n# raw_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\Processing_20200228\\spatial_join'\n\n# out_file = r'out\\%s_3_TNCTrips_Purpose.xlsx' %REG",
"_____no_output_____"
],
[
"# # SANDAG\n# REG = 'SANDAG'\n# base_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\SANDAG\\2_tour_extract\\wt_wkday'\n# allwk_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\SANDAG\\2_tour_extract\\wt_7day'\n# raw_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\SANDAG'\n\n# out_file = r'out2\\%s_3_TNCTrips_Purpose.xlsx' %REG",
"_____no_output_____"
],
[
"# SCAG\nREG = 'SCAG'\nbase_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\SCAG_dataset_2020-02-27\\2_tour_extract\\wt_wkday'\nallwk_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\SCAG_dataset_2020-02-27\\2_tour_extract\\wt_7day'\nraw_dir = r'Q:\\Data\\Surveys\\HouseholdSurveys\\MTC-SFCTA2018\\SCAG_dataset_2020-02-27'\n\nout_file = r'out2\\%s_3_TNCTrips_Purpose.xlsx' %REG",
"_____no_output_____"
],
[
"## Process person records\ntmp_df = pd.read_csv(join(raw_dir, 'ex_person_wZones.csv'))\nif REG=='SANDAG' or REG=='SCAG':\n tmp_df2 = pd.read_csv(join(raw_dir, 'ex2_person.tsv'), sep='\\t')\n tmp_df = tmp_df.merge(tmp_df2[['hh_id','person_id','raceeth_new_imputed']], how='left')\ntmp_df = tmp_df[['hh_id','person_id','person_num','raceeth_new_imputed','income_imputed','gender','age',\n 'wt_alladult_mon','wt_alladult_tue','wt_alladult_wed','wt_alladult_thu','wt_alladult_fri',\n 'wt_alladult_sat','wt_alladult_sun']]\ntmp_df['person_id'] = tmp_df['person_id'].round().astype('int64')\ntmp_df = tmp_df.rename(columns={'raceeth_new_imputed':'raceeth','income_imputed':'hinc'})\ntmp_df = tmp_df.rename(columns={'hh_id':'hhno','person_num':'pno'})\n\nper_df = pd.read_csv(join(base_dir,'survey2018_precx.dat'), sep=' ') \nper_df = per_df.merge(tmp_df, how='left')\n\n## Process trip records\nraw_trips = pd.read_csv(join(raw_dir, 'ex_trip_wZones.csv'))\nraw_trips = raw_trips[['hh_id','person_num','trip_num','mode_uber','mode_lyft','mode_type_imputed']]\nraw_trips = raw_trips.rename(columns={'hh_id':'hhno','person_num':'pno','trip_num':'tsvid'})\n\ntrip_df = pd.read_csv(join(base_dir,'survey2018_tripx.dat'), sep=' ')\n\nreq_percols = ['hhno','pno','raceeth','hinc','gender','age',\n 'wt_alladult_mon','wt_alladult_tue','wt_alladult_wed','wt_alladult_thu','wt_alladult_fri',\n 'wt_alladult_sat','wt_alladult_sun']\ndef prep_df(df):\n df = link_dt(df)\n \n df = df.loc[df['mode']==9, ]\n df = df.merge(raw_trips, how='left')\n df = df[df['mode_type_imputed']!=4] #remove taxi trips\n \n df['tnc_type'] = 3 # prem/other\n df.loc[(df['mode_uber']==1) | (df['mode_lyft']==1), 'tnc_type'] = 1 # pooled\n df.loc[(df['mode_uber']==2) | (df['mode_lyft']==2), 'tnc_type'] = 2 # regular\n \n df = df.merge(per_df[req_percols], \n how='left', on=['hhno','pno'])\n df.loc[df['gender']==997, 'gender'] = 5 #Other\n df.loc[df['gender']==999, 'gender'] = 6 #NoAnswer\n df.loc[df['gender'].isin([-9998, 995]), 'gender'] = 6 #Missing\n \n df['dephr'] = (df['deptm']/100).astype(int)\n df['count'] = 1\n df = df[(df['trexpfac']>0) & (df['mode']>0)]\n df = df[(df['otaz']>0) & (df['dtaz']>0)]\n return df\n\ntrip_df = prep_df(trip_df)",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\pandas\\core\\indexing.py:1745: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(ilocs[0], value)\n"
],
[
"col_dict = {\n 'dpurp': {\n 'desc': 'DPurp',\n 'col': 'dpurp',\n 'vals': range(0,8),\n 'labels': ['1_Home','2_Work','3_School','4_Escort','5_PersBus','6_Shop','7_Meal','8_SocRec']\n },\n 'raceeth': {\n 'desc': 'RaceEth',\n 'col': 'raceeth',\n 'vals': range(1,6),\n 'labels': ['1_Hispanic','2_Black','3_Asian/PI','4_White','5_Other']\n },\n 'hinc': {\n 'desc': 'HHInc',\n 'col': 'hinc',\n 'vals': range(1,9),\n 'labels': ['1_25K','2_25_50K','3_50_75K','4_75_100K','5_100_150K','6_150_200K','7_200_250K','8_250K']\n },\n 'age': {\n 'desc': 'Age',\n 'col': 'age',\n 'vals': range(4,11),\n 'labels': ['18-24','25-34','35-44','45-54','55-64','65-74','75+']\n },\n 'gender': {\n 'desc': 'Gend',\n 'col': 'gender',\n 'vals': range(1,7),\n 'labels': ['1_F','2_M','3_Trns','4_NBin','5_Oth','6_Miss']\n },\n 'tncmode': {\n 'desc': 'TNCMode',\n 'col': 'tnc_type',\n 'vals': range(1,4),\n 'labels': ['2_Pool','1_Reg','3_PremOth']\n },\n 'tod': {\n 'desc': 'TOD',\n 'col': 'dephr',\n 'vals': range(0,24),\n 'labels': ['10_0AM','11_1AM','12_2AM','13_3AM','14_4AM','15_5AM','16_6AM','17_7AM',\n '18_8AM','19_9AM','20_10AM','21_11AM','22_12AM','23_1PM','24_2PM','25_3PM',\n '26_4PM','27_5PM','28_6PM','29_7PM','30_8PM','31_9PM','32_10PM','33_11PM']\n },\n 'day': {\n 'desc': 'DOW',\n 'col': 'day',\n 'vals': range(1,8),\n 'labels': ['1_Mon','2_Tue','3_Wed','4_Thu','5_Fri','6_Sat','7_Sun']\n }\n}",
"_____no_output_____"
],
[
"fname = out_file\nwriter = pd.ExcelWriter(fname, engine='xlsxwriter')\nworkbook = writer.book\nformat1 = workbook.add_format({'num_format': '#,##0.0'})",
"_____no_output_____"
],
[
"from xlsxwriter.utility import xl_rowcol_to_cell\n\nrow = 0\nsname = 'Weekday'\nd1_dict = col_dict['dpurp']\n\ntitle = 'TNC Trips by ' + d1_dict['desc']\ntab = prep_data_1d(trip_df, d1_dict['desc'],d1_dict['col'], 'trexpfac', d1_dict['vals'], d1_dict['labels'])\nrow = write_to_excel(tab, sname, title, row)\n\ntitle = 'Column Shares by ' + d1_dict['desc']\nrow = write_to_excel(getSharesIdx(tab.copy()), sname, title, row)\n\ntab2 = tab.copy()\ntab2.iloc[-1,-1] = tab2.iloc[-1,0]\n\ntitle = 'Column Shares 95% CI by ' + d1_dict['desc']\nrow = write_to_excel(getSharesIdxCI95(tab.copy()), sname, title, row)\n\ntab_range = xl_rowcol_to_cell(row,1) + ':' + xl_rowcol_to_cell(row,tab.shape[1])\n_ = writer.sheets[sname].set_column(tab_range, 11, format1)",
"_____no_output_____"
],
[
"wt_cols = ['count', 'trexpfac']\nwt_desc = ['(Unweighted)', '(Weighted)']\n\nfor key in ['tod','raceeth', 'hinc', 'age', 'gender', 'tncmode']:\n d2_dict = col_dict[key]\n row = 0\n sname = d2_dict['desc']\n \n for wc, wd in zip(wt_cols, wt_desc):\n title = 'TNC Trips by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd\n tab, tab_fmt = prep_data_2d(trip_df,d1_dict['col'],d1_dict['vals'],d1_dict['labels'],\n d2_dict['col'],d2_dict['vals'],d2_dict['labels'],wc)\n row = write_to_excel(tab.astype('float64'), sname, title, row)\n\n if wc == 'count':\n tab2 = tab.copy()\n else:\n tab2.iloc[:-1,:-1] = tab.iloc[:-1,:-1]\n\n title = 'Column Shares by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd\n row = write_to_excel(getSharesIdx(tab.copy()), sname, title, row)\n\n title = 'Column Shares 95% CI by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd\n row = write_to_excel(getSharesIdxCI95(tab2.copy()), sname, title, row)\n tab_range = xl_rowcol_to_cell(row,1) + ':' + xl_rowcol_to_cell(row,tab.shape[1])\n _ = writer.sheets[sname].set_column(tab_range, 11, format1)",
"_____no_output_____"
],
[
"## Process all week trip records\ntrip_df = pd.read_csv(join(allwk_dir,'survey2018_tripx.dat'), sep=' ')\ntrip_df = prep_df(trip_df)\n\nDOW_LOOKUP = {1:'mon',2:'tue',3:'wed',4:'thu',5:'fri',6:'sat',7:'sun'}\ntrip_df['trexpfac'] = 0\nfor dow_num, dow in DOW_LOOKUP.items():\n trip_df.loc[trip_df['day']==dow_num, 'trexpfac'] = trip_df.loc[trip_df['day']==dow_num, 'wt_alladult_'+dow]\ntrip_df['trexpfac'] = trip_df['trexpfac'].fillna(0)",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\pandas\\core\\indexing.py:1745: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n isetter(ilocs[0], value)\n"
],
[
"row = 0\nsname = 'DOW'\nd2_dict = col_dict['day']",
"_____no_output_____"
],
[
"for wc, wd in zip(wt_cols, wt_desc):\n title = 'TNC Trips by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd\n tab, tab_fmt = prep_data_2d(trip_df,d1_dict['col'],d1_dict['vals'],d1_dict['labels'],\n d2_dict['col'],d2_dict['vals'],d2_dict['labels'],wc)\n row = write_to_excel(tab.astype('float64'), sname, title, row)\n \n if wc == 'count':\n tab2 = tab.copy()\n else:\n tab2.iloc[:-1,:-1] = tab.iloc[:-1,:-1]\n\n title = 'Column Shares by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd\n row = write_to_excel(getSharesIdx(tab.copy()), sname, title, row)\n\n title = 'Column Shares 95% CI by ' + d2_dict['desc'] + ' and ' + d1_dict['desc'] + ' ' + wd\n row = write_to_excel(getSharesIdxCI95(tab2.copy()), sname, title, row)\ntab_range = xl_rowcol_to_cell(row,1) + ':' + xl_rowcol_to_cell(row,tab.shape[1])\n_ = writer.sheets[sname].set_column(tab_range, 11, format1)",
"_____no_output_____"
],
[
"writer.save()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0fddd33f7f2294e961a89f62fad5e34f2e51fd
| 942,184 |
ipynb
|
Jupyter Notebook
|
300.ipynb
|
liujuncn/jointquant_data
|
04b089442032efa00110345ae60331480a08453f
|
[
"MIT"
] | 7 |
2019-07-01T17:21:04.000Z
|
2021-01-03T07:57:03.000Z
|
300.ipynb
|
liujuncn/jointquant_data
|
04b089442032efa00110345ae60331480a08453f
|
[
"MIT"
] | null | null | null |
300.ipynb
|
liujuncn/jointquant_data
|
04b089442032efa00110345ae60331480a08453f
|
[
"MIT"
] | 1 |
2019-07-01T17:21:34.000Z
|
2019-07-01T17:21:34.000Z
| 1,064.614689 | 207,940 | 0.952658 |
[
[
[
"far = analyze_factor(factor=MlFactor,\n start_date='2012-01-01',\n end_date='2019-07-01',\n weight_method='mktcap',\n universe='000300.XSHG',\n industry='jq_l1',\n quantiles=10,\n periods=(5, 10)\n )",
"_____no_output_____"
],
[
"far.plot_returns_table(demeaned=False, group_adjust=False)",
"收益分析\n"
],
[
"far.plot_quantile_returns_bar(by_group=False, demeaned=False, group_adjust=False)",
"_____no_output_____"
],
[
"far.plot_cumulative_returns_by_quantile(period=(3, 5), demeaned=False, group_adjust=False)",
"_____no_output_____"
],
[
"far.plot_information_table(group_adjust=False, method='rank')",
"IC 分析\n"
],
[
"far.plot_ic_by_group(group_adjust=False, method='rank')",
"_____no_output_____"
],
[
"far.plot_ic_hist(group_adjust=False, method='rank')",
"_____no_output_____"
],
[
"far.plot_monthly_ic_heatmap(group_adjust=False)",
"_____no_output_____"
],
[
"far.plot_turnover_table()",
"换手率分析\n"
],
[
"far.plot_quantile_average_cumulative_return(periods_before=5, periods_after=30, \n by_quantile=False, std_bar=False, demeaned=False, group_adjust=False)",
"_____no_output_____"
],
[
"far.plot_returns_table(demeaned=True, group_adjust=True)",
"收益分析\n"
],
[
"far.plot_quantile_returns_bar(by_group=False, demeaned=True, group_adjust=True)",
"_____no_output_____"
],
[
"far.plot_cumulative_returns_by_quantile(period=(3, 5), demeaned=True, group_adjust=True)",
"_____no_output_____"
],
[
"far.plot_information_table(group_adjust=True, method='rank')",
"IC 分析\n"
],
[
"far.plot_ic_by_group(group_adjust=True, method='rank')",
"_____no_output_____"
],
[
"far.plot_quantile_average_cumulative_return(periods_before=5, periods_after=30, \n by_quantile=False, std_bar=False, demeaned=True, group_adjust=True)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a0fe3a6339e5f620787242b6736effef294d5d6
| 82,677 |
ipynb
|
Jupyter Notebook
|
src/01_intro/round_off_error.ipynb
|
joaomh/numerical-methods
|
8e8743d3a4e1f1f2691c8fc6ded3e09c44f16b04
|
[
"MIT"
] | 1 |
2020-08-21T01:23:40.000Z
|
2020-08-21T01:23:40.000Z
|
src/01_intro/round_off_error.ipynb
|
joaomh/numerical-methods
|
8e8743d3a4e1f1f2691c8fc6ded3e09c44f16b04
|
[
"MIT"
] | null | null | null |
src/01_intro/round_off_error.ipynb
|
joaomh/numerical-methods
|
8e8743d3a4e1f1f2691c8fc6ded3e09c44f16b04
|
[
"MIT"
] | null | null | null | 180.124183 | 23,468 | 0.904992 |
[
[
[
"# Accumulation of roundoof error\nIn this notebook we'll study some effects of accumulation of roundoof error.\n",
"_____no_output_____"
],
[
"# Unstable Algorithms\n\nWe need to solve this integral for $n=1,2,....8$\n\n$$y_n=\\int_0^1\\frac{x^n}{x+5}$$\n\nWe write the equation like this:\n\n$$y_n = \\frac{1}{n} - 5y_{n-1}$$\n$$y_{1}=1-5(y_{0}+\\epsilon )=1-5y_{0}-5\\epsilon$$\n$$y_{2}={\\frac {1}{2}}-5(1-5y_{0}-5\\epsilon )={\\frac {1}{2}}-5+25y_{0}+5^{2}\\epsilon$$\n$$\\vdots$$\n$$y_{n}=\\ldots +5^{n}\\epsilon$$\n\nThe roundoff error is amplified ,$\\mathcal{O}(5^n)$, in succeeding calculations so this algorithm is unstable. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport scipy as sc\nimport matplotlib.pyplot as plt\nimport sys ",
"_____no_output_____"
],
[
"def function(y0, n):\n y_sol = np.zeros(n)\n y_sol[0] = y0\n for i in range (1,n):\n y_sol[i] = 1/n - 5*y_sol[i-1]\n return y_sol",
"_____no_output_____"
],
[
"n = 8\nx = np.linspace(-1,1,8)\ny0 = 0\ny = function(y0, n)\nplt.plot(x,y)\n# The value of 'y' goes to infinity",
"_____no_output_____"
]
],
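[
[
"# A hedged addition (not in the original notebook): running the same recurrence\n# backwards, y_{n-1} = (1/n - y_n)/5, shrinks any error in the starting value by\n# a factor of 5 per step, so the backward algorithm is stable.\ndef backward(n_target, n_start=20):\n    y = 0.0  # crude guess for y_{n_start}; its error decays like 5**-(n_start - n_target)\n    for i in range(n_start, n_target, -1):\n        y = (1/i - y) / 5\n    return y\n\nprint(backward(8))",
"_____no_output_____"
]
],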
[
[
"# Conditioned problems\nEven if a stable algorithm is used, the solution to a problem is still inaccurate due to the accumulation of roundoff error when the problem itself is ill-conditioned. \n## Dangers of Higher-Order Polynomial Interpolation\nIn 1901, Carl Runge published a study on the dangers of higher-\norder polynomial interpolation. He looked at the following simple-looking function:\n\n$$f(x) = \\frac{1}{1+25x^2}$$\n\nwhich is now called Runge’s function",
"_____no_output_____"
]
],
[
[
"x = np.linspace(-1,1,10)\ny = 1/(1 + 25*x**2)\n\nxx = np.linspace(-1,1,100)\np = np.polyfit(x,y,4)\ny4 = np.polyval(p,xx)\n\nyr = 1/(1 + 25*xx**2)\n\nplt.plot(x,y,'o')\nplt.plot(xx,y4)\nplt.plot(xx,yr,'--')\nplt.legend(['','Polynomial fit','Runge function'])\n# The polynomial does a poor job of following Runge’s function",
"_____no_output_____"
],
[
"# Continuing with the analysis, \n# the 20th-order polynomial can be generated and plotted\nx = np.linspace(-1,1,10)\ny = 1/(1 + 25*x**2)\n\nxx = np.linspace(-1,1,100)\np = np.polyfit(x,y,20)\ny4 = np.polyval(p,xx)\n\nyr = 1/(1+25*xx**2)\n\nplt.plot(x,y,'o')\nplt.plot(xx,y4)\nplt.plot(xx,yr,'--')\nplt.legend(['','Polynomial fit','Runge function'])\n# The polynomial does a poor job of following Runge’s function",
"/home/joaomh/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3343: RankWarning: Polyfit may be poorly conditioned\n exec(code_obj, self.user_global_ns, self.user_ns)\n"
]
],
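[
[
"# Hedged aside (not part of Runge's original study): sampling at Chebyshev\n# nodes instead of equally spaced points is the classical way to tame the\n# oscillations of high-degree interpolation.\nn = 21\nx = np.cos((2*np.arange(n) + 1) / (2*n) * np.pi)  # Chebyshev nodes on [-1, 1]\ny = 1/(1 + 25*x**2)\n\nxx = np.linspace(-1, 1, 100)\np = np.polyfit(x, y, n - 1)\n\nplt.plot(x, y, 'o')\nplt.plot(xx, np.polyval(p, xx))\nplt.plot(xx, 1/(1 + 25*xx**2), '--')\nplt.legend(['Chebyshev samples', 'Degree-20 fit', 'Runge function'])",
"_____no_output_____"
]
],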
[
[
"Although there may be certain contexts where higher-order polynomials are neces-\nsary, they are usually to be avoided. In most engineering and scientific contexts, lower-\norder polynomials of the type described in this chapter can be used effectively to capture\nthe curving trends of data without suffering from oscillations",
"_____no_output_____"
],
[
"[Real world example: Patriot missile failure due to magnification of roundoff error](https://en.wikipedia.org/wiki/Round-off_error)",
"_____no_output_____"
],
[
"# Error Estimates for Iterative Methods\nThe approximation of $e$ using Maclaurin series expansion\n\n$$e^x = 1+ x+ \\frac{x^2}{2!}+\\frac{x^3}{3!}+\\frac{x^4}{4!} ... \\frac{x^n}{n!}$$",
"_____no_output_____"
]
],
[
[
"def maclaurin(x, esp, max_int):\n \"\"\"\n Maclaurin series of exponential function\n input:\n x = value at which series evaluated\n esp = stopping criterion (default = 0.0001)\n max_int = maximum iterations (default = 50)\n output:\n fx = estimated value\n ea = approximate relative error (%)\n iter = number of iterations\n \"\"\"\n iter = 1\n sol = 1\n ea = 100\n while sol:\n sol_old = sol\n sol = sol + x**iter / np.math.factorial(iter)\n iter += 1\n if sol != 0:\n ea = np.abs((sol - sol_old)/sol)*100\n if ea <= esp and iter>= max_int:\n break\n fx = sol\n return fx, ea, iter",
"_____no_output_____"
],
[
"maclaurin(1,1e-6,100)",
"_____no_output_____"
],
[
"e, a, inte = maclaurin(1,1e-6,100)\n# np.exp(1) return the true value of the number 'e'\n# At least if a better approximation than our method\nprint('The error is: '+ str(np.exp(1) - e))",
"The error is: -4.440892098500626e-16\n"
],
[
"print(\"The epsilon funciton build in python is: \"+str(sys.float_info.epsilon))",
"The epsilon funciton build in python is: 2.220446049250313e-16\n"
]
],
[
[
"The 52 bits used for the mantissa correspond to about 15 to 16 base-10 digits, so in our programming language the machine epsilvol is $10^{-16}$",
"_____no_output_____"
],
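[
"For instance (a hedged illustration, not part of the original text), adding anything at or below machine epsilon to 1.0 is lost entirely:\n\n```python\nprint(1.0 + 1e-16 == 1.0)  # True: 1e-16 is below the float spacing near 1.0\nprint(1.0 + 3e-16 == 1.0)  # False: 3e-16 exceeds machine epsilon\n```",
"_____no_output_____"
],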
[
"Remember that?\n$$lim_{n\\to\\infty}(1 + \\frac{1}{n})^n = e = 2.718281828...$$\n\nLet's use the power of python to calculate",
"_____no_output_____"
]
],
[
[
"def euler(n):\n return (1 + 1/n)**n\neuler(10000)",
"_____no_output_____"
],
[
"# We can write 10^16 like 10E16 or 10e16\n# What just happen?\neuler(10e16)",
"_____no_output_____"
]
],
[
[
"When $n$ becames bigger than $10^{15}$ our functions stop to increase and start to oscillating",
"_____no_output_____"
]
],
[
[
"x = np.linspace(1,1e16,100)\ny = euler(x)\ny2 = np.exp(1)\nplt.xscale('log')\nplt.axhline(y=y2, color='r', linestyle='--')\nplt.plot(x,y)\nplt.title(\"euler function in lin-log scale\")\nplt.legend([\"Real Value of Euler Number\", \"f(n) \"])",
"_____no_output_____"
]
]
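,
[
[
"# Hedged sketch (not in the original notebook): n*log1p(1/n) evaluates\n# log((1 + 1/n)**n) without ever forming 1 + 1/n, so the limit can be\n# computed accurately even for huge n.\ndef euler_stable(n):\n    return np.exp(n * np.log1p(1/n))\n\nprint(euler_stable(10e16), np.exp(1))",
"_____no_output_____"
]
]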
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a0ff93c48940cc48f16d399f1ef3ca6835b2e09
| 28,424 |
ipynb
|
Jupyter Notebook
|
assignments/assignment12/FittingModelsEx01.ipynb
|
sraejones/phys202-2015-work
|
988db3174fed541a85564b7aebeb618c3ad46075
|
[
"MIT"
] | null | null | null |
assignments/assignment12/FittingModelsEx01.ipynb
|
sraejones/phys202-2015-work
|
988db3174fed541a85564b7aebeb618c3ad46075
|
[
"MIT"
] | null | null | null |
assignments/assignment12/FittingModelsEx01.ipynb
|
sraejones/phys202-2015-work
|
988db3174fed541a85564b7aebeb618c3ad46075
|
[
"MIT"
] | null | null | null | 104.5 | 13,570 | 0.868878 |
[
[
[
"# Fitting Models Exercise 1",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt",
"_____no_output_____"
]
],
[
[
"## Fitting a quadratic curve",
"_____no_output_____"
],
[
"For this problem we are going to work with the following model:\n\n$$ y_{model}(x) = a x^2 + b x + c $$\n\nThe true values of the model parameters are as follows:",
"_____no_output_____"
]
],
[
[
"a_true = 0.5\nb_true = 2.0\nc_true = -4.0",
"_____no_output_____"
]
],
[
[
"First, generate a dataset using this model using these parameters and the following characteristics:\n\n* For your $x$ data use 30 uniformly spaced points between $[-5,5]$.\n* Add a noise term to the $y$ value at each point that is drawn from a normal distribution with zero mean and standard deviation 2.0. Make sure you add a different random number to each point (see the `size` argument of `np.random.normal`).\n\nAfter you generate the data, make a plot of the raw data (use points).",
"_____no_output_____"
]
],
[
[
"# YOUR CODE HERE\n# raise NotImplementedError()\nx = np.linspace(-5, 5, 30)\ny = (x**2)*a_true + x*b_true + c_true + np.random.normal(0.0, 2.0, size=30)\nplt.scatter(x, y, color = 'r')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.box(False)\nplt.title('Rendomized quadratic curve')",
"_____no_output_____"
],
[
"assert True # leave this cell for grading the raw data generation and plot",
"_____no_output_____"
]
],
[
[
"Now fit the model to the dataset to recover estimates for the model's parameters:\n\n* Print out the estimates and uncertainties of each parameter.\n* Plot the raw data and best fit of the model.",
"_____no_output_____"
]
],
[
[
"# YOUR CODE HERE\n# raise NotImplementedError()\ndef model(x, b, m):\n return m*x+b",
"_____no_output_____"
],
[
"theta_best, theta_cov = opt.curve_fit(model, x, y)",
"_____no_output_____"
],
[
"xfit = np.linspace(-5.0,5.0)\nyfit = theta_best[1]*xfit + theta_best[0]\nplt.plot(xfit, yfit, color ='k')\nplt.scatter(x, y, color = 'r')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.box(False)\nplt.title('Rendomized quadratic curve with best fit line')\nplt.legend('fd')",
"_____no_output_____"
],
[
"assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a0ffb64d714519a26594f0c06cd5d76ec5fc40d
| 558,909 |
ipynb
|
Jupyter Notebook
|
docs/_build/html/_sources/datainspection.ipynb
|
Guillermo-Hidalgo-Gadea/Seminar-TrackingAnimalBehavior
|
1c82edfef0409890a593fc34617dc8c50f0c8459
|
[
"MIT"
] | null | null | null |
docs/_build/html/_sources/datainspection.ipynb
|
Guillermo-Hidalgo-Gadea/Seminar-TrackingAnimalBehavior
|
1c82edfef0409890a593fc34617dc8c50f0c8459
|
[
"MIT"
] | null | null | null |
docs/_build/html/_sources/datainspection.ipynb
|
Guillermo-Hidalgo-Gadea/Seminar-TrackingAnimalBehavior
|
1c82edfef0409890a593fc34617dc8c50f0c8459
|
[
"MIT"
] | null | null | null | 176.479002 | 284,482 | 0.831872 |
[
[
[
"# What does data look like",
"_____no_output_____"
],
[
"## What libraries should I import?",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n",
"_____no_output_____"
]
],
[
[
"## How to read data?\nDummy data for the following exercises is provided [here](https://ruhr-uni-bochum.sciebo.de/s/Svwxncw01Ir9uxw).",
"_____no_output_____"
]
],
[
[
"file = '/Users/guillermo/Downloads/pose-3d.csv'\n",
"_____no_output_____"
],
[
"data = pd.read_csv(file, header=0)\n",
"_____no_output_____"
]
],
[
[
"## How is my data structured?",
"_____no_output_____"
]
],
[
[
"data.info()\n",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 450 entries, 0 to 449\nColumns: 139 entries, lefteye1_x to fnum\ndtypes: float64(138), int64(1)\nmemory usage: 488.8 KB\n"
],
[
"np.shape(data)\n",
"_____no_output_____"
],
[
"data\n",
"_____no_output_____"
]
],
[
[
"### Cleaning data",
"_____no_output_____"
]
],
[
[
"coords = data.loc[:, ~data.columns.str.contains(\n 'score|error|ncams|fnum|center|M_')]\n",
"_____no_output_____"
],
[
"scores = data.loc[:, data.columns.str.contains('score')]\n",
"_____no_output_____"
]
],
[
[
"### Changing the data structure",
"_____no_output_____"
]
],
[
[
"# Let us transform the data to be centered around a reference point\ncentered_coords = coords.copy()\nfor i in range(centered_coords.shape[1]):\n if '_x' in centered_coords.columns[i]:\n centered_coords.loc[:, centered_coords.columns[i]] = centered_coords.loc[:,\n centered_coords.columns[i]].subtract(coords.loc[:, \"nose1_x\"].values)\n elif '_y' in centered_coords.columns[i]:\n centered_coords.loc[:, centered_coords.columns[i]] = centered_coords.loc[:,\n centered_coords.columns[i]].subtract(coords.loc[:, \"nose1_y\"].values)\n elif '_z' in centered_coords.columns[i]:\n centered_coords.loc[:, centered_coords.columns[i]] = centered_coords.loc[:,\n centered_coords.columns[i]].subtract(coords.loc[:, \"nose1_z\"].values)\n else:\n pass\n",
"_____no_output_____"
],
[
"centered_coords\n",
"_____no_output_____"
],
[
"# What is the difference between pandas Data Frame and numpy Array?\ncoords_egocentric = centered_coords.to_numpy()\ncoords_egocentric\n",
"_____no_output_____"
]
],
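[
[
"# A hedged alternative (assuming the same column layout as above): the\n# per-axis centering loop can be vectorized by subtracting the nose\n# coordinate from every column of the matching axis at once.\ncentered_vec = coords.copy()\nfor axis in ['x', 'y', 'z']:\n    cols = centered_vec.columns[centered_vec.columns.str.endswith('_' + axis)]\n    centered_vec[cols] = centered_vec[cols].sub(coords['nose1_' + axis], axis=0)",
"_____no_output_____"
]
],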
[
[
"## Reading DeepLabCut Data\nNote that DeepLabCut files contain multiple headers",
"_____no_output_____"
]
],
[
[
"# .h5 vs csv with multiple headings\nfile = '/Users/guillermo/Downloads/DLC_data.csv'\ndata = pd.read_csv(file, header=0)\ndata\n",
"/Users/guillermo/opt/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3444: DtypeWarning: Columns (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18) have mixed types.Specify dtype option on import or set low_memory=False.\n exec(code_obj, self.user_global_ns, self.user_ns)\n"
]
],
[
[
"You can specify multiple headers in `pd.read_csv(file, header=[0,1,2])`, but your data frame will be a little more difficult to subset, as columns will be a MultiIndex array.",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv(file, header=[0, 1, 2])\ndata\n",
"_____no_output_____"
],
[
"data.columns\n",
"_____no_output_____"
],
[
"data.columns.get_level_values(1)\n",
"_____no_output_____"
],
[
"data.columns.get_level_values(2)\n",
"_____no_output_____"
]
],
[
[
"Better rename the columns of your data frame to avoid MultiIndex",
"_____no_output_____"
]
],
[
[
"data.columns.get_level_values(1) + '_' + data.columns.get_level_values(1)\n",
"_____no_output_____"
],
[
"new_col_names = list(data.columns.get_level_values(\n 1) + '_' + data.columns.get_level_values(2))\ndata.columns = new_col_names\ndata\n",
"_____no_output_____"
]
],
[
[
"## What does my data tell me?",
"_____no_output_____"
]
],
[
[
"# Does this make sense?\ncoords.mean(axis='columns')\n",
"_____no_output_____"
],
[
"# What about this?\ncoords.mean(axis='index')\n",
"_____no_output_____"
],
[
"coords['lefteye1_x'].mean()\n",
"_____no_output_____"
],
[
"coords.describe()\n",
"_____no_output_____"
]
],
[
[
"## How could my data look like",
"_____no_output_____"
]
],
[
[
"scores.hist(figsize=(20, 20))\n",
"_____no_output_____"
],
[
"scores.boxplot(column=['chin_score', 'lefteye1_score'], figsize=(10, 10))\n",
"_____no_output_____"
],
[
"x_coords = coords.loc[:, coords.columns.str.contains('_x')]\ny_coords = coords.loc[:, coords.columns.str.contains('_y')]\nz_coords = coords.loc[:, coords.columns.str.contains('_z')]\n\nt = 0\n\nfig = plt.figure(figsize=(6, 4), dpi=100)\nax = fig.add_subplot(projection='3d')\nx_points = x_coords[t:t+1]\ny_points = y_coords[t:t+1]\nz_points = z_coords[t:t+1]\n\nax.scatter3D(x_points, y_points, z_points)\nax.view_init(11, 280)\nax.set(xlabel='X axis', ylabel='Y axis', zlabel='Z axis')\n\nplt.title(\"My First Plot\")\n",
"_____no_output_____"
]
],
[
[
"In the following section we will learn to calculate some easy kinematic features to better understand our data.",
"_____no_output_____"
],
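[
"As a hedged preview (assuming the `coords` frame from above and a hypothetical frame rate `fps`), one simple kinematic feature is the frame-to-frame speed of a landmark:\n\n```python\nfps = 30  # assumed frame rate, not given in the original data\ndxyz = coords[['nose1_x', 'nose1_y', 'nose1_z']].diff()\nspeed = np.sqrt((dxyz**2).sum(axis=1)) * fps  # displacement per second\n```",
"_____no_output_____"
],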
[
"## Bonus",
"_____no_output_____"
]
],
[
[
"def face_skeleton(pose):\n \"\"\"\n The face_skeleton function defines a mesh skeleton by connecting the facial landmarks as defined below.\n This function is directly passed to plot_3Dpose. \n \"\"\"\n skeletons = []\n for n in range(len(pose)): # read out n_components from different poses\n\n lefteye = [pose[n]['lefteye1_x'], pose[n]['lefteye2_x']], [\n pose[n]['lefteye1_y'], pose[n]['lefteye2_y']], [pose[n]['lefteye1_z'], pose[n]['lefteye2_z']]\n righteye = [pose[n]['righteye1_x'], pose[n]['righteye2_x']], [\n pose[n]['righteye1_y'], pose[n]['righteye2_y']], [pose[n]['righteye1_z'], pose[n]['righteye2_z']]\n leyebrow = [pose[n]['leyebrow1_x'], pose[n]['leyebrow2_x'], pose[n]['leyebrow3_x']], [pose[n]['leyebrow1_y'], pose[n]\n ['leyebrow2_y'], pose[n]['leyebrow3_y']], [pose[n]['leyebrow1_z'], pose[n]['leyebrow2_z'], pose[n]['leyebrow3_z']]\n reyebrow = [pose[n]['reyebrow1_x'], pose[n]['reyebrow2_x'], pose[n]['reyebrow3_x']], [pose[n]['reyebrow1_y'], pose[n]\n ['reyebrow2_y'], pose[n]['reyebrow3_y']], [pose[n]['reyebrow1_z'], pose[n]['reyebrow2_z'], pose[n]['reyebrow3_z']]\n nose = [pose[n]['nose1_x'], pose[n]['nose3_x'], pose[n]['nose2_x'], pose[n]['nose4_x'], pose[n]['nose1_x']], [pose[n]['nose1_y'], pose[n]['nose3_y'], pose[n]\n ['nose2_y'], pose[n]['nose4_y'], pose[n]['nose1_y']], [pose[n]['nose1_z'], pose[n]['nose3_z'], pose[n]['nose2_z'], pose[n]['nose4_z'], pose[n]['nose1_z']]\n lips = [pose[n]['uplip_x'], pose[n]['llip_x'], pose[n]['lowlip_x'], pose[n]['rlip_x'], pose[n]['uplip_x']], [pose[n]['uplip_y'], pose[n]['llip_y'], pose[n]\n ['lowlip_y'], pose[n]['rlip_y'], pose[n]['uplip_y']], [pose[n]['uplip_z'], pose[n]['llip_z'], pose[n]['lowlip_z'], pose[n]['rlip_z'], pose[n]['uplip_z']]\n face = [pose[n]['rear_x'], pose[n]['chin_x'], pose[n]['lear_x']], [pose[n]['rear_y'], pose[n]\n ['chin_y'], pose[n]['lear_y']], [pose[n]['rear_z'], pose[n]['chin_z'], pose[n]['lear_z']]\n\n skeleton = lefteye, righteye, leyebrow, reyebrow, nose, lips, face\n skeletons.append(skeleton)\n\n return skeletons\n\n\ndef plot_3Dpose(pose, elevation, azimuth):\n \"\"\"\n This plot function takes the average pose coordinates of facial landmarks, creates a skeleton and visualizes the facial expression\n in a 3D coordinate system with predefined elevantion and azimuth angles.\n \"\"\"\n skeletons = face_skeleton(pose)\n\n ncols = 3\n nrows = math.ceil(len(pose)/ncols)\n width = ncols*6\n height = nrows * 5\n\n fig, axes = plt.subplots(nrows, ncols, figsize=(\n width, height), subplot_kw=dict(projection='3d'))\n\n for ax, n in zip(axes.flat, range(len(pose))):\n x_points = pose[n][['_x' in s for s in pose[n].index]]\n y_points = pose[n][['_y' in s for s in pose[n].index]]\n z_points = pose[n][['_z' in s for s in pose[n].index]]\n ax.scatter3D(x_points, y_points, z_points)\n ax.view_init(elevation, azimuth)\n ax.set(xlabel='X axis', ylabel='Y axis', zlabel='Z axis')\n ax.set_title('Predicted Pose: %d' % (n+1))\n for i in range(len(skeletons[0])):\n x = skeletons[n][i][0]\n y = skeletons[n][i][1]\n z = skeletons[n][i][2]\n ax.plot(x, y, z, color='g')\n\n plt.suptitle(\n 'Hidden Markov Model predictions with N = %d Components' % len(pose))\n plt.show()\n return\n\n\ndef split_data(data, prediction):\n \"\"\"\n The split_data function will be used to split time series data into smaller \n chunks by the prediction variable.\n\n \"\"\"\n n = max(prediction)+1 # read out the number of predicted components\n data['pred'] = prediction\n grouped = data.groupby(data.pred)\n predictions = [grouped.get_group(i) for i in 
range(n)]\n pose = [predictions[i].mean() for i in range(n)]\n\n return predictions, pose\n",
"_____no_output_____"
],
[
"from hmmlearn import hmm\nimport math\n# change the number of components you expect to find in your data\nmodel1 = hmm.GaussianHMM(n_components=9, covariance_type=\"full\")\nmodel1.fit(coords)\npred1 = model1.predict(coords)\n",
"_____no_output_____"
],
[
"_, pose1 = split_data(centered_coords, pred1)\n",
"_____no_output_____"
],
[
"plot_3Dpose(pose1, 11, 280)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a101a4997aff87440f87ea4cc648ae09d2a6f78
| 6,104 |
ipynb
|
Jupyter Notebook
|
my_data_processing/bagofwords.ipynb
|
smurf-1119/mlapp-cml-
|
9e1ea11c7e9686e7cd295ea0e5aa047a0a8579bb
|
[
"MIT"
] | null | null | null |
my_data_processing/bagofwords.ipynb
|
smurf-1119/mlapp-cml-
|
9e1ea11c7e9686e7cd295ea0e5aa047a0a8579bb
|
[
"MIT"
] | null | null | null |
my_data_processing/bagofwords.ipynb
|
smurf-1119/mlapp-cml-
|
9e1ea11c7e9686e7cd295ea0e5aa047a0a8579bb
|
[
"MIT"
] | null | null | null | 27.495495 | 135 | 0.510976 |
[
[
[
"import pandas as pd\nimport re\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport numpy as np\nimport collections",
"_____no_output_____"
]
],
[
[
"**Load the data and split them**",
"_____no_output_____"
]
],
[
[
"oush_df = pd.read_csv('./reuters_10class.csv')\nprint(len(oush_df))\n# reuters_df = pd.read_csv('./reuters1000.csv')\n# reuters_df.to_csv('./reuters1000.csv',index=False)\n# print('Finish')\ntrain_size = int(0.7 * len(oush_df))\noush_Y_train = oush_df.iloc[:train_size,1:].values\noush_Y_test = oush_df.iloc[train_size:,1:].values\n# reuters_Y_train = reuters_df.iloc[:train_size,1:5].values\n# reuters_Y_test = reuters_df.iloc[train_size:,1:5].values\n# print(reuters_Y_train.shape)",
"16876\n"
],
[
"def cleanse(text):\n text = text.lower()\n filters = ['\"','#','$','%','&','\\(','\\)','\\*','\\+',',','-','\\.','/',':',';','<','=','>','\\?','@','\\[','\\\\','\\]'\n ,'^','_',' ','\\{','\\|','\\}','~','\\t','\\n','\\x97','\\x96']\n text = re.sub(\"<.*?>\",' ',text)\n text = re.sub(\"|\".join(filters),\" \",text)\n return text",
"_____no_output_____"
],
[
"def encoder(dataset,max_feature):\n content = dataset.iloc[:,0].values.tolist()\n freq=pd.Series(' '.join(content).split()).value_counts()\n total_size = len(content)\n new_contents = []\n document = []\n from nltk.stem import PorterStemmer\n st = PorterStemmer()\n for text in content:\n new_text = cleanse(text)\n split_content = word_tokenize(new_text)\n words = [word for word in split_content if word not in stopwords.words('english')]\n words = [WordNetLemmatizer().lemmatize(w) for w in words]\n # words = [st.stem(w) for w in words]\n words = [w for w in words if w not in freq[:10]]\n \n new_contents.append([(' '.join(words))])\n\n # counter = collections.Counter([tk for st in new_contents for tk in st[0].split()])\n # counter = list(dict(filter(lambda x: x[1] >= 1, counter.items())).keys())\n # contents = []\n\n # for text in new_contents:\n # words = [w for w in text[0].split() if w in counter]\n # if len(words):\n # contents.append([' '.join(words)])\n # new_contents = contents\n\n # features = len(counter)\n documents = [new_content[0] for new_content in new_contents]\n vectorize = CountVectorizer(max_features=400)\n vectorize.fit(documents)\n features = len(vectorize.vocabulary_)\n X_train = np.ones(shape=[train_size,features],dtype=np.int64)\n X_test = np.ones(shape=[len(new_contents)-train_size,features])\n for idx,new_content in enumerate(new_contents):\n word_vector = vectorize.transform(new_content).toarray()\n if idx >= train_size: \n X_test[idx-train_size] = word_vector\n else:\n X_train[idx] = word_vector\n return X_train,X_test\n\nmax_feature = 400\noush_X_train,oush_X_test = encoder(oush_df,max_feature)\nprint(oush_X_train.shape,oush_Y_test.shape)\n# reuters_X_train,reuters_X_test = encoder(reuters_df,max_feature)",
"(11813, 400) (5063, 9)\n"
],
[
"oush_X_test.shape",
"_____no_output_____"
],
[
"np.save('./traindataReuters_10class.npy',oush_X_train)\nnp.save('./trainlabelReuters_10class.npy',oush_Y_train) \nnp.save('./testdataReuters_10class.npy',oush_X_test)\nnp.save('./testlabelReuters_10class.npy',oush_Y_test)\nprint('Finish')",
"Finish\n"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a101c01af0d4ee6e119ca3aa4f03376f80c945e
| 8,860 |
ipynb
|
Jupyter Notebook
|
or_suite/envs/inventory_control_multiple_suppliers/inventory_control_readme.ipynb
|
JasmineSamadi/ORSuite
|
e2b2b0a5b497ea6566e794dcef1f176081fca4ce
|
[
"MIT"
] | 4 |
2021-12-01T10:56:17.000Z
|
2022-02-06T17:07:43.000Z
|
or_suite/envs/inventory_control_multiple_suppliers/inventory_control_readme.ipynb
|
JasmineSamadi/ORSuite
|
e2b2b0a5b497ea6566e794dcef1f176081fca4ce
|
[
"MIT"
] | 2 |
2021-08-11T13:25:01.000Z
|
2022-03-20T19:23:23.000Z
|
or_suite/envs/inventory_control_multiple_suppliers/inventory_control_readme.ipynb
|
JasmineSamadi/ORSuite
|
e2b2b0a5b497ea6566e794dcef1f176081fca4ce
|
[
"MIT"
] | 2 |
2021-07-27T02:39:37.000Z
|
2022-02-14T21:03:15.000Z
| 94.255319 | 1,413 | 0.732957 |
[
[
[
"# Inventory Control with Lead Times and Multiple Suppliers",
"_____no_output_____"
],
[
"## Description\n\nOne potential application of reinforcement learning involves ordering supplies with mutliple suppliers having various lead times and costs in order to meet a changing demand. Lead time in inventory management is the lapse in time between when an order is placed to replenish inventory and when the order is received. This affects the amount of stock a supplier needs to hold at any point in time. Moreover, due to having multiple suppliers, at every stage the supplier is faced with a decision on how much to order from each supplier, noting that more costly suppliers might have to be used to replenish the inventory from a shorter lead time.\n\nThe inventory control model addresses this by modeling an environment where there are multiplie suppliers with different costs and lead times. Orders must be placed with these suppliers to have an on-hand inventory to meet a changing demand. However, both having supplies on backorder and holding unused inventory have associated costs. The goal of the agent is to choose the amount to order from each supplier to maximize the revenue earned. \n\nAt each time step, an order is placed to each supplier. If previous orders have waited for the length of their supplier's lead time, then these orders will become part of the on-hand inventory. The demand is then randomly chosen from a user-selected distribution and is subtracted from the on-hand inventory. If the on-hand inventory would become less than zero, than items are considered to be on backorder which decreases the reward. The demand is subtracted from the on-hand inventory to calculate on-hand inventory for the start of the next time step. A remaining inventory (a positive nonzero number) at the end of this calculation negatively influences the reward proportional to the holding costs. There are two ways that the inventory can be setup for the environment. The first allows negative inventory to be accumulated. In this case the on-hand inventory is offset by adding the value of the maximum inventory. This is done so that the observation space can be properly represented using AI Gym. This allows for backorder costs to be calculated if the inventory were to go become negative. The second way does not allow for inventory to become negative. Backorders are still calculated and they still negatively influence reward, but the inventory is reset to 0 for the next timestep after the reward calculation. The inventory is not offset by any number in this version of the environment. \n\n## Model Assumptions\n* Backorders are not retroactively fulfilled. If a high demand would cause inventory to become negative, this unfulfilled demand is not met later when there may be some inventory being held at the end of a timestep.",
"_____no_output_____"
],
[
"## Environment\n### Dynamics\n#### State Space\nThe state space is $S = [0,\\text{Max-Order}]^{L_1} \\times [0,\\text{Max-Order}]^{L_2} \\times ... \\times [0,\\text{Max-Order}]^{L_N} \\times I$ where $N$ is the number of suppliers and $[0,\\text{Max-Order}]^{L_i}$ represents a list of integers between zero and the max order amount, maxorder (specified in the configuration), with the length of the lead time of supplier $i$. This represents how many timesteps back each order is from being added to the inventory. $I$ represents the current on-hand inventory. To represent a timestep, an order will be moved up an index in the array unless it is added to the inventory, in which case it is removed from the array. Each supplier has their own set of indices in the array that represent its lead times. Each index in the list (except for $ I $) has a maximum value of the max_order parameter. \n\nIf negative inventory is allowed, the last index, the on-hand inventory, is offset by adding the maximum inventory value to it. It is in the range $[0, 2 * maxinventory]$ This is done so that a negative value of the on-hand inventory can be temporarily kept to use in reward calculations for backorders and so that the observation space can be represented properly. Before this value is used in any calculations, the value of the max inventory is subtracted so that the true value of the inventory is used. Otherwise if negative inventory is not allowed, the on-hand inventory must be in the range of $[0,maxinventory]$ and directly corresponds to the current inventory. \n\n#### Action Space\nThe action space is $A = [0,\\text{Max-Order}]^N$ where N is the number of suppliers. This represents the amount to order from each supplier for the current timestep. The order amount cannot be greater than the max_order paramter (set in the initialization of the environment).\n\n#### Reward\nThe reward is $R = - (Order + holdcost \\times max(0,I) + backordercost \\times max(0, -I))$ where $Order = \\sum_{i = 1}^{N} c_i \\times a_i$ and represents the sum of the amount most recently ordered from each supplier, $a_i$, multiplied by the appropriate ordering cost, $c_i$. $holdcost$ represents the holding cost for excess inventory, and $backordercost$ represents the backorder cost for when the inventory would become negative.\n\n#### Transitions\nAt each timestep, orders are placed into each supplier for a certain amount of resources. These orders are processed and will add to the on-hand inventory once the lead time for the appropriate supplier has passed. The time that has passed for each order is trakced using the state at each timestep. If any lead times have passed, the ordered amount is added to the on-hand inventory. Then, the randomly chosen demand is subtracted from the on-hand inventory. If the demand is higher than the current inventory, then the inventory does become negative for the next state. The reward is then calculated proportional to the revenue earned from meeting the demand, but is inversely proportional to the amount that is backordered (the difference between the inventory and demand). If the demand is lower than the current inventory, the inventory remains positive for the next state. The reward is still proportional to the revenue earned from meeting the demand, but is inversely proportional to the amount of inventory left over multiplied by the holding costs. 
\n\n#### Configuration Paramters\n* lead_times: array of ints representing the lead times of each supplier\n* demand_dist: The random number sampled from the given distribution to be used to calculate the demand\n* supplier_costs: array of ints representing the costs of each supplier\n* hold_cost: The int holding cost.\n* backorder_cost: The backorder holding cost.\n* max_inventory: The maximum value (int) that can be held in inventory\n* max_order: The maximum value (int) that can be ordered from each supplier\n* epLen: The int number of time steps to run the experiment for.\n* starting_state: An int list containing enough indices for the sum of all the lead times, plus an additional index for the initial on-hand inventory.\n* neg_inventory: A bool that says whether the on-hand inventory can be negative or not.\n",
"_____no_output_____"
],
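[
"To make the reward concrete, here is a minimal sketch (an illustration, not the package's actual implementation) of a single reward evaluation:\n\n```python\ndef reward(order, inventory, supplier_costs, hold_cost, backorder_cost):\n    # order[i]: amount ordered from supplier i this step; inventory: on-hand I\n    order_cost = sum(c * a for c, a in zip(supplier_costs, order))\n    return -(order_cost + hold_cost * max(0, inventory)\n             + backorder_cost * max(0, -inventory))\n```",
"_____no_output_____"
],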
[
"## Heuristic Agents\n\n### Random Agent\nThis agent randomly samples from the action space. For this environment, the amount ordered from each supplier is an integer from $[0, maxorder]$.\n### Base Surge Agent (TBS)\nThe base surge agent has 2 parameters, $r$ and $S$. Each action is expressed as $[r,[orderamount]]$. $r$ is a vector of the order amounts for all suppliers except the one with the greatest lead time. $S$ represents the \"order up to amount\". orderamount is calculated by calculating $S - I$ where $I$ is the current on-hand inventory. This value is then made 0 if it is negative or is reduced to the $maxorder$ if it is greater. This order amount is used for the supplier with the greatest lead time.",
"_____no_output_____"
]
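,
[
"A minimal sketch of the TBS action (an illustration under the definitions above, not the agent's actual code):\n\n```python\ndef tbs_action(r, S, inventory, max_order):\n    # r: fixed order amounts for all suppliers except the longest lead time\n    surge = min(max(S - inventory, 0), max_order)  # 'order up to S', clipped\n    return list(r) + [surge]\n```",
"_____no_output_____"
]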
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a101d2285b97309e2142e2156d55d89014c226f
| 15,059 |
ipynb
|
Jupyter Notebook
|
Basic Datatypes & Objects.ipynb
|
sumathi-kanthakumar/PythonITMFCS
|
720874976907b79a688377d794c0f14dcf77979c
|
[
"MIT"
] | null | null | null |
Basic Datatypes & Objects.ipynb
|
sumathi-kanthakumar/PythonITMFCS
|
720874976907b79a688377d794c0f14dcf77979c
|
[
"MIT"
] | null | null | null |
Basic Datatypes & Objects.ipynb
|
sumathi-kanthakumar/PythonITMFCS
|
720874976907b79a688377d794c0f14dcf77979c
|
[
"MIT"
] | null | null | null | 16.713651 | 382 | 0.438343 |
[
[
[
"# Numbers\n",
"_____no_output_____"
],
[
"# Title",
"_____no_output_____"
],
[
"Basic Arthemetic",
"_____no_output_____"
]
],
[
[
"a = 10",
"_____no_output_____"
],
[
"b = 10.3",
"_____no_output_____"
],
[
"type(a)",
"_____no_output_____"
],
[
"type(b)",
"_____no_output_____"
],
[
"a+b",
"_____no_output_____"
],
[
"a-b",
"_____no_output_____"
],
[
"a-b",
"_____no_output_____"
],
[
"a*b",
"_____no_output_____"
],
[
"a/b",
"_____no_output_____"
],
[
"a%b",
"_____no_output_____"
],
[
"a=12",
"_____no_output_____"
],
[
"b=10",
"_____no_output_____"
],
[
"a//b",
"_____no_output_____"
],
[
"a%b",
"_____no_output_____"
],
[
"a**2",
"_____no_output_____"
],
[
"a.s=10 # . is not allowed",
"_____no_output_____"
],
[
"\"Hello\"",
"_____no_output_____"
],
[
"'Hello'",
"_____no_output_____"
],
[
"print ('Hello World')",
"Hello World\n"
],
[
"a=\"Kumaran\"",
"_____no_output_____"
],
[
"a[3]",
"_____no_output_____"
],
[
"a[1:]",
"_____no_output_____"
],
[
"a[2:4]",
"_____no_output_____"
],
[
"a[:5]",
"_____no_output_____"
],
[
"a[:-1]",
"_____no_output_____"
],
[
"a[-2:4]",
"_____no_output_____"
],
[
"a[:]",
"_____no_output_____"
],
[
"a[::-1]",
"_____no_output_____"
],
[
"a[::2]",
"_____no_output_____"
],
[
"a[:-3:-1]",
"_____no_output_____"
],
[
"a.capitalize()",
"_____no_output_____"
],
[
"a.upper()",
"_____no_output_____"
],
[
"a.count",
"_____no_output_____"
],
[
"a.count('K')",
"_____no_output_____"
],
[
"a.istitle()",
"_____no_output_____"
],
[
"a.islower()",
"_____no_output_____"
],
[
"a.split('r')",
"_____no_output_____"
],
[
"a.encode()",
"_____no_output_____"
],
[
"a.swapcase('r','a')",
"_____no_output_____"
],
[
"a.swapcase()",
"_____no_output_____"
],
[
"a.join(\" System\")",
"_____no_output_____"
],
[
"a.join('a')",
"_____no_output_____"
],
[
"help(a)",
"No Python documentation found for 'Kumaran'.\nUse help() to get the interactive help utility.\nUse help(str) for help on the str class.\n\n"
],
[
"a = 10",
"_____no_output_____"
],
[
"'insert: {}'.format('abc')",
"_____no_output_____"
],
[
"'a:{}'.format('a')",
"_____no_output_____"
],
[
"a=10",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1022932dc52e78b57c5063ed8a3f354dfa235a
| 6,143 |
ipynb
|
Jupyter Notebook
|
01-sentiment_analysis/05-LSTM.ipynb
|
lleejong/nlp-tensorflow
|
dac8a46426ac01cf65922b2aa01b384f71ce1f6e
|
[
"MIT"
] | 1 |
2018-07-10T01:51:54.000Z
|
2018-07-10T01:51:54.000Z
|
01-sentiment_analysis/05-LSTM.ipynb
|
lleejong/nlp-tensorflow
|
dac8a46426ac01cf65922b2aa01b384f71ce1f6e
|
[
"MIT"
] | null | null | null |
01-sentiment_analysis/05-LSTM.ipynb
|
lleejong/nlp-tensorflow
|
dac8a46426ac01cf65922b2aa01b384f71ce1f6e
|
[
"MIT"
] | null | null | null | 28.976415 | 129 | 0.542243 |
[
[
[
"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom data_process import build_vocab, batch_iter, sentence_to_index\nfrom models import LSTM, biLSTM, deepBiLSTM",
"_____no_output_____"
],
[
"train = pd.read_csv('./data/train-5T.txt', delimiter='\\t')\ntest = pd.read_csv('./data/test-1T.txt', delimiter='\\t')",
"_____no_output_____"
],
[
"X_train = train.document\nY_train = train.label\nX_test = test.document\nY_test = test.label",
"_____no_output_____"
],
[
"max_vocab = 50000\nvocab, _, vocab_size = build_vocab(X_train, max_vocab)",
"_____no_output_____"
]
],
[
[
"# Sentiment Analysis with LSTM",
"_____no_output_____"
]
],
[
[
"batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)",
"_____no_output_____"
],
[
"config = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\ntf.reset_default_graph()\nsess = tf.Session(config=config)\nmodel = LSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)\ntrain_acc = []\navgLoss = []\nx_test = sentence_to_index(X_test, vocab)\n\nfor step, batch in enumerate(batches):\n x_train, y_train = zip(*batch)\n x_train = sentence_to_index(x_train, vocab)\n acc = model.get_accuracy(x_train, y_train)\n l, _ = model.train(x_train, y_train)\n train_acc.append(acc)\n avgLoss.append(l)\n if step % 100 == 0:\n test_loss = model.get_loss(x_test, Y_test)\n print('batch:', '%04d' % step, '\\ntrain loss:', '%.5f' % np.mean(avgLoss), '\\ttest loss:', '%.5f' % test_loss)\n test_acc = model.get_accuracy(x_test, Y_test)\n print('train accuracy:', '%.3f' % np.mean(train_acc), '\\ttest accuracy:', '%.3f' % test_acc, '\\n')\n avgLoss = []\n train_acc = []",
"_____no_output_____"
]
],
[
[
"# Sentiment Analysis with biLSTM",
"_____no_output_____"
]
],
[
[
"batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)",
"_____no_output_____"
],
[
"config = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\ntf.reset_default_graph()\nsess = tf.Session(config=config)\nmodel = biLSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)\ntrain_acc = []\navgLoss = []\nx_test = sentence_to_index(X_test, vocab)\n\nfor step, batch in enumerate(batches):\n x_train, y_train = zip(*batch)\n x_train = sentence_to_index(x_train, vocab)\n acc = model.get_accuracy(x_train, y_train)\n l, _ = model.train(x_train, y_train)\n train_acc.append(acc)\n avgLoss.append(l)\n if step % 100 == 0:\n test_loss = model.get_loss(x_test, Y_test)\n print('batch:', '%04d' % step, '\\ntrain loss:', '%.5f' % np.mean(avgLoss), '\\ttest loss:', '%.5f' % test_loss)\n test_acc = model.get_accuracy(x_test, Y_test)\n print('train accuracy:', '%.3f' % np.mean(train_acc), '\\ttest accuracy:', '%.3f' % test_acc, '\\n')\n avgLoss = []\n train_acc = []",
"_____no_output_____"
]
],
[
[
"# Sentiment Analysis with deepBiLSTM",
"_____no_output_____"
]
],
[
[
"batches = batch_iter(list(zip(X_train, Y_train)), batch_size=64, num_epochs=15)",
"_____no_output_____"
],
[
"config = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\ntf.reset_default_graph()\nsess = tf.Session(config=config)\nmodel = deepBiLSTM(sess=sess, vocab_size=vocab_size, lr=1e-2)\ntrain_acc = []\navgLoss = []\nx_test = sentence_to_index(X_test, vocab)\n\nfor step, batch in enumerate(batches):\n x_train, y_train = zip(*batch)\n x_train = sentence_to_index(x_train, vocab)\n acc = model.get_accuracy(x_train, y_train)\n l, _ = model.train(x_train, y_train)\n train_acc.append(acc)\n avgLoss.append(l)\n if step % 100 == 0:\n test_loss = model.get_loss(x_test, Y_test)\n print('batch:', '%04d' % step, '\\ntrain loss:', '%.5f' % np.mean(avgLoss), '\\ttest loss:', '%.5f' % test_loss)\n test_acc = model.get_accuracy(x_test, Y_test)\n print('train accuracy:', '%.3f' % np.mean(train_acc), '\\ttest accuracy:', '%.3f' % test_acc, '\\n')\n avgLoss = []\n train_acc = []",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a1022b64a2383d28833242a6106c938cc55c8cf
| 226,855 |
ipynb
|
Jupyter Notebook
|
code_04_XX_Iris_Deep_Learning_Classification_with_penguin_Example.ipynb
|
aksha1234/Deep-learning-tutorials
|
269d5e5ac3663f2fb8fd6ec1c0a35b5eb67e401a
|
[
"MIT"
] | null | null | null |
code_04_XX_Iris_Deep_Learning_Classification_with_penguin_Example.ipynb
|
aksha1234/Deep-learning-tutorials
|
269d5e5ac3663f2fb8fd6ec1c0a35b5eb67e401a
|
[
"MIT"
] | null | null | null |
code_04_XX_Iris_Deep_Learning_Classification_with_penguin_Example.ipynb
|
aksha1234/Deep-learning-tutorials
|
269d5e5ac3663f2fb8fd6ec1c0a35b5eb67e401a
|
[
"MIT"
] | null | null | null | 48.754567 | 18,570 | 0.512512 |
[
[
[
"<a href=\"https://colab.research.google.com/github/aksha1234/Deep-learning-tutorials/blob/main/code_04_XX_Iris_Deep_Learning_Classification_with_penguin_Example.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Deep Learning Example - Iris \n\nThis examples demonstrates the core deep learning model building concepts using the Keras library. The Iris flower dataset is used to build the model and perform classification tasks",
"_____no_output_____"
],
[
"### 5.1 Setup",
"_____no_output_____"
]
],
[
[
"#Install related libraries for the course. \n#This is a common requirement for all other exampels too\n\n!pip install pandas\n!pip install tensorflow\n!pip install sklearn\n!pip install matplotlib",
"Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (1.3.5)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas) (2018.9)\nRequirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.7/dist-packages (from pandas) (1.19.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas) (2.8.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)\nRequirement already satisfied: tensorflow in /usr/local/lib/python3.7/dist-packages (2.7.0)\nRequirement already satisfied: wheel<1.0,>=0.32.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (0.37.1)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.1.0)\nRequirement already satisfied: protobuf>=3.9.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (3.17.3)\nRequirement already satisfied: tensorflow-estimator<2.8,~=2.7.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (2.7.0)\nRequirement already satisfied: absl-py>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.0.0)\nRequirement already satisfied: keras-preprocessing>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.1.2)\nRequirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (3.10.0.2)\nRequirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.13.3)\nRequirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.15.0)\nRequirement already satisfied: gast<0.5.0,>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (0.4.0)\nRequirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.43.0)\nRequirement already satisfied: keras<2.8,>=2.7.0rc0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (2.7.0)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (3.3.0)\nRequirement already satisfied: numpy>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.19.5)\nRequirement already satisfied: libclang>=9.0.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (13.0.0)\nRequirement already satisfied: tensorboard~=2.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (2.7.0)\nRequirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (3.1.0)\nRequirement already satisfied: tensorflow-io-gcs-filesystem>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (0.24.0)\nRequirement already satisfied: flatbuffers<3.0,>=1.12 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (2.0)\nRequirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (1.6.3)\nRequirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow) (0.2.0)\nRequirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py>=2.9.0->tensorflow) (1.5.2)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (57.4.0)\nRequirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from 
tensorboard~=2.6->tensorflow) (2.23.0)\nRequirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (1.35.0)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (1.8.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (1.0.1)\nRequirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (0.6.1)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (0.4.6)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard~=2.6->tensorflow) (3.3.6)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow) (0.2.8)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow) (4.2.4)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow) (4.8)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow) (1.3.1)\nRequirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard~=2.6->tensorflow) (4.10.1)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard~=2.6->tensorflow) (3.7.0)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard~=2.6->tensorflow) (0.4.8)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow) (2021.10.8)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard~=2.6->tensorflow) (2.10)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.6->tensorflow) (3.2.0)\nRequirement already satisfied: sklearn in /usr/local/lib/python3.7/dist-packages (0.0)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from sklearn) (1.0.2)\nRequirement already satisfied: numpy>=1.14.6 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->sklearn) (1.19.5)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->sklearn) (3.1.0)\nRequirement already satisfied: scipy>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->sklearn) (1.4.1)\nRequirement already satisfied: joblib>=0.11 in 
/usr/local/lib/python3.7/dist-packages (from scikit-learn->sklearn) (1.1.0)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (3.2.2)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (0.11.0)\nRequirement already satisfied: numpy>=1.11 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (1.19.5)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (3.0.7)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (2.8.2)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib) (1.3.2)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.1->matplotlib) (1.15.0)\n"
]
],
[
[
"### 4.2. Prepare Input Data for Deep Learning\n\nPerform the following steps for preparing data\n\n1. Load data into a pandas dataframe\n2. Convert the dataframe to a numpy array\n3. Scale the feature dataset\n4. Use one-hot-encoding for the target variable\n5. Split into training and test datasets\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport seaborn as sns\nimport keras\n",
"_____no_output_____"
],
[
"df=sns.load_dataset('iris')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import LabelEncoder\n",
"_____no_output_____"
],
[
"encoder=LabelEncoder().fit(df['species'])",
"_____no_output_____"
],
[
"encoder.classes_",
"_____no_output_____"
],
[
"encoder.transform(df.species)",
"_____no_output_____"
],
[
"df['species']=encoder.transform(df.species)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"## To convert the dataframe into the arrays\n## convert into the five array of size by 5\ndf_numpy=df.to_numpy() ",
"_____no_output_____"
],
[
"train_inputs=df_numpy[:,:4]",
"_____no_output_____"
],
[
"train_targets=df_numpy[:,4]",
"_____no_output_____"
]
],
[
[
"> Standardised teh data with the normal distribution having the 0 as means and 1 as variance ~N(0,1)",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"scaler=StandardScaler().fit(train_inputs)",
"_____no_output_____"
],
[
"train_inputs=scaler.transform(train_inputs,3)",
"_____no_output_____"
],
[
"train_inputs",
"_____no_output_____"
],
[
"train_targets=tf.keras.utils.to_categorical(train_targets,3)",
"_____no_output_____"
],
[
"print(\"\\nFeatures after scaling :\\n------------------------------------\")\nprint(train_inputs[:5,:])\nprint(\"\\nTarget after one-hot-encoding :\\n------------------------------------\")\nprint(train_targets[:5,:])",
"\nFeatures after scaling :\n------------------------------------\n[[-0.90068117 1.01900435 -1.34022653 -1.3154443 ]\n [-1.14301691 -0.13197948 -1.34022653 -1.3154443 ]\n [-1.38535265 0.32841405 -1.39706395 -1.3154443 ]\n [-1.50652052 0.09821729 -1.2833891 -1.3154443 ]\n [-1.02184904 1.24920112 -1.34022653 -1.3154443 ]]\n\nTarget after one-hot-encoding :\n------------------------------------\n[[1. 0. 0.]\n [1. 0. 0.]\n [1. 0. 0.]\n [1. 0. 0.]\n [1. 0. 0.]]\n"
],
[
"#Split training and test data\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"train_inputs,test_inputs,train_targets,test_targets=train_test_split(train_inputs,train_targets,test_size=0.1,random_state=12)",
"_____no_output_____"
]
],
[
[
"### 4.3. Creating a Model\n\nCreating a model in Keras requires defining the following\n\n1. Number of hidden layers\n2. Number of nodes in each layer\n3. Activation functions\n4. Loss Function & Accuracy measurements",
"_____no_output_____"
]
],
[
[
"from tensorflow import keras",
"_____no_output_____"
],
[
"#Number of classes in the target variable\nNB_CLASSES=3",
"_____no_output_____"
],
[
"## Create a sequential model in Keras\nmodel=tf.keras.models.Sequential()",
"_____no_output_____"
],
[
"## Add the first hidden layer\nmodel.add(keras.layers.Dense(128, ## Number of nodes\n input_shape=(4,), ### Number of input variables\n name='Hiddden_layer-1', ## Logical NAme\n activation='relu')) ## Activation function",
"_____no_output_____"
]
],
[
[
"> Adding the second hidden layer with 128 nodes aand no need to **make input size one more time.**",
"_____no_output_____"
]
],
[
[
"model.add(keras.layers.Dense(128,name='Hidden-Layer-2',activation='relu'))",
"_____no_output_____"
]
],
[
[
"> Creating the output layers",
"_____no_output_____"
]
],
[
[
"model.add(keras.layers.Dense(NB_CLASSES,\n name='Output-Layer',\n activation='softmax')) ## As we have multiclass variables",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"## compile the model with loss and metrics\nmodel.compile(loss='categorical_crossentropy',metrics=['accuracy','MSE'])",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n Hiddden_layer-1 (Dense) (None, 128) 640 \n \n Hidden-Layer-2 (Dense) (None, 128) 16512 \n \n Output-Layer (Dense) (None, 3) 387 \n \n=================================================================\nTotal params: 17,539\nTrainable params: 17,539\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### 4.4. Training and evaluating the Model\n\nTraining the model involves defining various training models and then perform \nforward and back propagation.",
"_____no_output_____"
]
],
[
[
"#Make it verbose so we can see the progress\nVERBOSE=1\n\n#Setup Hyper Parameters for training\n\n#Set Batch size\nBATCH_SIZE=16 ## 2^n only multiples\n#Set number of epochs\nEPOCHS=10\n#Set validation split. 20% of the training data will be used for validation\n#after each epoch\nVALIDATION_SPLIT=0.2",
"_____no_output_____"
],
[
"\nprint(\"\\nTraining Progress:\\n------------------------------------\")\n\n#Fit the model. This will perform the entire training cycle, including\n#forward propagation, loss computation, backward propagation and gradient descent.\n#Execute for the specified batch sizes and epoch\n#Perform validation after each epoch ",
"\nTraining Progress:\n------------------------------------\n"
],
[
"history=model.fit(train_inputs,train_targets,batch_size=BATCH_SIZE,epochs=EPOCHS,verbose=VERBOSE,validation_split=VALIDATION_SPLIT,workers=-1)",
"Epoch 1/10\n7/7 [==============================] - 1s 37ms/step - loss: 0.9214 - accuracy: 0.6296 - MSE: 0.1802 - val_loss: 0.6425 - val_accuracy: 0.8148 - val_MSE: 0.1168\nEpoch 2/10\n7/7 [==============================] - 0s 6ms/step - loss: 0.5911 - accuracy: 0.8333 - MSE: 0.1063 - val_loss: 0.4602 - val_accuracy: 0.8519 - val_MSE: 0.0811\nEpoch 3/10\n7/7 [==============================] - 0s 5ms/step - loss: 0.4602 - accuracy: 0.8426 - MSE: 0.0821 - val_loss: 0.3968 - val_accuracy: 0.8519 - val_MSE: 0.0723\nEpoch 4/10\n7/7 [==============================] - 0s 6ms/step - loss: 0.3871 - accuracy: 0.8704 - MSE: 0.0698 - val_loss: 0.3480 - val_accuracy: 0.8889 - val_MSE: 0.0653\nEpoch 5/10\n7/7 [==============================] - 0s 5ms/step - loss: 0.3375 - accuracy: 0.8704 - MSE: 0.0619 - val_loss: 0.3122 - val_accuracy: 0.8889 - val_MSE: 0.0593\nEpoch 6/10\n7/7 [==============================] - 0s 6ms/step - loss: 0.3035 - accuracy: 0.8796 - MSE: 0.0566 - val_loss: 0.2871 - val_accuracy: 0.9259 - val_MSE: 0.0550\nEpoch 7/10\n7/7 [==============================] - 0s 5ms/step - loss: 0.2700 - accuracy: 0.8981 - MSE: 0.0497 - val_loss: 0.2614 - val_accuracy: 0.9259 - val_MSE: 0.0509\nEpoch 8/10\n7/7 [==============================] - 0s 6ms/step - loss: 0.2499 - accuracy: 0.9074 - MSE: 0.0469 - val_loss: 0.2555 - val_accuracy: 0.9259 - val_MSE: 0.0493\nEpoch 9/10\n7/7 [==============================] - 0s 6ms/step - loss: 0.2230 - accuracy: 0.9074 - MSE: 0.0416 - val_loss: 0.2760 - val_accuracy: 0.8889 - val_MSE: 0.0527\nEpoch 10/10\n7/7 [==============================] - 0s 7ms/step - loss: 0.2067 - accuracy: 0.9167 - MSE: 0.0382 - val_loss: 0.2446 - val_accuracy: 0.9259 - val_MSE: 0.0472\n"
],
[
"## TO covert this data into dataframe we need history.hsitory\npd.DataFrame(history.history)",
"_____no_output_____"
],
[
"pd.DataFrame(history.history)['accuracy'].plot(figsize=(8,5))",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"#Plot accuracy of the model after each epoch.\npd.DataFrame(history.history)[\"accuracy\"].plot(figsize=(8, 5))\nplt.title(\"Accuracy improvements with Epoch\")\nplt.show()",
"_____no_output_____"
],
[
"#Evaluate the model against the test dataset and print results\nprint(\"\\nEvaluation against Test Dataset :\\n------------------------------------\")\nmodel.evaluate(test_inputs,test_targets)",
"\nEvaluation against Test Dataset :\n------------------------------------\n1/1 [==============================] - 0s 18ms/step - loss: 0.1317 - accuracy: 1.0000 - MSE: 0.0219\n"
]
],
[
[
"### 4.5. Saving and Loading Models\n\nThe training and inference environments are usually separate. Models need to be saved after they are validated. They are then loaded into the inference environments for actual prediction",
"_____no_output_____"
]
],
[
[
"#Saving a model\n \nmodel.save(\"iris_save\")\n \n#Loading a Model \nloaded_model = keras.models.load_model(\"iris_save\")",
"WARNING:absl:Function `_wrapped_model` contains input name(s) Hiddden_layer-1_input with unsupported characters which will be renamed to hiddden_layer_1_input in the SavedModel.\n"
],
[
"\n#Print Model Summary\nloaded_model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n Hiddden_layer-1 (Dense) (None, 128) 640 \n \n Hidden-Layer-2 (Dense) (None, 128) 16512 \n \n Output-Layer (Dense) (None, 3) 387 \n \n=================================================================\nTotal params: 17,539\nTrainable params: 17,539\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"### 4.6. Predictions with Deep Learning Models",
"_____no_output_____"
]
],
[
[
"#Raw prediction data\nprediction_input = [[6.6, 3. , 4.4, 1.4]]",
"_____no_output_____"
],
[
"#Scale prediction data with the same scaling model\nscaled_input = scaler.transform(prediction_input)",
"_____no_output_____"
],
[
"#Get raw prediction probabilities\nraw_prediction = model.predict(scaled_input)\nprint(\"Raw Prediction Output (Probabilities) :\" , raw_prediction)\n",
"Raw Prediction Output (Probabilities) : [[0.02751675 0.6873533 0.28513 ]]\n"
],
[
"#Find prediction\nprediction = np.argmax(raw_prediction)## numpy is having teh argument maximum\nprint(\"Prediction is \", encoder.inverse_transform([prediction]))",
"Prediction is ['versicolor']\n"
]
],
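[
[
"## Hypothetical helper (my addition): the prediction steps above wrapped into a\n## single function; classify_iris is not defined elsewhere in these notes\ndef classify_iris(raw_input):\n    scaled = scaler.transform(raw_input)\n    raw_prediction = model.predict(scaled)\n    return encoder.inverse_transform([np.argmax(raw_prediction)])\n\nclassify_iris([[6.6, 3. , 4.4, 1.4]])",
"_____no_output_____"
]
],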
[
[
"## Modelling new dataset",
"_____no_output_____"
]
],
[
[
"df=sns.load_dataset('penguins')",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.dropna(inplace=True)",
"_____no_output_____"
],
[
"## Lest see how juch data is present or not\ndf.isnull().sum()",
"_____no_output_____"
],
[
"input_cols=df.columns.tolist()[1:]",
"_____no_output_____"
],
[
"output_cols=df.columns.tolist()[0]",
"_____no_output_____"
],
[
"input_cols,output_cols",
"_____no_output_____"
],
[
"## We have to convert our input framem in to the numerical calus so for this we be making using one hot encoding\n",
"_____no_output_____"
],
[
"categorical_cols=df[input_cols].select_dtypes(include='object').columns.tolist()\nnumerical_cols=df[input_cols].select_dtypes(include=np.number).columns.tolist()",
"_____no_output_____"
],
[
"## First let us ocnvert it into the Onehot Encoding \nfrom sklearn.preprocessing import OneHotEncoder\n",
"_____no_output_____"
],
[
"encoder=OneHotEncoder(sparse=False,handle_unknown='ignore').fit(df[categorical_cols])",
"_____no_output_____"
],
[
"encoded_cols=encoder.get_feature_names(categorical_cols).tolist()",
"/usr/local/lib/python3.7/dist-packages/sklearn/utils/deprecation.py:87: FutureWarning: Function get_feature_names is deprecated; get_feature_names is deprecated in 1.0 and will be removed in 1.2. Please use get_feature_names_out instead.\n warnings.warn(msg, category=FutureWarning)\n"
],
[
"df[encoded_cols]=encoder.transform(df[categorical_cols])",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"scaler=StandardScaler().fit(df[numerical_cols+encoded_cols])",
"_____no_output_____"
],
[
"df[numerical_cols+encoded_cols]=scaler.transform(df[numerical_cols+encoded_cols])",
"_____no_output_____"
],
[
"inputs_df=df[numerical_cols+encoded_cols].copy().to_numpy() ## converting the dataframe into the array as required for deep learning \ntargets_df=df[output_cols].copy()",
"_____no_output_____"
],
[
"## COnverting teh target column in to label encoder\nencoder2=LabelEncoder().fit(targets_df)",
"_____no_output_____"
],
[
"targets_df=encoder2.transform(targets_df)",
"_____no_output_____"
],
[
"encoder2.classes_",
"_____no_output_____"
],
[
"## With the help of keras we will convert it into catgories into matrces of 3 columns as 3 classes avaliable\ntargets_df=tf.keras.utils.to_categorical(targets_df)",
"_____no_output_____"
],
[
"train_inputs,test_inputs,train_targets,test_targets=train_test_split(inputs_df,targets_df,test_size=0.2,random_state=0)",
"_____no_output_____"
],
[
"inputs_df.shape",
"_____no_output_____"
],
[
"## Designing the models\nmodel=tf.keras.models.Sequential()",
"_____no_output_____"
],
[
"model.add(keras.layers.Dense(32,activation='relu',input_shape=(9,),name='Hidden_Layer1'))",
"_____no_output_____"
],
[
"model.add(keras.layers.Dense(32,activation='relu',name='Hidden_Layer2'))",
"_____no_output_____"
],
[
"model.add(keras.layers.Dense(3,activation='softmax',name='Output_layer'))",
"_____no_output_____"
],
[
"## compile the model with loss and metrics\nmodel.compile(loss='categorical_crossentropy',## Because model is multi classs\n metrics=['accuracy','mse'])",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential_1\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n Hidden_Layer1 (Dense) (None, 32) 320 \n \n Hidden_Layer2 (Dense) (None, 32) 1056 \n \n Output_layer (Dense) (None, 3) 99 \n \n=================================================================\nTotal params: 1,475\nTrainable params: 1,475\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"## Training and evaluating the model",
"_____no_output_____"
]
],
[
[
"history=model.fit(train_inputs,train_targets,verbose=1,batch_size=16,epochs=16,validation_split=0.2,workers=-1)",
"Epoch 1/16\n14/14 [==============================] - 1s 16ms/step - loss: 0.7852 - accuracy: 0.7170 - mse: 0.1550 - val_loss: 0.6229 - val_accuracy: 0.9074 - val_mse: 0.1158\nEpoch 2/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.5168 - accuracy: 0.9057 - mse: 0.0913 - val_loss: 0.4578 - val_accuracy: 0.9444 - val_mse: 0.0784\nEpoch 3/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.3740 - accuracy: 0.9387 - mse: 0.0616 - val_loss: 0.3448 - val_accuracy: 0.9630 - val_mse: 0.0552\nEpoch 4/16\n14/14 [==============================] - 0s 4ms/step - loss: 0.2764 - accuracy: 0.9670 - mse: 0.0431 - val_loss: 0.2531 - val_accuracy: 0.9815 - val_mse: 0.0366\nEpoch 5/16\n14/14 [==============================] - 0s 5ms/step - loss: 0.2033 - accuracy: 0.9764 - mse: 0.0292 - val_loss: 0.1894 - val_accuracy: 0.9815 - val_mse: 0.0251\nEpoch 6/16\n14/14 [==============================] - 0s 4ms/step - loss: 0.1475 - accuracy: 0.9906 - mse: 0.0191 - val_loss: 0.1398 - val_accuracy: 0.9815 - val_mse: 0.0170\nEpoch 7/16\n14/14 [==============================] - 0s 5ms/step - loss: 0.1071 - accuracy: 1.0000 - mse: 0.0124 - val_loss: 0.1046 - val_accuracy: 0.9815 - val_mse: 0.0120\nEpoch 8/16\n14/14 [==============================] - 0s 4ms/step - loss: 0.0782 - accuracy: 0.9953 - mse: 0.0083 - val_loss: 0.0854 - val_accuracy: 0.9630 - val_mse: 0.0107\nEpoch 9/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.0557 - accuracy: 1.0000 - mse: 0.0052 - val_loss: 0.0668 - val_accuracy: 0.9815 - val_mse: 0.0088\nEpoch 10/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.0401 - accuracy: 1.0000 - mse: 0.0036 - val_loss: 0.0580 - val_accuracy: 0.9815 - val_mse: 0.0088\nEpoch 11/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.0286 - accuracy: 1.0000 - mse: 0.0022 - val_loss: 0.0437 - val_accuracy: 0.9815 - val_mse: 0.0067\nEpoch 12/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.0226 - accuracy: 1.0000 - mse: 0.0021 - val_loss: 0.0472 - val_accuracy: 0.9815 - val_mse: 0.0089\nEpoch 13/16\n14/14 [==============================] - 0s 3ms/step - loss: 0.0165 - accuracy: 1.0000 - mse: 0.0013 - val_loss: 0.0331 - val_accuracy: 0.9815 - val_mse: 0.0054\nEpoch 14/16\n14/14 [==============================] - 0s 5ms/step - loss: 0.0135 - accuracy: 1.0000 - mse: 0.0011 - val_loss: 0.0403 - val_accuracy: 0.9815 - val_mse: 0.0081\nEpoch 15/16\n14/14 [==============================] - 0s 5ms/step - loss: 0.0100 - accuracy: 1.0000 - mse: 7.3396e-04 - val_loss: 0.0316 - val_accuracy: 0.9815 - val_mse: 0.0061\nEpoch 16/16\n14/14 [==============================] - 0s 4ms/step - loss: 0.0085 - accuracy: 1.0000 - mse: 8.0572e-04 - val_loss: 0.0285 - val_accuracy: 0.9815 - val_mse: 0.0056\n"
],
[
"pd.DataFrame(history.history)['accuracy'].plot(figsize=(12,9))",
"_____no_output_____"
],
[
"model.evaluate(test_inputs,test_targets)",
"3/3 [==============================] - 0s 4ms/step - loss: 0.0186 - accuracy: 0.9851 - mse: 0.0036\n"
],
[
"def classify_species(df):\n data=df.copy()\n data[encoded_cols]=encoder.transform(data[categorical_cols])\n data[numerical_cols+encoded_cols]=scaler.transform(data[numerical_cols+encoded_cols])\n data=data[numerical_cols+encoded_cols].to_numpy()\n predict=model.predict(data)\n arg_max=np.argmax(predict)\n return encoder2.inverse_transform(arg_max)\n\n\n",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"sns.load_dataset('penguins')",
"_____no_output_____"
],
[
"input_cols",
"_____no_output_____"
],
[
"def classify_species(df):\n data=df.copy()\n data[encoded_cols]=encoder.transform(data[categorical_cols])\n data[numerical_cols+encoded_cols]=scaler.transform(data[numerical_cols+encoded_cols])\n data=data[numerical_cols+encoded_cols].to_numpy()\n predict=model.predict(data)\n arg_max=np.argmax(predict)\n return encoder2.inverse_transform([arg_max])",
"_____no_output_____"
],
[
"df=pd.DataFrame([{'island':'Gentoo',\n 'bill_length_mm':58,\n 'bill_depth_mm':20,\n 'flipper_length_mm':200,\n 'body_mass_g':3450,\n 'sex':'Male'}])",
"_____no_output_____"
],
[
"df=pd.DataFrame([{'island':'Adelie',\n 'bill_length_mm':30,\n 'bill_depth_mm':23,\n 'flipper_length_mm':200,\n 'body_mass_g':4000,\n 'sex':'Female'}])",
"_____no_output_____"
],
[
"classify_species(df)",
"_____no_output_____"
],
[
"import joblib",
"_____no_output_____"
],
[
"assignment={'encoder_cat_cols':encoder,'encoder_species':encoder2,'scaler':scaler,'model':model}",
"_____no_output_____"
],
[
"joblib.dump(assignment,filename='model_params.joblib')",
"WARNING:absl:Function `_wrapped_model` contains input name(s) Hidden_Layer1_input with unsupported characters which will be renamed to hidden_layer1_input in the SavedModel.\n"
],
[
"! git",
"usage: git [--version] [--help] [-C <path>] [-c <name>=<value>]\n [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n <command> [<args>]\n\nThese are common Git commands used in various situations:\n\nstart a working area (see also: git help tutorial)\n clone Clone a repository into a new directory\n init Create an empty Git repository or reinitialize an existing one\n\nwork on the current change (see also: git help everyday)\n add Add file contents to the index\n mv Move or rename a file, a directory, or a symlink\n reset Reset current HEAD to the specified state\n rm Remove files from the working tree and from the index\n\nexamine the history and state (see also: git help revisions)\n bisect Use binary search to find the commit that introduced a bug\n grep Print lines matching a pattern\n log Show commit logs\n show Show various types of objects\n status Show the working tree status\n\ngrow, mark and tweak your common history\n branch List, create, or delete branches\n checkout Switch branches or restore working tree files\n commit Record changes to the repository\n diff Show changes between commits, commit and working tree, etc\n merge Join two or more development histories together\n rebase Reapply commits on top of another base tip\n tag Create, list, delete or verify a tag object signed with GPG\n\ncollaborate (see also: git help workflows)\n fetch Download objects and refs from another repository\n pull Fetch from and integrate with another repository or a local branch\n push Update remote refs along with associated objects\n\n'git help -a' and 'git help -g' list available subcommands and some\nconcept guides. See 'git help <command>' or 'git help <concept>'\nto read about a specific subcommand or concept.\n"
],
[
"! git init ## first initializing a new repositroy",
"Initialized empty Git repository in /content/.git/\n"
],
[
"! git clone https://github.com/aksha1234/Deep-learning-tutorials.git",
"fatal: destination path 'Deep-learning-tutorials' already exists and is not an empty directory.\n"
],
[
"! pwd",
"/content\n"
],
[
"%cd Deep-learning-tutorials/",
"/content/Deep-learning-tutorials\n"
],
[
"! git remote -v",
"origin\thttps://aksha1234:[email protected]/aksha1234/Deep-learning-tutorials (fetch)\norigin\thttps://aksha1234:[email protected]/aksha1234/Deep-learning-tutorials (push)\n"
],
[
"! git status",
"On branch main\nYour branch is up to date with 'origin/main'.\n\nnothing to commit, working tree clean\n"
],
[
"## cretaing a file for just example\n! touch firstpy.py",
"_____no_output_____"
],
[
"! git status",
"On branch main\nYour branch is up to date with 'origin/main'.\n\nUntracked files:\n (use \"git add <file>...\" to include in what will be committed)\n\n\t\u001b[31mfirstpy.py\u001b[m\n\nnothing added to commit but untracked files present (use \"git add\" to track)\n"
],
[
"## to add teh files to get tracked\n! git add -A",
"_____no_output_____"
],
[
"!git status",
"On branch main\nYour branch is up to date with 'origin/main'.\n\nChanges to be committed:\n (use \"git reset HEAD <file>...\" to unstage)\n\n\t\u001b[32mnew file: firstpy.py\u001b[m\n\n"
],
[
"! git commit -a -m 'first_commit'",
"[main d3b4ccd] first_commit\n 1 file changed, 2 insertions(+)\n create mode 100644 firstpy.py\n"
],
[
"uname = \"aksha1234\"\n!git config --global user.email '[email protected]'\n!git config --global user.name '$uname'\n",
"_____no_output_____"
],
[
"! git config --list",
"[email protected]\nuser.name=aksha1234\ncore.repositoryformatversion=0\ncore.filemode=true\ncore.bare=false\ncore.logallrefupdates=true\nremote.origin.url=https://aksha1234:[email protected]/aksha1234/Deep-learning-tutorials\nremote.origin.fetch=+refs/heads/*:refs/remotes/origin/*\nbranch.main.remote=origin\nbranch.main.merge=refs/heads/main\n"
],
[
"! git status",
"On branch main\nYour branch is ahead of 'origin/main' by 1 commit.\n (use \"git push\" to publish your local commits)\n\nnothing to commit, working tree clean\n"
]
],
[
[
"To push all teh requitred changes to github we have to ",
"_____no_output_____"
]
],
[
[
"username= input('Enter username')\nfrom getpass import getpass\npassword=getpass(' Enter password:')",
"Enter usernameaksha1234\n Enter password:··········\n"
],
[
"!git remote add origin https://$username:[email protected]/$username/Deep-learning-tutorials.git",
"fatal: remote origin already exists.\n"
],
[
"! git remote rm origin ",
"_____no_output_____"
],
[
"api='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'",
"_____no_output_____"
],
[
"!git remote add origin https://$username:[email protected]/$username/Deep-learning-tutorials.git",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"! git push origin main",
"remote: Repository not found.\nfatal: Authentication failed for 'https://:@github.com//Deep-learning-tutorials.git/'\n"
],
[
"! git log",
"\u001b[33mcommit d3b4ccd183de5cc81ef599a7f5afc9ede55baa9f\u001b[m\u001b[33m (\u001b[m\u001b[1;36mHEAD -> \u001b[m\u001b[1;32mmain\u001b[m\u001b[33m)\u001b[m\nAuthor: aksha1234 <[email protected]>\nDate: Thu Feb 10 20:14:00 2022 +0000\n\n first_commit\n\n\u001b[33mcommit ade2bfcb7dbcf0ea162002c7c4f090b30592fcf0\u001b[m\nAuthor: aksha1234 <[email protected]>\nDate: Wed Feb 9 15:56:16 2022 +0530\n\n Created using Colaboratory\n\n\u001b[33mcommit 6696d0e6d0fafe0a656cf5e8dcd458d47eef64ba\u001b[m\nAuthor: aksha1234 <[email protected]>\nDate: Wed Jan 19 20:26:25 2022 +0530\n\n Predicted the score using random forest Regression\n\n\u001b[33mcommit 375c7dc61aac467387cb97d248265d16c83d8ddc\u001b[m\nAuthor: aksha1234 <[email protected]>\nDate: Sat Jan 15 20:26:10 2022 +0530\n\n Created using Colaboratory\n\n\u001b[33mcommit 4ad8f899e483a143ad4aec68c8909ee18dbd55b2\u001b[m\nAuthor: aksha1234 <[email protected]>\nDate: Sat Jan 8 19:11:54 2022 +0530\n\n Created using Colaboratory\n\n\u001b[33mcommit a85e42e04a9fe3d3ba1c1413f2251c0da11e0966\u001b[m\nAuthor: akshay kadwe <[email protected]>\nDate: Tue Dec 14 19:44:28 2021 +0000\n\n Added the beautifulSoup lib\n\n\u001b[33mcommit 72c8477f4073e84697feb33411ec0ff0232e6e55\u001b[m\nAuthor: akshay kadwe <[email protected]>\nDate: Tue Dec 14 16:36:35 2021 +0000\n\n added a scrapper file\n\n\u001b[33mcommit bd80483435829f17ac2e9191360db88257eacd52\u001b[m\nAuthor: aksha1234 <[email protected]>\nDate: Tue Dec 14 21:59:07 2021 +0530\n\n Initial commit\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10328aa054737e0ab3900b669aafcb11fd7d90
| 20,339 |
ipynb
|
Jupyter Notebook
|
notebook/HW2-draft-Cody.ipynb
|
GWU-CS2021/CSCI6364
|
ea25867ceb39e309ce1acca37618aebc54f822ae
|
[
"MIT"
] | null | null | null |
notebook/HW2-draft-Cody.ipynb
|
GWU-CS2021/CSCI6364
|
ea25867ceb39e309ce1acca37618aebc54f822ae
|
[
"MIT"
] | null | null | null |
notebook/HW2-draft-Cody.ipynb
|
GWU-CS2021/CSCI6364
|
ea25867ceb39e309ce1acca37618aebc54f822ae
|
[
"MIT"
] | null | null | null | 40.759519 | 1,580 | 0.525935 |
[
[
[
"# Import needed packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\n# If you're working in Jupyter Notebook, include the following so that plots will display:\n%matplotlib inline\n\ndataframe = pd.read_csv('student-mat.csv')\nprint(dataframe.head())\n\nfor c in dataframe.columns:\n print(c)\n print(dataframe[c].value_counts(dropna=False))\nprint(dataframe.columns)\n\nfrom sklearn.model_selection import train_test_split\n\ntarget = 'internet'\ny = dataframe[target].map({'yes': 1, 'no': 0})\n\nX = dataframe.drop(labels = [target], axis='columns')\n\n\n# drop non-numberic fields\nyes_no = ['activities','schoolsup','famsup','paid','nursery','higher','romantic']\n# map yes/no to 1/0\nfor c in yes_no:\n X[c] = X[c].map({'yes': 1, 'no': 0})\n# translate non_numeric fields to int\nnon_numeric = ['school', 'sex', 'address', 'famsize', 'Pstatus', 'Mjob',\n 'Fjob', 'reason', 'guardian']\nfor c in non_numeric:\n X[c] = X[c].astype('category').cat.codes\n\n \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n# split validate set and test set\nvalidate_train,holdout_train,validate_test,holdout_test = train_test_split(X_test, y_test, test_size=0.5)\n\nbaseline_classifier_model = RandomForestClassifier()\n# Train the baseline model on training data\nbaseline_classifier_model = baseline_classifier_model.fit(X_train, y_train)\n\n# Use the forest's predict method on the holdout\npredictions = baseline_classifier_model.predict(holdout_train)\n\n# Calculate the accuracy_score\naccuracy_score(predictions, holdout_test)",
" school sex age address famsize Pstatus Medu Fedu Mjob Fjob ... \\\n0 GP F 18 U GT3 A 4 4 at_home teacher ... \n1 GP F 17 U GT3 T 1 1 at_home other ... \n2 GP F 15 U LE3 T 1 1 at_home other ... \n3 GP F 15 U GT3 T 4 2 health services ... \n4 GP F 16 U GT3 T 3 3 other other ... \n\n famrel freetime goout Dalc Walc health absences G1 G2 G3 \n0 4 3 4 1 1 3 6 5 6 6 \n1 5 3 3 1 1 3 4 5 5 6 \n2 4 3 2 2 3 3 10 7 8 10 \n3 3 2 2 1 1 5 2 15 14 15 \n4 4 3 2 1 2 5 4 6 10 10 \n\n[5 rows x 33 columns]\nschool\nGP 349\nMS 46\nName: school, dtype: int64\nsex\nF 208\nM 187\nName: sex, dtype: int64\nage\n16 104\n17 98\n18 82\n15 82\n19 24\n20 3\n22 1\n21 1\nName: age, dtype: int64\naddress\nU 307\nR 88\nName: address, dtype: int64\nfamsize\nGT3 281\nLE3 114\nName: famsize, dtype: int64\nPstatus\nT 354\nA 41\nName: Pstatus, dtype: int64\nMedu\n4 131\n2 103\n3 99\n1 59\n0 3\nName: Medu, dtype: int64\nFedu\n2 115\n3 100\n4 96\n1 82\n0 2\nName: Fedu, dtype: int64\nMjob\nother 141\nservices 103\nat_home 59\nteacher 58\nhealth 34\nName: Mjob, dtype: int64\nFjob\nother 217\nservices 111\nteacher 29\nat_home 20\nhealth 18\nName: Fjob, dtype: int64\nreason\ncourse 145\nhome 109\nreputation 105\nother 36\nName: reason, dtype: int64\nguardian\nmother 273\nfather 90\nother 32\nName: guardian, dtype: int64\ntraveltime\n1 257\n2 107\n3 23\n4 8\nName: traveltime, dtype: int64\nstudytime\n2 198\n1 105\n3 65\n4 27\nName: studytime, dtype: int64\nfailures\n0 312\n1 50\n2 17\n3 16\nName: failures, dtype: int64\nschoolsup\nno 344\nyes 51\nName: schoolsup, dtype: int64\nfamsup\nyes 242\nno 153\nName: famsup, dtype: int64\npaid\nno 214\nyes 181\nName: paid, dtype: int64\nactivities\nyes 201\nno 194\nName: activities, dtype: int64\nnursery\nyes 314\nno 81\nName: nursery, dtype: int64\nhigher\nyes 375\nno 20\nName: higher, dtype: int64\ninternet\nyes 329\nno 66\nName: internet, dtype: int64\nromantic\nno 263\nyes 132\nName: romantic, dtype: int64\nfamrel\n4 195\n5 106\n3 68\n2 18\n1 8\nName: famrel, dtype: int64\nfreetime\n3 157\n4 115\n2 64\n5 40\n1 19\nName: freetime, dtype: int64\ngoout\n3 130\n2 103\n4 86\n5 53\n1 23\nName: goout, dtype: int64\nDalc\n1 276\n2 75\n3 26\n5 9\n4 9\nName: Dalc, dtype: int64\nWalc\n1 151\n2 85\n3 80\n4 51\n5 28\nName: Walc, dtype: int64\nhealth\n5 146\n3 91\n4 66\n1 47\n2 45\nName: health, dtype: int64\nabsences\n0 115\n2 65\n4 53\n6 31\n8 22\n10 17\n14 12\n12 12\n3 8\n16 7\n7 7\n5 5\n18 5\n20 4\n11 3\n9 3\n13 3\n15 3\n22 3\n1 3\n38 1\n30 1\n40 1\n23 1\n19 1\n28 1\n75 1\n21 1\n24 1\n56 1\n26 1\n54 1\n25 1\n17 1\nName: absences, dtype: int64\nG1\n10 51\n8 41\n11 39\n7 37\n12 35\n13 33\n9 31\n14 30\n15 24\n6 24\n16 22\n18 8\n17 8\n5 7\n19 3\n4 1\n3 1\nName: G1, dtype: int64\nG2\n9 50\n10 46\n12 41\n13 37\n11 35\n15 34\n8 32\n14 23\n7 21\n5 15\n6 14\n16 13\n0 13\n18 12\n17 5\n19 3\n4 1\nName: G2, dtype: int64\nG3\n10 56\n11 47\n0 38\n15 33\n8 32\n13 31\n12 31\n9 28\n14 27\n16 16\n6 15\n18 12\n7 9\n5 7\n17 6\n19 5\n20 1\n4 1\nName: G3, dtype: int64\nIndex(['school', 'sex', 'age', 'address', 'famsize', 'Pstatus', 'Medu', 'Fedu',\n 'Mjob', 'Fjob', 'reason', 'guardian', 'traveltime', 'studytime',\n 'failures', 'schoolsup', 'famsup', 'paid', 'activities', 'nursery',\n 'higher', 'internet', 'romantic', 'famrel', 'freetime', 'goout', 'Dalc',\n 'Walc', 'health', 'absences', 'G1', 'G2', 'G3'],\n dtype='object')\n"
],
[
"best_model = None\nfor i in range(5):\n\tclassifier_model = RandomForestClassifier()\n\tclassifier_model = classifier_model.fit(X_train, y_train)\n\tpredictions = classifier_model.predict(validate_train)\n\tscore = accuracy_score(validate_test, predictions)\n\tprint(\"%d times running, scoring %f\"%(i+1,score))\n\tif best_model is None or best_model[1] < score:\n\t\tbest_model = (classifier_model, score)\n\t\n\nmodel, score = best_model",
"1 times running, scoring 0.864407\n2 times running, scoring 0.864407\n3 times running, scoring 0.847458\n4 times running, scoring 0.864407\n5 times running, scoring 0.847458\n"
],
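[
"## Optional sketch (my addition): inspect which features the chosen random\n## forest relies on via its feature_importances_ attribute\nimportances = pd.Series(model.feature_importances_, index=X.columns)\nprint(importances.sort_values(ascending=False).head(10))",
"_____no_output_____"
],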
[
"# View confusion matrix for test data and predictions\nconfusion_matrix(holdout_test, predictions)\n# Get and reshape confusion matrix data\nmatrix = confusion_matrix(holdout_test, predictions)\nmatrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]\n\n# Build the plot\nplt.figure(figsize=(16,7))\nsns.set(font_scale=1.4)\nsns.heatmap(matrix, annot=True, annot_kws={'size':10},\n cmap=plt.cm.Greens, linewidths=0.2)\n\n# Add labels to the plot\nclass_names = ['activities','schoolsup','famsup','paid','nursery','higher','romantic']\ntick_marks = np.arange(len(class_names))\ntick_marks2 = tick_marks + 0.5\nplt.xticks(tick_marks, class_names, rotation=25)\nplt.yticks(tick_marks2, class_names, rotation=0)\nplt.xlabel('Predicted label')\nplt.ylabel('True label')\nplt.title('Confusion Matrix for Random Forest Model')\nplt.show()",
"_____no_output_____"
],
[
"# View the classification report for test data and predictions\nprint(classification_report(y_test, y_pred_test))",
" precision recall f1-score support\n\n 0 0.00 0.00 0.00 17\n 1 0.83 0.99 0.90 82\n\n accuracy 0.82 99\n macro avg 0.41 0.49 0.45 99\nweighted avg 0.68 0.82 0.75 99\n\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a1062fcb0721023b0029490622a57fa78170ff6
| 393,860 |
ipynb
|
Jupyter Notebook
|
09-Nonlinear-Filtering.ipynb
|
MichaelRW/Kalman_and_Bayesian_Filtering
|
2e9394c7942872b155228ed7b21798527961282b
|
[
"CC-BY-4.0"
] | 4 |
2018-11-20T02:35:29.000Z
|
2019-10-27T23:06:59.000Z
|
09-Nonlinear-Filtering.ipynb
|
nahidalam/Kalman-and-Bayesian-Filters-in-Python
|
80947df4f30d94572207b42e93e0ec1e8f7383b9
|
[
"CC-BY-4.0"
] | null | null | null |
09-Nonlinear-Filtering.ipynb
|
nahidalam/Kalman-and-Bayesian-Filters-in-Python
|
80947df4f30d94572207b42e93e0ec1e8f7383b9
|
[
"CC-BY-4.0"
] | 6 |
2018-01-17T17:42:21.000Z
|
2020-11-12T18:19:42.000Z
| 413.718487 | 38,802 | 0.916308 |
[
[
[
"[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)",
"_____no_output_____"
],
[
"# Nonlinear Filtering",
"_____no_output_____"
]
],
[
[
"#format the book\n%matplotlib inline\nfrom __future__ import division, print_function\nfrom book_format import load_style\nload_style()",
"_____no_output_____"
]
],
[
[
"## Introduction\n\nThe Kalman filter that we have developed uses linear equations, and so the filter can only handle linear problems. But the world is nonlinear, and so the classic filter that we have been studying to this point can have very limited utility. \n\nThere can be nonlinearity in the process model. Suppose we want to track an object falling through the atmosphere. The acceleration of the object depends on the drag it encounters. Drag depends on air density, and the air density decreases with altitude. In one dimension this can be modelled with the nonlinear differential equation\n\n$$\\ddot x = \\frac{0.0034ge^{-x/22000}\\dot x^2}{2\\beta} - g$$\n\nA second source of nonlinearity comes from the measurements. For example, radars measure the slant range to an object, and we are typically interested in the aircraft's position over the ground. We invoke Pythagoras and get the nonlinear equation:\n\n$$x=\\sqrt{\\mathtt{slant}^2 - \\mathtt{altitude}^2}$$\n\nThese facts were not lost on the early adopters of the Kalman filter. Soon after Dr. Kalman published his paper people began working on how to extend the Kalman filter for nonlinear problems. \n\nIt is almost true to state that the only equation anyone knows how to solve is $\\mathbf{Ax}=\\mathbf{b}$. We only really know how to do linear algebra. I can give you any linear set of equations and you can either solve it or prove that it has no solution. \n\nAnyone with formal education in math or physics has spent years learning various analytic ways to solve integrals, differential equations and so on. Yet even trivial physical systems produce equations that cannot be solved analytically. I can take an equation that you are able to integrate, insert a $\\log$ term, and render it insolvable. This leads to jokes about physicists stating \"assume a spherical cow on a frictionless surface in a vacuum...\". Without making extreme simplifications most physical problems do not have analytic solutions.\n\nHow do we do things like model airflow over an aircraft in a computer, or predict weather, or track missiles with a Kalman filter? We retreat to what we know: $\\mathbf{Ax}=\\mathbf{b}$. We find some way to linearize the problem, turning it into a set of linear equations, and then use linear algebra software packages to compute an approximate solution. \n\nLinearizing a nonlinear problem gives us inexact answers, and in a recursive algorithm like a Kalman filter or weather tracking system these small errors can sometimes reinforce each other at each step, quickly causing the algorithm to spit out nonsense. \n\nWhat we are about to embark upon is a difficult problem. There is not one obvious, correct, mathematically optimal solution anymore. We will be using approximations, we will be introducing errors into our computations, and we will forever be battling filters that *diverge*, that is, filters whose numerical errors overwhelm the solution. \n\nIn the remainder of this short chapter I will illustrate the specific problems the nonlinear Kalman filter faces. You can only design a filter after understanding the particular problems the nonlinearity in your problem causes. Subsequent chapters will then teach you how to design and implement different kinds of nonlinear filters.",
"_____no_output_____"
],
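[
"# A minimal sketch (my addition, not from the chapter): evaluating the\n# falling-body drag model from the Introduction above. beta is a hypothetical\n# ballistic coefficient; units follow the equation as written.\nimport numpy as np\n\ndef fall_accel(x, x_dot, beta=500., g=9.8):\n    return 0.0034 * g * np.exp(-x / 22000.) * x_dot**2 / (2. * beta) - g\n\nprint(fall_accel(x=10000., x_dot=-300.))",
"_____no_output_____"
],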
[
"## The Problem with Nonlinearity\n\nThe mathematics of the Kalman filter is beautiful in part due to the Gaussian equation being so special. It is nonlinear, but when we add and multiply them we get another Gaussian as a result. That is very rare. $\\sin{x}*\\sin{y}$ does not yield a $\\sin$ as an output.\n\nWhat I mean by linearity may be obvious, but there are some subtleties. The mathematical requirements are twofold:\n\n* additivity: $f(x+y) = f(x) + f(y)$\n* homogeneity: $f(ax) = af(x)$\n\n\nThis leads us to say that a linear system is defined as a system whose output is linearly proportional to the sum of all its inputs. A consequence of this is that to be linear if the input is zero than the output must also be zero. Consider an audio amp - if I sing into a microphone, and you start talking, the output should be the sum of our voices (input) scaled by the amplifier gain. But if amplifier outputs a nonzero signal such as a hum for a zero input the additive relationship no longer holds. This is because you linearity requires that $amp(voice) = amp(voice + 0)$ This clearly should give the same output, but if amp(0) is nonzero, then\n\n$$\n\\begin{aligned}\namp(voice) &= amp(voice + 0) \\\\\n&= amp(voice) + amp(0) \\\\\n&= amp(voice) + non\\_zero\\_value\n\\end{aligned}\n$$\n\nwhich is clearly nonsense. Hence, an apparently linear equation such as\n\n$$L(f(t)) = f(t) + 1$$\n\nis not linear because $L(0) = 1$. Be careful!",
"_____no_output_____"
],
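[
"# Tiny numerical check (my addition) that L(f) = f + 1 violates both\n# linearity conditions.\ndef L(x):\n    return x + 1\n\nprint(L(2 + 3), 'vs', L(2) + L(3))   # additivity fails: 6 vs 7\nprint(L(4 * 2), 'vs', 4 * L(2))      # homogeneity fails: 9 vs 12",
"6 vs 7\n9 vs 12\n"
],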
[
"## An Intuitive Look at the Problem\n\nI particularly like the following way of looking at the problem, which I am borrowing from Dan Simon's *Optimal State Estimation* [[1]](#[1]). Consider a tracking problem where we get the range and bearing to a target, and we want to track its position. The reported distance is 50 km, and the reported angle is 90$^\\circ$. Assume that the errors in both range and angle are distributed in a Gaussian manner. Given an infinite number of measurements what is the expected value of the position?\n\nI have been recommending using intuition to gain insight, so let's see how it fares for this problem. We might reason that since the mean of the range will be 50 km, and the mean of the angle will be 90$^\\circ$, that the answer will be x=0 km, y=50 km.\n\nLet's plot that and find out. Here are 3000 points plotted with a normal distribution of the distance of 0.4 km, and the angle having a normal distribution of 0.35 radians. We compute the average of the all of the positions, and display it as a star. Our intuition is displayed with a large circle.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom numpy.random import randn\nimport matplotlib.pyplot as plt\n\nN = 3000\na = np.pi/2. + (randn(N) * 0.35)\nr = 50.0 + (randn(N) * 0.4)\nxs = r * np.cos(a)\nys = r * np.sin(a)\n\nplt.figure()\nplt.scatter(xs, ys, label='Sensor', color='k', marker='.', s=2)\nxs, ys = sum(xs)/N, sum(ys)/N\nplt.scatter(xs, ys, c='r', marker='*', s=200, label='Mean')\nplt.scatter(0, 50, c='k', marker='o', s=300, label='Intuition')\nplt.axis('equal')\nplt.legend();",
"_____no_output_____"
]
],
[
[
"We can see that out intuition failed us because the nonlinearity of the problem forced all of the errors to be biased in one direction. This bias, over many iterations, can cause the Kalman filter to diverge. Even if it doesn't diverge the solution will not be optimal. Linear approximations applied to nonlinear problems yields inaccurate results.",
"_____no_output_____"
],
[
"## The Effect of Nonlinear Functions on Gaussians\n\nGaussians are not closed under an arbitrary nonlinear function. Recall the equations of the Kalman filter - at each evolution we pass the Gaussian representing the state through the process function to get the Gaussian at time $k$. Our process function was always linear, so the output was always another Gaussian. Let's look at that on a graph. I will take an arbitrary Gaussian and pass it through the function $f(x) = 2x + 1$ and plot the result. We know how to do this analytically, but let's use sampling. I will generate 500,000 points with a normal distribution, pass them through $f(x)$, and plot the results. I do it this way because the next example will be nonlinear, and we will have no way to compute this analytically.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom numpy.random import normal\ngaussian = (0., 1.)\ndata = normal(loc=gaussian[0], scale=gaussian[1], size=500000)\nplt.figure()\nplt.hist(2*data + 1, 1000);",
"_____no_output_____"
]
],
[
[
"This is an unsurprising result. The result of passing the Gaussian through $f(x)=2x+1$ is another Gaussian centered around 1. Let's look at the input, nonlinear function, and output at once.",
"_____no_output_____"
]
],
[
[
"from kf_book.book_plots import set_figsize, figsize\nfrom kf_book.nonlinear_plots import plot_nonlinear_func\n\ndef g1(x):\n return 2*x+1\nplt.figure()\nplot_nonlinear_func(data, g1, gaussian)",
"_____no_output_____"
]
],
[
[
 I explain how to">
"> I explain how to plot Gaussians, and much more, in the Notebook *Computing_and_Plotting_PDFs* in the \nSupporting_Notebooks folder. You can also read it online [here](https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb)[1]\n\nThe plot labeled 'Input' is the histogram of the original data. This is passed through the function $f(x)=2x+1$ which is displayed in the chart on the bottom left. The red line shows how one value, $x=0$, is passed through the function. Each value from the input is passed through in the same way to the output function on the right. For the output I computed the mean by taking the average of all the points, and drew the results with the dotted blue line. A solid blue line shows the actual mean for the point $x=0$. The output looks like a Gaussian, and is in fact a Gaussian. We can see that the variance in the output is larger than the variance in the input, and the mean has been shifted from 0 to 1, which is what we would expect given the transfer function $f(x)=2x+1$. The $2x$ affects the variance, and the $+1$ shifts the mean. The computed mean, represented by the dotted blue line, is nearly equal to the actual mean. If we used more points in our computation we could get arbitrarily close to the actual value.\n\nNow let's look at a nonlinear function and see how it affects the probability distribution.",
"_____no_output_____"
]
],
[
[
"def g2(x):\n return (np.cos(3*(x/2 + 0.7))) * np.sin(0.3*x) - 1.6*x\n\nplt.figure()\nplot_nonlinear_func(data, g2, gaussian)",
"_____no_output_____"
]
],
[
[
"This result may be somewhat surprising to you. The function looks \"fairly\" linear, but the probability distribution of the output is completely different from a Gaussian. Recall the equations for multiplying two univariate Gaussians:\n\n$$\\begin{aligned}\n\\mu &=\\frac{\\sigma_1^2 \\mu_2 + \\sigma_2^2 \\mu_1} {\\sigma_1^2 + \\sigma_2^2} \\\\\n\\sigma &= \\frac{1}{\\frac{1}{\\sigma_1^2} + \\frac{1}{\\sigma_2^2}}\n\\end{aligned}$$\n\nThese equations do not hold for non-Gaussians, and certainly do not hold for the probability distribution shown in the 'Output' chart above. \n\nThink of what this implies for the Kalman filter algorithm of the previous chapter. All of the equations assume that a Gaussian passed through the process function results in another Gaussian. If this is not true then all of the assumptions and guarantees of the Kalman filter do not hold. Let's look at what happens when we pass the output back through the function again, simulating the next step time step of the Kalman filter.",
"_____no_output_____"
]
],
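[
[
"# A short sketch (my addition) implementing the univariate Gaussian product\n# equations above; the returned tuple is (mean, variance).\ndef gaussian_multiply(g1, g2):\n    mu1, var1 = g1\n    mu2, var2 = g2\n    mean = (var1*mu2 + var2*mu1) / (var1 + var2)\n    variance = 1. / (1./var1 + 1./var2)\n    return (mean, variance)\n\nprint(gaussian_multiply((10., 1.), (11., 2.)))",
"(10.333333333333334, 0.6666666666666666)\n"
]
],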
[
[
"y = g2(data)\ngaussian2 = (np.mean(y), np.var(y))\nplt.figure()\nplot_nonlinear_func(y, g2, gaussian2)",
"_____no_output_____"
]
],
[
[
"As you can see the probability function is further distorted from the original Gaussian. However, the graph is still somewhat symmetric around x=0, let's see what the mean is.",
"_____no_output_____"
]
],
[
[
"print('input mean, variance: %.4f, %.4f' % \n (np.mean(data), np.var(data)))\nprint('output mean, variance: %.4f, %.4f' % \n (np.mean(y), np.var(y)))",
"input mean, variance: 0.0008, 0.9971\noutput mean, variance: -0.1258, 2.3978\n"
]
],
[
[
"Let's compare that to the linear function that passes through (-2,3) and (2,-3), which is very close to the nonlinear function we have plotted. Using the equation of a line we have\n\n$$m=\\frac{-3-3}{2-(-2)}=-1.5$$",
"_____no_output_____"
]
],
[
[
"def g3(x): \n return -1.5 * x\n\nplt.figure()\nplot_nonlinear_func(data, g3, gaussian)\nout = g3(data)\nprint('output mean, variance: %.4f, %.4f' % \n (np.mean(out), np.var(out)))",
"_____no_output_____"
]
],
[
[
"Although the shapes of the output are very different, the mean and variance of each are almost the same. This may lead us to reasoning that perhaps we can ignore this problem if the nonlinear equation is 'close to' linear. To test that, we can iterate several times and then compare the results.",
"_____no_output_____"
]
],
[
[
"out = g3(data)\nout2 = g2(data)\n\nfor i in range(10):\n out = g3(out)\n out2 = g2(out2)\nprint('linear output mean, variance: %.4f, %.4f' % \n (np.average(out), np.std(out)**2))\nprint('nonlinear output mean, variance: %.4f, %.4f' % \n (np.average(out2), np.std(out2)**2))",
"linear output mean, variance: -0.0657, 7459.8773\nnonlinear output mean, variance: -9.3367, 30398.0469\n"
]
],
[
[
"Unfortunately the nonlinear version is not stable. It drifted significantly from the mean of 0, and the variance is half an order of magnitude larger.\n\nI minimized the issue by using a function that is quite close to a straight line. What happens if the function is $y(x)=x^2$?",
"_____no_output_____"
]
],
[
[
"def g3(x): \n return -x*x\nx0 = (1, 1)\ndata = normal(loc=x0[0], scale=x0[1], size=500000)\n\nplt.figure()\nplot_nonlinear_func(data, g3, gaussian=x0)",
"_____no_output_____"
]
],
[
[
"Despite the curve being smooth and reasonably straight at $x=1$ the probability distribution of the output doesn't look anything like a Gaussian and the computed mean of the output is quite different than the value computed directly. This is not an unusual function - a ballistic object moves in a parabola, and this is the sort of nonlinearity your filter will need to handle. If you recall we've tried to track a ball and failed miserably. This graph should give you insight into why the filter performed so poorly.",
"_____no_output_____"
],
[
"## A 2D Example",
"_____no_output_____"
],
[
"It is hard to look at probability distributions and reason about what will happen in a filter. So let's think about tracking an aircraft with radar. The estimate may have a covariance that looks like this:",
"_____no_output_____"
]
],
[
[
"import kf_book.nonlinear_internal as nonlinear_internal\n\nnonlinear_internal.plot1()",
"_____no_output_____"
]
],
[
[
"What happens when we try to linearize this problem? The radar gives us a range to the aircraft. Suppose the radar is directly under the aircraft (x=10) and the next measurement states that the aircraft is 3 miles away (y=3). The positions that could match that measurement form a circle with radius 3 miles, like so.",
"_____no_output_____"
]
],
[
[
"nonlinear_internal.plot2()",
"_____no_output_____"
]
],
[
[
"We can see by inspection that the probable position of the aircraft is somewhere near x=11.4, y=2.7 because that is where the covariance ellipse and range measurement overlap. But the range measurement is nonlinear so we have to linearize it. We haven't covered this material yet, but the Extended Kalman filter will linearize at the last position of the aircraft - (10,2). At x=10 the range measurement has y=3, and so we linearize at that point.",
"_____no_output_____"
]
],
[
[
"nonlinear_internal.plot3()",
"_____no_output_____"
]
],
[
[
"Now we have a linear representation of the problem (literally a straight line) which we can solve. Unfortunately you can see that the intersection of the line and the covariance ellipse is a long way from the actual aircraft position.",
"_____no_output_____"
]
],
[
[
"nonlinear_internal.plot4()",
"_____no_output_____"
]
],
[
[
"That sort of error often leads to disastrous results. The error in this estimate is large. But in the next innovation of the filter that very bad estimate will be used to linearize the next radar measurement, so the next estimate is likely to be markedly worse than this one. After only a few iterations the Kalman filter will diverge, and start producing results that have no correspondence to reality.\n\nThis covariance ellipse spans miles. I exaggerated the size to illustrate the difficulties of highly nonlinear systems. In real radar tracking problems the nonlinearity is usually not that bad, but the errors will still accumulate. Other systems you may be work could have this amount of nonlinearity - this was not an exaggeration only to make a point. You will always be battling divergence when working with nonlinear systems.",
"_____no_output_____"
],
[
"## The Algorithms",
"_____no_output_____"
],
[
"You may be impatient to solve a specific problem, and wondering which filter to use. I will quickly survey the options. The subsequent chapters are somewhat independent of each other, and you can fruitfully skip around, though I recommend reading linearly if you truly want to master all of the material. \n\nThe workhorses of nonlinear filters are the *linearized Kalman filter* and *extended Kalman filter* (EKF). These two techniques were invented shortly after Kalman published his paper and they have been the main techniques used since then. The flight software in airplanes, the GPS in your car or phone almost certainly use one of these techniques. \n\nHowever, these techniques are extremely demanding. The EKF linearizes the differential equations at one point, which requires you to find a solution to a matrix of partial derivatives (a Jacobian). This can be difficult or impossible to do analytically. If impossible, you have to use numerical techniques to find the Jacobian, but this is expensive computationally and introduces more error into the system. Finally, if the problem is quite nonlinear the linearization leads to a lot of error being introduced in each step, and the filters frequently diverge. You can not throw some equations into some arbitrary solver and expect to to get good results. It's a difficult field for professionals. I note that most Kalman filtering textbooks merely gloss over the EKF despite it being the most frequently used technique in real world applications. \n\nRecently the field has been changing in exciting ways. First, computing power has grown to the point that we can use techniques that were once beyond the ability of a supercomputer. These use *Monte Carlo* techniques - the computer generates thousands to tens of thousands of random points and tests all of them against the measurements. It then probabilistically kills or duplicates points based on how well they match the measurements. A point far away from the measurement is unlikely to be retained, whereas a point very close is quite likely to be retained. After a few iterations there is a clump of particles closely tracking your object, and a sparse cloud of points where there is no object.\n\nThis has two benefits. First, the algorithm is robust even for extremely nonlinear problems. Second, the algorithm can track arbitrarily many objects at once - some particles will match the behavior on one object, and other particles will match other objects. So this technique is often used to track automobile traffic, people in crowds, and so on. \n\nThe costs should be clear. It is computationally expensive to test tens of thousands of points for every step in the filter. But modern CPUs are very fast, and this is a good problem for GPUs because the part of the algorithm is parallelizable. Another cost is that the answer is not mathematical. With a Kalman filter my covariance matrix gives me important information about the amount of error in the estimate. The particle filter does not give me a rigorous way to compute this. Finally, the output of the filter is a cloud of points; I then have to figure out how to interpret it. Usually you will be doing something like taking the mean and standard deviations of the points, but this is a difficult problem. 
There are still many points that do not 'belong' to a tracked object, so you first have to run some sort of clustering algorithm to first find the points that seem to be tracking an object, and then you need another algorithm to produce an state estimate from those points. None of this is intractable, but it is all quite computationally expensive. \n\n\nFinally, we have a new algorithm called the *unscented Kalman filter* (UKF). It does not require you to find analytic solutions to nonlinear equations, and yet almost always performs better than the EKF. It does well with nonlinear problems - problems where the EKF has significant difficulties. Designing the filter is extremely easy. Some will say the jury is still out on the UKF, but to my mind the UKF is superior in almost every way to the EKF. I suggest that the UKF should be the starting point for any implementation, especially if you are not a Kalman filter professional with a graduate degree in control theory. The main downside is that the UKF can be a few times slower than the EKF, but this really depends on whether the EKF solves the Jacobian analytically or numerically. If numerically the UKF is almost certainly faster. It has not been proven (and probably it cannot be proven) that the UKF always yields more accurate results than the EKF. In practice it almost always does, often significantly so. It is very easy to understand and implement, and I strongly suggest this filter as your starting point. ",
"_____no_output_____"
],
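[
"# Minimal sketch (my addition): a basic UKF with FilterPy, as mentioned\n# above. fx and hx are placeholder models, not from this chapter.\nimport numpy as np\nfrom filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints\n\ndef fx(x, dt):\n    # placeholder process model: constant velocity\n    return np.array([x[0] + dt*x[1], x[1]])\n\ndef hx(x):\n    # placeholder measurement model: observe position only\n    return np.array([x[0]])\n\npoints = MerweScaledSigmaPoints(n=2, alpha=.1, beta=2., kappa=1.)\nukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=1., fx=fx, hx=hx, points=points)\nukf.predict()\nukf.update(np.array([1.2]))\nprint(ukf.x)",
"_____no_output_____"
],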
[
"## Summary",
"_____no_output_____"
],
[
"The world is nonlinear, but we only really know how to solve linear problems. This introduces significant difficulties for Kalman filters. We've looked at how nonlinearity affects filtering in 3 different but equivalent ways, and I've given you a brief summary of the major appoaches: the linearized Kalman filter, the extended Kalman filter, the Unscented Kalman filter, and the particle filter. \n\nUntil recently the linearized Kalman filter and EKF have been the standard way to solve these problems. They are very difficult to understand and use, and they are also potentially very unstable. \n\nRecent developments have offered what are to my mind superior approaches. The UKF dispenses with the need to find solutions to partial differential equations, yet it is also usually more accurate than the EKF. It is easy to use and understand. I can get a basic UKF going in a few minutes by using FilterPy. The particle filter dispenses with mathimatical modeling completely in favor of a Monte Carlo technique of generating a random cloud of thousands of points. It runs slowly, but it can solve otherwise intractable problems with relative ease.\n\nI get more email about the EKF than anything else; I suspect that this is because most treatments in books, papers, and on the internet use the EKF. If your interest is in mastering the field of course you will want to learn about the EKF. But if you are just trying to get good results I point you to the UKF and particle filter first. They are much easier to implement, understand, and use, and they are typically far more stable than the EKF. \n\nSome will quibble with that advice. A lot of recent publications are devoted to a comparison of the EKF, UKF, and perhaps a few other choices for a given problem. Do you not need to perform a similar comparison for your problem? If you are sending a rocket to Mars then of course you do. You will be balancing issues such as accuracy, round off errors, divergence, mathematical proof of correctness, and the computational effort required. I can't imagine not knowing the EKF intimately. \n\nOn the other hand the UKF works spectacularly! I use it at work for real world applications. I mostly haven't even tried to implement an EKF for these applications because I can verify that the UKF is working fine. Is it possible that I might eke out another 0.2% of performance from the EKF in certain situations? Sure! Do I care? No! I completely understand the UKF implementation, it is easy to test and verify, I can pass the code to others and be confident that they can understand and modify it, and I am not a masochist that wants to battle difficult equations when I already have a working solution. If the UKF or particle filters start to perform poorly for some problem then I will turn other to techniques, but not before then. And realistically, the UKF usually provides substantially better performance than the EKF over a wide range of problems and conditions. If \"really good\" is good enough I'm going to spend my time working on other problems. \n\nI'm belaboring this point because in most textbooks the EKF is given center stage, and the UKF is either not mentioned at all or just given a 2 page gloss that leaves you completely unprepared to use the filter. The UKF is still relatively new, and it takes time to write new editions of books. At the time many books were written the UKF was either not discovered yet, or it was just an unproven but promising curiosity. 
But I am writing this now, the UKF has had enormous success, and it needs to be in your toolkit. That is what I will spend most of my effort trying to teach you. ",
"_____no_output_____"
],
[
"## References\n\n<A name=\"[1]\">[1]</A> https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a106f375b70b2fd7d2a4b5da1a18052d7b3f039
| 61,207 |
ipynb
|
Jupyter Notebook
|
section_3/mean_field/analyse_results.ipynb
|
g-torr/uncovering-non-eq
|
fb37331373acbbbae7a40e45900ea10b47e248e4
|
[
"CC-BY-4.0"
] | null | null | null |
section_3/mean_field/analyse_results.ipynb
|
g-torr/uncovering-non-eq
|
fb37331373acbbbae7a40e45900ea10b47e248e4
|
[
"CC-BY-4.0"
] | null | null | null |
section_3/mean_field/analyse_results.ipynb
|
g-torr/uncovering-non-eq
|
fb37331373acbbbae7a40e45900ea10b47e248e4
|
[
"CC-BY-4.0"
] | null | null | null | 86.572843 | 13,992 | 0.790383 |
[
[
[
"# Fig. 6\nThis notebook load and combine the output files produced by the script `power.py` to produce the figure shown in the paper.\n\nOutput from ```power.py``` are saved in pickle files with name `\"theta_\"+str(theta)+':_eta_'+str(eta)+specifier+\".pkl\"`.\nMultiple outputs at the same theta are saved with different unique identifiers.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\nimport pickle\nfrom matplotlib.colors import LogNorm\nimport os\nimport re\nimport sys\nsys.path.insert(0, \"../../lib\") # add the library folder to the path I look for modules\nsys.path.insert(0, \"../heterogeneity/\")\nimport latexify\nfrom utilities import make_network\n#import random_regular\n#import dynamical_cavity as cavity",
"_____no_output_____"
],
[
"\ndef load_obj(theta,eta,specifier = ''):\n name='theta_'+str(theta)+'_eta_'+str(eta)+specifier+'.pkl'\n with open(directory+'/data/dic-' + name , 'rb') as f:\n return pickle.load(f)\ndef load_data(directory):\n filenames=os.listdir(directory+\"/data\")\n pattern = re.compile(\"dic-theta_\\d*\\.\\d_eta_\\d*\\.\\d|\\d.pkl\")\n\n dictnames=[name for name in filenames if pattern.match(name)]# select only dictionary files\n print(' Results are available in the files:')\n params = []\n for filename in dictnames:\n params+=[filename.lstrip('dic-theta_').rstrip('.pkl').split('_eta_')]\n params = pd.DataFrame(params,columns= ['theta','eta'],dtype=float).sort_values(by = ['theta','eta'])\n latexify.latexify(columns = 2)\n mean_mean = []\n mean_cav = []\n for theta, eta in params.values:\n dic = load_obj(theta,eta)\n mean_mean+= [dic['mean_mean']]\n mean_cav += [dic['mean_cav']]\n mean_cav = np.array(mean_cav)\n mean_mean = np.array(mean_mean)\n return params,mean_cav,mean_mean",
"_____no_output_____"
],
[
"directory = 'gamma=3'\nparams,mean_cav,mean_mean = load_data(directory)\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\netas = 1-np.array(list(params['eta'][params['theta']==0]))\nplt.plot(etas,mean_cav[params['theta']==0],'--^',c = colors[0],label = 'cav.')\nplt.plot(etas,mean_mean[params['theta']==0],'s',c = colors[0],mfc= 'w',alpha = 0.5,label = 'nMF')\netas = 1-np.array(list(params['eta'][params['theta']==0.5]))\nplt.plot(etas,mean_cav[params['theta']==0.5],'--^',c = colors[2])\nplt.plot(etas,mean_mean[params['theta']==0.5],'s',c = colors[2],mfc= 'w',alpha = 0.5)\nplt.text(0.65,mean_cav[[(params['theta']==0)&(params['eta']==0.5)]]+0.0,'$\\\\vartheta = 0$',c = colors[0],fontsize = 12)\nplt.text(0.65,mean_cav[[(params['theta']==0.5)&(params['eta']==0.5)]]-0.1, '$\\\\vartheta/J = 0.5$',c = colors[2],fontsize = 12)\nplt.ylabel('$\\\\langle P\\\\rangle$',fontsize = 13)\nplt.xlabel('$\\\\eta$',fontsize = 13)\nplt.legend(ncol = 2,fontsize = 10.5,numpoints = 1,columnspacing=1)\nplt.tight_layout()\n#plt.savefig(directory+'_mean_field.pdf')",
" Results are available in the files:\n"
],
[
"params,mean_cav,mean_mean = load_data(directory)\nparams",
" Results are available in the files:\n"
],
[
"directory = 'kin=3'\nparams,mean_cav,mean_mean = load_data(directory)\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\nparams\nlist(params['eta'][params['theta']==0.5])",
" Results are available in the files:\n"
],
[
"etas = 1-np.array(list(params['eta'][params['theta']==0]))\nplt.plot(etas,mean_cav[params['theta']==0],'--^',c = colors[0],label = 'cav.')\nplt.plot(etas,mean_mean[params['theta']==0],'s',c = colors[0],mfc= 'w',alpha = 0.5,label = 'nMF')\netas = 1-np.array(list(params['eta'][params['theta']==0.5]))\nplt.plot(etas,mean_cav[params['theta']==0.5],'--^',c = colors[2])\nplt.plot(etas,mean_mean[params['theta']==0.5],'s',c = colors[2],mfc= 'w',alpha = 0.5)\nplt.text(0.35,mean_cav[[(params['theta']==0)&(params['eta']==0.3)]]-0.15,'$\\\\vartheta = 0$',c = colors[0],fontsize = 12)\nplt.text(0.35,mean_cav[[(params['theta']==0.5)&(params['eta']==0.3)]]-0.52,'$\\\\vartheta/J = 0.5$',c = colors[2],fontsize = 12)\nplt.ylabel('$\\\\langle P\\\\rangle$',fontsize = 13)\nplt.xlabel('$\\\\eta$',fontsize = 13)\nplt.legend(ncol = 2,fontsize = 10.5,numpoints = 1)\nplt.tight_layout()\n#plt.savefig(directory+'_mean_field.pdf')",
"<ipython-input-14-19022af1e2e0>:7: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n plt.text(0.35,mean_cav[[(params['theta']==0)&(params['eta']==0.3)]]-0.15,'$\\\\vartheta = 0$',c = colors[0],fontsize = 12)\n<ipython-input-14-19022af1e2e0>:8: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n plt.text(0.35,mean_cav[[(params['theta']==0.5)&(params['eta']==0.3)]]-0.52,'$\\\\vartheta/J = 0.5$',c = colors[2],fontsize = 12)\n"
],
[
"prop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\ncolors[0]",
"_____no_output_____"
]
],
[
[
"$$\n\\frac{1}{2}\\left[ 1+P_j \\tanh\\frac{\\beta(\\pm J-\\theta)}{2}-(1-P_j)\\tanh\\frac{\\beta(\\theta)}{2}\\right]\n$$",
"_____no_output_____"
]
],
[
[
"def mean_field(P, js, T, interaction, N, Ks, theta=0, precision=1e-4, max_iter=50):\n \"\"\"\n Run the dynamical cavity with recursive calls.\n :param P_init: list of floats of length N\n :param T: float\n :param J: sparse.csr_matrix\n :param theta: float (in units of 1/sqrt(<K>))\n :param max_iter: int\n :param precision: float\n :return: P_new it is a list of dimensions N which contains the probability of active state for each gene.\n In order to help storing, couplings are taken to be +-1, bias is then rescaled by 1/sqrt(<|J_{ij}|>)\n \"\"\"\n avg_degree = np.mean(Ks)\n P_new = np.zeros(N)\n for count in range(max_iter):\n for i,(inter,j) in enumerate(zip(interaction,js)):\n P_new[i] = 0.5*(1+np.tanh((sum(inter*P[j])-theta)/2/T/ np.sqrt(avg_degree)))\n if max(np.abs(np.array(P) - np.array(P_new))) < precision:\n P = P_new\n print('finishing after', count, 'iterations')\n break\n if count == max_iter-1:\n print(\"Maximum number of repetition reached, but target precision has not been reached. \")\n P = P_new.copy()\n\n return P\n",
"_____no_output_____"
],
[
"N = 100000\ngamma =3#1.81\nbias = 0.3 #0.379\n#J = make_network(N,gamma,bias)\nJ = random_regular.make_network(N,3,bias)\nN = J.shape[0]\nJ.data = np.where(J.data > 0, 1, -1)\nN = J.shape[0]\nJ_transpose = J.transpose().tolil()\njs = J_transpose.rows # list of list, structure is [el[i]] where el[i]\n# is the list of predecessors of gene i ( the index)\ninteraction = J_transpose.data # list of list, structure is [el[i]]\n# where el[i] is the list of predecessors of gene i (interaction strength with sign)\nKs = np.array([len(neigh) for neigh in js]) # in degree of each gene\nJ0 = 1/np.sqrt(Ks.mean())\n",
"_____no_output_____"
]
],
[
[
"Comparison mean field vs hetherogeneous. Mean field is:\n$$\nP_i = \\frac{1}{2}\\left(1+\\tanh \\frac{\\beta}{2}\\sum_j J_{ij}P_j \\right)\n$$\n",
"_____no_output_____"
]
],
[
[
"theta=0.\nT=0.2\n%time P=cavity.cavity(np.random.rand(N), js, T*J0, interaction, N, Ks, theta*J0,J0)\n%time P_mean=mean_field(np.random.rand(N), js, T*J0, interaction, N, Ks, theta*J0)",
"CPU times: user 6.41 s, sys: 0 ns, total: 6.41 s\nWall time: 6.41 s\nfinishing after 31 iterations\nCPU times: user 20.4 s, sys: 398 ms, total: 20.8 s\nWall time: 20.4 s\n"
],
[
"latexify.latexify(columns = 2)\navg_degree = np.mean(Ks)\ndef plus(P):\n return 0.5*(1-np.tanh(theta/np.sqrt(avg_degree)/2/T)+P*(np.tanh((1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))\ndef minus(P):\n return 0.5*(1-np.tanh(theta/np.sqrt(avg_degree)/2/T)+P*(np.tanh((-1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))\n\nh,b = np.histogram(P_mean,np.linspace(0,1,100),density=True)\nplt.plot(b[:-1],h,label = 'nMF')\nh,b = np.histogram(P,bins = b,density=True)\nplt.plot(b[:-1],h,'-',alpha = 1.,label = 'cav.')\nplt.legend(loc = 'upper center',ncol = 2,fontsize = 10.5)\n\n'''\nx = (1-np.tanh(theta/np.sqrt(avg_degree)/2/T))/(2-(np.tanh((1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))\ny = (1-np.tanh(theta/np.sqrt(avg_degree)/2/T))/(2-(np.tanh((-1-theta)/np.sqrt(avg_degree)/2/T)+np.tanh(theta/np.sqrt(avg_degree)/2/T)))\n\nplt.axvline(x)\nplt.axvline(y, ls = ':', c= 'r')\nplt.axvline(plus(y), ls = ':', c= 'r')\nplt.axvline(minus(x),ls = '--')\nplt.axvline(minus(minus(x)),ls = '-.')\nplt.axvline(plus(minus(x)),ls = '-.')\n'''\nplt.semilogy()\nplt.xlabel('$P$',fontsize = 13)\nplt.ylabel('$\\\\Pi(P)$',fontsize = 13)\nplt.tight_layout()\n#plt.savefig('random_regular_comparison.pdf')\n#plt.savefig('power_law_comparison.pdf')\nnp.mean(P),np.mean(P_mean),1-abs(np.mean(P)/np.mean(P_mean))",
"_____no_output_____"
],
[
"1/np.sqrt(np.mean(Ks)),1/np.sqrt(3)",
"_____no_output_____"
],
[
"_ = plt.hist(Ks,100)\n",
"_____no_output_____"
],
[
"params",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a107798ce41b02e61ab159c6e3f58a02c42aff7
| 30,372 |
ipynb
|
Jupyter Notebook
|
examples/add_border_fig.ipynb
|
Rob217/nice-figures
|
5509e9757937892ff3cd0e2a7e22416d9fb01e49
|
[
"MIT"
] | 2 |
2020-06-19T21:55:15.000Z
|
2020-09-14T20:00:53.000Z
|
examples/add_border_fig.ipynb
|
Rob217/nice-figures
|
5509e9757937892ff3cd0e2a7e22416d9fb01e49
|
[
"MIT"
] | 8 |
2020-07-14T20:16:50.000Z
|
2020-09-08T18:39:10.000Z
|
examples/add_border_fig.ipynb
|
Rob217/nice-figures
|
5509e9757937892ff3cd0e2a7e22416d9fb01e49
|
[
"MIT"
] | null | null | null | 349.103448 | 27,984 | 0.93606 |
[
[
[
"Testing add_border()\n\nAdd_border shows where the boundaries of a figure are. This is useful if it is unclear where the boundaries are and you are trying to optimize the location of different elements in the figure or making sure that nothing gets cut off.",
"_____no_output_____"
]
],
[
[
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom nice_figures import *\n\nload_style()\n\nfig = plt.figure()\nax = plt.axes([0.1, 0.07, 0.8, 0.8])\nax.set_xlabel('This label overlaps border')\nax.set_ylabel('This label sits inside border')\n\nplt.savefig(os.path.join('figs', 'add_border_fig.pdf'))\n\nadd_border()\n\nplt.show()",
"C:\\Users\\rbettles\\PythonEnvironments\\packaging_tutorial_env\\lib\\site-packages\\ipykernel_launcher.py:14: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n \nC:\\Users\\rbettles\\PythonEnvironments\\packaging_tutorial_env\\lib\\site-packages\\IPython\\core\\pylabtools.py:132: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n fig.canvas.print_figure(bytes_io, **kw)\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a10868e60b601e184f11a00057e6c995d3813a2
| 5,026 |
ipynb
|
Jupyter Notebook
|
examples/notebook/contrib/coins_grid.ipynb
|
jspricke/or-tools
|
45770b833997f827d322e929b1ed4781c4e60d44
|
[
"Apache-2.0"
] | 1 |
2020-07-18T16:24:09.000Z
|
2020-07-18T16:24:09.000Z
|
examples/notebook/contrib/coins_grid.ipynb
|
jspricke/or-tools
|
45770b833997f827d322e929b1ed4781c4e60d44
|
[
"Apache-2.0"
] | 1 |
2021-02-23T10:22:55.000Z
|
2021-02-23T13:57:14.000Z
|
examples/notebook/contrib/coins_grid.ipynb
|
jspricke/or-tools
|
45770b833997f827d322e929b1ed4781c4e60d44
|
[
"Apache-2.0"
] | 1 |
2021-03-16T14:30:59.000Z
|
2021-03-16T14:30:59.000Z
| 38.661538 | 87 | 0.578591 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a10879c67fe49dfef406d8b7b1108d8dd906697
| 32,725 |
ipynb
|
Jupyter Notebook
|
machine_translation_tf.ipynb
|
ap-nlp-research/aind2-nlp-capstone
|
128cd3da6a6bcb85801c83bf510a8f094bb32f64
|
[
"MIT"
] | null | null | null |
machine_translation_tf.ipynb
|
ap-nlp-research/aind2-nlp-capstone
|
128cd3da6a6bcb85801c83bf510a8f094bb32f64
|
[
"MIT"
] | null | null | null |
machine_translation_tf.ipynb
|
ap-nlp-research/aind2-nlp-capstone
|
128cd3da6a6bcb85801c83bf510a8f094bb32f64
|
[
"MIT"
] | null | null | null | 43.344371 | 614 | 0.607059 |
[
[
[
"# Artificial Intelligence Nanodegree\n## Machine Translation Project\nIn this notebook, sections that end with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully!\n\n## Introduction\nIn this notebook, you will build a deep neural network that functions as part of an end-to-end machine translation pipeline. Your completed pipeline will accept English text as input and return the French translation.\n\n- **Preprocess** - You'll convert text to sequence of integers.\n- **Models** Create models which accepts a sequence of integers as input and returns a probability distribution over possible translations. After learning about the basic types of neural networks that are often used for machine translation, you will engage in your own investigations, to design your own model!\n- **Prediction** Run the model on English text.",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/ap-nlp-research/aind2-nlp-capstone.git",
"UsageError: Line magic function `%autoreload` not found.\n"
],
[
"import os\nfrom tqdm import tqdm, tqdm_notebook\nimport numpy as np\nfrom tensorflow import keras\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import GRUCell",
"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\n"
]
],
[
[
"### Verify access to the GPU\nThe following test applies only if you expect to be using a GPU, e.g., while running in a Udacity Workspace or using an AWS instance with GPU support. Run the next cell, and verify that the device_type is \"GPU\".\n- If the device is not GPU & you are running from a Udacity Workspace, then save your workspace with the icon at the top, then click \"enable\" at the bottom of the workspace.\n- If the device is not GPU & you are running from an AWS instance, then refer to the cloud computing instructions in the classroom to verify your setup steps.",
"_____no_output_____"
]
],
[
[
"from tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())",
"[name: \"/device:CPU:0\"\ndevice_type: \"CPU\"\nmemory_limit: 268435456\nlocality {\n}\nincarnation: 18321103450502237962\n]\n"
]
],
[
[
"## Dataset\nWe begin by investigating the dataset that will be used to train and evaluate your pipeline. The most common datasets used for machine translation are from [WMT](http://www.statmt.org/). However, that will take a long time to train a neural network on. We'll be using a dataset we created for this project that contains a small vocabulary. You'll be able to train your model in a reasonable time with this dataset.\n### Load Data\nThe data is located in `data/small_vocab_en` and `data/small_vocab_fr`. The `small_vocab_en` file contains English sentences with their French translations in the `small_vocab_fr` file. Load the English and French data from these files from running the cell below.",
"_____no_output_____"
]
],
[
[
"def load_data(path):\n \"\"\"\n Load dataset\n \"\"\"\n input_file = os.path.join(path)\n with open(input_file, \"r\") as f:\n data = f.read()\n\n return data.split('\\n')\n# Load English data\nenglish_sentences = load_data('aind2-nlp-capstone/data/small_vocab_en')\n# Load French data\nfrench_sentences = load_data('aind2-nlp-capstone/data/small_vocab_fr')\n\nprint('Dataset Loaded')",
"Dataset Loaded\n"
]
],
[
[
"### Files\nEach line in `small_vocab_en` contains an English sentence with the respective translation in each line of `small_vocab_fr`. View the first two lines from each file.",
"_____no_output_____"
]
],
[
[
"for sample_i in range(2):\n print('small_vocab_en Line {}: {}'.format(sample_i + 1, english_sentences[sample_i]))\n print('small_vocab_fr Line {}: {}'.format(sample_i + 1, french_sentences[sample_i]))",
"small_vocab_en Line 1: new jersey is sometimes quiet during autumn , and it is snowy in april .\nsmall_vocab_fr Line 1: new jersey est parfois calme pendant l' automne , et il est neigeux en avril .\nsmall_vocab_en Line 2: the united states is usually chilly during july , and it is usually freezing in november .\nsmall_vocab_fr Line 2: les états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\n"
]
],
[
[
"From looking at the sentences, you can see they have been preprocessed already. The puncuations have been delimited using spaces. All the text have been converted to lowercase. This should save you some time, but the text requires more preprocessing.\n### Vocabulary\nThe complexity of the problem is determined by the complexity of the vocabulary. A more complex vocabulary is a more complex problem. Let's look at the complexity of the dataset we'll be working with.",
"_____no_output_____"
],
[
"For comparison, _Alice's Adventures in Wonderland_ contains 2,766 unique words of a total of 15,500 words.\n## Preprocess\nFor this project, you won't use text data as input to your model. Instead, you'll convert the text into sequences of integers using the following preprocess methods:\n1. Tokenize the words into ids\n2. Add padding to make all the sequences the same length.\n\nTime to start preprocessing the data...\n### Tokenize (IMPLEMENTATION)\nFor a neural network to predict on text data, it first has to be turned into data it can understand. Text data like \"dog\" is a sequence of ASCII character encodings. Since a neural network is a series of multiplication and addition operations, the input data needs to be number(s).\n\nWe can turn each character into a number or each word into a number. These are called character and word ids, respectively. Character ids are used for character level models that generate text predictions for each character. A word level model uses word ids that generate text predictions for each word. Word level models tend to learn better, since they are lower in complexity, so we'll use those.\n\nTurn each sentence into a sequence of words ids using Keras's [`Tokenizer`](https://keras.io/preprocessing/text/#tokenizer) function. Use this function to tokenize `english_sentences` and `french_sentences` in the cell below.\n\nRunning the cell will run `tokenize` on sample data and show output for debugging.",
"_____no_output_____"
]
],
[
[
"def tokenize(x):\n \"\"\"\n Tokenize x\n :param x: List of sentences/strings to be tokenized\n :return: Tuple of (tokenized x data, tokenizer used to tokenize x)\n \"\"\"\n x_tk = keras.preprocessing.text.Tokenizer()\n x_tk.fit_on_texts(x)\n return x_tk.texts_to_sequences(x), x_tk\n\n\n# Tokenize Example output\ntext_sentences = [\n 'The quick brown fox jumps over the lazy dog .',\n 'By Jove , my quick study of lexicography won a prize .',\n 'This is a short sentence .']\ntext_tokenized, text_tokenizer = tokenize(text_sentences)\nprint(text_tokenizer.word_index)\nprint()\nfor sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):\n print('Sequence {} in x'.format(sample_i + 1))\n print(' Input: {}'.format(sent))\n print(' Output: {}'.format(token_sent))",
"{'the': 1, 'quick': 2, 'a': 3, 'brown': 4, 'fox': 5, 'jumps': 6, 'over': 7, 'lazy': 8, 'dog': 9, 'by': 10, 'jove': 11, 'my': 12, 'study': 13, 'of': 14, 'lexicography': 15, 'won': 16, 'prize': 17, 'this': 18, 'is': 19, 'short': 20, 'sentence': 21}\n\nSequence 1 in x\n Input: The quick brown fox jumps over the lazy dog .\n Output: [1, 2, 4, 5, 6, 7, 1, 8, 9]\nSequence 2 in x\n Input: By Jove , my quick study of lexicography won a prize .\n Output: [10, 11, 12, 2, 13, 14, 15, 16, 3, 17]\nSequence 3 in x\n Input: This is a short sentence .\n Output: [18, 19, 3, 20, 21]\n"
]
],
[
[
"### Padding (IMPLEMENTATION)\nWhen batching the sequence of word ids together, each sequence needs to be the same length. Since sentences are dynamic in length, we can add padding to the end of the sequences to make them the same length.\n\nMake sure all the English sequences have the same length and all the French sequences have the same length by adding padding to the **end** of each sequence using Keras's [`pad_sequences`](https://keras.io/preprocessing/sequence/#pad_sequences) function.",
"_____no_output_____"
]
],
[
[
"def pad(x, length=None):\n \"\"\"\n Pad x\n :param x: List of sequences.\n :param length: Length to pad the sequence to. If None, use length of longest sequence in x.\n :return: Padded numpy array of sequences\n \"\"\"\n if length is None:\n length = max([len(sentence) for sentence in x])\n\n return keras.preprocessing.sequence.pad_sequences(x, maxlen=length, padding='post')\n\n# Pad Tokenized output\ntest_pad = pad(text_tokenized)\nfor sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):\n print('Sequence {} in x'.format(sample_i + 1))\n print(' Input: {}'.format(np.array(token_sent)))\n print(' Output: {}'.format(pad_sent))",
"Sequence 1 in x\n Input: [1 2 4 5 6 7 1 8 9]\n Output: [1 2 4 5 6 7 1 8 9 0]\nSequence 2 in x\n Input: [10 11 12 2 13 14 15 16 3 17]\n Output: [10 11 12 2 13 14 15 16 3 17]\nSequence 3 in x\n Input: [18 19 3 20 21]\n Output: [18 19 3 20 21 0 0 0 0 0]\n"
]
],
[
[
"### Preprocess Pipeline\nYour focus for this project is to build neural network architecture, so we won't ask you to create a preprocess pipeline. Instead, we've provided you with the implementation of the `preprocess` function.",
"_____no_output_____"
]
],
[
[
"def preprocess(x, y):\n \"\"\"\n Preprocess x and y\n :param x: Feature List of sentences\n :param y: Label List of sentences\n :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)\n \"\"\"\n preprocess_x, x_tk = tokenize(x)\n preprocess_y, y_tk = tokenize(y)\n\n preprocess_x = pad(preprocess_x)\n preprocess_y = pad(preprocess_y)\n\n return preprocess_x, preprocess_y, x_tk, y_tk\n\npreproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\\\n preprocess(english_sentences, french_sentences)\n \nmax_english_sequence_length = preproc_english_sentences.shape[1]\nmax_french_sequence_length = preproc_french_sentences.shape[1]\nenglish_vocab_size = len(english_tokenizer.word_index)\nfrench_vocab_size = len(french_tokenizer.word_index)\n\nprint('Data Preprocessed')\nprint(\"Max English sentence length:\", max_english_sequence_length)\nprint(\"Max French sentence length:\", max_french_sequence_length)\nprint(\"English vocabulary size:\", english_vocab_size)\nprint(\"French vocabulary size:\", french_vocab_size)",
"Data Preprocessed\nMax English sentence length: 15\nMax French sentence length: 21\nEnglish vocabulary size: 199\nFrench vocabulary size: 344\n"
]
],
[
[
"## Models\nIn this section, you will experiment with various neural network architectures.\nYou will begin by training four relatively simple architectures.\n- Model 1 is a simple RNN\n- Model 2 is a RNN with Embedding\n- Model 3 is a Bidirectional RNN\n- Model 4 is an optional Encoder-Decoder RNN\n\nAfter experimenting with the four simple architectures, you will construct a deeper architecture that is designed to outperform all four models.\n### Ids Back to Text\nThe neural network will be translating the input to words ids, which isn't the final form we want. We want the French translation. The function `logits_to_text` will bridge the gab between the logits from the neural network to the French translation. You'll be using this function to better understand the output of the neural network.",
"_____no_output_____"
]
],
[
[
"def logits_to_text(logits, tokenizer):\n \"\"\"\n Turn logits from a neural network into text using the tokenizer\n :param logits: Logits from a neural network\n :param tokenizer: Keras Tokenizer fit on the labels\n :return: String that represents the text of the logits\n \"\"\"\n index_to_words = {id: word for word, id in tokenizer.word_index.items()}\n index_to_words[0] = '<PAD>'\n\n return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])\n\nprint('`logits_to_text` function loaded.')",
"`logits_to_text` function loaded.\n"
],
[
"def train(train_ops: list, metrics: list, inputs: np.ndarray, targets: np.ndarray, epochs: int, batch_size: int):\n n_samples = len(inputs)\n n_batches = (n_samples + batch_size - 1) // batch_size\n\n metric_names = [m.op.name for m in metrics]\n\n with tf.get_default_graph().as_default() as graph:\n\n input_tensors = tf.get_collection('inputs')[0]\n target_tensors = tf.get_collection('targets')[0]\n\n # Get a TensorFlow session managed by the supervisor.\n with tf.Session(graph=graph) as sess:\n # Initialize all global variables\n _ = sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\n\n for epoc in range(epochs):\n\n pbar = tqdm(range(n_batches), desc=\"Epoch {}\".format(epoc))\n\n for it in pbar:\n start = it * batch_size\n end = min(n_samples, start + batch_size)\n\n x, y = inputs[start:end], targets[start:end]\n\n _, metrics_output = sess.run([train_ops, metrics], feed_dict={input_tensors: x, target_tensors: y})\n\n pbar.set_postfix(dict([(m, v) for m, v in zip(metric_names, metrics_output)]), refresh=True)",
"_____no_output_____"
]
],
[
[
"### Model 1: RNN (IMPLEMENTATION)\n\nA basic RNN model is a good baseline for sequence data. In this model, you'll build a RNN that translates English to French.",
"_____no_output_____"
]
],
[
[
"def embed_input(x: tf.Tensor, n_words: int, embedding_size: int) -> tf.Tensor:\n # Create embedding\n emdedded_input = tf.nn.embedding_lookup(params=tf.get_variable(name=\"embedding\", shape=(n_words, embedding_size)),\n ids=x)\n return emdedded_input\n\n\ndef create_encoder(emdedded_input: tf.Tensor, num_units: int = 64) -> tf.Tensor:\n \"\"\"\n\n :param x: tf.placeholder or data input\n :return:\n \"\"\"\n with tf.variable_scope(\"encoder\"):\n cell = GRUCell(num_units=num_units)\n _, encoder_final_state = tf.nn.dynamic_rnn(cell=cell, inputs=emdedded_input, dtype=tf.float32)\n\n return encoder_final_state\n\n\ndef create_decoder(encoder_hs: tf.Tensor, sequence_length: int) -> tf.Tensor:\n batch_size = tf.shape(encoder_hs)[0]\n encoder_units = encoder_hs.get_shape().as_list()[-1]\n dtype = encoder_hs.dtype\n # create a decoder cell\n\n def teacher_forcing_loop(time, cell_output = None, cell_state = None, loop_state = None):\n emit_output = cell_output # == None for time == 0\n elements_finished = (time >= sequence_length)\n # time == 0 initialize the sequence with encoder hidden state\n # otherwise, force the cell output as RNNCell input\n if cell_output is None: # time == 0\n next_cell_state = encoder_hs\n next_input = tf.zeros([batch_size, encoder_units], dtype=dtype)\n else:\n next_cell_state = cell_state\n finished = tf.reduce_all(elements_finished)\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([batch_size, encoder_units], dtype=dtype),\n lambda: cell_output)\n next_loop_state = None\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)\n\n with tf.variable_scope(\"decoder\"):\n cell = GRUCell(num_units=encoder_units)\n\n # unroll the sequence reusing cell output as the next input\n outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(cell, loop_fn=teacher_forcing_loop)\n\n # outputs provided in the form of tensor array that should be converted back into a tensor\n decoder_output = outputs_ta.stack()\n\n return decoder_output\n\n\ndef create_encoder_decoder_model(inputs: tf.Tensor,\n source_size: int,\n target_size: int,\n target_length: int,\n embedding_size: int = 16) -> tf.Tensor:\n embedded_input = embed_input(inputs, n_words=source_size, embedding_size=embedding_size)\n encoder_hs = create_encoder(embedded_input)\n decoder_output = create_decoder(encoder_hs, sequence_length=target_length)\n logits = tf.layers.Dense(units=target_size)(decoder_output)\n\n return logits",
"_____no_output_____"
],
[
"# refresh the graph to make sure nothing was left there from prior runs\ntf.reset_default_graph()\n\ninputs = tf.placeholder(tf.int32, [None, max_english_sequence_length], name='inputs')\ntf.add_to_collection(name=\"inputs\", value=inputs)\ntargets = tf.placeholder(tf.int32, [None, max_french_sequence_length], name='targets')\ntf.add_to_collection(name=\"targets\", value=targets)\n\nlogits = create_encoder_decoder_model(inputs=inputs,\n source_size=english_vocab_size+1,\n target_size=french_vocab_size+1,\n target_length=max_french_sequence_length)\n# To be consumed correctly be TF metrics and losses, logits should be transposed [1, 0, 2]\nlogits = tf.transpose(logits, [1, 0, 2])\n\n# build a loss function\nloss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=targets, logits=logits), name='acc_loss')\n# build accuracy metric\npredictions = tf.argmax(logits, axis=2, name=\"prediction\")\naccuracy, update_count_op = tf.metrics.accuracy(labels=targets, predictions=predictions)\n\nvariables = tf.trainable_variables()\ngradients = tf.gradients(loss, variables)\n\nclipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n\n# Optimization\nglobal_step = tf.train.get_or_create_global_step()\noptimizer = tf.train.AdamOptimizer(learning_rate = 0.001)\nupdate_step = optimizer.apply_gradients(zip(clipped_gradients, variables))\n\ntrain(train_ops=[update_step, update_count_op, global_step],\n metrics=[loss, accuracy],\n inputs=preproc_english_sentences,\n targets=preproc_french_sentences,\n epochs=10,\n batch_size=1024)",
" 87%|████████▋ | 3743/4309 [01:19<00:12, 46.60it/s, acc_loss=3.19, accuracy/value=0.408]ERROR:root:Internal Python error in the inspect module.\nBelow is the traceback from this internal error.\n\n"
]
],
[
[
"## Prediction (IMPLEMENTATION)",
"_____no_output_____"
]
],
[
[
"def final_predictions(x, y, x_tk, y_tk):\n \"\"\"\n Gets predictions using the final model\n :param x: Preprocessed English data\n :param y: Preprocessed French data\n :param x_tk: English tokenizer\n :param y_tk: French tokenizer\n \"\"\"\n # Pass pretrained model\n model = bd_embedded_rnn_model\n\n \n ## DON'T EDIT ANYTHING BELOW THIS LINE\n y_id_to_word = {value: key for key, value in y_tk.word_index.items()}\n y_id_to_word[0] = '<PAD>'\n\n sentence = 'he saw a old yellow truck'\n sentence = [x_tk.word_index[word] for word in sentence.split()]\n sentence = pad_sequences([sentence], maxlen=x.shape[-1], padding='post')\n sentences = np.array([sentence[0], x[0]])\n predictions = model.predict(sentences, len(sentences))\n\n print('Sample 1:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[0]]))\n print('Il a vu un vieux camion jaune')\n print('Sample 2:')\n print(' '.join([y_id_to_word[np.argmax(x)] for x in predictions[1]]))\n print(' '.join([y_id_to_word[np.max(x)] for x in y[0]]))\n\n\nfinal_predictions(preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer)",
"_____no_output_____"
]
],
[
[
"## Submission\nWhen you're ready to submit, complete the following steps:\n1. Review the [rubric](https://review.udacity.com/#!/rubrics/1004/view) to ensure your submission meets all requirements to pass\n2. Generate an HTML version of this notebook\n\n - Run the next cell to attempt automatic generation (this is the recommended method in Workspaces)\n - Navigate to **FILE -> Download as -> HTML (.html)**\n - Manually generate a copy using `nbconvert` from your shell terminal\n```\n$ pip install nbconvert\n$ python -m nbconvert machine_translation.ipynb\n```\n \n3. Submit the project\n\n - If you are in a Workspace, simply click the \"Submit Project\" button (bottom towards the right)\n \n - Otherwise, add the following files into a zip archive and submit them \n - `helper.py`\n - `machine_translation.ipynb`\n - `machine_translation.html`\n - You can export the notebook by navigating to **File -> Download as -> HTML (.html)**.",
"_____no_output_____"
]
],
[
[
"!!python -m nbconvert *.ipynb",
"_____no_output_____"
]
],
[
[
"## Optional Enhancements\n\nThis project focuses on learning various network architectures for machine translation, but we don't evaluate the models according to best practices by splitting the data into separate test & training sets -- so the model accuracy is overstated. Use the [`sklearn.model_selection.train_test_split()`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function to create separate training & test datasets, then retrain each of the models using only the training set and evaluate the prediction accuracy using the hold out test set. Does the \"best\" model change?",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a1087b3ad740150045350aca96f3185e3cee20a
| 377,881 |
ipynb
|
Jupyter Notebook
|
IBM_AI_Engineering/Course-4-deep-neural-networks-with-pytorch/Week-6-CNN/9.4.2CNN_Small_Image.ipynb
|
fengjings/Coursera
|
54098a9732faa4b37afe69d196e27805b1ac73aa
|
[
"MIT"
] | null | null | null |
IBM_AI_Engineering/Course-4-deep-neural-networks-with-pytorch/Week-6-CNN/9.4.2CNN_Small_Image.ipynb
|
fengjings/Coursera
|
54098a9732faa4b37afe69d196e27805b1ac73aa
|
[
"MIT"
] | null | null | null |
IBM_AI_Engineering/Course-4-deep-neural-networks-with-pytorch/Week-6-CNN/9.4.2CNN_Small_Image.ipynb
|
fengjings/Coursera
|
54098a9732faa4b37afe69d196e27805b1ac73aa
|
[
"MIT"
] | 1 |
2021-06-09T08:59:48.000Z
|
2021-06-09T08:59:48.000Z
| 295.450352 | 64,392 | 0.924399 |
[
[
[
"<a href=\"http://cocl.us/pytorch_link_top\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png\" width=\"750\" alt=\"IBM Product \" />\n</a> ",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png\" width=\"200\" alt=\"cognitiveclass.ai logo\" />",
"_____no_output_____"
],
[
"<h1>Convolutional Neural Network with Small Images</h1> ",
"_____no_output_____"
],
[
"<h2>Table of Contents</h2>\n<p>In this lab, we will use a Convolutional Neural Network to classify handwritten digits from the MNIST database. We will reshape the images to make them faster to process </p>\n\n<ul>\n<li><a href=\"#Makeup_Data\">Get Some Data</a></li>\n<li><a href=\"#CNN\">Convolutional Neural Network</a></li>\n<li><a href=\"#Train\">Define Softmax, Criterion function, Optimizer and Train the Model</a></li>\n<li><a href=\"#Result\">Analyze Results</a></li>\n</ul>\n<p>Estimated Time Needed: <strong>25 min</strong> 14 min to train model </p>\n\n<hr>",
"_____no_output_____"
],
[
"<h2>Preparation</h2>",
"_____no_output_____"
]
],
[
[
"\n\n# Import the libraries we need to use in this lab\n\n# Using the following line code to install the torchvision library\n# !conda install -y torchvision\n\nimport torch \nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nimport matplotlib.pylab as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"Define the function <code>plot_channels</code> to plot out the kernel parameters of each channel ",
"_____no_output_____"
]
],
[
[
"# Define the function for plotting the channels\n\ndef plot_channels(W):\n n_out = W.shape[0]\n n_in = W.shape[1]\n w_min = W.min().item()\n w_max = W.max().item()\n fig, axes = plt.subplots(n_out, n_in)\n fig.subplots_adjust(hspace=0.1)\n out_index = 0\n in_index = 0\n \n #plot outputs as rows inputs as columns \n for ax in axes.flat:\n if in_index > n_in-1:\n out_index = out_index + 1\n in_index = 0\n ax.imshow(W[out_index, in_index, :, :], vmin=w_min, vmax=w_max, cmap='seismic')\n ax.set_yticklabels([])\n ax.set_xticklabels([])\n in_index = in_index + 1\n\n plt.show()",
"_____no_output_____"
]
],
[
[
"Define the function <code>plot_parameters</code> to plot out the kernel parameters of each channel with Multiple outputs . ",
"_____no_output_____"
]
],
[
[
"# Define the function for plotting the parameters\n\ndef plot_parameters(W, number_rows=1, name=\"\", i=0):\n W = W.data[:, i, :, :]\n n_filters = W.shape[0]\n w_min = W.min().item()\n w_max = W.max().item()\n fig, axes = plt.subplots(number_rows, n_filters // number_rows)\n fig.subplots_adjust(hspace=0.4)\n\n for i, ax in enumerate(axes.flat):\n if i < n_filters:\n # Set the label for the sub-plot.\n ax.set_xlabel(\"kernel:{0}\".format(i + 1))\n\n # Plot the image.\n ax.imshow(W[i, :], vmin=w_min, vmax=w_max, cmap='seismic')\n ax.set_xticks([])\n ax.set_yticks([])\n plt.suptitle(name, fontsize=10) \n plt.show()",
"_____no_output_____"
]
],
[
[
"Define the function <code>plot_activation</code> to plot out the activations of the Convolutional layers ",
"_____no_output_____"
]
],
[
[
"# Define the function for plotting the activations\n\ndef plot_activations(A, number_rows=1, name=\"\", i=0):\n A = A[0, :, :, :].detach().numpy()\n n_activations = A.shape[0]\n A_min = A.min().item()\n A_max = A.max().item()\n fig, axes = plt.subplots(number_rows, n_activations // number_rows)\n fig.subplots_adjust(hspace = 0.4)\n\n for i, ax in enumerate(axes.flat):\n if i < n_activations:\n # Set the label for the sub-plot.\n ax.set_xlabel(\"activation:{0}\".format(i + 1))\n\n # Plot the image.\n ax.imshow(A[i, :], vmin=A_min, vmax=A_max, cmap='seismic')\n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()",
"_____no_output_____"
]
],
[
[
"Define the function <code>show_data</code> to plot out data samples as images.",
"_____no_output_____"
]
],
[
[
"def show_data(data_sample):\n plt.imshow(data_sample[0].numpy().reshape(IMAGE_SIZE, IMAGE_SIZE), cmap='gray')\n # plt.title('y = '+ str(data_sample[1].item()))\n plt.title('y = '+ str(data_sample[1]))",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\"Makeup_Data\">Get the Data</h2> ",
"_____no_output_____"
],
[
"we create a transform to resize the image and convert it to a tensor .",
"_____no_output_____"
]
],
[
[
"\n\nIMAGE_SIZE = 16\n\n\ncomposed = transforms.Compose([transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()])",
"_____no_output_____"
]
],
[
[
"Load the training dataset by setting the parameters <code>train </code> to <code>True</code>. We use the transform defined above.",
"_____no_output_____"
]
],
[
[
"\ntrain_dataset = dsets.MNIST(root='../data', train=True, download=True, transform=composed)",
"_____no_output_____"
]
],
[
[
"Load the testing dataset by setting the parameters train <code>False</code>.",
"_____no_output_____"
]
],
[
[
"# Make the validating \n\nvalidation_dataset = dsets.MNIST(root='../data', train=False, download=True, transform=composed)",
"_____no_output_____"
]
],
[
[
"We can see the data type is long.",
"_____no_output_____"
]
],
[
[
"# Show the data type for each element in dataset\n\nprint(train_dataset[1][0].type())\ntrain_dataset[1][1]",
"torch.FloatTensor\n"
]
],
[
[
"Each element in the rectangular tensor corresponds to a number representing a pixel intensity as demonstrated by the following image.",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter%206/6.2.1imagenet.png\" width=\"550\" alt=\"MNIST data image\">",
"_____no_output_____"
],
[
"Print out the fourth label ",
"_____no_output_____"
]
],
[
[
"# The label for the fourth data element\n\ntrain_dataset[3][1]",
"_____no_output_____"
]
],
[
[
"Plot the fourth sample ",
"_____no_output_____"
]
],
[
[
"# The image for the fourth data element\nshow_data(train_dataset[3])\n",
"_____no_output_____"
]
],
[
[
"The fourth sample is a \"1\".",
"_____no_output_____"
],
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\"CNN\">Build a Convolutional Neural Network Class</h2>",
"_____no_output_____"
],
[
"Build a Convolutional Network class with two Convolutional layers and one fully connected layer. Pre-determine the size of the final output matrix. The parameters in the constructor are the number of output channels for the first and second layer.",
"_____no_output_____"
]
],
[
[
"class CNN(nn.Module):\n \n # Contructor\n def __init__(self, out_1=16, out_2=32):\n super(CNN, self).__init__()\n self.cnn1 = nn.Conv2d(in_channels=1, out_channels=out_1, kernel_size=5, padding=2)\n self.maxpool1=nn.MaxPool2d(kernel_size=2)\n\n self.cnn2 = nn.Conv2d(in_channels=out_1, out_channels=out_2, kernel_size=5, stride=1, padding=2)\n self.maxpool2=nn.MaxPool2d(kernel_size=2)\n self.fc1 = nn.Linear(out_2 * 4 * 4, 10)\n \n # Prediction\n def forward(self, x):\n x = self.cnn1(x)\n x = torch.relu(x)\n x = self.maxpool1(x)\n x = self.cnn2(x)\n x = torch.relu(x)\n x = self.maxpool2(x)\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n return x\n \n # Outputs in each steps\n def activations(self, x):\n #outputs activation this is not necessary\n z1 = self.cnn1(x)\n a1 = torch.relu(z1)\n out = self.maxpool1(a1)\n \n z2 = self.cnn2(out)\n a2 = torch.relu(z2)\n out1 = self.maxpool2(a2)\n out = out.view(out.size(0),-1)\n return z1, a1, z2, a2, out1,out",
"_____no_output_____"
]
],
[
[
"<h2 id=\"Train\">Define the Convolutional Neural Network Classifier, Criterion function, Optimizer and Train the Model</h2> ",
"_____no_output_____"
],
[
"There are 16 output channels for the first layer, and 32 output channels for the second layer ",
"_____no_output_____"
]
],
[
[
"# Create the model object using CNN class\n\nmodel = CNN(out_1=16, out_2=32)",
"_____no_output_____"
]
],
[
[
"Plot the model parameters for the kernels before training the kernels. The kernels are initialized randomly.",
"_____no_output_____"
]
],
[
[
"# Plot the parameters\n\nplot_parameters(model.state_dict()['cnn1.weight'], number_rows=4, name=\"1st layer kernels before training \")\nplot_parameters(model.state_dict()['cnn2.weight'], number_rows=4, name='2nd layer kernels before training' )",
"_____no_output_____"
]
],
[
[
"Define the loss function, the optimizer and the dataset loader ",
"_____no_output_____"
]
],
[
[
"criterion = nn.CrossEntropyLoss()\nlearning_rate = 0.1\noptimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100)\nvalidation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=5000)",
"_____no_output_____"
]
],
[
[
"Train the model and determine validation accuracy technically test accuracy **(This may take a long time)**",
"_____no_output_____"
]
],
[
[
"# Train the model\n\nn_epochs=3\ncost_list=[]\naccuracy_list=[]\nN_test=len(validation_dataset)\nCOST=0\n\ndef train_model(n_epochs):\n for epoch in range(n_epochs):\n COST=0\n for x, y in train_loader:\n optimizer.zero_grad()\n z = model(x)\n loss = criterion(z, y)\n loss.backward()\n optimizer.step()\n COST+=loss.data\n \n cost_list.append(COST)\n correct=0\n #perform a prediction on the validation data \n for x_test, y_test in validation_loader:\n z = model(x_test)\n _, yhat = torch.max(z.data, 1)\n correct += (yhat == y_test).sum().item()\n accuracy = correct / N_test\n accuracy_list.append(accuracy)\n \n print('epoch:'+str(epoch)+'/'+str(n_epochs)+' cost: '+str(COST)+' acc: '+str(accuracy))\n \ntrain_model(n_epochs)",
"epoch:0/3 cost: tensor(60.0698) acc: 0.9726\nepoch:1/3 cost: tensor(47.4953) acc: 0.9776\nepoch:2/3 cost: tensor(40.1689) acc: 0.9793\n"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\"Result\">Analyze Results</h2> ",
"_____no_output_____"
],
[
"Plot the loss and accuracy on the validation data:",
"_____no_output_____"
]
],
[
[
"# Plot the loss and accuracy\n\nfig, ax1 = plt.subplots()\ncolor = 'tab:red'\nax1.plot(cost_list, color=color)\nax1.set_xlabel('epoch', color=color)\nax1.set_ylabel('Cost', color=color)\nax1.tick_params(axis='y', color=color)\n \nax2 = ax1.twinx() \ncolor = 'tab:blue'\nax2.set_ylabel('accuracy', color=color) \nax2.set_xlabel('epoch', color=color)\nax2.plot( accuracy_list, color=color)\nax2.tick_params(axis='y', color=color)\nfig.tight_layout()",
"_____no_output_____"
]
],
[
[
"View the results of the parameters for the Convolutional layers ",
"_____no_output_____"
]
],
[
[
"# Plot the channels\n\nplot_channels(model.state_dict()['cnn1.weight'])\nplot_channels(model.state_dict()['cnn2.weight'])",
"_____no_output_____"
],
[
"train_dataset[1]",
"_____no_output_____"
]
],
[
[
"Consider the following sample ",
"_____no_output_____"
]
],
[
[
"# Show the second image\n\nshow_data(train_dataset[1])",
"_____no_output_____"
]
],
[
[
"Determine the activations ",
"_____no_output_____"
]
],
[
[
"# Use the CNN activations class to see the steps\n\nout = model.activations(train_dataset[1][0].view(1, 1, IMAGE_SIZE, IMAGE_SIZE))",
"_____no_output_____"
]
],
[
[
"Plot out the first set of activations ",
"_____no_output_____"
]
],
[
[
"# Plot the outputs after the first CNN\n\nplot_activations(out[0], number_rows=4, name=\"Output after the 1st CNN\")",
"_____no_output_____"
]
],
[
[
"The image below is the result after applying the relu activation function ",
"_____no_output_____"
]
],
[
[
"# Plot the outputs after the first Relu\n\nplot_activations(out[1], number_rows=4, name=\"Output after the 1st Relu\")",
"_____no_output_____"
]
],
[
[
"The image below is the result of the activation map after the second output layer.",
"_____no_output_____"
]
],
[
[
"# Plot the outputs after the second CNN\n\nplot_activations(out[2], number_rows=32 // 4, name=\"Output after the 2nd CNN\")",
"_____no_output_____"
]
],
[
[
"The image below is the result of the activation map after applying the second relu ",
"_____no_output_____"
]
],
[
[
"# Plot the outputs after the second Relu\n\nplot_activations(out[3], number_rows=4, name=\"Output after the 2nd Relu\")",
"_____no_output_____"
]
],
[
[
"We can see the result for the third sample ",
"_____no_output_____"
]
],
[
[
"# Show the third image\n\nshow_data(train_dataset[2])",
"_____no_output_____"
],
[
"# Use the CNN activations class to see the steps\n\nout = model.activations(train_dataset[2][0].view(1, 1, IMAGE_SIZE, IMAGE_SIZE))",
"_____no_output_____"
],
[
"# Plot the outputs after the first CNN\n\nplot_activations(out[0], number_rows=4, name=\"Output after the 1st CNN\")",
"_____no_output_____"
],
[
"# Plot the outputs after the first Relu\n\nplot_activations(out[1], number_rows=4, name=\"Output after the 1st Relu\")",
"_____no_output_____"
],
[
"# Plot the outputs after the second CNN\n\nplot_activations(out[2], number_rows=32 // 4, name=\"Output after the 2nd CNN\")",
"_____no_output_____"
],
[
"# Plot the outputs after the second Relu\n\nplot_activations(out[3], number_rows=4, name=\"Output after the 2nd Relu\")",
"_____no_output_____"
]
],
[
[
"Plot the first five mis-classified samples:",
"_____no_output_____"
]
],
[
[
"# Plot the mis-classified samples\n\ncount = 0\nfor x, y in torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=1):\n z = model(x)\n _, yhat = torch.max(z, 1)\n if yhat != y:\n show_data((x, y))\n plt.show()\n print(\"yhat: \",yhat)\n count += 1\n if count >= 5:\n break ",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<a href=\"http://cocl.us/pytorch_link_bottom\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png\" width=\"750\" alt=\"PyTorch Bottom\" />\n</a>",
"_____no_output_____"
],
[
"<h2>About the Authors:</h2> \n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.",
"_____no_output_____"
],
[
"Other contributors: <a href=\"https://www.linkedin.com/in/michelleccarey/\">Michelle Carey</a>, <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>",
"_____no_output_____"
],
[
"Thanks to Magnus <a href=\"http://www.hvass-labs.org/\">Erik Hvass Pedersen</a> whose tutorials helped me understand convolutional Neural Network",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"Copyright © 2018 <a href=\"cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu\">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href=\"https://bigdatauniversity.com/mit-license/\">MIT License</a>.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a1087c024cf06e9bace6372c4ca25921dbbe064
| 117,428 |
ipynb
|
Jupyter Notebook
|
IBM Coursera Advanced Data Science Capstone .ipynb
|
sptennak/IBM-Coursera-Advanced-Data-Science-Capstone
|
88b5efd08eb6bf1a55ffae85402c190722c83723
|
[
"Apache-2.0"
] | 2 |
2020-10-25T16:34:04.000Z
|
2021-08-01T16:31:14.000Z
|
IBM Coursera Advanced Data Science Capstone .ipynb
|
1Shaharear/IBM-Coursera-Advanced-Data-Science-Capstone
|
88b5efd08eb6bf1a55ffae85402c190722c83723
|
[
"Apache-2.0"
] | null | null | null |
IBM Coursera Advanced Data Science Capstone .ipynb
|
1Shaharear/IBM-Coursera-Advanced-Data-Science-Capstone
|
88b5efd08eb6bf1a55ffae85402c190722c83723
|
[
"Apache-2.0"
] | 4 |
2020-10-20T14:30:44.000Z
|
2021-03-09T11:34:38.000Z
| 118.974671 | 36,344 | 0.828712 |
[
[
[
"# Classifying Business Documents using Deep Learning\n## IBM Coursera Advanced Data Science Capstone - Results Demo\n\n## Sumudu Tennakoon",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport re\nimport matplotlib.pyplot as plt \nfrom datetime import date\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport tensorflow.keras as keras\nprint('TensorFlow Version: ', tf.__version__)\n\nfrom DocumentClassifierV1 import * # Custom library created for the Capstone project.",
"TensorFlow Version: 1.12.0\n"
]
],
[
[
"## 1. Read Pre-saved Input dataset (Test Sample-not used in modeling)",
"_____no_output_____"
]
],
[
[
"DocumentFilesData = pd.read_pickle('Data/DocumentClassification_IBM_ADV_DS_Capstone_TestSample_128x128_20190316.pkl')",
"_____no_output_____"
]
],
[
[
"## 2. Organize Classs Labels",
"_____no_output_____"
]
],
[
[
"ClassLabels = list(DocumentFilesData.FileClass.unique()) ",
"_____no_output_____"
],
[
"ClassNumbers = list(range(len(ClassLabels)))",
"_____no_output_____"
],
[
"ClassLabelMap = list((zip(ClassLabels, ClassNumbers)))\nprint(ClassLabelMap)",
"[('advertisement', 0), ('handwritten', 1), ('invoice', 2), ('letter', 3), ('resume', 4), ('scientific publication', 5)]\n"
],
[
"for clm in ClassLabelMap:\n DocumentFilesData.loc[DocumentFilesData['FileClass']==clm[0] , 'ClassNumber'] = clm[1]",
"_____no_output_____"
]
],
[
[
"## 3. Separate Features and Response",
"_____no_output_____"
]
],
[
[
"NClasses = len(ClassLabels)\nimgRows = 128\nimgCols = 128 ",
"_____no_output_____"
],
[
"X = np.asarray(list(DocumentFilesData['DocumentMatrix'].values), dtype ='int')\ny = DocumentFilesData['ClassNumber'].values",
"_____no_output_____"
],
[
"#Shape of datasets\nprint(X.shape)\nprint(y.shape)",
"(16298, 128, 128)\n(16298,)\n"
]
],
[
[
"## 4. Plot sample image",
"_____no_output_____"
]
],
[
[
"#Plot sample image with scale\nplt.imshow(X[10000])\nplt.colorbar()",
"_____no_output_____"
]
],
[
[
"## 5. Send data into the Model",
"_____no_output_____"
]
],
[
[
"if keras.backend.image_data_format() == 'channels_first':\n X = X.reshape(X.shape[0], 1, imgRows, imgCols)\n input_shape = (1, imgRows, imgCols)\nelse:\n X = X.reshape(X.shape[0], imgRows, imgCols, 1)\n input_shape = (imgRows, imgCols, 1)",
"_____no_output_____"
],
[
"X = X.astype('float32') #convert interger image tensor to float\nX = X/255 # Normalize grayscale to a number between 0 and 1\nprint(X.shape[0], 'samples')\n# Record actuals\ny_act = y",
"16298 samples\n"
],
[
"y = keras.utils.to_categorical(y, NClasses)",
"_____no_output_____"
],
[
"ClassificationModel = TFModel(ModelFile='Models/DocumentClassification_IBM_ADV_DS_Capstone_CNN_V03_128x128_20190316.pkl', Model=keras.models.load_model('Models/DocumentClassification_IBM_ADV_DS_Capstone_CNN_V03_128x128_20190316.h5'))",
"_____no_output_____"
],
[
"Output = ClassificationModel.Classify(InputFiles=X, size=(imgRows,imgCols), ActualClasses=list(y_act),\n ReturnImageMatrix=True, ReturnJSON=False, ReturnFullPath=True, TransformedData=True)",
"_____no_output_____"
]
],
[
[
"## 6. Proces output",
"_____no_output_____"
]
],
[
[
"Output['actual'] = Output['actual'].astype('int')",
"_____no_output_____"
],
[
"for clm in ClassLabelMap:\n Output.loc[Output['actual']==clm[1] , 'actual'] = clm[0]",
"_____no_output_____"
],
[
"Output.head()",
"_____no_output_____"
]
],
[
[
"## 7. Performance Evaluation",
"_____no_output_____"
],
[
"### Confusion Matrix",
"_____no_output_____"
]
],
[
[
"cf = pd.crosstab(Output.actual, Output.prediction, margins=True)",
"_____no_output_____"
],
[
"cf",
"_____no_output_____"
],
[
"import seaborn as sns\nsns.heatmap(pd.crosstab(Output.actual, Output.prediction, margins=False), annot=True)",
"_____no_output_____"
]
],
[
[
"### Accuracy",
"_____no_output_____"
]
],
[
[
"CorrectPredictions = np.sum(np.diagonal(pd.crosstab(Output.actual, Output.prediction, margins=False).values))\nTotalDocuments = np.sum(pd.crosstab(Output.actual, Output.prediction, margins=False).values)\nAccuracy = CorrectPredictions/TotalDocuments\nprint('CorrectPredictions= {}'.format(CorrectPredictions))\nprint('TotalDocuments= {}'.format(TotalDocuments))\nprint('Accuracy= {}'.format(Accuracy))",
"CorrectPredictions= 14849\nTotalDocuments= 16298\nAccuracy= 0.9110933856914959\n"
]
],
[
[
"### Model Robustness",
"_____no_output_____"
]
],
[
[
"bins=np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\nlabels=np.array([ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\nOutput['MaxProbabilityScore']=pd.cut(Output.probability, bins=bins) #, labels=labels)\nOutput['PredictedCorrect'] = np.where(Output['actual']==Output['prediction'], 1, 0)\n\nRobustness = Output.groupby(by='MaxProbabilityScore').agg({'probability':'mean', 'PredictedCorrect':'sum', 'filename':'count'})\nRobustness.columns = ['MeanProbability', 'PredictedCorrect', 'BucketCount']\nRobustness['BucketPrecision']=Robustness['PredictedCorrect']/Robustness['BucketCount']\nRobustness['BucketFraction']=Robustness['BucketCount']/(Robustness['BucketCount'].sum())",
"_____no_output_____"
],
[
"Robustness",
"_____no_output_____"
]
],
[
[
"## 8. Run the model on sample Image file\n",
"_____no_output_____"
]
],
[
[
"InputFiles = ['Data/test1.png']\nOutput_single = ClassificationModel.Classify(InputFiles=InputFiles, size=(imgRows,imgCols), ActualClasses=None,\n ReturnImageMatrix=True, ReturnJSON=True, ReturnFullPath=False, TransformedData=False)",
"_____no_output_____"
],
[
"Output_single",
"_____no_output_____"
],
[
"OutputDashboard = Dashboard()\nfig = OutputDashboard.ImageOutput(Output_single, NSamples=1, Format='JSON', ClassLabels=ClassificationModel.ClassLabels)\nplt.show()",
"_____no_output_____"
]
],
[
[
"<hr>\n<p> This notebook and related materials were developed by <b> Sumudu Tennakoon</b> for the capstone project in partial fulfillment of the requirements for the <b> Advanced Data Science with IBM Specialization</b>. <br>\nMarch 2019. <br>\nApache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)</p>\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a108a28c4ecdf094c9938782b5c1c2231b5fcc7
| 9,473 |
ipynb
|
Jupyter Notebook
|
notebooks/community/Louvain.ipynb
|
shwina/cugraph
|
8283eda0bd90f176afcb562ee83133180b694a64
|
[
"Apache-2.0"
] | null | null | null |
notebooks/community/Louvain.ipynb
|
shwina/cugraph
|
8283eda0bd90f176afcb562ee83133180b694a64
|
[
"Apache-2.0"
] | null | null | null |
notebooks/community/Louvain.ipynb
|
shwina/cugraph
|
8283eda0bd90f176afcb562ee83133180b694a64
|
[
"Apache-2.0"
] | null | null | null | 28.026627 | 476 | 0.558851 |
[
[
[
"# Louvain Community Detection\n\n\nIn this notebook, we will use cuGraph to identify the cluster in a test graph using the Louvain algorithm \n\nNotebook Credits\n* Original Authors: Bradley Rees and James Wyles\n* Created: 08/01/2019\n* Last Edit: 03/03/2020\n\nRAPIDS Versions: 0.13\n\nTest Hardware\n* GV100 32G, CUDA 10.2\n\n\n\n## Introduction\n\nThe Louvain method of community detection is a greedy heirarical clsutering algorithm which seeks to optimize Modularity as it progresses. Louvain starts with each vertex in its own clusters and iteratively merges groups using graph contraction. \n\nFor a detailed description of the algorithm see: https://en.wikipedia.org/wiki/Louvain_Modularity\n\nIt takes as input a cugraph.Graph object and returns as output a \ncudf.Dataframe object with the id and assigned partition for each \nvertex as well as the final modularity score\n\nTo compute the Louvain cluster in cuGraph use: <br>\n __df, mod = cugraph.louvain(G)__\n \n \n \n Parameters\n ----------\n input_graph : cugraph.Graph\n cuGraph graph descriptor, should contain the connectivity information\n as an edge list. The adjacency list will be computed if not already present.\n The graph should be undirected where an undirected edge is represented by a\n directed edge in both direction.\n\n max_iter : integer\n This controls the maximum number of levels/iterations of the Louvain\n algorithm. When specified the algorithm will terminate after no more\n than the specified number of iterations. No error occurs when the\n algorithm terminates early in this manner.\n\n Returns\n -------\n parts : cudf.DataFrame\n A GPU data frame of size V containing two columns the vertex id and the\n partition id it is assigned to.\n modularity_score : float\n a floating point number containing the modularity score of the\n partitioning.\n \n All vertices with the same partition ID are in the same cluster\n \n\n\n\n#### Note\nParallel Louvain produces different modularity scores that seriel Louvain. A complete technical write-up is being produced and will be linked here when available. \n\n### References\n* Blondel, V. D., Guillaume, J.-L., Lambiotte, R., and Lefebvre, E. Fast unfolding of communities in large networks. Journal of statistical mechanics: theory and experiment 2008, 10 (2008), P10008.\n",
"_____no_output_____"
],
[
"## cuGraph Notice \nThe current version of cuGraph has some limitations:\n\n* Vertex IDs need to be 32-bit integers.\n* Vertex IDs are expected to be contiguous integers starting from 0.\n\ncuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon. ",
"_____no_output_____"
],
[
"### Test Data\nWe will be using the Zachary Karate club dataset \n*W. W. Zachary, An information flow model for conflict and fission in small groups, Journal of\nAnthropological Research 33, 452-473 (1977).*\n\n\n\n",
"_____no_output_____"
],
[
"### Prep",
"_____no_output_____"
]
],
[
[
"# Import needed libraries\nimport cugraph\nimport cudf",
"_____no_output_____"
]
],
[
[
"## Read data using cuDF",
"_____no_output_____"
]
],
[
[
"# Test file \ndatafile='../data//karate-data.csv'",
"_____no_output_____"
],
[
"# read the data using cuDF\ngdf = cudf.read_csv(datafile, delimiter='\\t', names=['src', 'dst'], dtype=['int32', 'int32'] )",
"_____no_output_____"
],
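[
"# Added peek (illustrative): inspect the first few edges that were read in\ngdf.head()",
"_____no_output_____"
],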
[
"# The algorithm also requires that there are vertex weights. Just use 1.0 \ngdf[\"data\"] = 1.0",
"_____no_output_____"
],
[
"# just for fun, let's look at the data types in the dataframe\ngdf.dtypes",
"_____no_output_____"
],
[
"# create a Graph - since the data does not start at '0', use the auto-renumbering feature\nG = cugraph.Graph()\nG.from_cudf_edgelist(gdf, source='src', destination='dst', edge_attr='data', renumber=True)",
"_____no_output_____"
],
[
"# Call Louvain on the graph\ndf, mod = cugraph.louvain(G) ",
"_____no_output_____"
],
[
"# Print the modularity score\nprint('Modularity was {}'.format(mod))\nprint()",
"Modularity was 0.4027777777777778\n\n"
],
[
"df.dtypes",
"_____no_output_____"
],
[
"# How many partitions where found\npart_ids = df[\"partition\"].unique()",
"_____no_output_____"
],
[
"print(str(len(part_ids)) + \" partition detected\")",
"4 partition detected\n"
],
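[
"# Added sketch (illustrative): the same partition listing can be built with\n# cuDF boolean masks instead of the row-by-row loop in the next cell\nfor p in part_ids.to_pandas():\n    members = df[df['partition'] == p]['vertex']\n    print('Partition ' + str(p) + ': ' + str(len(members)) + ' vertices')",
"_____no_output_____"
],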
[
"# print the clusters. \nfor p in range(len(part_ids)):\n part = []\n for i in range(len(df)):\n if (df['partition'][i] == p):\n part.append(df['vertex'][i] )\n print(\"Partition \" + str(p) + \":\")\n print(part)\n",
"Partition 0:\n[1, 2, 3, 4, 8, 10, 12, 13, 14, 18, 20, 22]\nPartition 1:\n[5, 6, 7, 11, 17]\nPartition 2:\n[9, 15, 16, 19, 21, 23, 27, 29, 30, 31, 32, 33, 34]\nPartition 3:\n[24, 25, 26, 28]\n"
]
],
[
[
"___\nCopyright (c) 2019, NVIDIA CORPORATION.\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n___",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a10a1a44dcfac01254ec98003c5edba25cb502a
| 17,627 |
ipynb
|
Jupyter Notebook
|
nbs/011_callback.noisy_student.ipynb
|
sjdlloyd/tsai
|
98d9c02b8429708819d373b475deb9e99f0ab7df
|
[
"Apache-2.0"
] | null | null | null |
nbs/011_callback.noisy_student.ipynb
|
sjdlloyd/tsai
|
98d9c02b8429708819d373b475deb9e99f0ab7df
|
[
"Apache-2.0"
] | null | null | null |
nbs/011_callback.noisy_student.ipynb
|
sjdlloyd/tsai
|
98d9c02b8429708819d373b475deb9e99f0ab7df
|
[
"Apache-2.0"
] | null | null | null | 43.309582 | 2,820 | 0.607364 |
[
[
[
"# default_exp callback.noisy_student",
"_____no_output_____"
]
],
[
[
"# Noisy student\n\n> Callback to apply noisy student self-training (a semi-supervised learning approach) based on: Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).",
"_____no_output_____"
]
],
[
[
"#export \nfrom tsai.imports import *\nfrom tsai.utils import *\nfrom tsai.data.preprocessing import *\nfrom tsai.data.transforms import *\nfrom tsai.models.layers import *\nfrom fastai.callback.all import *",
"_____no_output_____"
],
[
"#export\nimport torch.multiprocessing\ntorch.multiprocessing.set_sharing_strategy('file_system')",
"_____no_output_____"
],
[
"#export\n\n# This is an unofficial implementation of noisy student based on:\n# Xie, Q., Luong, M. T., Hovy, E., & Le, Q. V. (2020). Self-training with noisy student improves imagenet classification. \n# In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 10687-10698).\n# Official tensorflow implementation available in https://github.com/google-research/noisystudent\n\n\nclass NoisyStudent(Callback):\n \"\"\"A callback to implement the Noisy Student approach. In the original paper this was used in combination with noise: \n - stochastic depth: .8\n - RandAugment: N=2, M=27\n - dropout: .5\n \n Steps:\n 1. Build the dl you will use as a teacher\n 2. Create dl2 with the pseudolabels (either soft or hard preds)\n 3. Pass any required batch_tfms to the callback\n \n \"\"\"\n \n def __init__(self, dl2:DataLoader, bs:Optional[int]=None, l2pl_ratio:int=1, batch_tfms:Optional[list]=None, do_setup:bool=True, \n pseudolabel_sample_weight:float=1., verbose=False): \n r'''\n Args:\n dl2: dataloader with the pseudolabels\n bs: batch size of the new, combined dataloader. If None, it will pick the bs from the labeled dataloader.\n l2pl_ratio: ratio between labels and pseudolabels in the combined batch\n batch_tfms: transforms applied to the combined batch. If None, it will pick the batch_tfms from the labeled dataloader (if any)\n do_setup: perform a transform setup on the labeled dataset.\n pseudolabel_sample_weight: weight of each pseudolabel sample relative to the labeled one of the loss.\n '''\n \n self.dl2, self.bs, self.l2pl_ratio, self.batch_tfms, self.do_setup, self.verbose = dl2, bs, l2pl_ratio, batch_tfms, do_setup, verbose\n self.pl_sw = pseudolabel_sample_weight\n \n def before_fit(self):\n if self.batch_tfms is None: self.batch_tfms = self.dls.train.after_batch\n self.old_bt = self.dls.train.after_batch # Remove and store dl.train.batch_tfms\n self.old_bs = self.dls.train.bs\n self.dls.train.after_batch = noop \n\n if self.do_setup and self.batch_tfms:\n for bt in self.batch_tfms: \n bt.setup(self.dls.train)\n\n if self.bs is None: self.bs = self.dls.train.bs\n self.dl2.bs = min(len(self.dl2.dataset), int(self.bs / (1 + self.l2pl_ratio)))\n self.dls.train.bs = self.bs - self.dl2.bs\n pv(f'labels / pseudolabels per training batch : {self.dls.train.bs} / {self.dl2.bs}', self.verbose)\n rel_weight = (self.dls.train.bs/self.dl2.bs) * (len(self.dl2.dataset)/len(self.dls.train.dataset))\n pv(f'relative labeled/ pseudolabel sample weight in dataset: {rel_weight:.1f}', self.verbose)\n self.dl2iter = iter(self.dl2)\n \n self.old_loss_func = self.learn.loss_func\n self.learn.loss_func = self.loss\n \n def before_batch(self):\n if self.training:\n X, y = self.x, self.y\n try: X2, y2 = next(self.dl2iter)\n except StopIteration:\n self.dl2iter = iter(self.dl2)\n X2, y2 = next(self.dl2iter)\n if y.ndim == 1 and y2.ndim == 2: y = torch.eye(self.learn.dls.c)[y].to(device) # ensure both \n \n X_comb, y_comb = concat(X, X2), concat(y, y2)\n \n if self.batch_tfms is not None: \n X_comb = compose_tfms(X_comb, self.batch_tfms, split_idx=0)\n y_comb = compose_tfms(y_comb, self.batch_tfms, split_idx=0)\n self.learn.xb = (X_comb,)\n self.learn.yb = (y_comb,)\n pv(f'\\nX: {X.shape} X2: {X2.shape} X_comb: {X_comb.shape}', self.verbose)\n pv(f'y: {y.shape} y2: {y2.shape} y_comb: {y_comb.shape}', self.verbose)\n \n def loss(self, output, target): \n if target.ndim == 2: _, target = target.max(dim=1)\n if self.training and self.pl_sw != 1: \n loss = (1 - self.pl_sw) * 
self.old_loss_func(output[:self.dls.train.bs], target[:self.dls.train.bs])\n loss += self.pl_sw * self.old_loss_func(output[self.dls.train.bs:], target[self.dls.train.bs:])\n return loss \n else: \n return self.old_loss_func(output, target)\n \n def after_fit(self):\n self.dls.train.after_batch = self.old_bt\n self.learn.loss_func = self.old_loss_func\n self.dls.train.bs = self.old_bs\n self.dls.bs = self.old_bs",
"_____no_output_____"
],
[
"from tsai.data.all import *\nfrom tsai.models.all import *\ndsid = 'NATOPS'\nX, y, splits = get_UCR_data(dsid, return_split=False)\ntfms = [None, Categorize()]\ndsets = TSDatasets(X, y, tfms=tfms, splits=splits)\ndls = TSDataLoaders.from_dsets(dsets.train, dsets.valid, batch_tfms=[TSStandardize(), TSRandomSize(.5)])",
"_____no_output_____"
],
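[
"# Added check (illustrative): pull one batch to confirm tensor shapes before\n# training; one_batch is standard fastai DataLoader API\nxb, yb = dls.train.one_batch()\nprint(xb.shape, yb.shape)",
"_____no_output_____"
],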
[
"pseudolabeled_data = X\nsoft_preds = True\n\npseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)\ndsets2 = TSDatasets(pseudolabeled_data, pseudolabels)\ndl2 = TSDataLoader(dsets2)\nmodel = create_model(InceptionTime, dls=dls)\nnoisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)\nlearn = Learner(dls, model, cbs=noisy_student_cb, metrics=accuracy)\nlearn.fit_one_cycle(1)",
"labels / pseudolabels per training batch : 171 / 85\nrelative labeled/ pseudolabel sample weight in dataset: 4.0\n"
],
[
"pseudolabeled_data = X\nsoft_preds = False\n\npseudolabels = ToNumpyCategory()(y) if soft_preds else OneHot()(y)\ndsets2 = TSDatasets(pseudolabeled_data, pseudolabels)\ndl2 = TSDataLoader(dsets2)\nmodel = create_model(InceptionTime, dls=dls)\nnoisy_student_cb = NoisyStudent(dl2, bs=256, l2pl_ratio=2, verbose=True)\nlearn = Learner(dls, model, cbs=noisy_student_cb, metrics=accuracy)\nlearn.fit_one_cycle(1)",
"labels / pseudolabels per training batch : 171 / 85\nrelative labeled/ pseudolabel sample weight in dataset: 4.0\n"
],
[
"#hide\nout = create_scripts(); beep(out)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10b3d93142227a35c0d7b406fb327b37d98d49
| 83,421 |
ipynb
|
Jupyter Notebook
|
todo/sp_numpy.ipynb
|
RyleighDavis/spirl
|
67c39c33e02b2a017c29a0c03415171d8f75c935
|
[
"BSD-3-Clause"
] | 7 |
2019-11-17T00:11:29.000Z
|
2022-03-29T04:31:08.000Z
|
todo/sp_numpy.ipynb
|
RyleighDavis/spirl
|
67c39c33e02b2a017c29a0c03415171d8f75c935
|
[
"BSD-3-Clause"
] | 7 |
2019-10-27T17:21:12.000Z
|
2021-09-14T19:54:11.000Z
|
todo/sp_numpy.ipynb
|
RyleighDavis/spirl
|
67c39c33e02b2a017c29a0c03415171d8f75c935
|
[
"BSD-3-Clause"
] | 16 |
2019-09-06T20:56:00.000Z
|
2021-10-17T11:15:31.000Z
| 199.57177 | 20,748 | 0.916951 |
[
[
[
"# Numpy\n\nThe basis of most scientific programming in Pyhton is the *numerical Python* library, `numpy`. NumPy gives us many tools - including a fast and efficient data type, the `numpy Array` - for working with numerical data. \n\n## Numpy Array\n\nNumPy is built around the `array`. This is a data structure defined in NumPy which is *ordered* and *mutable*, must like the `list`. Although very similar to the list, the numpy array only allows *numerical* data as elements, like the `int` and `float`. Let's explore!",
"_____no_output_____"
]
],
[
[
"# Frist we need to import the numpy package. It is commonly shortened to \"np\"\nimport numpy as np ",
"_____no_output_____"
]
],
[
[
"The easiest way to define numpy arrays is to define a list or tuple, and convert it to an array with the `numpy.array()` function.",
"_____no_output_____"
]
],
[
[
"a = [0, 1, 2, 3, 4]\nb = np.array(a)\nprint(type(a))\nprint(type(b))",
"<class 'list'>\n<class 'numpy.ndarray'>\n"
]
],
[
[
"We can index and slice numpy arrays much like lists:",
"_____no_output_____"
]
],
[
[
"print(b[0], b[1:3], b[-1])",
"0 [1 2] 4\n"
]
],
[
[
"Try running the following to get help on the NumPy array",
"_____no_output_____"
]
],
[
[
"help(np.ndarray)",
"_____no_output_____"
]
],
[
[
"Woah. That's a really long help page. Often when you are working with a new package, `help()` won't be the most convenient or easy to read way to get help. Instead, we can search for online *documentation* for the package we are using.\n\nIf you Google **numpy documentation**, you will likely see links to info about *numpy* and another package we will explore later, *scipy*. If you follow the links to **NumPy**, you should find a [NumPy user Guide](https://docs.scipy.org/doc/numpy-1.15.0/user/index.html) and from there, several pages of tutorials and documentation about the package. The [Quickstart tutorial](https://docs.scipy.org/doc/numpy-1.15.0/user/quickstart.html), will give a much more legible intro to the package.\n\n## Numpy Attributes\n\nNumPy arrays have some built in **attributes**, i.e. info stored in an object, accessible with `object.attribute` (note: no parentheses after).",
"_____no_output_____"
]
],
[
[
"# Let's print some attributes of our b array\nprint(\"Num dimensions:\", b.ndim,\n \"\\nShape:\", b.shape,\n \"\\nSize:\", b.size)",
"Num dimensions: 1 \nShape: (5,) \nSize: 5\n"
]
],
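[
[
"# Added example (illustrative): two more handy attributes - the element type\n# and the size in bytes of each element\nprint(\"dtype:\", b.dtype, \"itemsize:\", b.itemsize)",
"_____no_output_____"
]
],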
[
[
"A common way to define NumPy arrays with with the `arange` function.",
"_____no_output_____"
]
],
[
[
"np.arange(10)",
"_____no_output_____"
],
[
"help(np.arange)",
"_____no_output_____"
]
],
[
[
"The numpy `arange` function allows us to quickly build integer arrays. It takes `start`, `stop`, and `step` as arguments.",
"_____no_output_____"
]
],
[
[
"x = np.arange(1, 10)\ny = np.arange(2, 20, 2)\nprint(x)\nprint(y)",
"[1 2 3 4 5 6 7 8 9]\n[ 2 4 6 8 10 12 14 16 18]\n"
]
],
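[
[
"# Added example (illustrative): arange also accepts float steps, though\n# floating-point rounding can make the endpoint surprising - one reason\n# linspace (introduced below) is often preferred for non-integer grids\nprint(np.arange(0, 1, 0.25))",
"_____no_output_____"
]
],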
[
[
"We can apply any mathematical operation to a NumPy array, and it will apply that operation to every element in the array.",
"_____no_output_____"
]
],
[
[
"x = np.arange(-3, 4)\ny = x**2\nprint(y)",
"[9 4 1 0 1 4 9]\n"
]
],
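[
[
"# Added example (illustrative): operations between two same-shaped arrays\n# are applied elementwise as well\nprint(x + y)\nprint(x * y)",
"_____no_output_____"
]
],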
[
[
"Another way to make NumPy arrays is with the `linspace()` function. This allows us to choose the bounds of an interval and the number of points we want to divide it into. Numpy also has useful math constants like `pi` and `e` and math functions like `sin`, `cos`, `tan`.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"x = np.linspace(-2*np.pi, 2*np.pi, 100)\ny = np.sin(x)\nplt.plot(x, y)",
"_____no_output_____"
],
[
"# Linspace can be useful for adding more resolution to continuous functions\nxarange = np.arange(-np.pi, np.pi)\nyarange = np.cos(xarange)\n\nxlinspace = np.linspace(-np.pi, np.pi, 1000)\nylinspace = np.cos(xlinspace)\n\nplt.subplot(1, 2, 1)\nplt.plot(xarange, yarange)\n\nplt.subplot(1, 2, 2)\nplt.plot(xlinspace, ylinspace)",
"_____no_output_____"
]
],
[
[
"If we want to plot a bell curve we can use the `np.random` module to randomly sample a normal distribution.",
"_____no_output_____"
]
],
[
[
"norm = np.random.standard_normal(100000) # Draw 1000 random points from normal distribution\nhist, bins = np.histogram(norm, bins=10, density=True) # Make histogram of our samples\nplt.plot(bins[1:], hist)",
"_____no_output_____"
],
[
"hist, bins = np.histogram(norm, bins=100, density=True)\nplt.plot(bins[1:], hist)",
"_____no_output_____"
]
],
[
[
"This is barely scratching the surface of the `numpy` package, but should be enough to get you started. The [Quickstart tutorial](https://docs.scipy.org/doc/numpy-1.15.0/user/quickstart.html) is a great resource for more of the basics and some more advanced usage. Finally, don't forget to use the most powerful tool at our disposal: *Google*. Most programmers only have the most common syntax memorized, everything else can be found with Google!\n\nNext we will further explore the `matplotlib` package that we briefly introduced above!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a10b43b3e5c7f3858d4b77a297578b2a0ac4cd9
| 17,004 |
ipynb
|
Jupyter Notebook
|
02-python-201/labs/APIs/01_JSON.ipynb
|
MariaPCampos/NEOLAND-DS2020-datalabs
|
563e806e4cc26b31d64c4dbaa3bd9fb3c54b0608
|
[
"MIT"
] | null | null | null |
02-python-201/labs/APIs/01_JSON.ipynb
|
MariaPCampos/NEOLAND-DS2020-datalabs
|
563e806e4cc26b31d64c4dbaa3bd9fb3c54b0608
|
[
"MIT"
] | null | null | null |
02-python-201/labs/APIs/01_JSON.ipynb
|
MariaPCampos/NEOLAND-DS2020-datalabs
|
563e806e4cc26b31d64c4dbaa3bd9fb3c54b0608
|
[
"MIT"
] | null | null | null | 29.066667 | 1,335 | 0.514644 |
[
[
[
"# Adquisión de datos `DIRECTO`\n\n- [X] descarga directa\n- petición GET a través de API de terceros (ej. AEMET, Ayto. Barcelona....)\n- web crawling (que es una práctica ilegal...pero muy de moda entre los hackers!?¿!)",
"_____no_output_____"
],
[
"***\n## Primer paso\nEs trabajar con los datos en formato `JSON`",
"_____no_output_____"
]
],
[
[
"# Primero vamos a entender el funcionamiento del JSON a tráves los diccionarios (dict)\n\n# Construimos un diccionario de ejemplo y mostramos el tipo de datos y el contenido de la variable.\ndiccionario_ejemplo = {\"nombre\": \"Yann\", \"apellidos\": {\"apellido1\": \"LeCun\", \"apellido2\": \"-\"}, \"edad\": 56}\nprint(type(diccionario_ejemplo))\nprint(diccionario_ejemplo)\n\n# Construimos una lista de ejemplo y mostramos el tipo de datos y el contenido de la variable.\nlista_ejemplo = [1, 2, 3]\nprint(type(lista_ejemplo))\nprint(lista_ejemplo)",
"<class 'dict'>\n{'nombre': 'Yann', 'apellidos': {'apellido1': 'LeCun', 'apellido2': '-'}, 'edad': 56}\n<class 'list'>\n[1, 2, 3]\n"
],
[
"nested_dict = [diccionario_ejemplo]\nnested_dict\nprint(type(nested_dict))",
"<class 'list'>\n"
],
[
"nested_dict",
"_____no_output_____"
],
[
"type(nested_dict[0])",
"_____no_output_____"
],
[
"# Trataremos los json a parte\nimport json\n\n# Mostramos la representación del json del diccionario\njson_dict = json.dumps(diccionario_ejemplo)",
"_____no_output_____"
],
[
"# Mostramos su estructura\nprint(type(json_dict))\nprint(json_dict)",
"<class 'str'>\n{\"nombre\": \"Yann\", \"apellidos\": {\"apellido1\": \"LeCun\", \"apellido2\": \"-\"}, \"edad\": 56}\n"
],
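[
"# Added example (illustrative): json.dumps can pretty-print directly via the\n# indent parameter, which the json_print helper defined below builds on\nprint(json.dumps(diccionario_ejemplo, indent=2))",
"_____no_output_____"
],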
[
"# Mostramos la representación del json de la lista\njson_list = json.dumps(lista_ejemplo)",
"_____no_output_____"
],
[
"print(type(json_list))\nprint(json_list)",
"<class 'str'>\n[1, 2, 3]\n"
]
],
[
[
"Este proceso a través de la función `json.dumps` del json, es **serializar** el objeto, en este caso siempre será en formato 'string'.",
"_____no_output_____"
],
[
"***\nEl proceso inverso conocido como **deserializar** crea objetos Python en `list`y `dict` a través de la función `json.loads`",
"_____no_output_____"
]
],
[
[
"# Como el caso anterior convertimos los JSONs en dict y list\njson2dict = json.loads(json_dict)\nprint(json2dict)\nprint(type(json2dict))",
"{'nombre': 'Yann', 'apellidos': {'apellido1': 'LeCun', 'apellido2': '-'}, 'edad': 56}\n<class 'dict'>\n"
],
[
"# No podemos convertir a json datos en lista o diccionarios, tienen que ser en formato o class STR, BYTES o BYTEARRAY\njson2dict_2 = json.loads(nested_dict)",
"_____no_output_____"
],
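[
"# Added fix (illustrative): serialize the nested structure to a JSON string\n# first; json.loads can then deserialize it back into Python objects\nroundtrip = json.loads(json.dumps(nested_dict))\nprint(type(roundtrip), roundtrip)",
"_____no_output_____"
],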
[
"# Convertimos el objeto (anteriormente en lista) de json a list\njson2list = json.loads(json_list)\nprint(json2list)\nprint(type(json2list))",
"[1, 2, 3]\n<class 'list'>\n"
]
],
[
[
"***\nPara mejorar la legibilidad de los datos que obtendremos de las API, definiremos una función que mostrará cadenas JSON por pantalla formateadas para mejorar la lectura. La función aceptará tanto cadenas de carácteres con contenido JSON como objetos Python, y mostrará el contenido por pantalla.\n\nAdemás, la función recibirá un parámetro opcional que nos permitirá indicar el número máximo de líneas que hay que mostrar. Así, podremos usar la función para visualizar las primeras líneas de un JSON largo, sin tener que mostrar el JSON completo por pantalla.",
"_____no_output_____"
]
],
[
[
"# Definimos una función `json_print` que tiene un parámetro (json_data) y uno opcional (limit)\n# El parámetro sort_keys FALSE para ordenar o no alfabeticamente\n# el parámetro indent para buscar entre los anidados (niveles)\ndef json_print(json_data, limit=None):\n if isinstance(json_data, (str)):\n json_data = json.loads(json_data)\n nice = json.dumps(json_data, sort_keys=False, indent=3, separators=(',',':'))\n print(\"\\n\".join(nice.split(\"\\n\")[0:limit]))\n if limit is not None:\n print(\"[....]\")",
"_____no_output_____"
],
[
"# Aplicamos la función a un tweet\ntweet = {\n \"created_at\": \"Thu Apr 06 15:24:15 +0000 2017\",\n \"id_str\": \"850006245121695744\",\n \"text\": \"1\\/ Today we\\u2019re sharing our vision for the future of the Twitter API platform!\\nhttps:\\/\\/t.co\\/XweGngmxlP\",\n \"user\": {\n \"id\": 2244994945,\n \"name\": \"Twitter Dev\",\n \"screen_name\": \"TwitterDev\",\n \"location\": \"Internet\",\n \"url\": \"https:\\/\\/dev.twitter.com\\/\",\n \"description\": \"Your official source for Twitter Platform news, updates & events. Need technical help? Visit https:\\/\\/twittercommunity.com\\/ \\u2328\\ufe0f #TapIntoTwitter\"\n },\n \"place\": { \n },\n \"entities\": {\n \"hashtags\": [ \n ],\n \"urls\": [\n {\n \"url\": \"https:\\/\\/t.co\\/XweGngmxlP\",\n \"unwound\": {\n \"url\": \"https:\\/\\/cards.twitter.com\\/cards\\/18ce53wgo4h\\/3xo1c\",\n \"title\": \"Building the Future of the Twitter API Platform\"\n }\n }\n ],\n \"user_mentions\": [ \n ]\n }\n}",
"_____no_output_____"
],
[
"tweet",
"_____no_output_____"
],
[
"type(tweet)",
"_____no_output_____"
],
[
"# Convertimos este tweet en json\njson_print(tweet)",
"{\n \"created_at\":\"Thu Apr 06 15:24:15 +0000 2017\",\n \"entities\":{\n \"hashtags\":[],\n \"urls\":[\n {\n \"unwound\":{\n \"title\":\"Building the Future of the Twitter API Platform\",\n \"url\":\"https:\\\\/\\\\/cards.twitter.com\\\\/cards\\\\/18ce53wgo4h\\\\/3xo1c\"\n },\n \"url\":\"https:\\\\/\\\\/t.co\\\\/XweGngmxlP\"\n }\n ],\n \"user_mentions\":[]\n },\n \"id_str\":\"850006245121695744\",\n \"place\":{},\n \"text\":\"1\\\\/ Today we\\u2019re sharing our vision for the future of the Twitter API platform!\\nhttps:\\\\/\\\\/t.co\\\\/XweGngmxlP\",\n \"user\":{\n \"description\":\"Your official source for Twitter Platform news, updates & events. Need technical help? Visit https:\\\\/\\\\/twittercommunity.com\\\\/ \\u2328\\ufe0f #TapIntoTwitter\",\n \"id\":2244994945,\n \"location\":\"Internet\",\n \"name\":\"Twitter Dev\",\n \"screen_name\":\"TwitterDev\",\n \"url\":\"https:\\\\/\\\\/dev.twitter.com\\\\/\"\n }\n}\n"
],
[
"print(json_dict)\nprint(type(json_dict))",
"{\"nombre\": \"Yann\", \"apellidos\": {\"apellido1\": \"LeCun\", \"apellido2\": \"-\"}, \"edad\": 56}\n<class 'str'>\n"
],
[
"print(diccionario_ejemplo)",
"{'nombre': 'Yann', 'apellidos': {'apellido1': 'LeCun', 'apellido2': '-'}, 'edad': 56}\n"
],
[
"print(type(diccionario_ejemplo))",
"<class 'dict'>\n"
],
[
"json_print(diccionario_ejemplo)",
"{\n \"apellidos\":{\n \"apellido1\":\"LeCun\",\n \"apellido2\":\"-\"\n },\n \"edad\":56,\n \"nombre\":\"Yann\"\n}\n"
],
[
"json_print(lista_ejemplo)",
"[\n 1,\n 2,\n 3\n]\n"
],
[
"diccionario_ejemplo",
"_____no_output_____"
],
[
"print(type(json_print(diccionario_ejemplo, 3)))",
"{\n \"nombre\":\"Yann\",\n \"apellidos\":{\n[....]\n<class 'NoneType'>\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10b49aed91db69dce59eb1dfe130b3214fcd9d
| 47,416 |
ipynb
|
Jupyter Notebook
|
recurrent-neural-networks/char-rnn/Character_Level_RNN_Solution.ipynb
|
zjh1943/deep-learning-v2-pytorch
|
4813ecc7e4c4e29fd35da7a143b0881e11f47a8f
|
[
"MIT"
] | null | null | null |
recurrent-neural-networks/char-rnn/Character_Level_RNN_Solution.ipynb
|
zjh1943/deep-learning-v2-pytorch
|
4813ecc7e4c4e29fd35da7a143b0881e11f47a8f
|
[
"MIT"
] | null | null | null |
recurrent-neural-networks/char-rnn/Character_Level_RNN_Solution.ipynb
|
zjh1943/deep-learning-v2-pytorch
|
4813ecc7e4c4e29fd35da7a143b0881e11f47a8f
|
[
"MIT"
] | null | null | null | 45.945736 | 1,749 | 0.58824 |
[
[
[
"# Character-Level LSTM in PyTorch\n\nIn this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**\n\nThis network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.\n\n<img src=\"assets/charseq.jpeg\" width=\"500\">",
"_____no_output_____"
],
[
"First let's load in our required resources for data loading and model creation.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F",
"_____no_output_____"
]
],
[
[
"## Load in Data\n\nThen, we'll load the Anna Karenina text file and convert it into integers for our network to use. ",
"_____no_output_____"
]
],
[
[
"# open text file and read in data as `text`\nwith open('data/anna.txt', 'r') as f:\n text = f.read()",
"_____no_output_____"
]
],
[
[
"Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.",
"_____no_output_____"
]
],
[
[
"text[:100]",
"_____no_output_____"
]
],
[
[
"### Tokenization\n\nIn the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.",
"_____no_output_____"
]
],
[
[
"# encode the text and map each character to an integer and vice versa\n\n# we create two dictionaries:\n# 1. int2char, which maps integers to characters\n# 2. char2int, which maps characters to unique integers\nchars = tuple(set(text))\nint2char = dict(enumerate(chars))\nchar2int = {ch: ii for ii, ch in int2char.items()}\n\n# encode the text\nencoded = np.array([char2int[ch] for ch in text])",
"_____no_output_____"
]
],
[
[
"And we can see those same characters from above, encoded as integers.",
"_____no_output_____"
]
],
[
[
"encoded[:100]",
"_____no_output_____"
]
],
[
[
"## Pre-processing the data\n\nAs you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!\n",
"_____no_output_____"
]
],
[
[
"def one_hot_encode(arr, n_labels):\n \n # Initialize the the encoded array\n one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)\n \n # Fill the appropriate elements with ones\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n \n # Finally reshape it to get back to the original array\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n \n return one_hot",
"_____no_output_____"
],
[
"# check that the function works as expected\ntest_seq = np.array([[3, 5, 1]])\none_hot = one_hot_encode(test_seq, 8)\n\nprint(one_hot)",
"[[[0. 0. 0. 1. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 1. 0. 0.]\n [0. 1. 0. 0. 0. 0. 0. 0.]]]\n"
]
],
[
[
"## Making training mini-batches\n\n\nTo train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:\n\n<img src=\"assets/[email protected]\" width=500px>\n\n\n<br>\n\nIn this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.\n\n### Creating Batches\n\n**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches. **\n\nEach batch contains $N \\times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.\n\n**2. After that, we need to split `arr` into $N$ batches. ** \n\nYou can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \\times (M * K)$.\n\n**3. Now that we have this array, we can iterate through it to get our mini-batches. **\n\nThe idea is each batch is a $N \\times M$ window on the $N \\times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.\n\n> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**",
"_____no_output_____"
]
],
[
[
"def get_batches(arr, batch_size, seq_length):\n '''Create a generator that returns batches of size\n batch_size x seq_length from arr.\n \n Arguments\n ---------\n arr: Array you want to make batches from\n batch_size: Batch size, the number of sequences per batch\n seq_length: Number of encoded chars in a sequence\n '''\n \n batch_size_total = batch_size * seq_length\n # total number of batches we can make\n n_batches = len(arr)//batch_size_total\n \n # Keep only enough characters to make full batches\n arr = arr[:n_batches * batch_size_total]\n # Reshape into batch_size rows\n arr = arr.reshape((batch_size, -1))\n \n # iterate through the array, one sequence at a time\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:, n:n+seq_length]\n # The targets, shifted by one\n y = np.zeros_like(x)\n try:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]\n except IndexError:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]\n yield x, y",
"_____no_output_____"
]
],
[
[
"### Test Your Implementation\n\nNow I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.",
"_____no_output_____"
]
],
[
[
"batches = get_batches(encoded, 8, 50)\nx, y = next(batches)",
"_____no_output_____"
],
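[
"# Added check (illustrative): each batch should be batch_size x seq_length\nprint(x.shape, y.shape)",
"_____no_output_____"
],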
[
"# printing out the first 10 items in a sequence\nprint('x\\n', x[:10, :10])\nprint('\\ny\\n', y[:10, :10])",
"x\n [[ 3 31 63 46 69 11 9 65 76 33]\n [19 56 68 65 69 31 63 69 65 63]\n [11 68 61 65 56 9 65 63 65 10]\n [19 65 69 31 11 65 74 31 62 11]\n [65 19 63 70 65 31 11 9 65 69]\n [74 81 19 19 62 56 68 65 63 68]\n [65 37 68 68 63 65 31 63 61 65]\n [32 25 2 56 68 19 54 23 0 65]]\n\ny\n [[31 63 46 69 11 9 65 76 33 33]\n [56 68 65 69 31 63 69 65 63 69]\n [68 61 65 56 9 65 63 65 10 56]\n [65 69 31 11 65 74 31 62 11 10]\n [19 63 70 65 31 11 9 65 69 11]\n [81 19 19 62 56 68 65 63 68 61]\n [37 68 68 63 65 31 63 61 65 19]\n [25 2 56 68 19 54 23 0 65 52]]\n"
]
],
[
[
"If you implemented `get_batches` correctly, the above output should look something like \n```\nx\n [[25 8 60 11 45 27 28 73 1 2]\n [17 7 20 73 45 8 60 45 73 60]\n [27 20 80 73 7 28 73 60 73 65]\n [17 73 45 8 27 73 66 8 46 27]\n [73 17 60 12 73 8 27 28 73 45]\n [66 64 17 17 46 7 20 73 60 20]\n [73 76 20 20 60 73 8 60 80 73]\n [47 35 43 7 20 17 24 50 37 73]]\n\ny\n [[ 8 60 11 45 27 28 73 1 2 2]\n [ 7 20 73 45 8 60 45 73 60 45]\n [20 80 73 7 28 73 60 73 65 7]\n [73 45 8 27 73 66 8 46 27 65]\n [17 60 12 73 8 27 28 73 45 27]\n [64 17 17 46 7 20 73 60 20 80]\n [76 20 20 60 73 8 60 80 73 17]\n [35 43 7 20 17 24 50 37 73 36]]\n ```\n although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.",
"_____no_output_____"
],
[
"---\n## Defining the network with PyTorch\n\nBelow is where you'll define the network.\n\n<img src=\"assets/charRNN.png\" width=500px>\n\nNext, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.",
"_____no_output_____"
],
[
"### Model Structure\n\nIn `__init__` the suggested structure is as follows:\n* Create and store the necessary dictionaries (this has been done for you)\n* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)\n* Define a dropout layer with `drop_prob`\n* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)\n* Finally, initialize the weights (again, this has been given)\n\nNote that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.",
"_____no_output_____"
],
[
"---\n### LSTM Inputs/Outputs\n\nYou can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows\n\n```python\nself.lstm = nn.LSTM(input_size, n_hidden, n_layers, \n dropout=drop_prob, batch_first=True)\n```\n\nwhere `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the `forward` function, we can stack up the LSTM cells into layers using `.view`. With this, you pass in a list of cells and it will send the output of one cell into the next cell.\n\nWe also need to create an initial hidden state of all zeros. This is done like so\n\n```python\nself.init_hidden()\n```",
"_____no_output_____"
]
],
[
[
"# check if GPU is available\ntrain_on_gpu = torch.cuda.is_available()\nif(train_on_gpu):\n print('Training on GPU!')\nelse: \n print('No GPU available, training on CPU; consider making n_epochs very small.')",
"No GPU available, training on CPU; consider making n_epochs very small.\n"
],
[
"class CharRNN(nn.Module):\n \n def __init__(self, tokens, n_hidden=256, n_layers=2,\n drop_prob=0.5, lr=0.001):\n super().__init__()\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n \n # creating character dictionaries\n self.chars = tokens\n self.int2char = dict(enumerate(self.chars))\n self.char2int = {ch: ii for ii, ch in self.int2char.items()}\n \n ## TODO: define the LSTM\n self.lstm = nn.LSTM(len(self.chars), n_hidden, n_layers, \n dropout=drop_prob, batch_first=True)\n \n ## TODO: define a dropout layer\n self.dropout = nn.Dropout(drop_prob)\n \n ## TODO: define the final, fully-connected output layer\n self.fc = nn.Linear(n_hidden, len(self.chars))\n \n \n def forward(self, x, hidden):\n ''' Forward pass through the network. \n These inputs are x, and the hidden/cell state `hidden`. '''\n \n ## TODO: Get the outputs and the new hidden state from the lstm\n r_output, hidden = self.lstm(x, hidden)\n \n ## TODO: pass through a dropout layer\n out = self.dropout(r_output)\n \n # Stack up LSTM outputs using view\n # you may need to use contiguous to reshape the output\n out = out.contiguous().view(-1, self.n_hidden)\n \n ## TODO: put x through the fully-connected layer\n out = self.fc(out)\n \n # return the final output and the hidden state\n return out, hidden\n \n \n def init_hidden(self, batch_size):\n ''' Initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n \n if (train_on_gpu):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n \n return hidden\n ",
"_____no_output_____"
]
],
[
[
"## Time to train\n\nThe train function gives us the ability to set the number of epochs, the learning rate, and other parameters.\n\nBelow we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!\n\nA couple of details about training: \n>* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.\n* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.",
"_____no_output_____"
]
],
[
[
"def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):\n ''' Training a network \n \n Arguments\n ---------\n \n net: CharRNN network\n data: text data to train the network\n epochs: Number of epochs to train\n batch_size: Number of mini-sequences per mini-batch, aka batch size\n seq_length: Number of character steps per mini-batch\n lr: learning rate\n clip: gradient clipping\n val_frac: Fraction of data to hold out for validation\n print_every: Number of steps for printing training and validation loss\n \n '''\n net.train()\n \n opt = torch.optim.Adam(net.parameters(), lr=lr)\n criterion = nn.CrossEntropyLoss()\n \n # create training and validation data\n val_idx = int(len(data)*(1-val_frac))\n data, val_data = data[:val_idx], data[val_idx:]\n \n if(train_on_gpu):\n net.cuda()\n \n counter = 0\n n_chars = len(net.chars)\n for e in range(epochs):\n # initialize hidden state\n h = net.init_hidden(batch_size)\n \n for x, y in get_batches(data, batch_size, seq_length):\n counter += 1\n \n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y)\n \n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n net.zero_grad()\n \n # get the output from the model\n output, h = net(inputs, h)\n \n # calculate the loss and perform backprop\n loss = criterion(output, targets.view(batch_size*seq_length).long())\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n opt.step()\n \n # loss stats\n if counter % print_every == 0:\n # Get validation loss\n val_h = net.init_hidden(batch_size)\n val_losses = []\n net.eval()\n for x, y in get_batches(val_data, batch_size, seq_length):\n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n x, y = torch.from_numpy(x), torch.from_numpy(y)\n \n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n val_h = tuple([each.data for each in val_h])\n \n inputs, targets = x, y\n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n output, val_h = net(inputs, val_h)\n val_loss = criterion(output, targets.view(batch_size*seq_length).long())\n \n val_losses.append(val_loss.item())\n \n net.train() # reset to train mode after iterationg through validation data\n \n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Step: {}...\".format(counter),\n \"Loss: {:.4f}...\".format(loss.item()),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)))",
"_____no_output_____"
]
],
[
[
"## Instantiating the model\n\nNow we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!",
"_____no_output_____"
]
],
[
[
"# define and print the net\nn_hidden=512\nn_layers=2\n\nnet = CharRNN(chars, n_hidden, n_layers)\nprint(net)",
"CharRNN(\n (lstm): LSTM(83, 512, num_layers=2, batch_first=True, dropout=0.5)\n (dropout): Dropout(p=0.5, inplace=False)\n (fc): Linear(in_features=512, out_features=83, bias=True)\n)\n"
],
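[
"# Added sketch (illustrative): rough parameter count, useful for the sizing\n# advice quoted later in this notebook\nn_params = sum(p.numel() for p in net.parameters())\nprint('Total parameters:', n_params)",
"_____no_output_____"
],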
[
"batch_size = 128\nseq_length = 100\nn_epochs = 20 # start smaller if you are just testing initial behavior\n\n# train the model\ntrain(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)",
"Epoch: 1/20... Step: 10... Loss: 3.2768... Val Loss: 3.2400\nEpoch: 1/20... Step: 20... Loss: 3.1549... Val Loss: 3.1423\nEpoch: 1/20... Step: 30... Loss: 3.1456... Val Loss: 3.1266\nEpoch: 1/20... Step: 40... Loss: 3.1206... Val Loss: 3.1196\nEpoch: 1/20... Step: 50... Loss: 3.1450... Val Loss: 3.1178\nEpoch: 1/20... Step: 60... Loss: 3.1170... Val Loss: 3.1165\nEpoch: 1/20... Step: 70... Loss: 3.1086... Val Loss: 3.1158\nEpoch: 1/20... Step: 80... Loss: 3.1256... Val Loss: 3.1136\nEpoch: 1/20... Step: 90... Loss: 3.1257... Val Loss: 3.1081\nEpoch: 1/20... Step: 100... Loss: 3.1056... Val Loss: 3.0967\nEpoch: 1/20... Step: 110... Loss: 3.0923... Val Loss: 3.0803\nEpoch: 1/20... Step: 120... Loss: 3.0267... Val Loss: 3.0243\nEpoch: 1/20... Step: 130... Loss: 2.9502... Val Loss: 2.9211\n"
]
],
[
[
"## Getting the best model\n\nTo set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.",
"_____no_output_____"
],
[
"## Hyperparameters\n\nHere are the hyperparameters for the network.\n\nIn defining the model:\n* `n_hidden` - The number of units in the hidden layers.\n* `n_layers` - Number of hidden LSTM layers to use.\n\nWe assume that dropout probability and learning rate will be kept at the default, in this example.\n\nAnd in training:\n* `batch_size` - Number of sequences running through the network in one pass.\n* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.\n* `lr` - Learning rate for training\n\nHere's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).\n\n> ## Tips and Tricks\n\n>### Monitoring Validation Loss vs. Training Loss\n>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:\n\n> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.\n> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)\n\n> ### Approximate number of parameters\n\n> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:\n\n> - The number of parameters in your model. This is printed when you start training.\n> - The size of your dataset. 1MB file is approximately 1 million characters.\n\n>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:\n\n> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.\n> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.\n\n> ### Best models strategy\n\n>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). 
Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.\n\n>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.\n\n>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.",
"_____no_output_____"
],
[
"## Checkpoint\n\nAfter training, we'll save the model so we can load it again later if we need too. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.",
"_____no_output_____"
]
],
[
[
"# change the name, for saving multiple files\nmodel_name = 'rnn_20_epoch.net'\n\ncheckpoint = {'n_hidden': net.n_hidden,\n 'n_layers': net.n_layers,\n 'state_dict': net.state_dict(),\n 'tokens': net.chars}\n\nwith open(model_name, 'wb') as f:\n torch.save(checkpoint, f)",
"_____no_output_____"
]
],
[
[
"---\n## Making Predictions\n\nNow that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!\n\n### A note on the `predict` function\n\nThe output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.\n\n> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.\n\n### Top K sampling\n\nOur predictions come from a categorical probability distribution over all the possible characters. We can make the sample text and make it more reasonable to handle (with less variables) by only considering some $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).\n",
"_____no_output_____"
]
],
[
[
"def predict(net, char, h=None, top_k=None):\n ''' Given a character, predict the next character.\n Returns the predicted character and the hidden state.\n '''\n \n # tensor inputs\n x = np.array([[net.char2int[char]]])\n x = one_hot_encode(x, len(net.chars))\n inputs = torch.from_numpy(x)\n \n if(train_on_gpu):\n inputs = inputs.cuda()\n \n # detach hidden state from history\n h = tuple([each.data for each in h])\n # get the output of the model\n out, h = net(inputs, h)\n\n # get the character probabilities\n p = F.softmax(out, dim=1).data\n if(train_on_gpu):\n p = p.cpu() # move to cpu\n \n # get top characters\n if top_k is None:\n top_ch = np.arange(len(net.chars))\n else:\n p, top_ch = p.topk(top_k)\n top_ch = top_ch.numpy().squeeze()\n \n # select the likely next character with some element of randomness\n p = p.numpy().squeeze()\n char = np.random.choice(top_ch, p=p/p.sum())\n \n # return the encoded value of the predicted char and the hidden state\n return net.int2char[char], h",
"_____no_output_____"
]
],
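[
[
"# Added usage example (illustrative): one prediction step with a fresh hidden\n# state; top_k=5 samples among the five most likely next characters\nh = net.init_hidden(1)\nchar, h = predict(net, 'A', h, top_k=5)\nprint(char)",
"_____no_output_____"
]
],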
[
[
"### Priming and generating text \n\nTypically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.",
"_____no_output_____"
]
],
[
[
"def sample(net, size, prime='The', top_k=None):\n \n if(train_on_gpu):\n net.cuda()\n else:\n net.cpu()\n \n net.eval() # eval mode\n \n # First off, run through the prime characters\n chars = [ch for ch in prime]\n h = net.init_hidden(1)\n for ch in prime:\n char, h = predict(net, ch, h, top_k=top_k)\n\n chars.append(char)\n \n # Now pass in the previous character and get a new one\n for ii in range(size):\n char, h = predict(net, chars[-1], h, top_k=top_k)\n chars.append(char)\n\n return ''.join(chars)",
"_____no_output_____"
],
[
"print(sample(net, 1000, prime='Anna', top_k=5))",
"Anna had so that an enter strength to be says off and he cared to be an unmarrely sister.\n\nThe children are saying in a place. A smile of their secretary and the sense of a condition. He saw that the princess was the same, the peaciting of his\nbriderous country second still. That she had seen him a little as it was the simminest that he had not been\nthe simple of\nthe passion to see his finger, and\nhis brother and the points he heard this place which\nhe was not\nsense. All had sent him that he could he concealed the steps and that he was to be patied,\nso much at hands, at the servants who had said something with the\nchair.\n \"This is a solitat matter?\"\n\n\"It's not thinking in the more the point is and that he's talking of the drinking of the\ncrain. If I was a memory. Have you\nseen my to thousard more\ncharacteribries, and this, and would be the framing of the most careful towards me, to the country too that they did nothind when she could not see him. What is\nit you want a conviluated more to mo\n"
]
],
[
[
"## Loading a checkpoint",
"_____no_output_____"
]
],
[
[
"# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`\nwith open('rnn_20_epoch.net', 'rb') as f:\n checkpoint = torch.load(f)\n \nloaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])\nloaded.load_state_dict(checkpoint['state_dict'])",
"_____no_output_____"
],
[
"# Sample using a loaded model\nprint(sample(loaded, 2000, top_k=5, prime=\"And Levin said\"))",
"And Levin said those second portryit on the contrast.\n\n\"What is it?\" said Stepan Arkadyevitch,\nletting up his\nshirt and talking to her face. And he had\nnot speak to Levin that his head on the round stop and\ntrouble\nto be faint, as he\nwas not a man who was said, she was the setter times that had been before so much talking in the steps of the door, his force to think of their sense of the sendence, both always bowing about in the country and the same time of her character and all at him with his face, and went out of her hand, sitting down beside\nthe clothes, and\nthe\nsame\nsingle mind and when they seemed to a strange of his\nbrother's.\n\nAnd he\nwas so meched the paints was so standing the man had been a love was the man, and stopped at once in the first step. But he was\na change to\ndo. The sound of the partice say a construnting his\nsteps and telling a single camp of the\nready and three significance of the same forest.\n\n\"Yes, but you see it.\" He carried his face and the condition in their carriage to her, and to go, she\nsaid that had been talking of his forest, a strange world, when Levin came the conversation as sense of her son, and he could not see him to hive answer, which had been saking when at tomere within the\ncounting her face that he was serenely from her she took a counting, there\nwas the since he\nhad too wearted and seemed to her,\" said the member of the cannors in the steps to his\nword.\n\nThe moss of the convincing it had been drawing up the people that there was nothing without this way or a single wife as he did not hear\nhim or that he was not seeing that she would be a court of the sound of some sound of the position, and to spartly she\ncould\nsee her and a sundroup times there was nothing this\nfather and as she stoop serious in the sound, was a steps of the master, a few sistersily play of his husband. The crowd had no carreated herself, and truets, and shaking up, the pases, and the moment that he was not at the marshal, and the starling the secret were stopping to be\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
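[
"code"
],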
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a10b63ab3b503cb65f3b7b10d6d22db00ace2e4
| 14,246 |
ipynb
|
Jupyter Notebook
|
intro_rl/03-Application-Cart-Pole.ipynb
|
mingjunwang88/rllib_recexample
|
fcaefd009a17cc03b2636ed60c1302aa6c02091c
|
[
"MIT"
] | 40 |
2020-08-14T06:23:33.000Z
|
2022-03-30T07:44:09.000Z
|
intro_rl/03-Application-Cart-Pole.ipynb
|
mingjunwang88/rllib_recexample
|
fcaefd009a17cc03b2636ed60c1302aa6c02091c
|
[
"MIT"
] | 3 |
2020-03-29T23:07:55.000Z
|
2021-05-31T12:27:07.000Z
|
intro_rl/03-Application-Cart-Pole.ipynb
|
mingjunwang88/rllib_recexample
|
fcaefd009a17cc03b2636ed60c1302aa6c02091c
|
[
"MIT"
] | 6 |
2020-03-26T22:12:44.000Z
|
2021-07-28T03:47:30.000Z
| 36.81137 | 406 | 0.613155 |
[
[
[
"# Ray RLlib - Sample Application: CartPole\n\n© 2019-2021, Anyscale. All Rights Reserved\n\n\n\nWe were briefly introduced to the `CartPole` example and the OpenAI gym `CartPole-v1` environment ([gym.openai.com/envs/CartPole-v1/](https://gym.openai.com/envs/CartPole-v1/)) in the [reinforcement learning introduction](../01-Introduction-to-Reinforcement-Learning.ipynb). This lesson uses [RLlib](https://ray.readthedocs.io/en/latest/rllib.html) to train a policy for `CartPole`.\n\nRecall that the `gym` Python module provides MDP interfaces to a variety of simulators, like the simple simulator for the physics of balancing a pole on a cart that is used by the CartPole environment. The `CartPole` problem is described at https://gym.openai.com/envs/CartPole-v1.\n\n\n\n([source](https://gym.openai.com/envs/CartPole-v1/))\n\nEven though this is a relatively simple and quick example to run, its results can be understood quite visually. `CartPole` is one of OpenAI Gym's [\"classic control\"](https://gym.openai.com/envs/#classic_control) examples.\n\nFor more background about this problem, see:\n\n* [\"Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem\"](https://ieeexplore.ieee.org/document/6313077), AG Barto, RS Sutton, and CW Anderson, *IEEE Transactions on Systems, Man, and Cybernetics* (1983). The same Sutton and Barto who wrote [*Reinforcement Learning: An Introduction*](https://mitpress.mit.edu/books/reinforcement-learning-second-edition).\n* [\"Cartpole - Introduction to Reinforcement Learning (DQN - Deep Q-Learning)\"](https://towardsdatascience.com/cartpole-introduction-to-reinforcement-learning-ed0eb5b58288), [Greg Surma](https://twitter.com/GSurma).",
"_____no_output_____"
],
[
"First, import Ray and the PPO module in RLlib, then start Ray.",
"_____no_output_____"
]
],
[
[
"import ray\nimport ray.rllib.agents.ppo as ppo",
"_____no_output_____"
],
[
"import pandas as pd\nimport json\nimport os\nimport shutil\nimport sys",
"_____no_output_____"
]
],
[
[
"Model *checkpoints* will get saved after each iteration into directories under `tmp/ppo/cart`, i.e., relative to this directory. \nThe default directories for checkpoints are `$HOME/ray_results/<algo_env>/.../checkpoint_N`.\n\n> **Note:** If you prefer to use a different directory root, change it in the next cell _and_ in the `rllib rollout` command below.",
"_____no_output_____"
]
],
[
[
"checkpoint_root = \"tmp/ppo/cart\"",
"_____no_output_____"
]
],
[
[
"Clean up output of previous lessons (optional):",
"_____no_output_____"
]
],
[
[
"# Where checkpoints are written:\nshutil.rmtree(checkpoint_root, ignore_errors=True, onerror=None)\n\n# Where some data will be written and used by Tensorboard below:\nray_results = f'{os.getenv(\"HOME\")}/ray_results/'\nshutil.rmtree(ray_results, ignore_errors=True, onerror=None)",
"_____no_output_____"
]
],
[
[
"Start Ray:",
"_____no_output_____"
]
],
[
[
"info = ray.init(ignore_reinit_error=True)",
"_____no_output_____"
]
],
[
[
"The Ray Dashboard is useful for monitoring Ray:",
"_____no_output_____"
]
],
[
[
"print(\"Dashboard URL: http://{}\".format(info[\"webui_url\"]))",
"_____no_output_____"
]
],
[
[
"Next we'll train an RLlib policy with the [`CartPole-v1` environment](https://gym.openai.com/envs/CartPole-v1/).\n\nIf you've gone through the _Multi-Armed Bandits_ lessons, you may recall that we used [Ray Tune](http://tune.io), the Ray Hyperparameter Tuning system, to drive training. Here we'll do it ourselves.\n\nBy default, training runs for `10` iterations. Increase the `N_ITER` setting if you want to train longer and see the resulting rewards improve. However, if the max score of `500` (the episode cap for `CartPole-v1`) is achieved early, you can use a smaller number of iterations.\n\n\n- `num_workers` is the number of actors that the agent will create. This determines the degree of parallelism that will be used. In a cluster, these actors will be spread over the available nodes.\n- `num_sgd_iter` is the number of epochs of SGD (stochastic gradient descent, i.e., passes through the data) that will be used to optimize the PPO surrogate objective at each iteration of PPO, for each _minibatch_ (\"chunk\") of training data. Using minibatches is more efficient than training with one record at a time.\n- `sgd_minibatch_size` is the SGD minibatch size (batches of data) that will be used to optimize the PPO surrogate objective.\n- `model` contains a dictionary of parameters describing the neural net used to parameterize the policy. The `fcnet_hiddens` parameter is a list of the sizes of the hidden layers. Here, we have two hidden layers of sizes 100 and 50.\n- `num_cpus_per_worker` when set to 0 prevents Ray from pinning a CPU core to each worker; this avoids running out of CPUs in a constrained environment like a laptop or a cloud VM.\n\nA small arithmetic sketch after the next code cell shows how `num_sgd_iter` and `sgd_minibatch_size` combine into gradient steps per iteration.\n\n> **Note:** If you change the values shown for `config['model']['fcnet_hiddens']`, make the same change in the `rllib rollout` command below!",
"_____no_output_____"
]
],
[
[
"SELECT_ENV = \"CartPole-v1\" # Specifies the OpenAI Gym environment for Cart Pole\nN_ITER = 10 # Number of training runs.\n\nconfig = ppo.DEFAULT_CONFIG.copy() # PPO's default configuration. See the next code cell.\nconfig[\"log_level\"] = \"WARN\" # Suppress too many messages, but try \"INFO\" to see what can be printed.\n\n# Other settings we might adjust:\nconfig[\"num_workers\"] = 1 # Use > 1 for using more CPU cores, including over a cluster\nconfig[\"num_sgd_iter\"] = 10 # Number of SGD (stochastic gradient descent) iterations per training minibatch.\n # I.e., for each minibatch of data, do this many passes over it to train. \nconfig[\"sgd_minibatch_size\"] = 250 # The amount of data records per minibatch\nconfig[\"model\"][\"fcnet_hiddens\"] = [100, 50] #\nconfig[\"num_cpus_per_worker\"] = 0 # This avoids running out of resources in the notebook environment when this cell is re-executed",
"_____no_output_____"
]
],
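[
[
"# Back-of-the-envelope arithmetic (a sketch, not part of the original lesson): how the\n# settings above combine into gradient steps per training iteration. The\n# train_batch_size of 4000 is PPO's default in this version of Ray -- an assumption here.\ntrain_batch_size = 4000                                         # records collected per iteration\nminibatches = train_batch_size // config[\"sgd_minibatch_size\"]  # 4000 // 250 = 16 chunks\ngradient_steps = minibatches * config[\"num_sgd_iter\"]           # 16 * 10 = 160 SGD steps\nprint(f\"{minibatches} minibatches -> {gradient_steps} SGD steps per training iteration\")",
"_____no_output_____"
]
],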
[
[
"Out of curiosity, let's see what configuration settings are defined for PPO. Note in particular the parameters for the deep learning `model`:",
"_____no_output_____"
]
],
[
[
"ppo.DEFAULT_CONFIG",
"_____no_output_____"
],
[
"agent = ppo.PPOTrainer(config, env=SELECT_ENV)\n\nresults = []\nepisode_data = []\nepisode_json = []\n\nfor n in range(N_ITER):\n result = agent.train()\n results.append(result)\n \n episode = {\n \"n\": n,\n \"episode_reward_min\": result[\"episode_reward_min\"],\n \"episode_reward_mean\": result[\"episode_reward_mean\"], \n \"episode_reward_max\": result[\"episode_reward_max\"], \n \"episode_len_mean\": result[\"episode_len_mean\"],\n }\n \n episode_data.append(episode)\n episode_json.append(json.dumps(episode))\n file_name = agent.save(checkpoint_root)\n \n print(f'{n:3d}: Min/Mean/Max reward: {result[\"episode_reward_min\"]:8.4f}/{result[\"episode_reward_mean\"]:8.4f}/{result[\"episode_reward_max\"]:8.4f}. Checkpoint saved to {file_name}')",
"_____no_output_____"
]
],
[
[
"The episode rewards should increase after multiple iterations. Try tweaking the config parameters. Smaller values for the `num_sgd_iter`, `sgd_minibatch_size`, or the `model`'s `fcnet_hiddens` will train faster, but take longer to improve the policy.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(data=episode_data)\ndf",
"_____no_output_____"
],
[
"df.plot(x=\"n\", y=[\"episode_reward_mean\", \"episode_reward_min\", \"episode_reward_max\"], secondary_y=True)",
"_____no_output_____"
]
],
[
[
"Also, print out the policy and model to see the results of training in detail…",
"_____no_output_____"
]
],
[
[
"import pprint\n\npolicy = agent.get_policy()\nmodel = policy.model\n\npprint.pprint(model.variables())\npprint.pprint(model.value_function())\n\nprint(model.base_model.summary())",
"_____no_output_____"
]
],
[
[
"## Rollout\n\nNext we'll use the [RLlib rollout CLI](https://ray.readthedocs.io/en/latest/rllib-training.html#evaluating-trained-policies) to evaluate the trained policy.\n\nThis visualizes the `CartPole` agent operating within the simulation: moving the cart left or right to avoid having the pole fall over.\n\nWe'll use the last saved checkpoint, `checkpoint_10` (or whatever you set for `N_ITER` above) for the rollout, evaluated through `2000` steps.\n\n> **Notes:** \n>\n> 1. If you changed `checkpoint_root` above to be different from `tmp/ppo/cart`, then change it here, too. Note that due to bugs in variable substitution in Jupyter notebooks, we can't use variables in the next cell, unfortunately.\n> 2. If you changed the model parameters, specifically the `fcnet_hiddens` array in the `config` object above, make the same change here.\n\nYou may need to make one more modification, depending on how you are running this tutorial:\n\n1. Running on your laptop? - Remove the line `--no-render`. \n2. Running on the Anyscale Service? The popup windows that would normally be created by the rollout can't be viewed in this case. Hence, the `--no-render` flag suppresses them. The code cell afterwards provides a sample video. You can try adding `--video-dir tmp/ppo/cart`, which will generate MP4 videos, then download them to view them. Or copy the `Video` cell below and use it to view the movies.\n\nA short programmatic alternative to the CLI is sketched after the next cell.",
"_____no_output_____"
]
],
[
[
"!rllib rollout tmp/ppo/cart/checkpoint_10/checkpoint-10 \\\n --config \"{\\\"env\\\": \\\"CartPole-v1\\\", \\\"model\\\": {\\\"fcnet_hiddens\\\": [100, 50]}}\" \\\n --run PPO \\\n --no-render \\\n --steps 2000",
"_____no_output_____"
]
],
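[
[
"# A programmatic alternative to the `rllib rollout` CLI (a sketch, not from the original\n# lesson): restore the trained agent from the checkpoint and step the environment by hand.\n# Assumes the `agent` and the checkpoint path from the cells above.\nimport gym\n\nagent.restore(\"tmp/ppo/cart/checkpoint_10/checkpoint-10\")\nenv = gym.make(\"CartPole-v1\")\nobs = env.reset()\ntotal_reward, done = 0.0, False\nwhile not done:\n    action = agent.compute_action(obs)   # action from the trained policy\n    obs, reward, done, _ = env.step(action)\n    total_reward += reward\nprint(\"episode reward:\", total_reward)",
"_____no_output_____"
]
],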
[
[
"Here is a sample episode. \n\n> **Note:** This video was created by running the previous `rllib rollout` command with the argument `--video-dir some_directory`. It creates one video per episode.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Video\n\ncart_pole_sample_video = \"../images/rllib/Cart-Pole-Example-Video.mp4\"\nVideo(cart_pole_sample_video)",
"_____no_output_____"
]
],
[
[
"Finally, launch [TensorBoard](https://ray.readthedocs.io/en/latest/rllib-training.html#getting-started). Select the Cart Pole runs and visualize the key metrics from training with RLlib.\n\n```shell\ntensorboard --logdir=$HOME/ray_results\n```",
"_____no_output_____"
]
],
[
[
"ray.shutdown()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
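[
"code"
],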
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
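[
"code"
],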
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a10bb31868a5aac753cdcbbb7bc38cd30e8c6fc
| 57,411 |
ipynb
|
Jupyter Notebook
|
Data Analysis/Pandas/PDEN2021-0104-Missing-Values-Example.ipynb
|
reddyprasade/Data-Analysis-with-Python-
|
2440e23486856eea5556c8262467b3a618032bc2
|
[
"MIT"
] | 1 |
2021-06-29T23:15:05.000Z
|
2021-06-29T23:15:05.000Z
|
Data Analysis/Pandas/PDEN2021-0104-Missing-Values-Example.ipynb
|
reddyprasade/Data-Analysis-with-Python-
|
2440e23486856eea5556c8262467b3a618032bc2
|
[
"MIT"
] | null | null | null |
Data Analysis/Pandas/PDEN2021-0104-Missing-Values-Example.ipynb
|
reddyprasade/Data-Analysis-with-Python-
|
2440e23486856eea5556c8262467b3a618032bc2
|
[
"MIT"
] | 1 |
2021-12-20T10:04:53.000Z
|
2021-12-20T10:04:53.000Z
| 34.942788 | 599 | 0.298967 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a10c0149bef507e01c4fba97c43d4b360798a48
| 62,731 |
ipynb
|
Jupyter Notebook
|
data/notebooks/05_LDA_topic_modelling.ipynb
|
riven314/Stylised-Controllable-Image-Captioning
|
50fee52a02e98603cda0cb4b082950c565b96a0f
|
[
"MIT"
] | 1 |
2020-08-16T20:35:27.000Z
|
2020-08-16T20:35:27.000Z
|
data/notebooks/05_LDA_topic_modelling.ipynb
|
riven314/Stylised-Controllable-Image-Captioning
|
50fee52a02e98603cda0cb4b082950c565b96a0f
|
[
"MIT"
] | null | null | null |
data/notebooks/05_LDA_topic_modelling.ipynb
|
riven314/Stylised-Controllable-Image-Captioning
|
50fee52a02e98603cda0cb4b082950c565b96a0f
|
[
"MIT"
] | 1 |
2020-09-06T17:13:23.000Z
|
2020-09-06T17:13:23.000Z
| 54.7869 | 14,436 | 0.680286 |
[
[
[
"import os\nimport json\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport spacy\nimport lda\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import PCA,LatentDirichletAllocation\n\nfrom utils import read_json\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nnlp = spacy.load('en_core_web_sm')",
"_____no_output_____"
],
[
"JSON_PATH = './ig_json/mid_clean_wonumber.json'\n\ndata = read_json(JSON_PATH)\ncaptions = []\nfor id_dict in data['images']:\n tokens = ' '.join(id_dict['sentences'][0]['tokens'])\n captions.append(tokens)\nlen(captions)",
"_____no_output_____"
]
],
[
[
"### 1. SpaCy Clean Text",
"_____no_output_____"
]
],
[
[
"docs = nlp.pipe(captions)\nstopwords = nlp.Defaults.stop_words",
"_____no_output_____"
],
[
"clean_docs = []\nfor doc in docs:\n tokens = [i.lemma_ for i in doc if (i.lemma_ != '-PRON-') and (i.text not in stopwords)]\n sentence = ' '.join(tokens).strip()\n if sentence != '':\n clean_docs.append(sentence)",
"_____no_output_____"
],
[
"for i in [10, 84, 23, 876]:\n before = captions[i]\n after = clean_docs[i]\n print(f'before: {before} | after: {after}')",
"before: lacrosse phone action shot | after: lacrosse phone action shoot\nbefore: looking dapper wedding | after: look dapper wedding\nbefore: always getting caught with food in her mouth | after: get catch food mouth\nbefore: state fair with my honey | after: spring sound t\n"
]
],
[
[
"### 2. Get BOW Features",
"_____no_output_____"
]
],
[
[
"vectorizer = CountVectorizer(min_df = 100, max_df = 0.6, stop_words = 'english')\nX = vectorizer.fit_transform(clean_docs)\n#assert X.shape[0] == len(captions)",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"vocab = vectorizer.get_feature_names()\nprint(vocab[-10:])",
"['yellow', 'yep', 'yes', 'yesterday', 'yo', 'yoga', 'young', 'yum', 'yummy', 'yup']\n"
],
[
"captions[19], [vocab[i] for i in X[19].indices]",
"_____no_output_____"
]
],
[
[
"### 3. Find the Best No. of Topics",
"_____no_output_____"
]
],
[
[
"topic_ns = [3, 6, 9, 12]\n\nstats_ls = []\nfor topic_n in topic_ns:\n tst_model = lda.LDA(\n n_topics = topic_n, \n n_iter = 1500, \n random_state = 1\n )\n# tst_model = LatentDirichletAllocation(\n# n_components = topic_n, max_iter = 5, \n# learning_method = 'online', \n# learning_offset = 50., random_state = 0\n# )\n tst_model.fit(X)\n stats = tst_model.loglikelihood()\n stats_ls.append(stats)",
"INFO:lda:n_documents: 122200\nINFO:lda:vocab_size: 4556\nINFO:lda:n_words: 400821\nINFO:lda:n_topics: 3\nINFO:lda:n_iter: 1500\nWARNING:lda:all zero row in document-term matrix found\nINFO:lda:<0> log likelihood: -3724282\nINFO:lda:<10> log likelihood: -3172071\nINFO:lda:<20> log likelihood: -3136476\nINFO:lda:<30> log likelihood: -3104471\nINFO:lda:<40> log likelihood: -3083696\nINFO:lda:<50> log likelihood: -3072804\nINFO:lda:<60> log likelihood: -3062924\nINFO:lda:<70> log likelihood: -3056590\nINFO:lda:<80> log likelihood: -3052137\nINFO:lda:<90> log likelihood: -3046477\nINFO:lda:<100> log likelihood: -3045218\nINFO:lda:<110> log likelihood: -3041476\nINFO:lda:<120> log likelihood: -3039780\nINFO:lda:<130> log likelihood: -3037755\nINFO:lda:<140> log likelihood: -3036504\nINFO:lda:<150> log likelihood: -3035129\nINFO:lda:<160> log likelihood: -3033274\nINFO:lda:<170> log likelihood: -3031919\nINFO:lda:<180> log likelihood: -3031343\nINFO:lda:<190> log likelihood: -3030634\nINFO:lda:<200> log likelihood: -3029800\nINFO:lda:<210> log likelihood: -3029518\nINFO:lda:<220> log likelihood: -3028331\nINFO:lda:<230> log likelihood: -3028216\nINFO:lda:<240> log likelihood: -3028433\nINFO:lda:<250> log likelihood: -3028224\nINFO:lda:<260> log likelihood: -3028499\nINFO:lda:<270> log likelihood: -3028451\nINFO:lda:<280> log likelihood: -3026788\nINFO:lda:<290> log likelihood: -3027625\nINFO:lda:<300> log likelihood: -3027395\nINFO:lda:<310> log likelihood: -3026521\nINFO:lda:<320> log likelihood: -3026073\nINFO:lda:<330> log likelihood: -3026767\nINFO:lda:<340> log likelihood: -3026520\nINFO:lda:<350> log likelihood: -3025561\nINFO:lda:<360> log likelihood: -3024973\nINFO:lda:<370> log likelihood: -3026574\nINFO:lda:<380> log likelihood: -3025897\nINFO:lda:<390> log likelihood: -3025847\nINFO:lda:<400> log likelihood: -3025974\nINFO:lda:<410> log likelihood: -3025598\nINFO:lda:<420> log likelihood: -3025492\nINFO:lda:<430> log likelihood: -3025651\nINFO:lda:<440> log likelihood: -3025800\nINFO:lda:<450> log likelihood: -3024801\nINFO:lda:<460> log likelihood: -3025078\nINFO:lda:<470> log likelihood: -3025272\nINFO:lda:<480> log likelihood: -3025082\nINFO:lda:<490> log likelihood: -3023899\nINFO:lda:<500> log likelihood: -3025124\nINFO:lda:<510> log likelihood: -3024754\nINFO:lda:<520> log likelihood: -3024960\nINFO:lda:<530> log likelihood: -3025256\nINFO:lda:<540> log likelihood: -3024205\nINFO:lda:<550> log likelihood: -3024842\nINFO:lda:<560> log likelihood: -3024239\nINFO:lda:<570> log likelihood: -3024609\nINFO:lda:<580> log likelihood: -3023812\nINFO:lda:<590> log likelihood: -3024257\nINFO:lda:<600> log likelihood: -3024868\nINFO:lda:<610> log likelihood: -3025279\nINFO:lda:<620> log likelihood: -3025070\nINFO:lda:<630> log likelihood: -3024183\nINFO:lda:<640> log likelihood: -3024489\nINFO:lda:<650> log likelihood: -3024211\nINFO:lda:<660> log likelihood: -3024447\nINFO:lda:<670> log likelihood: -3024415\nINFO:lda:<680> log likelihood: -3024062\nINFO:lda:<690> log likelihood: -3024820\nINFO:lda:<700> log likelihood: -3024509\nINFO:lda:<710> log likelihood: -3024341\nINFO:lda:<720> log likelihood: -3023370\nINFO:lda:<730> log likelihood: -3024683\nINFO:lda:<740> log likelihood: -3025837\nINFO:lda:<750> log likelihood: -3024599\nINFO:lda:<760> log likelihood: -3025056\nINFO:lda:<770> log likelihood: -3024616\nINFO:lda:<780> log likelihood: -3024919\nINFO:lda:<790> log likelihood: -3024705\nINFO:lda:<800> log likelihood: -3024294\nINFO:lda:<810> log likelihood: -3024405\nINFO:lda:<820> log 
likelihood: -3024188\nINFO:lda:<830> log likelihood: -3025216\nINFO:lda:<840> log likelihood: -3024613\nINFO:lda:<850> log likelihood: -3024566\nINFO:lda:<860> log likelihood: -3024056\nINFO:lda:<870> log likelihood: -3024488\nINFO:lda:<880> log likelihood: -3024445\nINFO:lda:<890> log likelihood: -3024084\nINFO:lda:<900> log likelihood: -3024190\nINFO:lda:<910> log likelihood: -3024403\nINFO:lda:<920> log likelihood: -3024369\nINFO:lda:<930> log likelihood: -3024226\nINFO:lda:<940> log likelihood: -3024314\nINFO:lda:<950> log likelihood: -3024412\nINFO:lda:<960> log likelihood: -3024953\nINFO:lda:<970> log likelihood: -3023544\nINFO:lda:<980> log likelihood: -3023379\nINFO:lda:<990> log likelihood: -3023648\nINFO:lda:<1000> log likelihood: -3023564\nINFO:lda:<1010> log likelihood: -3023577\nINFO:lda:<1020> log likelihood: -3024321\nINFO:lda:<1030> log likelihood: -3024155\nINFO:lda:<1040> log likelihood: -3023703\nINFO:lda:<1050> log likelihood: -3024058\nINFO:lda:<1060> log likelihood: -3024064\nINFO:lda:<1070> log likelihood: -3023895\nINFO:lda:<1080> log likelihood: -3024587\nINFO:lda:<1090> log likelihood: -3024359\nINFO:lda:<1100> log likelihood: -3024206\nINFO:lda:<1110> log likelihood: -3024428\nINFO:lda:<1120> log likelihood: -3024120\nINFO:lda:<1130> log likelihood: -3023924\nINFO:lda:<1140> log likelihood: -3024812\nINFO:lda:<1150> log likelihood: -3024662\nINFO:lda:<1160> log likelihood: -3024746\nINFO:lda:<1170> log likelihood: -3024596\nINFO:lda:<1180> log likelihood: -3024592\nINFO:lda:<1190> log likelihood: -3024103\nINFO:lda:<1200> log likelihood: -3024392\nINFO:lda:<1210> log likelihood: -3023918\nINFO:lda:<1220> log likelihood: -3024342\nINFO:lda:<1230> log likelihood: -3024341\nINFO:lda:<1240> log likelihood: -3024076\nINFO:lda:<1250> log likelihood: -3023451\nINFO:lda:<1260> log likelihood: -3024433\nINFO:lda:<1270> log likelihood: -3024898\nINFO:lda:<1280> log likelihood: -3023178\nINFO:lda:<1290> log likelihood: -3024117\nINFO:lda:<1300> log likelihood: -3023534\nINFO:lda:<1310> log likelihood: -3023652\nINFO:lda:<1320> log likelihood: -3023442\nINFO:lda:<1330> log likelihood: -3022728\nINFO:lda:<1340> log likelihood: -3024131\nINFO:lda:<1350> log likelihood: -3024362\nINFO:lda:<1360> log likelihood: -3023598\nINFO:lda:<1370> log likelihood: -3024827\nINFO:lda:<1380> log likelihood: -3024603\nINFO:lda:<1390> log likelihood: -3023802\nINFO:lda:<1400> log likelihood: -3024975\nINFO:lda:<1410> log likelihood: -3024108\nINFO:lda:<1420> log likelihood: -3024223\nINFO:lda:<1430> log likelihood: -3023526\nINFO:lda:<1440> log likelihood: -3023847\nINFO:lda:<1450> log likelihood: -3025468\nINFO:lda:<1460> log likelihood: -3024198\nINFO:lda:<1470> log likelihood: -3024024\nINFO:lda:<1480> log likelihood: -3023844\nINFO:lda:<1490> log likelihood: -3024250\nINFO:lda:<1499> log likelihood: -3023594\nINFO:lda:n_documents: 122200\nINFO:lda:vocab_size: 4556\nINFO:lda:n_words: 400821\nINFO:lda:n_topics: 6\nINFO:lda:n_iter: 1500\nWARNING:lda:all zero row in document-term matrix found\nINFO:lda:<0> log likelihood: -4116041\nINFO:lda:<10> log likelihood: -3343741\nINFO:lda:<20> log likelihood: -3267027\nINFO:lda:<30> log likelihood: -3201797\nINFO:lda:<40> log likelihood: -3163394\nINFO:lda:<50> log likelihood: -3141359\nINFO:lda:<60> log likelihood: -3125858\nINFO:lda:<70> log likelihood: -3114853\nINFO:lda:<80> log likelihood: -3106455\nINFO:lda:<90> log likelihood: -3100824\nINFO:lda:<100> log likelihood: -3096398\nINFO:lda:<110> log likelihood: -3093162\nINFO:lda:<120> log 
likelihood: -3089551\nINFO:lda:<130> log likelihood: -3086480\nINFO:lda:<140> log likelihood: -3085205\nINFO:lda:<150> log likelihood: -3082831\nINFO:lda:<160> log likelihood: -3081088\nINFO:lda:<170> log likelihood: -3078531\nINFO:lda:<180> log likelihood: -3077074\nINFO:lda:<190> log likelihood: -3075755\nINFO:lda:<200> log likelihood: -3074992\nINFO:lda:<210> log likelihood: -3074341\nINFO:lda:<220> log likelihood: -3072137\nINFO:lda:<230> log likelihood: -3072911\nINFO:lda:<240> log likelihood: -3071637\nINFO:lda:<250> log likelihood: -3071630\nINFO:lda:<260> log likelihood: -3072935\nINFO:lda:<270> log likelihood: -3071581\nINFO:lda:<280> log likelihood: -3071393\nINFO:lda:<290> log likelihood: -3071231\nINFO:lda:<300> log likelihood: -3070335\nINFO:lda:<310> log likelihood: -3070205\nINFO:lda:<320> log likelihood: -3070319\nINFO:lda:<330> log likelihood: -3069266\nINFO:lda:<340> log likelihood: -3069234\nINFO:lda:<350> log likelihood: -3068590\nINFO:lda:<360> log likelihood: -3068648\nINFO:lda:<370> log likelihood: -3068835\nINFO:lda:<380> log likelihood: -3069709\nINFO:lda:<390> log likelihood: -3069318\nINFO:lda:<400> log likelihood: -3068625\nINFO:lda:<410> log likelihood: -3070396\nINFO:lda:<420> log likelihood: -3068921\nINFO:lda:<430> log likelihood: -3068875\nINFO:lda:<440> log likelihood: -3068929\n"
],
[
"fig, ax = plt.subplots(1)\nax.plot(topic_ns, stats_ls, '-o');",
"_____no_output_____"
]
],
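[
[
"# A tiny follow-up (a sketch, not in the original notebook): pick the sweep candidate\n# with the highest final log-likelihood. `topic_ns` and `stats_ls` come from the cell above.\nbest_n = topic_ns[int(np.argmax(stats_ls))]\nprint('Best number of topics by log-likelihood:', best_n)",
"_____no_output_____"
]
],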
[
[
"### 4. Fit on the Optimal No. of Topics and Show Topic Words",
"_____no_output_____"
]
],
[
[
"lda_model = lda.LDA(n_topics = 4, n_iter = 1500, random_state = 1)\nlda_model.fit(X)",
"INFO:lda:n_documents: 120945\nINFO:lda:vocab_size: 760\nINFO:lda:n_words: 287189\nINFO:lda:n_topics: 4\nINFO:lda:n_iter: 1500\nWARNING:lda:all zero row in document-term matrix found\nINFO:lda:<0> log likelihood: -2413506\nINFO:lda:<10> log likelihood: -2021789\nINFO:lda:<20> log likelihood: -1998018\nINFO:lda:<30> log likelihood: -1962221\nINFO:lda:<40> log likelihood: -1930804\nINFO:lda:<50> log likelihood: -1912464\nINFO:lda:<60> log likelihood: -1899729\nINFO:lda:<70> log likelihood: -1891020\nINFO:lda:<80> log likelihood: -1887013\nINFO:lda:<90> log likelihood: -1885806\nINFO:lda:<100> log likelihood: -1883928\nINFO:lda:<110> log likelihood: -1883617\nINFO:lda:<120> log likelihood: -1882502\nINFO:lda:<130> log likelihood: -1882494\nINFO:lda:<140> log likelihood: -1882253\nINFO:lda:<150> log likelihood: -1881492\nINFO:lda:<160> log likelihood: -1880968\nINFO:lda:<170> log likelihood: -1881170\nINFO:lda:<180> log likelihood: -1880645\nINFO:lda:<190> log likelihood: -1880348\nINFO:lda:<200> log likelihood: -1881362\nINFO:lda:<210> log likelihood: -1880553\nINFO:lda:<220> log likelihood: -1879620\nINFO:lda:<230> log likelihood: -1879935\nINFO:lda:<240> log likelihood: -1879502\nINFO:lda:<250> log likelihood: -1879827\nINFO:lda:<260> log likelihood: -1879750\nINFO:lda:<270> log likelihood: -1879714\nINFO:lda:<280> log likelihood: -1879653\nINFO:lda:<290> log likelihood: -1878702\nINFO:lda:<300> log likelihood: -1878986\nINFO:lda:<310> log likelihood: -1879083\nINFO:lda:<320> log likelihood: -1878907\nINFO:lda:<330> log likelihood: -1878223\nINFO:lda:<340> log likelihood: -1878821\nINFO:lda:<350> log likelihood: -1878141\nINFO:lda:<360> log likelihood: -1877257\nINFO:lda:<370> log likelihood: -1877723\nINFO:lda:<380> log likelihood: -1877729\nINFO:lda:<390> log likelihood: -1877741\nINFO:lda:<400> log likelihood: -1877892\nINFO:lda:<410> log likelihood: -1876686\nINFO:lda:<420> log likelihood: -1878340\nINFO:lda:<430> log likelihood: -1876935\nINFO:lda:<440> log likelihood: -1877136\nINFO:lda:<450> log likelihood: -1875864\nINFO:lda:<460> log likelihood: -1876586\nINFO:lda:<470> log likelihood: -1876695\nINFO:lda:<480> log likelihood: -1877927\nINFO:lda:<490> log likelihood: -1877391\nINFO:lda:<500> log likelihood: -1877124\nINFO:lda:<510> log likelihood: -1877010\nINFO:lda:<520> log likelihood: -1876207\nINFO:lda:<530> log likelihood: -1876544\nINFO:lda:<540> log likelihood: -1877029\nINFO:lda:<550> log likelihood: -1876229\nINFO:lda:<560> log likelihood: -1876244\nINFO:lda:<570> log likelihood: -1876343\nINFO:lda:<580> log likelihood: -1876243\nINFO:lda:<590> log likelihood: -1876000\nINFO:lda:<600> log likelihood: -1875576\nINFO:lda:<610> log likelihood: -1875717\nINFO:lda:<620> log likelihood: -1875677\nINFO:lda:<630> log likelihood: -1875534\nINFO:lda:<640> log likelihood: -1875188\nINFO:lda:<650> log likelihood: -1874247\nINFO:lda:<660> log likelihood: -1875534\nINFO:lda:<670> log likelihood: -1874271\nINFO:lda:<680> log likelihood: -1875348\nINFO:lda:<690> log likelihood: -1875868\nINFO:lda:<700> log likelihood: -1875148\nINFO:lda:<710> log likelihood: -1875014\nINFO:lda:<720> log likelihood: -1874955\nINFO:lda:<730> log likelihood: -1874611\nINFO:lda:<740> log likelihood: -1874999\nINFO:lda:<750> log likelihood: -1876326\nINFO:lda:<760> log likelihood: -1875455\nINFO:lda:<770> log likelihood: -1875551\nINFO:lda:<780> log likelihood: -1875667\nINFO:lda:<790> log likelihood: -1875419\nINFO:lda:<800> log likelihood: -1874938\nINFO:lda:<810> log likelihood: -1875943\nINFO:lda:<820> log 
likelihood: -1875331\nINFO:lda:<830> log likelihood: -1874897\nINFO:lda:<840> log likelihood: -1875082\nINFO:lda:<850> log likelihood: -1875680\nINFO:lda:<860> log likelihood: -1875332\nINFO:lda:<870> log likelihood: -1874366\nINFO:lda:<880> log likelihood: -1875023\nINFO:lda:<890> log likelihood: -1874812\nINFO:lda:<900> log likelihood: -1875130\nINFO:lda:<910> log likelihood: -1875176\nINFO:lda:<920> log likelihood: -1874989\nINFO:lda:<930> log likelihood: -1874258\nINFO:lda:<940> log likelihood: -1875297\nINFO:lda:<950> log likelihood: -1874423\nINFO:lda:<960> log likelihood: -1875617\nINFO:lda:<970> log likelihood: -1875812\nINFO:lda:<980> log likelihood: -1876289\nINFO:lda:<990> log likelihood: -1875935\nINFO:lda:<1000> log likelihood: -1875519\nINFO:lda:<1010> log likelihood: -1874769\nINFO:lda:<1020> log likelihood: -1875286\nINFO:lda:<1030> log likelihood: -1874932\nINFO:lda:<1040> log likelihood: -1875673\nINFO:lda:<1050> log likelihood: -1876013\nINFO:lda:<1060> log likelihood: -1875111\nINFO:lda:<1070> log likelihood: -1875271\nINFO:lda:<1080> log likelihood: -1875702\nINFO:lda:<1090> log likelihood: -1874906\nINFO:lda:<1100> log likelihood: -1875804\nINFO:lda:<1110> log likelihood: -1874926\nINFO:lda:<1120> log likelihood: -1875386\nINFO:lda:<1130> log likelihood: -1875277\nINFO:lda:<1140> log likelihood: -1875075\nINFO:lda:<1150> log likelihood: -1875334\nINFO:lda:<1160> log likelihood: -1875788\nINFO:lda:<1170> log likelihood: -1875648\nINFO:lda:<1180> log likelihood: -1875277\nINFO:lda:<1190> log likelihood: -1875369\nINFO:lda:<1200> log likelihood: -1875663\nINFO:lda:<1210> log likelihood: -1875013\nINFO:lda:<1220> log likelihood: -1876187\nINFO:lda:<1230> log likelihood: -1876626\nINFO:lda:<1240> log likelihood: -1875941\nINFO:lda:<1250> log likelihood: -1876122\nINFO:lda:<1260> log likelihood: -1875367\nINFO:lda:<1270> log likelihood: -1875062\nINFO:lda:<1280> log likelihood: -1875720\nINFO:lda:<1290> log likelihood: -1875146\nINFO:lda:<1300> log likelihood: -1875561\nINFO:lda:<1310> log likelihood: -1874939\nINFO:lda:<1320> log likelihood: -1875582\nINFO:lda:<1330> log likelihood: -1875904\nINFO:lda:<1340> log likelihood: -1875401\nINFO:lda:<1350> log likelihood: -1876039\nINFO:lda:<1360> log likelihood: -1875680\nINFO:lda:<1370> log likelihood: -1877032\nINFO:lda:<1380> log likelihood: -1875359\nINFO:lda:<1390> log likelihood: -1875858\nINFO:lda:<1400> log likelihood: -1875364\nINFO:lda:<1410> log likelihood: -1875698\nINFO:lda:<1420> log likelihood: -1874600\nINFO:lda:<1430> log likelihood: -1876102\nINFO:lda:<1440> log likelihood: -1876113\nINFO:lda:<1450> log likelihood: -1875376\nINFO:lda:<1460> log likelihood: -1876001\nINFO:lda:<1470> log likelihood: -1876206\nINFO:lda:<1480> log likelihood: -1876857\nINFO:lda:<1490> log likelihood: -1876468\nINFO:lda:<1499> log likelihood: -1876283\n"
],
[
"topic_word = lda_model.topic_word_\nn_top_words = 40\nfor i, topic_dist in enumerate(topic_word):\n topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n print('Topic {}:\\n {}\\n\\n'.format(i, ' '.join(topic_words)))",
"Topic 0:\n dinner username good night day lunch breakfast time happy love cake chocolate eat today birthday ice chicken sweet cream coffee food morning ready tonight date like make fresh cheese egg homemade green yum cookie tea hot yes home little snack\n\n\nTopic 1:\n username new love day today happy year thank time birthday little photo hair old good come beautiful friend work fun christmas wedding great look family color favorite week weekend shoot ready party girl night book baby black wait white flower\n\n\nTopic 2:\n day morning beautiful good today time walk night view love sunset beach sun weekend sky great light way like enjoy start summer work park look let home run rain fun city spring early blue evening snow username flower come afternoon\n\n\nTopic 3:\n love like username look little good life know time baby work oh lol girl need thing big today think face right want let cute come feel miss boy dog home man cat sleep guy pretty play picture smile real true\n\n\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
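[
"code"
],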
[
"markdown"
],
[
"code",
"code"
]
] |
4a10cca5031d58eee8f2fa48ec1d6aaeda165779
| 13,339 |
ipynb
|
Jupyter Notebook
|
main.ipynb
|
langlailezf/-AI-13-
|
ba845e7094847f6422468a0a8b92f8154da52233
|
[
"Apache-2.0"
] | null | null | null |
main.ipynb
|
langlailezf/-AI-13-
|
ba845e7094847f6422468a0a8b92f8154da52233
|
[
"Apache-2.0"
] | null | null | null |
main.ipynb
|
langlailezf/-AI-13-
|
ba845e7094847f6422468a0a8b92f8154da52233
|
[
"Apache-2.0"
] | null | null | null | 33.76962 | 253 | 0.541495 |
[
[
[
"# Unzip the slightly modified PaddleSeg; after unzipping once, this line can be commented out\r\n!unzip -oq /home/aistudio/PaddleSeg.zip",
"_____no_output_____"
],
[
"# Unzip the datasets into the data/ directory\r\n!unzip -qo data/data95249/train_50k_mask.zip -d data/\r\n!unzip -oq data/data100087/B榜测试数据集.zip -d data/\r\n!unzip -oq data/data95249/train_image.zip -d data/",
"_____no_output_____"
],
[
"import sys\r\nsys.path.append(\"PaddleSeg\")\r\nimport paddleseg\r\nimport paddle\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nimport random\r\n# Set the random seed\r\nrandom.seed(2021)",
"/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/__init__.py:107: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n from collections import MutableMapping\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n from collections import Iterable, Mapping\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n from collections import Sized\n"
],
[
"def write_txt(file_name, imgs_path, labels_path=None, mode='train', val_pro=0.2):\r\n assert mode==\"train\" or mode==\"test\", \"ERROR:mode must be train or test.\"\r\n if mode!=\"test\":\r\n train_path = []\r\n for idx, f_path in enumerate(imgs_path):\r\n for i_path in sorted(os.listdir(f_path)):\r\n path1 = os.path.join(f_path, i_path) \r\n path2 = os.path.join(labels_path[idx], i_path)\r\n train_path.append((path1, path2, str(idx)))\r\n \r\n if val_pro>=0 and val_pro<=1:\r\n #打乱数据\r\n random.shuffle(train_path)\r\n val_len = int(len(train_path)*val_pro)\r\n val_path = train_path[:val_len]\r\n train_path = train_path[val_len:]\r\n with open(file_name[0], 'w') as f:\r\n for path in train_path:\r\n f.write(path[0]+\" \"+path[1]+\" \"+path[2]+\"\\n\")\r\n with open(file_name[1], 'w') as f:\r\n for path in val_path:\r\n f.write(path[0]+\" \"+path[1]+\" \"+path[2]+\"\\n\") \r\n return len(train_path), val_len\r\n else:\r\n with open(file_name[0], 'w') as f:\r\n for path in train_path:\r\n f.write(path[0]+\" \"+path[1]+\" \"+path[2]+\"\\n\") \r\n return len(train_path), 0\r\n else:\r\n with open(file_name, 'w') as f:\r\n for path in imgs_path:\r\n img_path = os.path.join(test_path, path)\r\n f.write(img_path+\"\\n\")",
"_____no_output_____"
],
[
"def create_txt(data_root, train_imgs_dir=None, train_labels_dir=None, test_dir=None, val_pro=0.2):\r\n    if train_imgs_dir is not None:\r\n        if os.path.exists(\"train.txt\"):\r\n            os.remove(\"train.txt\")\r\n        if os.path.exists(\"val.txt\"):\r\n            os.remove(\"val.txt\")\r\n        train_imgs_dir = os.path.join(data_root, train_imgs_dir)\r\n        train_labels_dir = os.path.join(data_root, train_labels_dir)\r\n        file_names = os.listdir(train_imgs_dir)\r\n        file_names = sorted(file_names)\r\n        train_imgs_path, train_labels_path =[], []\r\n        for na in file_names:\r\n            train_imgs_path.append(os.path.join(train_imgs_dir, na))\r\n            train_labels_path.append(os.path.join(train_labels_dir, na))\r\n        train_len, val_len = write_txt([\"train.txt\", \"val.txt\"], train_imgs_path, train_labels_path, mode='train', val_pro=val_pro)\r\n        \r\n        print(\"Training data prepared! Training set size: {}, validation set size: {}, number of classes: {}\".format(train_len, val_len, len(file_names)))\r\n\r\n    if test_dir is not None:\r\n        if os.path.exists(\"test.txt\"):\r\n            os.remove(\"test.txt\")\r\n        global test_path\r\n        test_path = os.path.join(data_root, test_dir)\r\n        test_imgs_path_list = sorted(os.listdir(test_path))\r\n        write_txt(\"test.txt\", test_imgs_path_list, mode=\"test\")\r\n        print(\"Test data prepared! Test set size: {}\".format(len(test_imgs_path_list)))",
"_____no_output_____"
],
[
"data_root = \"data\"\r\ntrain_imgs_dir = \"train_image\"\r\ntrain_labels_dir = \"train_50k_mask\"\r\ntest_dir = \"test_image\"\r\ncreate_txt(data_root, train_imgs_dir, train_labels_dir, test_dir, val_pro=0.2)",
"Training data prepared! Training set size: 40000, validation set size: 10000, number of classes: 500\nTest data prepared! Test set size: 10989\n"
],
[
"# Verify the files were written correctly; you can open them directly to inspect\r\n# or read the file contents here instead\r\n# Using train.txt as an example, check only the first 5 lines\r\ncount = 5\r\nwith open('train.txt', 'r') as f:\r\n    for line in f.readlines():\r\n        print(line)\r\n        count -= 1\r\n        if count==0:\r\n            break",
"data/train_image/n02101006/n02101006_6140.png data/train_50k_mask/n02101006/n02101006_6140.png 138\n\ndata/train_image/n03124170/n03124170_6952.png data/train_50k_mask/n03124170/n03124170_6952.png 333\n\ndata/train_image/n04037443/n04037443_18517.png data/train_50k_mask/n04037443/n04037443_18517.png 448\n\ndata/train_image/n01770393/n01770393_2677.png data/train_50k_mask/n01770393/n01770393_2677.png 44\n\ndata/train_image/n07613480/n07613480_19014.png data/train_50k_mask/n07613480/n07613480_19014.png 485\n\n"
],
[
"# Train the model\r\n!python PaddleSeg/train.py --config my_deeplabv3.yml --do_eval --use_vdl --save_dir /home/aistudio/output_deeplabv3_1 --save_interval 2000",
"Total Flops: 565520896 Total Params: 46454002\r"
],
[
"# Inference\r\n!python PaddleSeg/predict.py --config my_deeplabv3.yml --model_path output_deeplabv3_1/best_model/model.pdparams --image_path data/test_image --save_dir output/result_1 #--aug_pred --flip_horizontal --flip_vertical",
"2021-08-05 22:32:35 [INFO]\t\n---------------Config Information---------------\nbatch_size: 8\niters: 10000\nloss:\n coef:\n - 1\n types:\n - coef:\n - 1.0\n losses:\n - type: DiceLoss\n type: MixedLoss\nlr_scheduler:\n end_lr: 0\n learning_rate: 0.001\n power: 0.9\n type: PolynomialDecay\nmodel:\n align_corners: false\n aspp_out_channels: 256\n aspp_ratios:\n - 1\n - 6\n - 12\n - 18\n - 24\n backbone:\n multi_grid:\n - 1\n - 2\n - 4\n output_stride: 8\n pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz\n type: ResNet101_vd\n backbone_indices:\n - 0\n - 3\n num_classes: 2\n pretrained: output_deeplabv3_1/best_model/model.pdparams\n type: DeepLabV3P\noptimizer:\n momentum: 0.9\n type: sgd\n weight_decay: 4.0e-05\ntrain_dataset:\n dataset_root: /home/aistudio\n mode: train\n num_classes: 2\n train_path: /home/aistudio/train.txt\n transforms:\n - type: RandomHorizontalFlip\n - type: RandomVerticalFlip\n - brightness_range: 0.4\n contrast_range: 0.4\n saturation_range: 0.4\n type: RandomDistort\n - target_size:\n - 256\n - 256\n type: Resize\n - type: Normalize\n type: Dataset\nval_dataset:\n dataset_root: /home/aistudio\n mode: val\n num_classes: 2\n transforms:\n - target_size:\n - 256\n - 256\n type: Resize\n - type: Normalize\n type: Dataset\n val_path: /home/aistudio/val.txt\n------------------------------------------------\nW0805 22:32:35.842975 5047 device_context.cc:404] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.1, Runtime API Version: 10.1\nW0805 22:32:35.843027 5047 device_context.cc:422] device: 0, cuDNN Version: 7.6.\n2021-08-05 22:32:41 [INFO]\tLoading pretrained model from https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz\n2021-08-05 22:32:42 [INFO]\tThere are 530/530 variables loaded into ResNet_vd.\n2021-08-05 22:32:42 [INFO]\tLoading pretrained model from output_deeplabv3_1/best_model/model.pdparams\n2021-08-05 22:32:44 [INFO]\tThere are 627/627 variables loaded into DeepLabV3P.\n2021-08-05 22:32:44 [INFO]\tNumber of predict images = 10989\n2021-08-05 22:32:44 [INFO]\tLoading pretrained model from output_deeplabv3_1/best_model/model.pdparams\n2021-08-05 22:32:46 [INFO]\tThere are 627/627 variables loaded into DeepLabV3P.\n2021-08-05 22:32:46 [INFO]\tStart to predict...\n10989/10989 [==============================] - 893s 81ms/st\n"
],
[
"# Post-process the predicted masks: erode, then Gaussian-blur to smooth the edges\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nfilePath = 'output/result_1'\r\nfilenames = os.listdir(filePath)\r\nos.makedirs('./pred', exist_ok=True)  # make sure the output directory exists\r\nkernel = np.ones((3,3),np.uint8)\r\nfor filename in filenames:\r\n    img = cv2.imread(os.path.join(filePath, filename))  # os.path.join fixes the missing '/' in filePath + filename\r\n    erosion = cv2.erode(img,kernel,iterations = 1)\r\n    gaussian = cv2.GaussianBlur(erosion,(5, 5), 2)\r\n    cv2.imwrite(os.path.join('./pred', filename), gaussian)",
"_____no_output_____"
],
[
"%cd output/result_1/results\r\n!zip -r -oq /home/aistudio/pred.zip ./\r\n%cd /home/aistudio",
"/home/aistudio/output/result_1/results\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10cdf7274875145bbe4d019b6cb7178c373eef
| 67,946 |
ipynb
|
Jupyter Notebook
|
OPC_Sensor/Models with Min Max Normalization/LSTM/.ipynb_checkpoints/LSTM_tanh_binary-checkpoint.ipynb
|
utkarshkanswal/Machine-Learning-application-on-Air-quality-dataset
|
12d0aca165fe0faf503ca38bd6a391452b480565
|
[
"MIT"
] | 5 |
2021-10-18T07:36:05.000Z
|
2022-02-09T06:46:58.000Z
|
OPC_Sensor/Models with Min Max Normalization/LSTM/.ipynb_checkpoints/LSTM_tanh_binary-checkpoint.ipynb
|
utkarshkanswal/Machine-Learning-application-on-Air-quality-dataset
|
12d0aca165fe0faf503ca38bd6a391452b480565
|
[
"MIT"
] | null | null | null |
OPC_Sensor/Models with Min Max Normalization/LSTM/.ipynb_checkpoints/LSTM_tanh_binary-checkpoint.ipynb
|
utkarshkanswal/Machine-Learning-application-on-Air-quality-dataset
|
12d0aca165fe0faf503ca38bd6a391452b480565
|
[
"MIT"
] | null | null | null | 97.204578 | 19,280 | 0.768655 |
[
[
[
"import tensorflow as tf\ntf.config.experimental.list_physical_devices()",
"_____no_output_____"
],
[
"tf.test.is_built_with_cuda()",
"_____no_output_____"
]
],
[
[
"# Importing Libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport os.path as op\nimport pickle",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import Model,Sequential,load_model\nfrom keras.layers import Input, Embedding\nfrom keras.layers import Dense, Bidirectional\nfrom keras.layers.recurrent import LSTM\nimport keras.metrics as metrics\nimport itertools\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nfrom decimal import Decimal\nfrom keras import backend as K\nfrom keras.layers import Conv1D,MaxPooling1D,Flatten,Dense",
"_____no_output_____"
]
],
[
[
"# Data Fetching",
"_____no_output_____"
]
],
[
[
"A1=np.empty((0,5),dtype='float32')\nU1=np.empty((0,7),dtype='float32')\nnode=['150','149','147','144','142','140','136','61']\nmon=['Apr','Mar','Aug','Jun','Jul','Sep','May','Oct']\nfor j in node:\n for i in mon:\n inp= pd.read_csv('data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[1,2,3,15,16])\n out= pd.read_csv('data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[5,6,7,8,17,18,19])\n \n inp=np.array(inp,dtype='float32')\n out=np.array(out,dtype='float32')\n \n A1=np.append(A1, inp, axis=0)\n U1=np.append(U1, out, axis=0)\n\nprint(A1)\nprint(U1)\n",
"[[1.50000e+02 1.90401e+05 7.25000e+02 2.75500e+01 8.03900e+01]\n [1.50000e+02 1.90401e+05 8.25000e+02 2.75600e+01 8.03300e+01]\n [1.50000e+02 1.90401e+05 9.25000e+02 2.75800e+01 8.02400e+01]\n ...\n [6.10000e+01 1.91020e+05 1.94532e+05 2.93700e+01 7.52100e+01]\n [6.10000e+01 1.91020e+05 1.94632e+05 2.93500e+01 7.52700e+01]\n [6.10000e+01 1.91020e+05 1.94732e+05 2.93400e+01 7.53000e+01]]\n[[ 28. 3. -52. ... 16.97 19.63 20.06]\n [ 28. 15. -53. ... 16.63 19.57 23.06]\n [ 31. 16. -55. ... 17.24 19.98 20.24]\n ...\n [ 76. 12. -76. ... 3.47 3.95 4.35]\n [ 75. 13. -76. ... 3.88 4.33 4.42]\n [ 76. 12. -75. ... 3.46 4.07 4.28]]\n"
]
],
[
[
"# Min Max Scaler",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import MinMaxScaler\nimport warnings\nscaler_obj=MinMaxScaler()\nX1=scaler_obj.fit_transform(A1)  # scale inputs to [0, 1]\nY1=scaler_obj.fit_transform(U1)  # note: refitting the same scaler overwrites the input scaling, so only the output scaling can be inverted later\n\nwarnings.filterwarnings(action='ignore', category=UserWarning)\n\n# add a time-step axis for the LSTM: (samples, 1, features)\nX1=X1[:,np.newaxis,:]\nY1=Y1[:,np.newaxis,:]",
"_____no_output_____"
],
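[
"# A hedged alternative (a sketch, not in the original): keep two separate scalers so the\n# input scaling is preserved and predictions can later be mapped back to physical units\n# with out_scaler.inverse_transform(...). `in_scaler`/`out_scaler` are names introduced here.\nfrom sklearn.preprocessing import MinMaxScaler\nin_scaler, out_scaler = MinMaxScaler(), MinMaxScaler()\nXs = in_scaler.fit_transform(A1)   # inputs scaled to [0, 1]\nYs = out_scaler.fit_transform(U1)  # outputs scaled to [0, 1]\nprint(Xs.min(), Xs.max(), Ys.min(), Ys.max())",
"_____no_output_____"
],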
[
"# Custom Keras metrics: root-mean-squared error and the coefficient of determination (R^2)\ndef rmse(y_true, y_pred):\n    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))\n\n# R^2 = 1 - SS_res/SS_tot (defined for completeness; not passed to compile below)\ndef coeff_determination(y_true, y_pred):\n    SS_res = K.sum(K.square( y_true-y_pred )) \n    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) ) \n    return ( 1 - SS_res/(SS_tot + K.epsilon()) )",
"_____no_output_____"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"model1 = Sequential()\nmodel1.add(keras.Input(shape=(1,5)))\nmodel1.add(tf.keras.layers.LSTM(7,activation=\"tanh\",use_bias=True,kernel_initializer=\"glorot_uniform\",bias_initializer=\"zeros\"))\nmodel1.add(Dense(7))\nmodel1.add(keras.layers.BatchNormalization(axis=-1,momentum=0.99,epsilon=0.001,center=True,scale=True,\n beta_initializer=\"zeros\",gamma_initializer=\"ones\",\n moving_mean_initializer=\"zeros\",moving_variance_initializer=\"ones\",trainable=True))\nmodel1.add(keras.layers.ReLU())\nmodel1.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])\nmodel1.summary()",
"Model: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n lstm (LSTM) (None, 7) 364 \n \n dense (Dense) (None, 7) 56 \n \n batch_normalization (BatchN (None, 7) 28 \n ormalization) \n \n re_lu (ReLU) (None, 7) 0 \n \n=================================================================\nTotal params: 448\nTrainable params: 434\nNon-trainable params: 14\n_________________________________________________________________\n"
],
[
"from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)",
"_____no_output_____"
],
[
"model_fit8 = model1.fit(x_train,y_train,batch_size=256,epochs=50, validation_split=0.1)",
"Epoch 1/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8211e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8125e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 2/50\n4563/4563 [==============================] - 59s 13ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8206e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8125e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 3/50\n4563/4563 [==============================] - 96s 21ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8205e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8125e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 4/50\n4563/4563 [==============================] - 58s 13ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8203e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 5/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8201e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 6/50\n4563/4563 [==============================] - 54s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8200e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 7/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8198e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 8/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8197e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 9/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8195e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 10/50\n4563/4563 [==============================] - 57s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8194e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8125e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 11/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8192e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 12/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8190e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 13/50\n4563/4563 [==============================] - 57s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8189e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 14/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8187e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 
0.0087\nEpoch 15/50\n4563/4563 [==============================] - 57s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8186e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 16/50\n4563/4563 [==============================] - 57s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8185e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 17/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8184e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0086\nEpoch 18/50\n4563/4563 [==============================] - 57s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8181e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 19/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8181e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 20/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8177e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 21/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8177e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 22/50\n4563/4563 [==============================] - 93s 20ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8175e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 23/50\n4563/4563 [==============================] - 64s 14ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8174e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 24/50\n4563/4563 [==============================] - 58s 13ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8171e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8127e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 25/50\n4563/4563 [==============================] - 57s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8169e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 26/50\n4563/4563 [==============================] - 54s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8167e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 27/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8164e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 28/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8162e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 
0.0047 - val_rmse: 0.0086\nEpoch 29/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8161e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 30/50\n4563/4563 [==============================] - 95s 21ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8161e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 31/50\n4563/4563 [==============================] - 66s 14ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8159e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 32/50\n4563/4563 [==============================] - 58s 13ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8157e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 33/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8155e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 34/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8154e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 35/50\n4563/4563 [==============================] - 53s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8151e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 36/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8150e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 37/50\n4563/4563 [==============================] - 54s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8149e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 38/50\n4563/4563 [==============================] - 54s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8148e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 39/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8146e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 40/50\n4563/4563 [==============================] - 54s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8144e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 41/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8143e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 42/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8141e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 
1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 43/50\n4563/4563 [==============================] - 53s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8140e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 44/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8139e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 45/50\n4563/4563 [==============================] - 54s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8138e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 46/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8137e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 47/50\n4563/4563 [==============================] - 57s 13ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8136e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 48/50\n4563/4563 [==============================] - 55s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8136e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8124e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 49/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8135e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\nEpoch 50/50\n4563/4563 [==============================] - 56s 12ms/step - loss: 0.0859 - accuracy: 0.9438 - mse: 1.8134e-04 - mae: 0.0047 - rmse: 0.0087 - val_loss: 0.0860 - val_accuracy: 0.9440 - val_mse: 1.8123e-04 - val_mae: 0.0047 - val_rmse: 0.0087\n"
],
[
"model1.evaluate(x_test,y_test)",
"13518/13518 [==============================] - 66s 5ms/step - loss: 0.0862 - accuracy: 0.9435 - mse: 1.8701e-04 - mae: 0.0047 - rmse: 0.0087\n"
],
[
"model1.evaluate(x_train,y_train)",
"40554/40554 [==============================] - 206s 5ms/step - loss: 0.0860 - accuracy: 0.9438 - mse: 1.8242e-04 - mae: 0.0047 - rmse: 0.0087\n"
]
],
[
[
"# Saving Model as File",
"_____no_output_____"
]
],
[
[
"model_json = model1.to_json()\nwith open(\"Model_File/lstm_tanh.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel1.save_weights(\"Model_File/lstm_tanh.h5\")\nprint(\"Saved model to disk\")",
"Saved model to disk\n"
],
[
"from keras.models import model_from_json\njson_file = open('Model_File/lstm_tanh.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(\"Model_File/lstm_tanh.h5\")\nprint(\"Loaded model from disk\")\nloaded_model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])",
"Loaded model from disk\n"
]
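,
[
"# Sanity check (a minimal sketch): the reloaded model should reproduce model1's test metrics above.\nloaded_model.evaluate(x_test, y_test)",
"_____no_output_____"
]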
],
[
[
"# Error Analysis",
"_____no_output_____"
]
],
[
[
"# summarize history for loss\nplt.plot(model_fit8.history['loss'])\nplt.plot(model_fit8.history['val_loss'])\nplt.title('Model Loss',fontweight ='bold',fontsize = 15)\nplt.ylabel('Loss',fontweight ='bold',fontsize = 15)\nplt.xlabel('Epoch',fontweight ='bold',fontsize = 15)\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# summarize history for accuracy\nplt.plot(model_fit8.history['accuracy'])\nplt.plot(model_fit8.history['val_accuracy'])\nplt.title('Model accuracy',fontweight ='bold',fontsize = 15)\nplt.ylabel('Accuracy',fontweight ='bold',fontsize = 15)\nplt.xlabel('Epoch',fontweight ='bold',fontsize = 15)\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()",
"_____no_output_____"
],
[
"#Creating csv file of prediction",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)\n\ny_test_pred=loaded_model.predict(x_test)\ny_test_pred",
"_____no_output_____"
],
[
"y_test",
"_____no_output_____"
],
[
"y_test=y_test[:,0]",
"_____no_output_____"
],
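[
"# Minimal error-summary sketch, assuming y_test_pred has shape (n, 1) from loaded_model.predict\n# and y_test is the 1-D slice taken in the cell above.\nimport numpy as np\nerr = y_test_pred[:, 0] - y_test\nprint(\"MAE:  {:.4f}\".format(np.mean(np.abs(err))))\nprint(\"RMSE: {:.4f}\".format(np.sqrt(np.mean(err ** 2))))",
"_____no_output_____"
],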
[
"from numpy import savetxt\nsavetxt('ARRAY_DATA/lstm_y_test_pred.csv', y_test_pred[:1001], delimiter=',')",
"_____no_output_____"
],
[
"from numpy import savetxt\nsavetxt('ARRAY_DATA/lstm_y_test.csv', y_test[:1001], delimiter=',')",
"_____no_output_____"
],
[
"#completed",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10d024c13579b6319a10e9913e37d50360bf1a
| 33,705 |
ipynb
|
Jupyter Notebook
|
examples/Coronavirus/Papers Trends.ipynb
|
data4goodlab/ScienceDynamics
|
1ba24a7a0ec64058b6095541b0ecc5d5d294b588
|
[
"MIT"
] | 1 |
2020-09-29T15:41:58.000Z
|
2020-09-29T15:41:58.000Z
|
examples/Coronavirus/Papers Trends.ipynb
|
data4goodlab/ScienceDynamics
|
1ba24a7a0ec64058b6095541b0ecc5d5d294b588
|
[
"MIT"
] | null | null | null |
examples/Coronavirus/Papers Trends.ipynb
|
data4goodlab/ScienceDynamics
|
1ba24a7a0ec64058b6095541b0ecc5d5d294b588
|
[
"MIT"
] | 1 |
2020-11-12T18:15:25.000Z
|
2020-11-12T18:15:25.000Z
| 26.53937 | 176 | 0.546121 |
[
[
[
"# Paper Trends",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%aimport\n%matplotlib inline",
"_____no_output_____"
],
[
"import os\nimport sys\nnb_dir = os.path.dirname(os.path.split(os.getcwd())[0])\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)",
"_____no_output_____"
],
[
"from tqdm import tqdm_notebook as tqdm\nimport pandas as pd\nfrom turicreate import SFrame, load_sframe\nfrom pathlib import Path\nimport turicreate.aggregate as agg\nimport numpy as np",
"_____no_output_____"
],
[
"import json\nimport os\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport glob\nimport ntpath\nfrom tqdm import tqdm\nimport seaborn as sns\nfrom matplotlib.ticker import FuncFormatter\nimport datetime\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport seaborn as sns",
"_____no_output_____"
]
],
[
[
"## Utility Functions",
"_____no_output_____"
]
],
[
[
"def convert_to_barchart_format(sf,x, year_column=\"Year\", count_column=\"count\", year_range=(1786,2019)):\n year_sf = SFrame()\n year_sf[year_column] = np.linspace(year_range[0],year_range[1],year_range[1]-year_range[0]+1).tolist()\n year_sf[year_column] = year_sf[year_column]\n sf[year_column] = sf[year_column].astype(float)\n res_sf = SFrame()\n for d in tqdm(sf[x].unique()):\n temp_sf = SFrame()\n temp_sf[x] = [d]*len(year_sf)\n temp_sf[year_column] = year_sf[year_column]\n res_sf = res_sf.append(temp_sf)\n sf = sf.join(res_sf,how=\"right\").sort(year_column)\n sf = sf.fillna(count_column,0)\n df = sf.to_dataframe()\n df = df.sort_values([x,year_column])\n df['value'] = df.groupby([x])[count_column].cumsum()\n df[\"lastValue\"] = df.groupby([x])[\"value\"].shift(1)\n df = df.fillna(0)\n df[\"rank\"] =df.groupby([year_column])[\"value\"].rank(ascending=False)\n return df.rename(columns={x:\"name\", year_column: \"year\",count_column:\"count\"})[[\"year\",\"name\",\"value\",\"lastValue\",\"rank\"]]",
"_____no_output_____"
],
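[
"# Minimal usage sketch for convert_to_barchart_format, kept commented out because its\n# inputs are only built later in the notebook; the column names are illustrative assumptions.\n# bar_df = convert_to_barchart_format(diseases_pub_count_viro, x=\"Disease\",\n#                                     year_column=\"Year\", count_column=\"Number of papers\")\n# bar_df.head()  # columns: year, name, value, lastValue, rank -- ready for a bar-chart race",
"_____no_output_____"
],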
[
"def chunks(l, n):\n # For item i in a range that is a length of l,\n for i in range(0, len(l), n):\n # Create an index range for l of n items:\n yield l[i:i + n]\n\ndef get_d(sf_corr, diseases_id):\n for data in sf_corr.groupby(\"id\"):\n if len(data[1]) >5:\n yield f\"{data[0]}: {diseases_id[diseases_id['id']==data[0]][0]['Disease'].title()}\", data[1].sort_values(\"year\")",
"_____no_output_____"
],
[
"\n\nsns.set(style=\"ticks\")\ndef create_gird(df, col, hue,x,y,sharey=True, legend=False):\n\n # Initialize a grid of plots with an Axes for each walk\n grid = sns.FacetGrid(df, col=col, hue=hue, palette=sns.color_palette(\"hls\", 4),sharey=sharey,\n col_wrap=3, height=4.5)\n plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, _: int(x)))\n\n # Draw a horizontal line to show the starting point\n grid.map(plt.axhline, y=0, ls=\":\", c=\".5\")\n\n # Draw a line plot to show the trajectory of each random walk\n grid.map(plt.plot, x, y)\n grid.set_titles(\"{col_name}\")\n if legend:\n grid.add_legend()\n\n\n # Adjust the arrangement of the plots\n grid.fig.tight_layout(w_pad=1)\n return grid",
"_____no_output_____"
]
],
[
[
"## Analysis",
"_____no_output_____"
]
],
[
[
"spothlight = [\"SARS\",\"MERS Coronavirus\", \"Avian Influenza\",\"Ebola\", \"Influenza\", \"HIV/AIDS\",\"Hepatitis B\",\"Hepatitis C\", \"Swine Flu\"]\nyears = [2002,2012,1878,1976,1878,1981,1966,1987,1918 ]\nmin_refs = 5",
"_____no_output_____"
]
],
[
[
"### Data Loading",
"_____no_output_____"
]
],
[
[
"diseases_id= load_sframe(\"Data/diseases_id.csv\")\ndisease_names = SFrame.read_csv(\"Data/disease_names.csv\")",
"_____no_output_____"
]
],
[
[
"General MAG Medicine Publications:",
"_____no_output_____"
]
],
[
[
"med_mag = load_sframe(\"Data/mag/med_mag.sframe\")",
"_____no_output_____"
],
[
"len(med_mag)",
"_____no_output_____"
]
],
[
[
"MAG Medicine Publications about the specific diseases:\n",
"_____no_output_____"
]
],
[
[
"diseases_mag = load_sframe(\"Data/mag/diseases_med_mag.sframe\")",
"_____no_output_____"
]
],
[
[
"General MAG Virology Publications:",
"_____no_output_____"
]
],
[
[
"len(diseases_mag)",
"_____no_output_____"
],
[
"viro_mag = load_sframe(\"Data/mag/viro_mag.sframe\")",
"_____no_output_____"
]
],
[
[
"MAG Virology Publications about the specific diseases\"",
"_____no_output_____"
]
],
[
[
"len(viro_mag)",
"_____no_output_____"
],
[
"diseases_viro_mag = load_sframe(\"Data/mag/diseases_viro_mag.sframe\")",
"_____no_output_____"
],
[
"len(diseases_viro_mag)",
"_____no_output_____"
]
],
[
[
"### Number of papers by diseases from 2001",
"_____no_output_____"
]
],
[
[
"diseases = diseases_mag[(diseases_mag[\"Year\"]>2001)&(diseases_mag[\"Ref Number\"]>min_refs)]",
"_____no_output_____"
],
[
"diseases = diseases.filter_by(spothlight, \"disease\")[\"disease\"].value_counts()",
"_____no_output_____"
],
[
"diseases = diseases.rename({\"value\":\"Disease\", \"count\": \"Numer of Papers\"})",
"_____no_output_____"
],
[
"plt.figure(figsize=(20,10))\nsns.set()\ncolors = [\"#4374B3\", \"#4374B3\"]\n# Set your custom color palette\nsns.set_palette(sns.color_palette(colors))\n\nax = sns.barplot(x=\"Disease\", y=\"Numer of Papers\", data=diseases.to_dataframe(), color=\"#4374B3\")\nax.set_xticklabels(ax.get_xticklabels(),rotation=90)\nplt.tight_layout()\nplt.savefig(\"output/Papers/disease_count.svg\")\n",
"_____no_output_____"
]
],
[
[
"We filter all publication that are not academic papers (editorials, letters, etc.).\nThis type of publication rarely cite other papers filtering the number of refernces removes this kind of publications from the dataset.",
"_____no_output_____"
]
],
[
[
"med_mag = med_mag[med_mag[\"Ref Number\"]>min_refs]\nviro_mag = viro_mag[viro_mag[\"Ref Number\"]>min_refs]\ndiseases_mag = diseases_mag[diseases_mag[\"Ref Number\"]>min_refs].filter_by(spothlight, \"disease\")\ndiseases_viro_mag = diseases_viro_mag[diseases_viro_mag[\"Ref Number\"]>min_refs].filter_by(spothlight, \"disease\")",
"_____no_output_____"
]
],
[
[
"### Publications - Citation",
"_____no_output_____"
],
[
"#### NPR",
"_____no_output_____"
],
[
"Publication data normaliztion",
"_____no_output_____"
]
],
[
[
"def nomalize_disease_publications(diseases_sf, general_sf):\n diseases_pub_count = diseases_sf.groupby([\"disease\",\"Year\"], {\"Number of papers\": agg.COUNT()})\n papers_year = general_sf.groupby(\"Year\", {\"Total Number of papers\": agg.COUNT()})\n diseases_pub_count = diseases_pub_count.join(papers_year,{\"Year\":\"Year\"})\n diseases_pub_count[\"NPR\"] = diseases_pub_count[\"Number of papers\"] / diseases_pub_count[\"Total Number of papers\"]\n diseases_pub_count = diseases_pub_count.rename({\"disease\":\"Disease\"})\n return diseases_pub_count.sort([\"Disease\",\"Year\"])\n ",
"_____no_output_____"
],
[
"diseases_pub_count_viro = nomalize_disease_publications(diseases_viro_mag, viro_mag)\ndiseases_pub_count_med = nomalize_disease_publications(diseases_mag, med_mag)",
"_____no_output_____"
],
[
"diseases_pub_count_viro[\"Type\"] = \"Virolgy\"\ndiseases_pub_count_med[\"Type\"] = \"Medicine\"\ndiseases_pub_count = diseases_pub_count_viro.append(diseases_pub_count_med)\n",
"_____no_output_____"
],
[
"def chunks(l, n):\n # For item i in a range that is a length of l,\n for i in range(0, len(l), n):\n # Create an index range for l of n items:\n yield l[i:i + n]\n\ndef get_data(sf_corr):\n for data in sf_corr.groupby(\"Disease\"):\n if len(data[1]) >5:\n yield data[1].sort_values(\"Year\")",
"_____no_output_____"
]
],
[
[
"Filter the data:",
"_____no_output_____"
]
],
[
[
"pub = SFrame()\nfor d,y in zip(spothlight, years):\n pub = pub.append( diseases_pub_count[(diseases_pub_count[\"Disease\"]==d)&(diseases_pub_count[\"Year\"]>=y)])",
"_____no_output_____"
],
[
"pub[\"Normalized Paper Rate\"] = pub[\"NPR\"]",
"_____no_output_____"
]
],
[
[
"Generate SVG",
"_____no_output_____"
]
],
[
[
"\nsns.set(font_scale=1.3)\n\n\nplt.rc('text', usetex=False)\nplt.figure(figsize=(16, 12))\ndes = list(get_data(pub[(pub[\"Year\"]>=1980)&(pub[\"Type\"]== \"Virolgy\")].to_dataframe()))\nfor i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):\n create_gird(pd.concat(curr_f),\"Disease\",\"Type\",\"Year\", \"Normalized Paper Rate\",False,False)\n plt.savefig(f\"output/Papers/Virolgy_NPR_{i}.svg\")\n# plt.close()\n\n",
"_____no_output_____"
],
[
"\nsns.set(font_scale=1.3)\n\n\nplt.rc('text', usetex=False)\nplt.figure(figsize=(16, 12))\ndes = list(get_data(pub[(pub[\"Year\"]>=1980)&(pub[\"Type\"]== \"Medicine\")].to_dataframe()))\nfor i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):\n create_gird(pd.concat(curr_f),\"Disease\",\"Type\",\"Year\", \"Normalized Paper Rate\",False,False)\n plt.savefig(f\"output/Papers/Medicine_NPR_{i}.svg\")\n# plt.close()\n\n",
"_____no_output_____"
]
],
[
[
"Generate multi-page PDF",
"_____no_output_____"
]
],
[
[
"\nsns.set(font_scale=1.3)\n\n# Create the PdfPages object to which we will save the pages:\n# The with statement makes sure that the PdfPages object is closed properly at\n# the end of the block, even if an Exception occurs.\nwith PdfPages('output/Papers/Medicine_NPR.pdf') as pdf:\n # if LaTeX is not installed or error caught, change to `usetex=False`\n plt.rc('text', usetex=False)\n plt.figure(figsize=(8, 6))\n des = list(get_data(pub[(pub[\"Year\"]>=1980)&(pub[\"Type\"]== \"Medicine\")].to_dataframe()))\n for i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):\n create_gird(pd.concat(curr_f),\"Disease\",\"Type\",\"Year\", \"Normalized Paper Rate\",False,False)\n pdf.savefig()\n plt.close()\n\n",
"_____no_output_____"
],
[
"pub[\"Normalized Paper Rate\"] = np.log(pub[\"NPR\"])",
"_____no_output_____"
],
[
"import plotly.express as px\n\nfig = px.line(pub[(pub[\"Type\"]==\"Virolgy\")&(pub[\"Year\"]>1959)].to_dataframe(), x=\"Year\", y=\"Normalized Paper Rate\",color=\"Disease\", width=1600, height=800)\nfig.update_layout({\"legend\":{\"x\":0,\"y\":1.1}, \"legend_orientation\":\"h\"}, font=dict(\n size=20,\n ))\n\n\nfig.show()",
"_____no_output_____"
],
[
"# import plotly.io as pio\n# pio.orca.config.server_url = \"http://localhost:9091\"\n# fig.write_image(\"output/Papers/disease-npr.svg\")\n",
"_____no_output_____"
]
],
[
[
"Plot Similarity Using DTW",
"_____no_output_____"
]
],
[
[
"data = pub[(pub[\"Year\"]>=1980)&(pub[\"Type\"]== \"Virolgy\")&(pub[\"Year\"]<2019)][[\"Disease\",\"Year\",\"NPR\"]].to_dataframe()",
"_____no_output_____"
],
[
"data = data.sort_values([\"Disease\",\"Year\"])",
"_____no_output_____"
],
[
"from tslearn.metrics import dtw \n\nres= {\"Disease1\":[], \"Disease2\":[], \"dtw\":[]}\nfor d1, df1 in data.groupby(\"Disease\"):\n for d2, df2 in data.groupby(\"Disease\"):\n res[\"Disease1\"].append(d1)\n res[\"Disease2\"].append(d2)\n disease1 = df1[\"NPR\"].values\n disease2 = df2[\"NPR\"].values\n res[\"dtw\"].append(dtw(disease1, disease2))\n",
"_____no_output_____"
],
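[
"# Sanity-check sketch for the DTW metric on toy series (illustrative values only).\nfrom tslearn.metrics import dtw\nprint(dtw([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]))  # identical shapes -> 0.0\nprint(dtw([1.0, 2.0, 3.0], [2.0, 3.0, 4.0]))  # different shapes -> positive distance",
"_____no_output_____"
],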
[
"piv_data = []\nfor d, df in data.groupby(\"Disease\"):\n piv_data.append(df[\"NPR\"].values)",
"_____no_output_____"
],
[
"sns.set(font_scale=2.0)\ncorr = pd.DataFrame(res).pivot(index='Disease1', columns='Disease2', values='dtw')\nmask = np.zeros_like(corr)\nmask[np.triu_indices_from(mask)] = True\nwith sns.axes_style(\"white\"):\n plt.figure(figsize=(40,20))\n ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True, annot=True, fmt='0.3f', cmap=sns.light_palette(\"#cc0000\" , reverse=True, as_cmap=True))\n plt.savefig(\"output/Papers/dtw_npr.svg\")\n",
"_____no_output_____"
],
[
"from tslearn.utils import to_time_series_dataset\nfrom tslearn.clustering import TimeSeriesKMeans\n\nkm = TimeSeriesKMeans(n_clusters=2, metric=\"dtw\", max_iter=10, tol=1e-5).fit(to_time_series_dataset(piv_data))\n\nfrom collections import defaultdict\nclusters = defaultdict(lambda: [])\nfor d, c in zip(corr.index, km.labels_):\n clusters[c].append(d)\nclusters",
"_____no_output_____"
]
],
[
[
"#### NCR",
"_____no_output_____"
]
],
[
[
"# Calculte the number of citaions for each diseses per year.\ndef diseses_citations_year(publication_sf):\n disease_citations = publication_sf.stack(\"Dict of Year_Citation Number\",new_column_name=[\"cite year\", \"Citations\"], drop_na=True)\n disease_citations = disease_citations.groupby([\"disease\",\"cite year\"], {\"Citations\": agg.SUM(\"Citations\")})\n disease_citations[\"cite year\"] = disease_citations[\"cite year\"].astype(int)\n return disease_citations.rename({\"cite year\": \"year\"})",
"_____no_output_____"
],
[
"disease_citations_viro = diseses_citations_year(diseases_viro_mag)",
"_____no_output_____"
],
[
"disease_citations_med = diseses_citations_year(diseases_mag)",
"_____no_output_____"
],
[
"# The total number of citaions for a year, used to normalize the data.\ndef citaion_year_mag(publication_sf):\n med_citations = publication_sf.stack(\"Dict of Year_Citation Number\",new_column_name=[\"cite year\", \"Citations\"], drop_na=True)\n med_citations = med_citations.rename({\"cite year\": \"year\"})\n return med_citations.groupby([\"year\"], operations={\"Total Citations\": agg.SUM(\"Citations\")})",
"_____no_output_____"
],
[
"citations_year_viro = citaion_year_mag(viro_mag)",
"_____no_output_____"
],
[
"citations_year_med = citaion_year_mag(med_mag)",
"_____no_output_____"
],
[
"citations_year_med[\"year\"] = citations_year_med[\"year\"].astype(int)",
"_____no_output_____"
],
[
"citations_year_med.sort(\"Total Citations\",False)",
"_____no_output_____"
]
],
[
[
"Medicine citaions over time",
"_____no_output_____"
]
],
[
[
"citations_year_med.to_dataframe().sort_values(\"year\").plot(x=\"year\", y=\"Total Citations\")",
"_____no_output_____"
]
],
[
[
"Citaion data normaliztion",
"_____no_output_____"
]
],
[
[
"def norm_disease_citations(disease_citations, citations_year):\n disease_citations = disease_citations.join(citations_year, on=\"year\")\n disease_citations[\"Citations Norm\"] = disease_citations[\"Citations\"]/disease_citations[\"Total Citations\"]\n return disease_citations.join(disease_names)",
"_____no_output_____"
],
[
"disease_citations_med = norm_disease_citations(disease_citations_med, citations_year_med)\ndisease_citations_viro = norm_disease_citations(disease_citations_viro, citations_year_viro)",
"_____no_output_____"
],
[
"def clean_disease_citations(disease_citations):\n disease_citations = disease_citations.rename({\"year\":\"Year\",\"Citations Norm\":\"NCR\", \"disease\": \"Disease\"})\n disease_citations = disease_citations.join(disease_names, {\"id\":\"id\"})\n disease_citations = disease_citations.sort([\"Disease\", \"Year\"]) \n disease_citations = disease_citations.to_dataframe()\n disease_citations = disease_citations[disease_citations[\"Year\"].notna()]\n disease_citations = disease_citations[disease_citations[\"Year\"]<2019]\n return disease_citations.reset_index()",
"_____no_output_____"
],
[
"disease_citations_med = clean_disease_citations(disease_citations_med)\ndisease_citations_viro = clean_disease_citations(disease_citations_viro)",
"_____no_output_____"
],
[
"disease_citations_med[\"Type\"] = \"Medicine\"\ndisease_citations_viro[\"Type\"] = \"Virology\"\n\ndisease_citations = disease_citations_med.append(disease_citations_viro)\n",
"_____no_output_____"
],
[
"cite = pd.DataFrame()\nfor d,y in zip(spothlight, years):\n cite = cite.append( disease_citations[(disease_citations[\"Disease\"]==d)&(disease_citations[\"Year\"]>=y)])",
"_____no_output_____"
],
[
"cite[\"Normalized Citaion Rate\"] = cite[\"NCR\"]",
"_____no_output_____"
],
[
"cite = cite.rename(columns={\"Normalized Citaion Rate\":\"Normalized Citation Rate\"})",
"_____no_output_____"
],
[
"\nsns.set(font_scale=1.3)\n# sns.set(style=\"ticks\")\n\nplt.rc('text', usetex=False)\nplt.figure(figsize=(8, 6))\ndes = list(get_data(cite[(cite[\"Year\"]>=1980)&(cite[\"Type\"]== \"Medicine\")]))\nfor i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):\n create_gird(pd.concat(curr_f),\"Disease\",\"Type\",\"Year\", \"Normalized Citation Rate\", False, legend=False)\n plt.savefig(f\"output/Papers/Medicine_NCR_{i}.svg\")\n# plt.close()\n\n",
"_____no_output_____"
],
[
"\nsns.set(font_scale=1.3)\n\nplt.rc('text', usetex=False)\nplt.figure(figsize=(8, 6))\ndes = list(get_data(cite[(cite[\"Year\"]>=1980)&(cite[\"Type\"]== \"Virology\")]))\nfor i, curr_f in enumerate(tqdm(chunks(des, 20), total=((len(des) // 20)+1))):\n create_gird(pd.concat(curr_f),\"Disease\",\"Type\",\"Year\", \"Normalized Citation Rate\", False, legend=False)\n plt.savefig(f\"output/Papers/Virolgy_NCR_{i}.svg\")\n# plt.close()\n\n",
"_____no_output_____"
],
[
"np.log(10)",
"_____no_output_____"
],
[
"10 ** np.log(6)",
"_____no_output_____"
],
[
"cite[\"Normalized Citation Rate\"] = np.log(cite[\"NCR\"])",
"_____no_output_____"
],
[
"import plotly.express as px\n\nfig = px.line(cite, x=\"Year\", y=\"Normalized Citaion Rate\",color=\"Disease\", width=1600, height=800)\n\nfig.show()",
"_____no_output_____"
],
[
"data = cite[(cite[\"Year\"]>=1980)&(cite[\"Type\"]== \"Virology\")&(cite[\"Year\"]<2019)][[\"Disease\",\"Year\",\"NCR\"]]",
"_____no_output_____"
],
[
"data = data.sort_values([\"Disease\",\"Year\"])",
"_____no_output_____"
],
[
"from tslearn.metrics import dtw \n\nres= {\"Disease1\":[], \"Disease2\":[], \"dtw\":[]}\nfor d1, df1 in data.groupby(\"Disease\"):\n for d2, df2 in data.groupby(\"Disease\"):\n res[\"Disease1\"].append(d1)\n res[\"Disease2\"].append(d2)\n disease1 = df1[\"NCR\"].values\n disease2 = df2[\"NCR\"].values\n res[\"dtw\"].append(dtw(disease1, disease2))\n",
"_____no_output_____"
],
[
"piv_data = []\nfor d, df in data.groupby(\"Disease\"):\n piv_data.append(df[\"NCR\"].values)",
"_____no_output_____"
],
[
"sns.set( font_scale=2.0)\n\ncorr = pd.DataFrame(res).pivot(index='Disease1', columns='Disease2', values='dtw')\nmask = np.zeros_like(corr)\nmask[np.triu_indices_from(mask)] = True\nwith sns.axes_style(\"white\"):\n plt.figure(figsize=(40,20))\n ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True, annot=True, fmt='0.3f', cmap=sns.light_palette(\"#cc0000\" , reverse=True, as_cmap=True))\n plt.savefig(\"output/Papers/dtw-ncr.svg\")",
"_____no_output_____"
],
[
"from tslearn.generators import random_walks\nfrom tslearn.clustering import TimeSeriesKMeans\n# X = random_walks(n_ts=50, sz=32, d=1)\nkm = TimeSeriesKMeans(n_clusters=2, metric=\"dtw\", max_iter=10, tol=1e-5).fit(to_time_series_dataset(piv_data))\n\nfrom collections import defaultdict\nclusters = defaultdict(lambda: [])\nfor d, c in zip(corr.index, km.labels_):\n clusters[c].append(d)\nclusters",
"_____no_output_____"
]
],
[
[
"### Data and Code in research",
"_____no_output_____"
]
],
[
[
"from ScienceDynamics.datasets.microsoft_academic_graph import MicrosoftAcademicGraph\nfrom ScienceDynamics.config.configs import DATASETS_BASE_DIR\nmag = MicrosoftAcademicGraph(DATASETS_BASE_DIR)",
"_____no_output_____"
],
[
"resources = diseases_mag.join(mag.paper_resources, on=\"PaperId\")",
"_____no_output_____"
]
],
[
[
"ResourceType. 1 = Project, 2 = Data, 4 = Code",
"_____no_output_____"
]
],
[
[
"resources[resources[\"ResourceType\"]==2][\"disease\"].value_counts()",
"_____no_output_____"
],
[
"len(resources[resources[\"ResourceType\"]==2][\"disease\"])",
"_____no_output_____"
],
[
"len(resources[resources[\"ResourceType\"]==4][\"disease\"])",
"_____no_output_____"
],
[
"resources[resources[\"ResourceType\"]==4][\"disease\"].value_counts()",
"_____no_output_____"
],
[
"resources[resources[\"ResourceType\"]==1][\"disease\"].value_counts()",
"_____no_output_____"
]
],
[
[
"## Data Fusion",
"_____no_output_____"
]
],
[
[
"diseases_pubmed = load_sframe(\"Data/pubmed/diseases_pubmed.sframe\")",
"_____no_output_____"
],
[
"pubmed_papers_year = diseases_pubmed.groupby(\"year\",{\"PubMed\":agg.COUNT()})",
"_____no_output_____"
],
[
"mag_papers_year = diseases_mag.groupby(\"Year\",{\"MAG\":agg.COUNT()})",
"_____no_output_____"
],
[
"pubmed = load_sframe(\"Data/pubmed/pubmed.sframe\")",
"_____no_output_____"
],
[
"pubmed_papers_year = pubmed.groupby(\"year\",{\"PubMed\":agg.COUNT()})",
"_____no_output_____"
],
[
"mag_papers_year = med_mag.groupby(\"Year\",{\"MAG\":agg.COUNT()})",
"_____no_output_____"
],
[
"df = pubmed_papers_year.join(mag_papers_year,{\"year\":\"Year\"}).sort(\"year\")",
"_____no_output_____"
],
[
"df =df.rename({\"year\":\"Year\"})",
"_____no_output_____"
],
[
"df2 = df.pack_columns(column_names=[\"MAG\",\"PubMed\"], dtype=dict, new_column_name='Papers').stack(\"Papers\", new_column_name=['Dataset', 'Total Papers'])",
"_____no_output_____"
],
[
"import plotly.express as px\n\nfig = px.line(df2[df2[\"Year\"]<2016].to_dataframe(), x=\"Year\", y=\"Total Papers\",color=\"Dataset\", width=1600, height=800)\nfig.update_layout({\"legend\":{\"x\":0,\"y\":1.1}, \"legend_orientation\":\"h\"}, font=dict(\n size=20,\n ))\n\n\nfig.show()",
"_____no_output_____"
],
[
"# fig.write_image(\"output/Papers/Total Papers.svg\")\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10da94ba2521ca48e38d50aebd77066786179f
| 659 |
ipynb
|
Jupyter Notebook
|
notebooks/utils/stop_execution.ipynb
|
laurentgrenier/maps_hautegaronne
|
bfc7372e91247f6e26146fd01864c5f3f0c5dbad
|
[
"MIT"
] | null | null | null |
notebooks/utils/stop_execution.ipynb
|
laurentgrenier/maps_hautegaronne
|
bfc7372e91247f6e26146fd01864c5f3f0c5dbad
|
[
"MIT"
] | null | null | null |
notebooks/utils/stop_execution.ipynb
|
laurentgrenier/maps_hautegaronne
|
bfc7372e91247f6e26146fd01864c5f3f0c5dbad
|
[
"MIT"
] | null | null | null | 17.810811 | 42 | 0.526555 |
[
[
[
"class StopExecution(Exception):\n def _render_traceback_(self):\n pass",
"_____no_output_____"
]
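,
[
"# Minimal usage sketch: raising StopExecution halts the current cell quietly, because\n# _render_traceback_ suppresses the traceback display. The guard condition is an illustrative assumption.\ncondition_failed = True  # assumption for demonstration\nif condition_failed:\n    raise StopExecution",
"_____no_output_____"
]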
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a10daa8d9a3a0867c1615ded28aa812d67496ad
| 7,830 |
ipynb
|
Jupyter Notebook
|
notebooks/L-BFGS-B Debug Example.ipynb
|
ihmeuw-msca/sfma
|
1531cd9978644be489d9215921b4933ebb9f59bd
|
[
"BSD-2-Clause"
] | null | null | null |
notebooks/L-BFGS-B Debug Example.ipynb
|
ihmeuw-msca/sfma
|
1531cd9978644be489d9215921b4933ebb9f59bd
|
[
"BSD-2-Clause"
] | null | null | null |
notebooks/L-BFGS-B Debug Example.ipynb
|
ihmeuw-msca/sfma
|
1531cd9978644be489d9215921b4933ebb9f59bd
|
[
"BSD-2-Clause"
] | null | null | null | 23.513514 | 211 | 0.523755 |
[
[
[
"### This is an example of where Scipy default optimizer (L-BFGS-B) does not correctly estimate the inefficiency variance. Even with $\\gamma$ set to 0 and small measurement error, it estimates $\\eta = 0$.",
"_____no_output_____"
],
[
"#### January 22, 2021 (after SFMA meeting)\nInstall the latest commit of `anml` from GitHub and the `logerf` branch from `SFMA`",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom sfma.api import SFMAModel",
"_____no_output_____"
]
],
[
[
"## Make Simulations",
"_____no_output_____"
]
],
[
[
"np.random.seed(10)",
"_____no_output_____"
],
[
"n = 100",
"_____no_output_____"
],
[
"intercept = 1.5\nslope = 5\n\nx_domain = [0, 10]\n\nineff = 0.4\n\nsample_size_1 = [1000, 0.7]\nsample_size_2 = [1000, 0.3]",
"_____no_output_____"
],
[
"def frontier(x):\n return np.log(intercept + slope * x)",
"_____no_output_____"
],
[
"def simulate():\n x = np.random.uniform(low=x_domain[0], high=x_domain[1], size=n)\n sample_sizes_1 = np.random.negative_binomial(\n n=sample_size_1[0], p=sample_size_1[1], size=int(n / 2)\n )\n sample_sizes_2 = np.random.negative_binomial(\n n=sample_size_2[0], p=sample_size_2[1], size=int(n / 2)\n )\n sample_sizes = np.append(sample_sizes_1, sample_sizes_2)\n \n the_frontier = frontier(x)\n inefficiency = np.random.exponential(ineff, size=n)\n means = the_frontier - inefficiency\n samples = [np.random.normal(m, scale=4, size=s) for m, s in zip(means, sample_sizes)]\n est_means = np.array([np.mean(s) for s in samples])\n est_sterr = np.array([np.sqrt(np.sum(sum((s - np.mean(s))**2)) / ((len(s) - 1)))/np.sqrt(len(s)) for s in samples])\n df = pd.DataFrame({\n 'output': est_means,\n 'se': est_sterr,\n 'input': x,\n 'ones': np.ones(len(x)),\n 'frontier': the_frontier,\n 'truth': means,\n 'sample_size': sample_sizes\n })\n return df",
"_____no_output_____"
],
[
"sim = simulate()",
"_____no_output_____"
],
[
"the_frontier = sim['frontier']\nlinspace = np.linspace(x_domain[0], x_domain[1])\nfront = frontier(linspace)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 1, figsize=(8, 4))\nax.plot(linspace, front, linestyle='solid')\nax.scatter(sim.input, sim.output, color='orange')\nax.errorbar(sim.input, sim.output, yerr=sim.se, linestyle='None')",
"_____no_output_____"
],
[
"model = SFMAModel(\n df=sim,\n col_output='output',\n col_se='se',\n col_input='input',\n include_gamma=True\n)\nconcave = SFMAModel(\n df=sim,\n col_output='output',\n col_se='se',\n col_input='input',\n r_linear=True,\n concave=True,\n include_gamma=True,\n)",
"_____no_output_____"
],
[
"model.fit(options={'solver_options': {}})\n# concave.fit(options={'solver_options': {}})",
"_____no_output_____"
],
[
"sim['base_predictions'] = model.predict()\n# sim['concave_predictions'] = concave.predict()\nsim.sort_values('input', inplace=True)",
"_____no_output_____"
]
],
[
[
"#### The last entry is $\\eta$ and you can see that it's 0 for the `model` object but non-zero correct) for the `concave` object.",
"_____no_output_____"
]
],
[
[
"model.x_init",
"_____no_output_____"
],
[
"model.solver.x_opt",
"_____no_output_____"
],
[
"concave.solver.x_opt",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(1, 1, figsize=(8, 4))\naxes.plot(linspace, front, linestyle='dashed', color='black')\naxes.scatter(sim.input, sim.output, color='grey', alpha=0.4, label=\"data\")\n# axes.scatter(sim.input, sim.output + concave.inefficiencies, color='#008080', alpha=0.4, label=\"data + inefficiency\")\naxes.errorbar(sim.input, sim.output, yerr=sim.se, linestyle='None', color='grey', alpha=0.4)\naxes.plot(sim.input, sim.base_predictions, color='red', label='basic (L-BFGS-B)')\n# axes.plot(sim.input, sim.concave_predictions, color='green', label='concave (trust-constr)')\naxes.legend()\nplt.savefig(\"results.png\", bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"model.solver.result",
"_____no_output_____"
],
[
"p = np.random.uniform(size=8)",
"_____no_output_____"
],
[
"p",
"_____no_output_____"
],
[
"model.marginal_model.gradient(x=p, data=model.data)",
"_____no_output_____"
],
[
"model.marginal_model.gradient_ad(x=p, data=model.data)",
"_____no_output_____"
],
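[
"# Sanity-check sketch: the analytic and AD gradients above should agree closely\n# (assumes both calls return array-like gradients, as their use above suggests).\ng_analytic = model.marginal_model.gradient(x=p, data=model.data)\ng_ad = model.marginal_model.gradient_ad(x=p, data=model.data)\nnp.max(np.abs(np.asarray(g_analytic) - np.asarray(g_ad)))",
"_____no_output_____"
],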
[
"model.data.obs.shape[0]",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a10ecbb6b72e6ecce0ba49930fdaf6af66d4aae
| 347,036 |
ipynb
|
Jupyter Notebook
|
4_Convolution Neural Networks/Week2/ResNets/Residual_Networks.ipynb
|
zyong812/deeplearning.ai
|
5b1c29e8f84d0733ce6c49c14fb8e4a1bc736e55
|
[
"MIT"
] | 7 |
2017-12-14T20:44:15.000Z
|
2021-09-15T02:21:54.000Z
|
4_Convolution Neural Networks/Week2/ResNets/Residual_Networks.ipynb
|
zyong812/deeplearning.ai
|
5b1c29e8f84d0733ce6c49c14fb8e4a1bc736e55
|
[
"MIT"
] | null | null | null |
4_Convolution Neural Networks/Week2/ResNets/Residual_Networks.ipynb
|
zyong812/deeplearning.ai
|
5b1c29e8f84d0733ce6c49c14fb8e4a1bc736e55
|
[
"MIT"
] | 6 |
2017-12-29T01:35:52.000Z
|
2021-09-15T02:22:08.000Z
| 109.165146 | 110,302 | 0.705852 |
[
[
[
"# Residual Networks\n\nWelcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible.\n\n**In this assignment, you will:**\n- Implement the basic building blocks of ResNets. \n- Put together these building blocks to implement and train a state-of-the-art neural network for image classification. \n\nThis assignment will be done in Keras. \n\nBefore jumping into the problem, let's run the cell below to load the required packages.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom keras import layers\nfrom keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\nfrom keras.models import Model, load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nimport pydot\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\nfrom resnets_utils import *\nfrom keras.initializers import glorot_uniform\nimport scipy.misc\nfrom matplotlib.pyplot import imshow\n%matplotlib inline\n\nimport keras.backend as K\nK.set_image_data_format('channels_last')\nK.set_learning_phase(1)",
"_____no_output_____"
]
],
[
[
"## 1 - The problem of very deep neural networks\n\nLast week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.\n\nThe main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the lower layers) to very complex features (at the deeper layers). However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent unbearably slow. More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and \"explode\" to take very large values). \n\nDuring training, you might therefore see the magnitude (or norm) of the gradient for the earlier layers descrease to zero very rapidly as training proceeds: ",
"_____no_output_____"
],
[
"<img src=\"images/vanishing_grad_kiank.png\" style=\"width:450px;height:220px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Vanishing gradient** <br> The speed of learning decreases very rapidly for the early layers as the network trains </center></caption>\n\nYou are now going to solve this problem by building a Residual Network!",
"_____no_output_____"
],
[
"## 2 - Building a Residual Network\n\nIn ResNets, a \"shortcut\" or a \"skip connection\" allows the gradient to be directly backpropagated to earlier layers: \n\n<img src=\"images/skip_connection_kiank.png\" style=\"width:650px;height:200px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : A ResNet block showing a **skip-connection** <br> </center></caption>\n\nThe image on the left shows the \"main path\" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. \n\nWe also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function--even more than skip connections helping with vanishing gradients--accounts for ResNets' remarkable performance.)\n\nTwo main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them. ",
"_____no_output_____"
],
[
"### 2.1 - The identity block\n\nThe identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps:\n\n<img src=\"images/idblock2_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Identity block.** Skip connection \"skips over\" 2 layers. </center></caption>\n\nThe upper path is the \"shortcut path.\" The lower path is the \"main path.\" In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! \n\nIn this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection \"skips over\" 3 hidden layers rather than 2 layers. It looks like this: \n\n<img src=\"images/idblock3_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Identity block.** Skip connection \"skips over\" 3 layers.</center></caption>\n\nHere're the individual steps.\n\nFirst component of main path: \n- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. \n- The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nSecond component of main path:\n- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is \"same\" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. \n- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nThird component of main path:\n- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is \"valid\" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. \n- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. \n\nFinal step: \n- The shortcut and the input are added together.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\n**Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read over this carefully to make sure you understand what it is doing. You should implement the rest. \n- To implement the Conv2D step: [See reference](https://keras.io/layers/convolutional/#conv2d)\n- To implement BatchNorm: [See reference](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the channels axis))\n- For the activation, use: `Activation('relu')(X)`\n- To add the value passed forward by the shortcut: [See reference](https://keras.io/layers/merge/#add)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: identity_block\n\ndef identity_block(X, f, filters, stage, block):\n \"\"\"\n Implementation of the identity block as defined in Figure 3\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n \n Returns:\n X -- output of the identity block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value. You'll need this later to add back to the main path. \n X_shortcut = X\n \n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n ### START CODE HERE ###\n \n # Second component of main path (≈3 lines)\n X = Conv2D(filters = F2, kernel_size = (f,f), strides = (1,1), padding= 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path (≈2 lines)\n X = Conv2D(filters = F3, kernel_size = (1,1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n ### END CODE HERE ###\n \n return X",
"_____no_output_____"
],
[
"tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(1)\n A_prev = tf.placeholder(\"float\", [3, 4, 4, 6])\n X = np.random.randn(3, 4, 4, 6)\n A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')\n test.run(tf.global_variables_initializer())\n out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})\n print(\"out = \" + str(out[0][1][1][0]))",
"out = [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **out**\n </td>\n <td>\n [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"## 2.2 - The convolutional block\n\nYou've implemented the ResNet identity block. Next, the ResNet \"convolutional block\" is the other type of block. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path: \n\n<img src=\"images/convblock_kiank.png\" style=\"width:650px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Convolutional block** </center></caption>\n\nThe CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. \n\nThe details of the convolutional block are as follows. \n\nFirst component of main path:\n- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is \"valid\" and its name should be `conv_name_base + '2a'`. \n- The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nSecond component of main path:\n- The second CONV2D has $F_2$ filters of (f,f) and a stride of (1,1). Its padding is \"same\" and it's name should be `conv_name_base + '2b'`.\n- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n\nThird component of main path:\n- The third CONV2D has $F_3$ filters of (1,1) and a stride of (1,1). Its padding is \"valid\" and it's name should be `conv_name_base + '2c'`.\n- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. \n\nShortcut path:\n- The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is \"valid\" and its name should be `conv_name_base + '1'`.\n- The BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '1'`. \n\nFinal step: \n- The shortcut and the main path values are added together.\n- Then apply the ReLU activation function. This has no name and no hyperparameters. \n \n**Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader.\n- [Conv Hint](https://keras.io/layers/convolutional/#conv2d)\n- [BatchNorm Hint](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))\n- For the activation, use: `Activation('relu')(X)`\n- [Addition Hint](https://keras.io/layers/merge/#add)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: convolutional_block\n\ndef convolutional_block(X, f, filters, stage, block, s = 2):\n \"\"\"\n Implementation of the convolutional block as defined in Figure 4\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n s -- Integer, specifying the stride to be used\n \n Returns:\n X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value\n X_shortcut = X\n\n\n ##### MAIN PATH #####\n # First component of main path \n X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n ### START CODE HERE ###\n\n # Second component of main path (≈3 lines)\n X = Conv2D(F2,(f, f),strides=(1, 1),padding='same',name=conv_name_base +'2b',kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3,name=bn_name_base+'2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path (≈2 lines)\n X = Conv2D(F3,(1,1),strides=(1,1),padding='valid',name=conv_name_base+'2c',kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3,name=bn_name_base+'2c')(X)\n\n ##### SHORTCUT PATH #### (≈2 lines)\n X_shortcut = Conv2D(F3,(1,1),strides=(s,s),padding='valid',name=conv_name_base+'1',kernel_initializer=glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis=3,name=bn_name_base+'1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n ### END CODE HERE ###\n \n return X",
"_____no_output_____"
],
[
"tf.reset_default_graph()\n\nwith tf.Session() as test:\n np.random.seed(1)\n A_prev = tf.placeholder(\"float\", [3, 4, 4, 6])\n X = np.random.randn(3, 4, 4, 6)\n A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')\n test.run(tf.global_variables_initializer())\n out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})\n print(\"out = \" + str(out[0][1][1][0]))",
"out = [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **out**\n </td>\n <td>\n [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"## 3 - Building your first ResNet model (50 layers)\n\nYou now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. \"ID BLOCK\" in the diagram stands for \"Identity block,\" and \"ID BLOCK x3\" means you should stack 3 identity blocks together.\n\n<img src=\"images/resnet_kiank.png\" style=\"width:850px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 5** </u><font color='purple'> : **ResNet-50 model** </center></caption>\n\nThe details of this ResNet-50 model are:\n- Zero-padding pads the input with a pad of (3,3)\n- Stage 1:\n - The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2). Its name is \"conv1\".\n - BatchNorm is applied to the channels axis of the input.\n - MaxPooling uses a (3,3) window and a (2,2) stride.\n- Stage 2:\n - The convolutional block uses three set of filters of size [64,64,256], \"f\" is 3, \"s\" is 1 and the block is \"a\".\n - The 2 identity blocks use three set of filters of size [64,64,256], \"f\" is 3 and the blocks are \"b\" and \"c\".\n- Stage 3:\n - The convolutional block uses three set of filters of size [128,128,512], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 3 identity blocks use three set of filters of size [128,128,512], \"f\" is 3 and the blocks are \"b\", \"c\" and \"d\".\n- Stage 4:\n - The convolutional block uses three set of filters of size [256, 256, 1024], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 5 identity blocks use three set of filters of size [256, 256, 1024], \"f\" is 3 and the blocks are \"b\", \"c\", \"d\", \"e\" and \"f\".\n- Stage 5:\n - The convolutional block uses three set of filters of size [512, 512, 2048], \"f\" is 3, \"s\" is 2 and the block is \"a\".\n - The 2 identity blocks use three set of filters of size [512, 512, 2048], \"f\" is 3 and the blocks are \"b\" and \"c\".\n- The 2D Average Pooling uses a window of shape (2,2) and its name is \"avg_pool\".\n- The flatten doesn't have any hyperparameters or name.\n- The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation. Its name should be `'fc' + str(classes)`.\n\n**Exercise**: Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2.) Make sure you follow the naming convention in the text above. \n\nYou'll need to use this function: \n- Average pooling [see reference](https://keras.io/layers/pooling/#averagepooling2d)\n\nHere're some other functions we used in the code below:\n- Conv2D: [See reference](https://keras.io/layers/convolutional/#conv2d)\n- BatchNorm: [See reference](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))\n- Zero padding: [See reference](https://keras.io/layers/convolutional/#zeropadding2d)\n- Max pooling: [See reference](https://keras.io/layers/pooling/#maxpooling2d)\n- Fully conected layer: [See reference](https://keras.io/layers/core/#dense)\n- Addition: [See reference](https://keras.io/layers/merge/#add)",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: ResNet50\n\ndef ResNet50(input_shape = (64, 64, 3), classes = 6):\n \"\"\"\n Implementation of the popular ResNet50 the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n \n # Define the input as a tensor with shape input_shape\n X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')\n\n ### START CODE HERE ###\n\n # Stage 3 (≈4 lines)\n X = convolutional_block(X, f=3, filters= [128, 128, 512], stage = 3, block='a', s=2)\n X = identity_block(X, f=3, filters= [128, 128, 512], stage = 3, block='b')\n X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')\n X = identity_block(X, 3, [128, 128, 512], stage =3, block='d')\n\n # Stage 4 (≈6 lines)\n X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block ='f')\n\n # Stage 5 (≈3 lines)\n X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')\n X = identity_block(X, 3, [512, 512, 2048], stage =5, block='c')\n\n # AVGPOOL (≈1 line). Use \"X = AveragePooling2D(...)(X)\"\n X = AveragePooling2D((2,2),name='avg_pool')(X)\n \n ### END CODE HERE ###\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet50')\n\n return model",
"_____no_output_____"
]
],
[
[
"Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below.",
"_____no_output_____"
]
],
[
[
"model = ResNet50(input_shape = (64, 64, 3), classes = 6)",
"_____no_output_____"
]
],
[
[
"As seen in the Keras Tutorial Notebook, prior training a model, you need to configure the learning process by compiling the model.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"The model is now ready to be trained. The only thing you need is a dataset.",
"_____no_output_____"
],
[
"Let's load the SIGNS Dataset.\n\n<img src=\"images/signs_data_kiank.png\" style=\"width:450px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 6** </u><font color='purple'> : **SIGNS dataset** </center></caption>\n",
"_____no_output_____"
]
],
[
[
"X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()\n\n# Normalize image vectors\nX_train = X_train_orig/255.\nX_test = X_test_orig/255.\n\n# Convert training and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 6).T\nY_test = convert_to_one_hot(Y_test_orig, 6).T\n\nprint (\"number of training examples = \" + str(X_train.shape[0]))\nprint (\"number of test examples = \" + str(X_test.shape[0]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))",
"number of training examples = 1080\nnumber of test examples = 120\nX_train shape: (1080, 64, 64, 3)\nY_train shape: (1080, 6)\nX_test shape: (120, 64, 64, 3)\nY_test shape: (120, 6)\n"
]
],
[
[
"Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch. ",
"_____no_output_____"
]
],
[
[
"model.fit(X_train, Y_train, epochs = 2, batch_size = 32)",
"Epoch 1/2\n1080/1080 [==============================] - 257s - loss: 3.1609 - acc: 0.2463 \nEpoch 2/2\n1080/1080 [==============================] - 251s - loss: 2.2052 - acc: 0.3315 \n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n ** Epoch 1/2**\n </td>\n <td>\n loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours.\n </td>\n </tr>\n <tr>\n <td>\n ** Epoch 2/2**\n </td>\n <td>\n loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing.\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"Let's see how this model (trained on only two epochs) performs on the test set.",
"_____no_output_____"
]
],
[
[
"preds = model.evaluate(X_test, Y_test)\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))",
"120/120 [==============================] - 9s \nLoss = 2.26813445091\nTest Accuracy = 0.166666666667\n"
]
],
[
[
"**Expected Output**:\n\n<table>\n <tr>\n <td>\n **Test Accuracy**\n </td>\n <td>\n between 0.16 and 0.25\n </td>\n </tr>\n\n</table>",
"_____no_output_____"
],
[
"For the purpose of this assignment, we've asked you to train the model only for two epochs. You can see that it achieves poor performances. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well.",
"_____no_output_____"
],
[
"After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get a lot better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU. \n\nUsing a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take ≈1min to load the model.",
"_____no_output_____"
]
],
[
[
"model = load_model('ResNet50.h5') ",
"_____no_output_____"
],
[
"preds = model.evaluate(X_test, Y_test)\nprint (\"Loss = \" + str(preds[0]))\nprint (\"Test Accuracy = \" + str(preds[1]))",
"120/120 [==============================] - 11s \nLoss = 0.530178320408\nTest Accuracy = 0.866666662693\n"
]
],
[
[
"ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy.\n\nCongratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system! ",
"_____no_output_____"
],
[
"## 4 - Test on your own image (Optional/Ungraded)",
"_____no_output_____"
],
[
"If you wish, you can also take a picture of your own hand and see the output of the model. To do this:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the following code\n 4. Run the code and check if the algorithm is right! ",
"_____no_output_____"
]
],
[
[
"img_path = 'images/my_image.jpg'\nimg = image.load_img(img_path, target_size=(64, 64))\nx = image.img_to_array(img)\nx = np.expand_dims(x, axis=0)\nx = preprocess_input(x)\nprint('Input image shape:', x.shape)\nmy_image = scipy.misc.imread(img_path)\nimshow(my_image)\nprint(\"class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = \")\nprint(model.predict(x))",
"Input image shape: (1, 64, 64, 3)\nclass prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = \n[[ 1. 0. 0. 0. 0. 0.]]\n"
]
],
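[
[
"A small added sketch (not part of the graded assignment): the model outputs one probability per class, so the predicted label is simply the index of the largest entry in the prediction vector. This assumes `model` and `x` from the cell above.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# Added sketch: turn the class-probability vector into a single label\npred = model.predict(x)\nprint(\"predicted class:\", int(np.argmax(pred)))",
"_____no_output_____"
]
],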
[
[
"You can also print a summary of your model by running the following code.",
"_____no_output_____"
]
],
[
[
"model.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_1 (InputLayer) (None, 64, 64, 3) 0 \n____________________________________________________________________________________________________\nzero_padding2d_1 (ZeroPadding2D) (None, 70, 70, 3) 0 input_1[0][0] \n____________________________________________________________________________________________________\nconv1 (Conv2D) (None, 32, 32, 64) 9472 zero_padding2d_1[0][0] \n____________________________________________________________________________________________________\nbn_conv1 (BatchNormalization) (None, 32, 32, 64) 256 conv1[0][0] \n____________________________________________________________________________________________________\nactivation_4 (Activation) (None, 32, 32, 64) 0 bn_conv1[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 15, 15, 64) 0 activation_4[0][0] \n____________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 15, 15, 64) 4160 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_5 (Activation) (None, 15, 15, 64) 0 bn2a_branch2a[0][0] \n____________________________________________________________________________________________________\nres2a_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_5[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_6 (Activation) (None, 15, 15, 64) 0 bn2a_branch2b[0][0] \n____________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_6[0][0] \n____________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 15, 15, 256) 16640 max_pooling2d_1[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalization (None, 15, 15, 256) 1024 res2a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_2 (Add) (None, 15, 15, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_7 (Activation) (None, 15, 15, 256) 0 add_2[0][0] \n____________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_7[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2a 
(BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_8 (Activation) (None, 15, 15, 64) 0 bn2b_branch2a[0][0] \n____________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_8[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_9 (Activation) (None, 15, 15, 64) 0 bn2b_branch2b[0][0] \n____________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_9[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_3 (Add) (None, 15, 15, 256) 0 bn2b_branch2c[0][0] \n activation_7[0][0] \n____________________________________________________________________________________________________\nactivation_10 (Activation) (None, 15, 15, 256) 0 add_3[0][0] \n____________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_10[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_11 (Activation) (None, 15, 15, 64) 0 bn2c_branch2a[0][0] \n____________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_11[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_12 (Activation) (None, 15, 15, 64) 0 bn2c_branch2b[0][0] \n____________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_12[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_4 (Add) (None, 15, 15, 256) 0 bn2c_branch2c[0][0] \n activation_10[0][0] \n____________________________________________________________________________________________________\nactivation_13 (Activation) (None, 15, 15, 256) 0 add_4[0][0] \n____________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 8, 8, 128) 32896 activation_13[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 
res3a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_14 (Activation) (None, 8, 8, 128) 0 bn3a_branch2a[0][0] \n____________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_14[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_15 (Activation) (None, 8, 8, 128) 0 bn3a_branch2b[0][0] \n____________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_15[0][0] \n____________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 8, 8, 512) 131584 activation_13[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalization (None, 8, 8, 512) 2048 res3a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_5 (Add) (None, 8, 8, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_16 (Activation) (None, 8, 8, 512) 0 add_5[0][0] \n____________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_16[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_17 (Activation) (None, 8, 8, 128) 0 bn3b_branch2a[0][0] \n____________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_17[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_18 (Activation) (None, 8, 8, 128) 0 bn3b_branch2b[0][0] \n____________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_18[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_6 (Add) (None, 8, 8, 512) 0 bn3b_branch2c[0][0] \n activation_16[0][0] \n____________________________________________________________________________________________________\nactivation_19 (Activation) (None, 8, 8, 512) 0 add_6[0][0] 
\n____________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_19[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_20 (Activation) (None, 8, 8, 128) 0 bn3c_branch2a[0][0] \n____________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_20[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_21 (Activation) (None, 8, 8, 128) 0 bn3c_branch2b[0][0] \n____________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_21[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_7 (Add) (None, 8, 8, 512) 0 bn3c_branch2c[0][0] \n activation_19[0][0] \n____________________________________________________________________________________________________\nactivation_22 (Activation) (None, 8, 8, 512) 0 add_7[0][0] \n____________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_22[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_23 (Activation) (None, 8, 8, 128) 0 bn3d_branch2a[0][0] \n____________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_23[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_24 (Activation) (None, 8, 8, 128) 0 bn3d_branch2b[0][0] \n____________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_24[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3d_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_8 (Add) (None, 8, 8, 512) 0 bn3d_branch2c[0][0] \n activation_22[0][0] \n____________________________________________________________________________________________________\nactivation_25 (Activation) (None, 8, 8, 512) 0 add_8[0][0] 
\n____________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 4, 4, 256) 131328 activation_25[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_26 (Activation) (None, 4, 4, 256) 0 bn4a_branch2a[0][0] \n____________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_26[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_27 (Activation) (None, 4, 4, 256) 0 bn4a_branch2b[0][0] \n____________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_27[0][0] \n____________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 4, 4, 1024) 525312 activation_25[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalization (None, 4, 4, 1024) 4096 res4a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_9 (Add) (None, 4, 4, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_28 (Activation) (None, 4, 4, 1024) 0 add_9[0][0] \n____________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_28[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_29 (Activation) (None, 4, 4, 256) 0 bn4b_branch2a[0][0] \n____________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_29[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_30 (Activation) (None, 4, 4, 256) 0 bn4b_branch2b[0][0] \n____________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_30[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4b_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_10 (Add) (None, 4, 4, 1024) 0 bn4b_branch2c[0][0] \n activation_28[0][0] \n____________________________________________________________________________________________________\nactivation_31 (Activation) (None, 4, 4, 1024) 0 add_10[0][0] \n____________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_31[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_32 (Activation) (None, 4, 4, 256) 0 bn4c_branch2a[0][0] \n____________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_32[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_33 (Activation) (None, 4, 4, 256) 0 bn4c_branch2b[0][0] \n____________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_33[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_11 (Add) (None, 4, 4, 1024) 0 bn4c_branch2c[0][0] \n activation_31[0][0] \n____________________________________________________________________________________________________\nactivation_34 (Activation) (None, 4, 4, 1024) 0 add_11[0][0] \n____________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_34[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_35 (Activation) (None, 4, 4, 256) 0 bn4d_branch2a[0][0] \n____________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_35[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_36 (Activation) (None, 4, 4, 256) 0 bn4d_branch2b[0][0] \n____________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_36[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4d_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_12 (Add) (None, 4, 4, 1024) 0 bn4d_branch2c[0][0] \n activation_34[0][0] \n____________________________________________________________________________________________________\nactivation_37 (Activation) (None, 4, 4, 1024) 0 add_12[0][0] \n____________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_37[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_38 (Activation) (None, 4, 4, 256) 0 bn4e_branch2a[0][0] \n____________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_38[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_39 (Activation) (None, 4, 4, 256) 0 bn4e_branch2b[0][0] \n____________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_39[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4e_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_13 (Add) (None, 4, 4, 1024) 0 bn4e_branch2c[0][0] \n activation_37[0][0] \n____________________________________________________________________________________________________\nactivation_40 (Activation) (None, 4, 4, 1024) 0 add_13[0][0] \n____________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_40[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_41 (Activation) (None, 4, 4, 256) 0 bn4f_branch2a[0][0] \n____________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_41[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_42 (Activation) (None, 4, 4, 256) 0 bn4f_branch2b[0][0] \n____________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_42[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4f_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_14 (Add) (None, 4, 4, 1024) 0 bn4f_branch2c[0][0] \n activation_40[0][0] \n____________________________________________________________________________________________________\nactivation_43 (Activation) (None, 4, 4, 1024) 0 add_14[0][0] \n____________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 2, 2, 512) 524800 activation_43[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_44 (Activation) (None, 2, 2, 512) 0 bn5a_branch2a[0][0] \n____________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_44[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_45 (Activation) (None, 2, 2, 512) 0 bn5a_branch2b[0][0] \n____________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_45[0][0] \n____________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 2, 2, 2048) 2099200 activation_43[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalization (None, 2, 2, 2048) 8192 res5a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_15 (Add) (None, 2, 2, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_46 (Activation) (None, 2, 2, 2048) 0 add_15[0][0] \n____________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_46[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_47 (Activation) (None, 2, 2, 512) 0 bn5b_branch2a[0][0] \n____________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_47[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_48 (Activation) (None, 2, 2, 512) 0 bn5b_branch2b[0][0] 
\n____________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_48[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_16 (Add) (None, 2, 2, 2048) 0 bn5b_branch2c[0][0] \n activation_46[0][0] \n____________________________________________________________________________________________________\nactivation_49 (Activation) (None, 2, 2, 2048) 0 add_16[0][0] \n____________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_49[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_50 (Activation) (None, 2, 2, 512) 0 bn5c_branch2a[0][0] \n____________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_50[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_51 (Activation) (None, 2, 2, 512) 0 bn5c_branch2b[0][0] \n____________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_51[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_17 (Add) (None, 2, 2, 2048) 0 bn5c_branch2c[0][0] \n activation_49[0][0] \n____________________________________________________________________________________________________\nactivation_52 (Activation) (None, 2, 2, 2048) 0 add_17[0][0] \n____________________________________________________________________________________________________\navg_pool (AveragePooling2D) (None, 1, 1, 2048) 0 activation_52[0][0] \n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 2048) 0 avg_pool[0][0] \n____________________________________________________________________________________________________\nfc6 (Dense) (None, 6) 12294 flatten_1[0][0] \n====================================================================================================\nTotal params: 23,600,006\nTrainable params: 23,546,886\nNon-trainable params: 53,120\n____________________________________________________________________________________________________\n"
]
],
[
[
"Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to \"File -> Open...-> model.png\".",
"_____no_output_____"
]
],
[
[
"plot_model(model, to_file='model.png')\nSVG(model_to_dot(model).create(prog='dot', format='svg'))",
"_____no_output_____"
]
],
[
[
"<font color='blue'>\n**What you should remember:**\n- Very deep \"plain\" networks don't work in practice because they are hard to train due to vanishing gradients. \n- The skip-connections help to address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function. \n- There are two main type of blocks: The identity block and the convolutional block. \n- Very deep Residual Networks are built by stacking these blocks together.",
"_____no_output_____"
],
[
"### References \n\nThis notebook presents the ResNet algorithm due to He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the github repository of Francois Chollet: \n\n- Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385)\n- Francois Chollet's github repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a10f8fab82d880f53f7f705baf54454ae9326a2
| 7,447 |
ipynb
|
Jupyter Notebook
|
rdf-explotacion/exploiting_rdf_with_rdflib.ipynb
|
opencitydata/guia-rdf-datosgob
|
d9c7ac0fffb504e8dff1055068e7e3e3842b32e6
|
[
"CC0-1.0"
] | null | null | null |
rdf-explotacion/exploiting_rdf_with_rdflib.ipynb
|
opencitydata/guia-rdf-datosgob
|
d9c7ac0fffb504e8dff1055068e7e3e3842b32e6
|
[
"CC0-1.0"
] | null | null | null |
rdf-explotacion/exploiting_rdf_with_rdflib.ipynb
|
opencitydata/guia-rdf-datosgob
|
d9c7ac0fffb504e8dff1055068e7e3e3842b32e6
|
[
"CC0-1.0"
] | 1 |
2021-12-22T09:58:32.000Z
|
2021-12-22T09:58:32.000Z
| 29.907631 | 182 | 0.48355 |
[
[
[
"!pip install rdflib\n!pip install folium\n!pip install pyproj",
"_____no_output_____"
],
[
"storage = \"https://raw.githubusercontent.com/opencitydata/guia-rdf-datosgob/main/rdf-explotacion/terrazas-madrid.nt\" #poner el enlace a tus datos en github (raw files)\n\nfrom rdflib import Graph, Namespace, Literal\nfrom rdflib.plugins.sparql import prepareQuery\nimport folium\nfrom pyproj import Transformer\n\ng = Graph()\n\ng.parse(storage, format=\"ntriples\") #quizá esto tarde un poco si el archivo es muy grande",
"_____no_output_____"
]
],
[
[
"# Cambio de serialización del grafo creado",
"_____no_output_____"
]
],
[
[
"# Escritura del grafo en RDF/XML en el archivo terrazas-madrid.xml\ng.serialize(destination='terrazas-madrid.xml',format=\"xml\")",
"_____no_output_____"
],
[
"# Escritura del grafo en Turtle en el archivo terrrazas-madrid.ttl\ng.serialize(destination='terrazas-madrid.ttl',format=\"ttl\")",
"_____no_output_____"
]
],
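[
[
"As a quick sanity check (an added example, assuming the files written above), the serialized Turtle file can be parsed back into a fresh graph; both graphs should then contain the same number of triples.",
"_____no_output_____"
]
],
[
[
"# Added example: round-trip check on the serialization above\ng2 = Graph()\ng2.parse('terrazas-madrid.ttl', format='turtle')\nprint(len(g), len(g2))  # the two triple counts should match",
"_____no_output_____"
]
],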
[
[
"# Consulta 1: Listado de terrazas y sus horarios de lunes a viernes anualmente",
"_____no_output_____"
]
],
[
[
"from rdflib import XSD\n\nESCOM = Namespace(\"http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/\") \n\nq1 = prepareQuery('''\n SELECT \n ?terraza ?horario\n\tWHERE { \n ?terraza rdf:type escom:Terraza;\n <http://schema.org/openingHours> ?horario .\n \n FILTER(regex(?horario, \"Anual Lun-Juev.*\", \"i\" ))\n } \n ''',\n initNs = { \"escom\": ESCOM}\n)\n\nfor r in g.query(q1):\n print(r.terraza, r.horario)",
"_____no_output_____"
]
],
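[
[
"Beyond printing rows, the result of `g.query(...)` can also be materialized (a small added example; `q1` is the query prepared above): rdflib result objects support `len()` as well as ordinary iteration.",
"_____no_output_____"
]
],
[
[
"# Added example: count and collect the matches of query 1\nresults = g.query(q1)\nprint(len(results), 'terraces matched')\nterrazas = [str(r.terraza) for r in results]\nprint(terrazas[:5])  # first five terrace URIs",
"_____no_output_____"
]
],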
[
[
"# Consulta 2: Listado de terrazas que tengan más de 15 mesas autorizadas",
"_____no_output_____"
]
],
[
[
"from rdflib import XSD\n\nESCOM = Namespace(\"http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/\") \n\nq1 = prepareQuery('''\n SELECT \n ?terraza ?mesas\n\tWHERE { \n ?terraza rdf:type escom:Terraza;\n escom:numeroMesasAutorizadas ?mesas .\n \n FILTER(?mesas > \"15\"^^<http://www.w3.org/2001/XMLSchema#integer>)\n } \n ''',\n initNs = { \"escom\": ESCOM}\n)\n\nfor r in g.query(q1):\n print(r.terraza, r.mesas)\n",
"_____no_output_____"
]
],
[
[
"# Consulta 3: Listado de terrazas con actividad en el periodo anual",
"_____no_output_____"
]
],
[
[
"from rdflib import XSD\n\nESCOM = Namespace(\"http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/\") \nq1 = prepareQuery('''\n SELECT \n ?terraza\n\tWHERE { \n ?terraza rdf:type escom:Terraza;\n escom:periodoFuncionamiento <http://vocab.linkeddata.es/datosabiertos/kos/comercio/periodo-funcionamiento/anual> .\n } \n ''',\n initNs = { \"escom\": ESCOM}\n)\n\nfor r in g.query(q1):\n print(r.terraza)",
"_____no_output_____"
]
],
[
[
"# Ejemplo real: Dibujando puntos geográficos a partir de RDF",
"_____no_output_____"
]
],
[
[
"ESCOM = Namespace(\"http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/\") \n# Preparamos la consulta, dame las terrazas, su horario anual de lunes a jueves y la latitud y longitud de su LocalComercial asociado\nq1 = prepareQuery('''\n SELECT \n ?horario ?lat ?lon\n\tWHERE { \n ?terraza rdf:type escom:Terraza .\n ?terraza <http://schema.org/openingHours> ?horario .\n ?terraza escom:perteneceA ?local .\n ?local rdf:type escom:LocalComercial .\n ?local <http://www.opengis.net/ont/geosparql#hasGeometry> ?point .\n ?point rdf:type <http://www.opengis.net/ont/sf#Point> .\n ?point <https://datos.ign.es/def/geo_core#xETRS89> ?lat .\n ?point <https://datos.ign.es/def/geo_core#yETRS89> ?lon .\n \n FILTER(regex(?horario, \"Anual Lun-Juev.*\", \"i\" ))\n } LIMIT 10\n ''',\n initNs = { \"escom\": ESCOM}\n)\n# inspeccionamos los datos que nos devuelve la consulta\nfor r in g.query(q1):\n print(r.lat, r.lon, r.horario)\n\n# debemos transformar el formato de los datos de lon de UTM a WGS 84\ntransformer = Transformer.from_crs('epsg:25830','epsg:4326')\nmapa = folium.Map(location=[40.4167, -3.70325])\nfor r in g.query(q1):\n x,y = transformer.transform(float(r.lat),float(r.lon))\n horario = (r.horario).replace(\"Anual Lun-Juev \",\"\")\n folium.Marker([x,y], popup=horario, tooltip=horario).add_to(mapa)\n\nmapa",
"_____no_output_____"
]
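],
[
[
"As a small optional follow-up (an added example), the folium map can be saved to a standalone HTML file that opens in any browser. This assumes `mapa` from the cell above; the filename is our own choice.",
"_____no_output_____"
]
],
[
[
"# Added example: persist the map (filename is arbitrary)\nmapa.save('terrazas.html')",
"_____no_output_____"
]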
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a110bd14bbf4c61cf29ade5ed44c08fee925bd3
| 19,759 |
ipynb
|
Jupyter Notebook
|
Notebooks/02.4 Elementwise functions on an array.ipynb
|
kgdunn/digital-skills-module5
|
5a5c5ef170b7b314b0a414c9a6f97570779c71c7
|
[
"BSD-3-Clause"
] | null | null | null |
Notebooks/02.4 Elementwise functions on an array.ipynb
|
kgdunn/digital-skills-module5
|
5a5c5ef170b7b314b0a414c9a6f97570779c71c7
|
[
"BSD-3-Clause"
] | 3 |
2018-08-10T10:07:58.000Z
|
2021-06-01T22:18:01.000Z
|
Notebooks/02.4 Elementwise functions on an array.ipynb
|
kgdunn/digital-skills-module5
|
5a5c5ef170b7b314b0a414c9a6f97570779c71c7
|
[
"BSD-3-Clause"
] | null | null | null | 29.668168 | 876 | 0.503973 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#1.-Addition-and-subtraction\" data-toc-modified-id=\"1.-Addition-and-subtraction-1\">1. Addition and subtraction</a></span></li><li><span><a href=\"#2.-Multiplication-and-division-(element-by-element)\" data-toc-modified-id=\"2.-Multiplication-and-division-(element-by-element)-2\">2. Multiplication and division (element-by-element)</a></span></li><li><span><a href=\"#3.-Square-roots-and-other-powers\" data-toc-modified-id=\"3.-Square-roots-and-other-powers-3\">3. Square roots and other powers</a></span></li><li><span><a href=\"#4.-Trigonometric-and-other-functions\" data-toc-modified-id=\"4.-Trigonometric-and-other-functions-4\">4. Trigonometric and other functions</a></span></li><li><span><a href=\"#5.-Enrichment\" data-toc-modified-id=\"5.-Enrichment-5\">5. Enrichment</a></span></li></ul></div>",
"_____no_output_____"
],
[
">All content is released under Creative Commons Attribution [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) and all source code is released under a [BSD-3 clause license](https://en.wikipedia.org/wiki/BSD_licenses).\n>\n>Please reuse, remix, revise, and reshare this content in any way, keeping this notice. [Report issues please](https://github.com/kgdunn/digital-skills-module5/issues).\n>\n><img style=\"float: right;\" width=\"150px\" src=\"images/jupyter-logo.png\">**Are you viewing this on jupyter.org?** Then this notebook will be read-only. <br>\n>See how you can interactively run the code in this notebook by visiting our [instruction page about Notebooks](https://yint.org/notebooks). \n",
"_____no_output_____"
],
[
"# Simple elementwise functions and operations on a NumPy array\n\nOnce we have created an array - [see the prior notebooks](./) - we are then ready to actually use them for calculations!\n\nLet us consider these calculations:\n1. Addition and subtraction\n2. Multiplication and division (element-by-element)\n3. Square roots and other powers\n4. Trigonometric and other functions",
"_____no_output_____"
],
[
"## 1. Addition and subtraction\n\nNumPy can add to, or subtract from two arrays with the same shape. We will use array ``A`` and ``B`` in these examples.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nA = np.ones(shape=(5,5))\nB = np.ones(shape=(5,5))\nprint('A = \\n{}\\n\\nB = \\n{}'.format(A, B))",
"A = \n[[1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]]\n\nB = \n[[1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]]\n"
],
[
"print(A + B)",
"[[2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]]\n"
]
],
[
[
"The ``+`` operation on two arrays is actually just a convenience. The actual function in NumPy which is being called to do the work is the ``np.add(...)`` function. \n\nTry this to verify:",
"_____no_output_____"
]
],
[
[
"print(np.add(A, B))",
"[[2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]\n [2. 2. 2. 2. 2.]]\n"
]
],
[
[
"Similarly, we have the `-` and `.subtract()` functions that serve the same purpose:",
"_____no_output_____"
]
],
[
[
"print(A - B)\nprint(np.subtract(A, B)) # does the same thing as the prior line of code\nprint(np.add(A, -B)) # and this produces the same result",
"[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]\n[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]\n[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]\n"
]
],
[
[
"These are element-by-element operations. That means, NumPy performed the operation of addition on each corresponding element in the arrays `A` and `B` and then repeats that entry-by-entry. This is also called elementwise in NumPy's documentation.\n\nNumPy will also allow you take shortcuts. Imagine that you want to subtract the value of 3 from every entry in matrix `A`. You no not first need to create a matrix with the same shape as ``A`` contain the value of 3, and then go subract that. \n\n**There is a shortcut: **",
"_____no_output_____"
]
],
[
[
"print(A - 3) # still does element-by-element calculations",
"[[-2. -2. -2. -2. -2.]\n [-2. -2. -2. -2. -2.]\n [-2. -2. -2. -2. -2.]\n [-2. -2. -2. -2. -2.]\n [-2. -2. -2. -2. -2.]]\n"
]
],
[
[
"## 2. Multiplication and division (element-by-element)\n\nMultiplication and division can also be done element-by-element. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nC = np.reshape(np.linspace(1, 25, 25), (5, 5))\nprint(C)",
"[[ 1. 2. 3. 4. 5.]\n [ 6. 7. 8. 9. 10.]\n [11. 12. 13. 14. 15.]\n [16. 17. 18. 19. 20.]\n [21. 22. 23. 24. 25.]]\n"
]
],
[
[
"Now go multiply every value in matrix `C` by 2.0 as follows:",
"_____no_output_____"
]
],
[
[
"doubled = C * 2\nprint(doubled)\n\nprint(np.multiply(C, 2)) # does exactly the same as the prior code\n\n# Also try this:\nprint(C * 0.0)",
"[[ 2. 4. 6. 8. 10.]\n [12. 14. 16. 18. 20.]\n [22. 24. 26. 28. 30.]\n [32. 34. 36. 38. 40.]\n [42. 44. 46. 48. 50.]]\n[[ 2. 4. 6. 8. 10.]\n [12. 14. 16. 18. 20.]\n [22. 24. 26. 28. 30.]\n [32. 34. 36. 38. 40.]\n [42. 44. 46. 48. 50.]]\n[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]\n"
]
],
[
[
"What happens if you multiply matrix ``C`` by itself, if you want to calculated $C^2$",
"_____no_output_____"
]
],
[
[
"print(C * C)",
"[[ 1. 4. 9. 16. 25.]\n [ 36. 49. 64. 81. 100.]\n [121. 144. 169. 196. 225.]\n [256. 289. 324. 361. 400.]\n [441. 484. 529. 576. 625.]]\n"
]
],
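[
[
"Note that the result above is the elementwise square: each entry of `C` is squared individually. This is not the linear-algebra matrix product. A quick added contrast, using the same matrix `C`:",
"_____no_output_____"
]
],
[
[
"print(C * C)   # elementwise: each entry squared\nprint(C @ C)   # true matrix product (rows times columns); same as np.matmul(C, C)",
"_____no_output_____"
]
],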
[
[
"The multiply operator `*` is shorthand for ``numpy.multiply()`` and works on an element-by-element basis. Similarly the `/` operator is shorthand for `numpy.divide()`",
"_____no_output_____"
]
],
[
[
"print(C / C)\nprint(np.divide(C, C)) # both give you what you expect - a matrix of 1's\n\n# Advanced: add some code to see what happens if you divide by zero: C/0.0",
"[[1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]]\n[[1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]\n [1. 1. 1. 1. 1.]]\n"
]
],
[
[
"## 3. Square roots and other powers\n\nThere are other elementwise operations that can be done on matrices. These often involve raising the individual matrix elements to a certain power, or taking a square root (which is the same as raising a number to the power of $0.5$), or calculating the logarithm.\n\nLet's try it out interactively.\n\n### To try:\n\n> 1. Use the ``**`` operation to raise to a power\n> 2. Use the ``.square()`` function \n> 3. Use the ``.power()`` function \n> 4. Use the ``.sqrt()`` function\n> 5. Verify that `**(0.5)` gives the same values as the `.sqrt()` function\n",
"_____no_output_____"
]
],
[
[
"# Step 1:\nimport numpy as np\nD = np.reshape(np.linspace(-1, 1, 15), (3, 5)) # create a 3x5 matrix with positive and negative values\nprint(D**2)\nprint('-------')\n\n# Step 2\nprint(np.square(D)) # you should see the same as above\nprint('-------')\n\n# Step 3\nD_squared = np.power(D, 2)\nprint(D_squared)\nprint('-------')\n\n# Step 4: remember there are some negative values in D\n# The square root is undefined for negative values (exception for complex values)\nprint(np.sqrt(D)) \nprint('-------')\n\n# Step 5: raising something to the power of 0.5 is the same as square rooting\nprint(np.power(D, 0.5))",
"_____no_output_____"
]
],
[
[
"## 4. Trigonometric and other functions\n\nA wide variety of mathematical functions are possible. See the full list in the [NumPy documentation](https://docs.scipy.org/doc/numpy/reference/routines.math.html).\n\nYou will self-discover these function by running the code below.\n\n### Some questions to try answering below:\n>1. The standard trigonometric functions: ``np.sin(...)``, ``np.tan(...)``, etc\n>2. Rounding off to the closest integer. Do negative values round up towards zero, or away from zero?\n>3. Rounding off to a certain number of ``decimals``; try rounding to 1 decimal place. Are the results what you expect?\n>4. Similar to rounding: try the ``np.floor(...)`` and ``np.ceil(...)``: what is the difference between the floor and the ceiling? Hint: read the documentation for [`floor`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.floor.html) and [`ceil`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ceil.html).\n>5. Logarithms and exponents are also part of the standard calculations we expect to do with matrices using the ``np.log(...)`` and ``np.exp(...)`` functions. Recall that $\\log(\\exp(x)) = x$. \n",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nradians = np.reshape(np.linspace(-2, +2, 16), (4, 4)) # create a 4x4 matrix with positive and negative values\nprint(radians)\nprint('-----')\n\n# Step 1\nprint(np.sin(radians))\nprint('-----')\nprint(np.tan(radians))",
"[[-2. -1.73333333 -1.46666667 -1.2 ]\n [-0.93333333 -0.66666667 -0.4 -0.13333333]\n [ 0.13333333 0.4 0.66666667 0.93333333]\n [ 1.2 1.46666667 1.73333333 2. ]]\n-----\n[[-0.90929743 -0.98681992 -0.9945834 -0.93203909]\n [-0.80360826 -0.6183698 -0.38941834 -0.13293862]\n [ 0.13293862 0.38941834 0.6183698 0.80360826]\n [ 0.93203909 0.9945834 0.98681992 0.90929743]]\n-----\n[[ 2.18503986 6.09817038 -9.56867673 -2.57215162]\n [-1.35024221 -0.78684289 -0.42279322 -0.13412912]\n [ 0.13412912 0.42279322 0.78684289 1.35024221]\n [ 2.57215162 9.56867673 -6.09817038 -2.18503986]]\n"
],
[
"# Step 2\nprint(np.around(radians)) # rounds to the closest integer. Check what happens with negatives!",
"[[-2. -2. -1. -1.]\n [-1. -1. -0. -0.]\n [ 0. 0. 1. 1.]\n [ 1. 1. 2. 2.]]\n"
],
[
"# Step 3\nprint(np.around(radians, decimals=1)) # rounds to the closest 0.1\n# Advanced: try this code: np.around(radians*100, decimals=-2)\n# What does it mean to round to a negative number of decimals?",
"[[-2. -1.7 -1.5 -1.2]\n [-0.9 -0.7 -0.4 -0.1]\n [ 0.1 0.4 0.7 0.9]\n [ 1.2 1.5 1.7 2. ]]\n"
],
[
"# Step 4\nprint(np.floor(radians)) # compare this output to the original matrix\nprint(np.ceil(radians)) ",
"[[-2. -2. -2. -2.]\n [-1. -1. -1. -1.]\n [ 0. 0. 0. 0.]\n [ 1. 1. 1. 2.]]\n[[-2. -1. -1. -1.]\n [-0. -0. -0. -0.]\n [ 1. 1. 1. 1.]\n [ 2. 2. 2. 2.]]\n"
],
[
"# Step 5\nexponent = np.exp(radians)\nprint(exponent)\nprint('-----')\n\nrecovered = np.log(exponent)\nprint(recovered) \nprint('-----')\n\n# Does \"recovered\" match the original \"radians\" matrix? \n# It should: we first took the exponent, then the logarithm.\n# This subtraction should be a matrix of all zeros:\nprint(recovered - radians)",
"[[0.13533528 0.17669445 0.23069318 0.30119421]\n [0.39324072 0.51341712 0.67032005 0.87517332]\n [1.14263081 1.4918247 1.94773404 2.54297164]\n [3.32011692 4.33476183 5.65948746 7.3890561 ]]\n-----\n[[-2. -1.73333333 -1.46666667 -1.2 ]\n [-0.93333333 -0.66666667 -0.4 -0.13333333]\n [ 0.13333333 0.4 0.66666667 0.93333333]\n [ 1.2 1.46666667 1.73333333 2. ]]\n-----\n[[ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00]\n [ 0.00000000e+00 1.11022302e-16 -5.55111512e-17 0.00000000e+00]\n [ 8.32667268e-17 -5.55111512e-17 0.00000000e+00 0.00000000e+00]\n [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00]]\n"
]
],
[
[
"The last matrix in your printout above should be all zeros, but is not exactly equal to zero (it is very, very close to zero though).\n\nTo test that we can use the ``np.isclose(...)`` function. It is another elementwise function that you can add to your toolbox. It tests if the entries in an array are close to another:",
"_____no_output_____"
]
],
[
[
"np.isclose(recovered - radians, 0)",
"_____no_output_____"
],
[
"# There is a function to check if the entries are all `True`\nnp.allclose(recovered - radians, 0)",
"_____no_output_____"
]
],
[
[
"## 5. Enrichment\n\n### Try these elementwise operations on arrays yourself\n\n>1. Calculating the absolute values: `np.fabs(...)` and `np.absolute(...)`\n>2. Comparing two arrays and return the minimum `np.fmin(...)` and maximum `np.fmax(...)`\n>3. The reciprocal value of $x$ is equal to $1/x$. You can calculate it using `np.reciprocal(...)`\n>4. The sign of the values in the array: `np.sign(...)`",
"_____no_output_____"
]
]
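,
[
[
"# Assumed illustration (not in the original notebook): a quick try of the enrichment functions.\nimport numpy as np\na = np.array([-2.5, -0.5, 0.5, 2.5])\nb = np.zeros_like(a)\nprint(np.fabs(a))     # absolute values\nprint(np.fmin(a, b))  # elementwise minimum of the two arrays\nprint(np.fmax(a, b))  # elementwise maximum\nprint(np.reciprocal(np.array([1.0, 2.0, 4.0])))  # 1/x\nprint(np.sign(a))     # the sign of each entry",
"_____no_output_____"
]
]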
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a110d7e684258ab6d87fbf5a44b1fc301b00213
| 229,372 |
ipynb
|
Jupyter Notebook
|
.ipynb_checkpoints/prepareData_V2-checkpoint.ipynb
|
singchenyeo/CKD
|
73e9b5e3530fee75204e6b862996e390c6d1a443
|
[
"Apache-2.0"
] | null | null | null |
.ipynb_checkpoints/prepareData_V2-checkpoint.ipynb
|
singchenyeo/CKD
|
73e9b5e3530fee75204e6b862996e390c6d1a443
|
[
"Apache-2.0"
] | null | null | null |
.ipynb_checkpoints/prepareData_V2-checkpoint.ipynb
|
singchenyeo/CKD
|
73e9b5e3530fee75204e6b862996e390c6d1a443
|
[
"Apache-2.0"
] | null | null | null | 38.254169 | 26,196 | 0.403659 |
[
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import rcParams\nrcParams['figure.figsize'] = 11.7,8.27 # figure size in inches\n\npd.options.mode.chained_assignment = None # default='warn'\npd.set_option('display.max_colwidth', None)\npd.set_option('display.max_rows', 500) \npd.set_option('display.max_columns', 30) \n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n%config Completer.use_jedi = False\n\nfrom sklearn.impute import KNNImputer\nfrom sklearn.preprocessing import LabelEncoder",
"_____no_output_____"
]
],
[
[
"# Note\n* Aggregate data by every 180 days",
"_____no_output_____"
]
],
[
[
"df_creatinine = pd.read_csv('CSV/T_creatinine.csv'); df_creatinine.rename(columns = {'value': 'creatinine'}, inplace=True)\ndf_dbp = pd.read_csv('CSV/T_DBP.csv'); df_dbp.rename(columns = {'value': 'dbp'}, inplace=True)\n\ndf_glucose = pd.read_csv('CSV/T_glucose.csv'); df_glucose.rename(columns = {'value': 'glucose'}, inplace=True)\ndf_hgb = pd.read_csv('CSV/T_HGB.csv'); df_hgb.rename(columns = {'value': 'hgb'}, inplace=True)\ndf_ldl = pd.read_csv('CSV/T_ldl.csv'); df_ldl.rename(columns = {'value': 'ldl'}, inplace=True)\ndf_meds = pd.read_csv('CSV/T_meds.csv')\ndf_sbp = pd.read_csv('CSV/T_sbp.csv'); df_sbp.rename(columns = {'value': 'sbp'}, inplace=True)\n",
"_____no_output_____"
]
],
[
[
"# Compute maximum time point (day) for each subject",
"_____no_output_____"
]
],
[
[
"df_creatinine_d = df_creatinine.groupby(['id'])['time'].max()\ndf_dbp_d = df_dbp.groupby(['id'])['time'].max()\ndf_glucose_d = df_glucose.groupby(['id'])['time'].max()\ndf_hgb_d = df_hgb.groupby(['id'])['time'].max()\ndf_ldl_d = df_ldl.groupby(['id'])['time'].max()\ndf_sbp_d = df_sbp.groupby(['id'])['time'].max()\ndf_meds_d = df_meds.groupby(['id'])['end_day'].max()\ndf_meds_d = df_meds_d.rename('time')\n\ndf_d_merge = pd.DataFrame(pd.concat([df_creatinine_d, df_dbp_d, df_glucose_d, df_hgb_d, df_ldl_d, df_sbp_d, df_meds_d])).reset_index()\ndf_d_merge = df_d_merge.groupby(['id']).max().reset_index()\ndf_d_merge = df_d_merge.sort_values('time')\nprint('Minimum = ' + str(df_d_merge['time'].min()) + ', Maximum = ' + str(df_d_merge['time'].max()))\nprint('Mean = ' + str(df_d_merge['time'].mean()) + ', Median = ' + str(df_d_merge['time'].median()))\nplt.plot(list(range(df_d_merge.shape[0])), df_d_merge['time'], '-p', markersize=1)\nplt.xlabel(\"Subject\")\nplt.ylabel(\"Days\")\nplt.title(\"Days of record\")\ndf_d_merge.to_csv('CSV/days_of_record.csv', index=False)",
"Minimum = 708, Maximum = 1429\nMean = 1131.21, Median = 1160.0\n"
]
],
[
[
"# Process med data\n",
"_____no_output_____"
]
],
[
[
"# Ignore medication ended before day 0\ndf_meds = df_meds[df_meds['end_day'] >= 0]\ndf_meds.head(10)",
"_____no_output_____"
],
[
"period_bin = 180\n\ndef generate_bin(n_start, n_end):\n \n global period_bin\n \n start_count = period_bin\n n = 1\n token = 0\n \n # keep trying until a code is assigned\n while token == 0:\n \n if n_end <= start_count:\n\n # start and end within period\n if n_start <= (start_count + 1):\n return int(start_count / period_bin)\n token = 1\n\n else:\n \n # the \"end of period\" is within start and end (e.g.: 90 < 180 < 280)\n if n_start <= start_count:\n \n # set a code for processing later\n return 99\n token = 1\n\n # start and end are both outside of the period\n else:\n \n # try the next period\n n += 1\n start_count *= n\n \n \ndf_meds['days_bin'] = df_meds.apply(lambda x: generate_bin(x['start_day'], x['end_day']), axis=1)",
"_____no_output_____"
],
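[
"# Assumed sanity check (not in the original notebook): spot-check generate_bin on a few spans.\nfor s, e in [(0, 100), (181, 250), (90, 280), (400, 900)]:\n    print(s, e, generate_bin(s, e))  # 99 flags a record that straddles a period boundary",
"_____no_output_____"
],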
[
"# Fix the in-between\nMID = df_meds['days_bin'] == 99\n\n# Replicate the error part to be fixed and concat with the main one\ndf_temp = df_meds[MID]\n\n\n# Bin months based on end_day\ndf_temp['days_bin'] = (df_temp['end_day'] / period_bin).astype(int) + 1\n\n# Value to be used to replace start (+1) or end\nv = (np.floor(df_meds.loc[MID, 'end_day'] / period_bin) * period_bin).astype(int)\n\ndf_meds.loc[MID, 'end_day'] = v\n# Bin months based on end_day\ndf_meds['days_bin'] = (df_meds['end_day'] / period_bin).astype(int) + 1\ndf_temp['start_day'] = (v + 1).astype(int)\n\ndf_meds = pd.concat([df_meds, df_temp], axis=0)\n\n\n",
"_____no_output_____"
],
[
"df_meds['days_bin'].value_counts().sort_index()",
"_____no_output_____"
],
[
"df_meds['end_day'].max()",
"_____no_output_____"
],
[
"# Get the total dosage during the period\ndf_meds['total_day'] = df_meds['end_day'] - df_meds['start_day'] + 1\ndf_meds['total_dosage'] = df_meds['total_day'] * df_meds['daily_dosage']",
"_____no_output_____"
],
[
"# Bin the data by days_bin\ndf_med_binned = df_meds.groupby(['id', 'days_bin', 'drug'])['total_dosage'].sum().reset_index()",
"_____no_output_____"
],
[
"df_med_binned.head()",
"_____no_output_____"
],
[
"# Convert df to wide format, with each column = dosage of one med\n# If drug not taken, assumed it's 0\ndf_med_wide = df_med_binned.pivot(index=['id', 'days_bin'],columns='drug',values='total_dosage').reset_index().fillna(0)\ndf_med_wide.head()",
"_____no_output_____"
]
],
[
[
"# Merge the raw measurements",
"_____no_output_____"
]
],
[
[
"# Check how many is between day 699 and day 720\ndf_hgb[(df_hgb['time']> 699) & (df_hgb['time'] <= 720)].shape[0]",
"_____no_output_____"
],
[
"# Sort columns to id, time, value first\n# First values are blood pressure, and systolic comes before diastolic\ndf_sbp = df_sbp[['id', 'time', 'sbp']]\n\ndf_merged = df_sbp.merge(df_dbp, on = ['id','time'], how='outer')\ndf_merged = df_merged.merge(df_creatinine, on = ['id','time'], how='outer')\ndf_merged = df_merged.merge(df_glucose, on = ['id','time'], how='outer')\ndf_merged = df_merged.merge(df_ldl, on = ['id','time'], how='outer')\ndf_merged = df_merged.merge(df_hgb, on = ['id','time'], how='outer')\ndf_merged = df_merged.sort_values(['id','time'])\ndf_merged.head()",
"_____no_output_____"
],
[
"# bin time \ndf_merged['days_bin'] = (df_merged['time'] / period_bin).astype(int) + 1\ndf_merged = df_merged.drop('time', axis=1)\ndf_merged['days_bin'].value_counts().sort_index()",
"_____no_output_____"
],
[
"# Aggregate data by months_bin and get mean\ndf_merged = df_merged.groupby(['id', 'days_bin']).median().reset_index()\ndf_merged.head()",
"_____no_output_____"
],
[
"# Merge with med\ndf_merged = df_merged.merge(df_med_wide, on = ['id','days_bin'], how='outer')\ndf_merged.head()",
"_____no_output_____"
],
[
"# Save output for modelling\ndf_merged.to_csv('CSV/df_daybin.csv', index=False)",
"_____no_output_____"
],
[
"# Only first 4 bins (720 days)\ndf_merged_4 = df_merged[df_merged['days_bin'] <= 4]\n\n# Change NA to 0 for drugs\ndf_merged_4.iloc[:, 8:29] = df_merged_4.iloc[:, 8:29].fillna(0)\n\n\n# Use KNNImputer to fill continuous missing values\nimputer = KNNImputer(n_neighbors=3)\n\nfor day in range(1,5):\n DID = df_merged_4['days_bin'] == day\n df_day = df_merged_4[DID]\n\n # Remove id from imputation\n df_day.iloc[:,2:8] = pd.DataFrame(imputer.fit_transform(df_day.iloc[:,2:8]), index = df_day.index, columns = df_day.columns[2:8]) \n df_merged_4[DID] = df_day\n\n# Merge with demographic \ndf_demo = pd.read_csv('CSV/T_demo.csv')\n\n# Change the unknown in df_demo race to the mode (White)\ndf_demo.loc[df_demo['race'] == 'Unknown','race'] = 'White'\n\ndf_merged_4 = df_merged_4.merge(df_demo, on='id')\n\n# Merge with output\ndf_stage = pd.read_csv('CSV/T_stage.csv')\n\n# Change state to 0, 1\ndf_stage['Stage_Progress'] = np.where(df_stage['Stage_Progress'] == True, 1, 0)\n\ndf_merged_4 = df_merged_4.merge(df_stage, on='id')\n\n# Save output for modelling\ndf_merged_4.to_csv('CSV/df_daybin_4.csv', index=False)",
"_____no_output_____"
],
[
"df_merged_4.head()",
"_____no_output_____"
]
],
[
[
"# Aggregated data",
"_____no_output_____"
]
],
[
[
"df_agg = df_merged_4.copy()\n# Take out demographic and outcome\ndf_agg.drop( ['race', 'gender', 'age', 'Stage_Progress'], axis=1, inplace=True)\n\ndf_agg_mean = df_agg.groupby('id').mean().reset_index()\ndf_agg_mean.head()",
"_____no_output_____"
],
[
"# Mean sbp, dbp, creatinine, glucose, ldl, hgb\ndf_agg_mean = df_agg.groupby('id').mean().reset_index()\ndf_agg_mean = df_agg_mean.iloc[:, np.r_[0, 2:8]]\ndf_agg_mean.head()\ndf_agg_mean.shape\n\n# Sum drugs\ndf_agg_sum = df_agg.groupby('id').sum().reset_index()\ndf_agg_sum = df_agg_sum.iloc[:, 8:]\ndf_agg_sum.head()\ndf_agg_sum.shape\n\ndf_agg_fixed = pd.concat([df_agg_mean, df_agg_sum], axis=1)\ndf_agg_fixed.shape\n\n# Put back demo\ndf_agg_fixed = df_agg_fixed.merge(df_demo, on = 'id')\n# Put back outcome\ndf_agg_fixed = df_agg_fixed.merge(df_stage, on = 'id')\n\ndf_agg_fixed.head()\ndf_agg_fixed.shape\ndf_agg_fixed.to_csv('CSV/df_agg.csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Temporal data\n* Only use first 2 years of data (most measurements stop at day 699)",
"_____no_output_____"
]
],
[
[
"df_temporal = df_merged_4.copy()\ndf_temporal.head()",
"_____no_output_____"
],
[
"# Take out demographic and outcome\ndf_temporal.drop( ['race', 'gender', 'age', 'Stage_Progress'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"# Convert to wide format\ndf_temporal = df_temporal.set_index(['id','days_bin']).unstack()\ndf_temporal.columns = df_temporal.columns.map(lambda x: '{}_{}'.format(x[0], x[1]))",
"_____no_output_____"
],
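[
"# Assumed mini-demo (not in the original notebook): how unstack plus the column mapping names columns.\ntoy = pd.DataFrame({'id': [1, 1], 'days_bin': [1, 2], 'sbp': [120.0, 118.0]}).set_index(['id', 'days_bin']).unstack()\nprint(['{}_{}'.format(a, b) for a, b in toy.columns])  # ['sbp_1', 'sbp_2']",
"_____no_output_____"
],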
[
"# Some subjects don't have data in a time_bin, KNNImpute again\ndf_temporal = pd.DataFrame(imputer.fit_transform(df_temporal), index = df_temporal.index, columns = df_temporal.columns) ",
"_____no_output_____"
],
[
"df_temporal = df_temporal.reset_index()\n# Put back demo\ndf_temporal = df_temporal.merge(df_demo, on = 'id')\n# Put back outcome\ndf_temporal = df_temporal.merge(df_stage, on = 'id')",
"_____no_output_____"
],
[
"df_temporal.head()",
"_____no_output_____"
],
[
"# Save output for modelling\ndf_temporal.to_csv('CSV/df_temporal.csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Categorize measurements\n* Set continuous readings to 1=low, 2=normal, 3=high\n* Categorize medicine by tertile split total dosage to categorize severity (1=low, 2=normal, 3=high)\n* Categorize medicine by the treatment target, sum binary code ",
"_____no_output_____"
]
],
[
[
"# Remove 0, get 75th percentile as threshold for high dosage\n# Set normal as 1, high as 2\ndef categorize_drug(df):\n \n NID = df > 0\n if sum(NID) > 0:\n threshold = np.percentile(df[NID], 75)\n df[NID] = np.where(df[NID] > threshold, 2, 1)\n\n return df",
"_____no_output_____"
]
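,
[
"# Assumed quick check (not in the original notebook): categorize_drug on a toy dosage series.\nprint(categorize_drug(pd.Series([0, 10, 20, 30, 100])))  # 0 stays 0; values above the 75th percentile of the non-zeros become 2",
"_____no_output_____"
]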
],
[
[
"## Day_bin",
"_____no_output_____"
]
],
[
[
"df_merged_4_cat = df_merged_4.copy()\ndf_merged_4_cat.head()",
"_____no_output_____"
],
[
"names = ['1', '2', '3']\n\nbins = [0, 90, 120, np.inf]\ndf_merged_4_cat['sbp'] = pd.cut(df_merged_4['sbp'], bins, labels=names)\n\nbins = [0, 60, 80, np.inf]\ndf_merged_4_cat['dbp'] = pd.cut(df_merged_4['dbp'], bins, labels=names)\n\nbins = [0, 3.9, 7.8, np.inf]\ndf_merged_4_cat['glucose'] = pd.cut(df_merged_4['glucose'], bins, labels=names)\n\nbins = [0, 100, 129, np.inf]\ndf_merged_4_cat['ldl'] = pd.cut(df_merged_4['ldl'], bins, labels=names)\n\nMID = df_merged_4['gender'] == 'Male'\n\nbins = [0, 0.74, 1.35, np.inf]\ndf_merged_4_cat.loc[MID, 'creatinine'] = pd.cut(df_merged_4.loc[MID, 'creatinine'], bins, labels=names)\nbins = [0, 0.59, 1.04, np.inf]\ndf_merged_4_cat.loc[~MID, 'creatinine'] = pd.cut(df_merged_4.loc[~MID, 'creatinine'], bins, labels=names)\n\nbins = [0, 14, 17.5, np.inf]\ndf_merged_4_cat.loc[MID, 'hgb'] = pd.cut(df_merged_4.loc[MID, 'hgb'], bins, labels=names)\nbins = [0, 12.3, 15.3, np.inf]\ndf_merged_4_cat.loc[~MID, 'hgb'] = pd.cut(df_merged_4.loc[~MID, 'hgb'], bins, labels=names)\n\ndf_merged_4_cat.head()",
"_____no_output_____"
],
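[
"# Assumed mini-demo (not in the original notebook): how pd.cut assigns the 1/2/3 labels.\nprint(pd.cut(pd.Series([50, 95, 130]), [0, 90, 120, np.inf], labels=['1', '2', '3']).tolist())  # ['1', '2', '3']",
"_____no_output_____"
],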
[
"# Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2\n# Need to compute separately for different days_bin\n\nfor day in range(1, 5):\n DID = df_merged_4_cat['days_bin'] == day\n df_day = df_merged_4_cat[DID]\n df_merged_4_cat = df_merged_4_cat[~DID] \n df_day.iloc[:, 8:29] = df_day.iloc[:, 8:29].apply(lambda x: categorize_drug(x)).astype(int) \n df_merged_4_cat = pd.concat([df_merged_4_cat, df_day])\n \n",
"_____no_output_____"
],
[
"# Label encode race and gender\nle = LabelEncoder()\ndf_merged_4_cat['race'] = le.fit_transform(df_merged_4_cat['race'])\ndf_merged_4_cat['gender'] = le.fit_transform(df_merged_4_cat['gender'])",
"_____no_output_____"
],
[
"# Group age to young-old (≤74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (≥85 y.o.) as 3\ndf_merged_4_cat['age'] = pd.qcut(df_merged_4['age'], 3, labels=[1,2,3])\ndf_merged_4_cat['age'].value_counts()",
"_____no_output_____"
],
[
"df_merged_4_cat.to_csv('CSV/df_merged_4_cat.csv', index=False)",
"_____no_output_____"
],
[
"# Group drug by treatment (sum the binary code)\ndf_merged_4_cat_drug = df_merged_4_cat.copy()\n\nglucose_col = ['canagliflozin', 'dapagliflozin', 'metformin']\ndf_merged_4_cat_drug['glucose_treatment'] = df_merged_4_cat_drug[glucose_col].sum(axis=1).astype(int)\ndf_merged_4_cat_drug.drop(glucose_col, axis=1, inplace=True)\n\nbp_col = ['atenolol','bisoprolol','carvedilol','irbesartan','labetalol','losartan','metoprolol','nebivolol','olmesartan','propranolol','telmisartan','valsartan']\ndf_merged_4_cat_drug['bp_treatment'] = df_merged_4_cat_drug[bp_col].sum(axis=1).astype(int)\ndf_merged_4_cat_drug.drop(bp_col, axis=1, inplace=True)\n\ncholesterol_col = ['atorvastatin','lovastatin','pitavastatin','pravastatin','rosuvastatin','simvastatin']\ndf_merged_4_cat_drug['cholesterol_treatment'] = df_merged_4_cat_drug[cholesterol_col].sum(axis=1).astype(int)\ndf_merged_4_cat_drug.drop(cholesterol_col, axis=1, inplace=True)\n\ndf_merged_4_cat_drug.head()\ndf_merged_4_cat_drug.to_csv('CSV/df_merged_4_cat_drug.csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Aggregated",
"_____no_output_____"
]
],
[
[
"df_agg_cat = df_agg_fixed",
"_____no_output_____"
],
[
"names = ['1', '2', '3']\n\nbins = [0, 90, 120, np.inf]\ndf_agg_cat['sbp'] = pd.cut(df_agg_fixed['sbp'], bins, labels=names)\n\nbins = [0, 60, 80, np.inf]\ndf_agg_cat['dbp'] = pd.cut(df_agg_fixed['dbp'], bins, labels=names)\n\nbins = [0, 3.9, 7.8, np.inf]\ndf_agg_cat['glucose'] = pd.cut(df_agg_fixed['glucose'], bins, labels=names)\n\nbins = [0, 100, 129, np.inf]\ndf_agg_cat['ldl'] = pd.cut(df_agg_fixed['ldl'], bins, labels=names)\n\nMID = df_agg_fixed['gender'] == 'Male'\n\nbins = [0, 0.74, 1.35, np.inf]\ndf_agg_cat.loc[MID, 'creatinine'] = pd.cut(df_agg_fixed.loc[MID, 'creatinine'], bins, labels=names)\nbins = [0, 0.59, 1.04, np.inf]\ndf_agg_cat.loc[~MID, 'creatinine'] = pd.cut(df_agg_fixed.loc[~MID, 'creatinine'], bins, labels=names)\n\nbins = [0, 14, 17.5, np.inf]\ndf_agg_cat.loc[MID, 'hgb'] = pd.cut(df_agg_fixed.loc[MID, 'hgb'], bins, labels=names)\nbins = [0, 12.3, 15.3, np.inf]\ndf_agg_cat.loc[~MID, 'hgb'] = pd.cut(df_agg_fixed.loc[~MID, 'hgb'], bins, labels=names)\n\ndf_agg_cat.head()",
"_____no_output_____"
],
[
"# Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2\ndf_agg_cat.iloc[:,7:28] = df_agg_fixed.iloc[:,7:28].apply(lambda x: categorize_drug(x)).astype(int)",
"_____no_output_____"
],
[
"# Label encode race and gender\nle = LabelEncoder()\ndf_agg_cat['race'] = le.fit_transform(df_agg_cat['race'])\ndf_agg_cat['gender'] = le.fit_transform(df_agg_cat['gender'])",
"_____no_output_____"
],
[
"# Group age to young-old (≤74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (≥85 y.o.) as 3\ndf_agg_cat['age'] = pd.qcut(df_agg_cat['age'], 3, labels=[1,2,3])\ndf_agg_cat['age'].value_counts()",
"_____no_output_____"
],
[
"df_agg_cat.to_csv('CSV/df_agg_cat.csv', index=False)",
"_____no_output_____"
],
[
"# Group drug by treatment (sum the binary code)\ndf_agg_cat_drug = df_agg_cat.copy()\n\nglucose_col = ['canagliflozin', 'dapagliflozin', 'metformin']\ndf_agg_cat_drug['glucose_treatment'] = df_agg_cat_drug[glucose_col].sum(axis=1).astype(int)\ndf_agg_cat_drug.drop(glucose_col, axis=1, inplace=True)\n\nbp_col = ['atenolol','bisoprolol','carvedilol','irbesartan','labetalol','losartan','metoprolol','nebivolol','olmesartan','propranolol','telmisartan','valsartan']\ndf_agg_cat_drug['bp_treatment'] = df_agg_cat_drug[bp_col].sum(axis=1).astype(int)\ndf_agg_cat_drug.drop(bp_col, axis=1, inplace=True)\n\ncholesterol_col = ['atorvastatin','lovastatin','pitavastatin','pravastatin','rosuvastatin','simvastatin']\ndf_agg_cat_drug['cholesterol_treatment'] = df_agg_cat_drug[cholesterol_col].sum(axis=1).astype(int)\ndf_agg_cat_drug.drop(cholesterol_col, axis=1, inplace=True)\n\ndf_agg_cat_drug.head()\ndf_agg_cat_drug.to_csv('CSV/df_agg_cat_drug.csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Temporal",
"_____no_output_____"
]
],
[
[
"df_temporal_cat = df_temporal.copy()",
"_____no_output_____"
],
[
"names = ['1', '2', '3']\n\nbins = [0, 90, 120, np.inf]\nfor colname in ['sbp_1', 'sbp_2', 'sbp_3', 'sbp_4']:\n df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)\n\nbins = [0, 60, 80, np.inf]\nfor colname in ['dbp_1', 'dbp_2', 'dbp_3', 'dbp_4']:\n df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)\n\nbins = [0, 3.9, 7.8, np.inf]\nfor colname in ['glucose_1', 'glucose_2', 'glucose_3', 'glucose_4']:\n df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)\n\nbins = [0, 100, 129, np.inf]\nfor colname in ['ldl_1', 'ldl_2', 'ldl_3', 'ldl_4']:\n df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names)\n\nMID = df_temporal_cat['gender'] == 'Male'\n\nbins = [0, 0.74, 1.35, np.inf]\nfor colname in ['creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4']:\n df_temporal_cat.loc[MID, colname] = pd.cut(df_temporal_cat.loc[MID, colname], bins, labels=names)\n \n\nbins = [0, 0.59, 1.04, np.inf]\nfor colname in ['creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4']:\n df_temporal_cat.loc[~MID, colname] = pd.cut(df_temporal_cat.loc[~MID, colname], bins, labels=names)\n\nbins = [0, 14, 17.5, np.inf]\nfor colname in ['hgb_1', 'hgb_2', 'hgb_3', 'hgb_4']:\n df_temporal_cat.loc[MID, colname] = pd.cut(df_temporal_cat.loc[MID, colname], bins, labels=names)\n \nbins = [0, 12.3, 15.3, np.inf]\nfor colname in ['hgb_1', 'hgb_2', 'hgb_3', 'hgb_4']:\n df_temporal_cat.loc[~MID, colname] = pd.cut(df_temporal_cat.loc[~MID, colname], bins, labels=names)\n\ndf_temporal_cat.head()",
"_____no_output_____"
],
[
"# Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2\ndf_temporal_cat.iloc[:,25:109] = df_temporal_cat.iloc[:,25:109].apply(lambda x: categorize_drug(x)).astype(int)",
"_____no_output_____"
],
[
"# Label encode race and gender\nle = LabelEncoder()\ndf_temporal_cat['race'] = le.fit_transform(df_temporal_cat['race'])\ndf_temporal_cat['gender'] = le.fit_transform(df_temporal_cat['gender'])",
"_____no_output_____"
],
[
"# Group age to young-old (≤74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (≥85 y.o.) as 3\ndf_temporal_cat['age'] = pd.qcut(df_temporal_cat['age'], 3, labels=[1,2,3])\ndf_temporal_cat['age'].value_counts()",
"_____no_output_____"
],
[
"df_temporal_cat.to_csv('CSV/df_temporal_cat.csv', index=False)",
"_____no_output_____"
],
[
"# Group drug by treatment (sum the binary code)\ndf_temporal_cat_drug = df_temporal_cat.copy()\n\nfor i in range(1,5):\n glucose_col = ['canagliflozin_' + str(i), 'dapagliflozin_' + str(i), 'metformin_' + str(i)]\n df_temporal_cat_drug['glucose_treatment_'+ str(i)] = df_temporal_cat_drug[glucose_col].sum(axis=1).astype(int)\n df_temporal_cat_drug.drop(glucose_col, axis=1, inplace=True)\n\n bp_col = ['atenolol_' + str(i),'bisoprolol_' + str(i),'carvedilol_' + str(i),'irbesartan_' + str(i),'labetalol_' + str(i),'losartan_' + str(i),'metoprolol_' + str(i),'nebivolol_' + str(i),'olmesartan_' + str(i),'propranolol_' + str(i),'telmisartan_' + str(i),'valsartan_' + str(i)]\n df_temporal_cat_drug['bp_treatment_'+ str(i)] = df_temporal_cat_drug[bp_col].sum(axis=1).astype(int)\n df_temporal_cat_drug.drop(bp_col, axis=1, inplace=True)\n\n cholesterol_col = ['atorvastatin_' + str(i),'lovastatin_' + str(i),'pitavastatin_' + str(i),'pravastatin_' + str(i),'rosuvastatin_' + str(i),'simvastatin_' + str(i)]\n df_temporal_cat_drug['cholesterol_treatment_'+ str(i)] = df_temporal_cat_drug[cholesterol_col].sum(axis=1).astype(int)\n df_temporal_cat_drug.drop(cholesterol_col, axis=1, inplace=True)\n\ndf_temporal_cat_drug.head()\ndf_temporal_cat_drug.to_csv('CSV/df_temporal_cat_drug.csv', index=False)",
"_____no_output_____"
]
],
[
[
"# Compute GFR\n* CKD-EPI equations",
"_____no_output_____"
]
],
[
[
"def computeGFR(df):\n gender = df['gender'] \n f_constant = 1 \n if gender == 'Male':\n \n k = 0.9\n a = -0.411\n \n else:\n \n k = 0.7\n a = -0.329\n f_constant = 1.018 \n \n \n race = df['race']\n b_constant = 1\n if race == 'Black':\n \n b_constant = 1.159\n \n gfr = 141 * min(df['creatinine'] / k, 1) * (max(df['creatinine'] / k, 1)**(-1.209)) * (0.993**df['age']) * f_constant * b_constant\n \n return gfr",
"_____no_output_____"
]
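,
[
"# Assumed worked example (not in the original notebook): eGFR for a hypothetical subject.\nexample = pd.Series({'gender': 'Female', 'race': 'Black', 'age': 60, 'creatinine': 1.1})\nprint(round(computeGFR(example), 1))  # eGFR in mL/min/1.73 m^2",
"_____no_output_____"
]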
],
[
[
"## 180-day bin",
"_____no_output_____"
]
],
[
[
"col_gfr = ['id', 'days_bin', 'creatinine', 'race', 'gender', 'age', 'Stage_Progress']\ndf_merged_4_gfr = df_merged_4[col_gfr].copy()\ndf_merged_4_gfr['gfr'] = df_merged_4_gfr.apply(lambda x: computeGFR(x), axis=1)\ndf_merged_4_gfr.drop(['creatinine', 'race', 'gender', 'age'], axis=1, inplace=True)\n# Categorize GFR\ndf_merged_4_gfr['gfr_cat'] = np.where(df_merged_4_gfr['gfr'] < 60, 1, 2)\ndf_merged_4_gfr['gfr_cat'].value_counts()\ndf_merged_4_gfr.to_csv('CSV/df_merged_4_gfr.csv', index=False)",
"_____no_output_____"
],
[
"df_merged_4.head()",
"_____no_output_____"
],
[
"df_merged_4_gfr.head()",
"_____no_output_____"
]
],
[
[
"## Aggregated",
"_____no_output_____"
]
],
[
[
"col_gfr = ['id', 'creatinine', 'race', 'gender', 'age', 'Stage_Progress']\ndf_agg_gfr = df_agg_fixed[col_gfr].copy()\ndf_agg_gfr['gfr'] = df_agg_gfr.apply(lambda x: computeGFR(x), axis=1)\ndf_agg_gfr.drop(['creatinine', 'race', 'gender', 'age'], axis=1, inplace=True)\n# Categorize GFR\ndf_agg_gfr['gfr_cat'] = np.where(df_agg_gfr['gfr'] < 60, 1, 2)\ndf_agg_gfr['gfr_cat'].value_counts()\ndf_agg_gfr.to_csv('CSV/df_agg_gfr.csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Temporal",
"_____no_output_____"
]
],
[
[
"def computeGFR_temporal(df, i):\n gender = df['gender'] \n f_constant = 1 \n if gender == 'Male':\n \n k = 0.9\n a = -0.411\n \n else:\n \n k = 0.7\n a = -0.329\n f_constant = 1.018 \n \n \n race = df['race']\n b_constant = 1\n if race == 'Black':\n \n b_constant = 1.159\n \n gfr = 141 * min(df['creatinine_' + str(i)] / k, 1) * (max(df['creatinine_' + str(i)] / k, 1)**(-1.209)) * (0.993**df['age']) * f_constant * b_constant\n \n return gfr",
"_____no_output_____"
],
[
"col_gfr = ['id', 'creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4', 'race', 'gender', 'age', 'Stage_Progress']\ndf_temporal_gfr = df_temporal[col_gfr].copy()\nfor i in range(1, 5):\n df_temporal_gfr['gfr_' + str(i)] = df_temporal_gfr.apply(lambda x: computeGFR_temporal(x, i), axis=1)\n df_temporal_gfr.drop('creatinine_' + str(i), axis=1, inplace=True)\n\ndf_temporal_gfr.drop(['race', 'gender', 'age'], axis=1, inplace=True)\n# Categorize GFR\nfor i in range(1, 5):\n df_temporal_gfr['gfr_cat_' + str(i)] = np.where(df_temporal_gfr['gfr_' + str(i)] < 60, 1, 2)\n \ndf_temporal_gfr.to_csv('CSV/df_temporal_gfr.csv', index=False)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a111e484db78872632e0ba492b2019f227695cc
| 32,010 |
ipynb
|
Jupyter Notebook
|
sagemaker-fundamentals/create-training-job/create_training_job_vpc.ipynb
|
Amirosimani/amazon-sagemaker-examples
|
bc35e7a9da9e2258e77f98098254c2a8e308041a
|
[
"Apache-2.0"
] | 2,610 |
2020-10-01T14:14:53.000Z
|
2022-03-31T18:02:31.000Z
|
sagemaker-fundamentals/create-training-job/create_training_job_vpc.ipynb
|
Amirosimani/amazon-sagemaker-examples
|
bc35e7a9da9e2258e77f98098254c2a8e308041a
|
[
"Apache-2.0"
] | 1,959 |
2020-09-30T20:22:42.000Z
|
2022-03-31T23:58:37.000Z
|
sagemaker-fundamentals/create-training-job/create_training_job_vpc.ipynb
|
Amirosimani/amazon-sagemaker-examples
|
bc35e7a9da9e2258e77f98098254c2a8e308041a
|
[
"Apache-2.0"
] | 2,052 |
2020-09-30T22:11:46.000Z
|
2022-03-31T23:02:51.000Z
| 39.276074 | 806 | 0.594752 |
[
[
[
"# Training Job in Internet-free Mode\n\nIf you want to isolate your training data and training container from the rest of the Internet, then you should create the training job in a private subnet. A private subnet is a subnet in your VPC without a route to an Internet Gateway. This means, by default, no inbound calls to your container from the Internet is possible and your container cannot make outbound calls to the Internet. If you need the training container to access your S3 resource, you need to **explicitly** add a VPC endpoint and attach it to the route table of your private subnet to allow traffic to your S3 bucket. \n\nIn this notebook, you will walk through an example of creating such a training job. you will\n\n- Build a simple training image\n- Set up a VPC\n- Set up a private subnet in the VPC\n- Set up a security group in the VPC\n- Create a training job in your private subnet && security group and watch it to fail (because it cannot access your S3 resource)\n- Add a VPC endpoint to allow traffic to S3\n- Create another training job in your private subnet and watch it to succeeed \n\nIf you are not familiar with VPC security configuration, the following materials can help you\n- [Security in Amazon Virtual Private Cloud](https://docs.aws.amazon.com/vpc/latest/userguide/security.html)\n- [Training and Inference Containers in Internet-Free Mode](https://docs.aws.amazon.com/sagemaker/latest/dg/mkt-algo-model-internet-free.html)\n\nIt's okay if you don't understand everything from the official docs above. The code samples you will see in this notebook will help you grasp those concepts. ",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport boto3\nimport pprint\nimport datetime\nimport time\n\npp = pprint.PrettyPrinter(indent=1)",
"_____no_output_____"
]
],
[
[
"## Permissions\n\nIf you are running this notebook on an EC2 instance with an IAM user (you) as the default profile, then you will need policies to allow you to create VPC / Subnet / Secruity group / VPC endpoint. Likewise, if you are running this notebook on a SageMaker notebook instance or Studio, the service role needs to have those permission as well. \n",
"_____no_output_____"
],
[
"## Build a training image\n\nYou will follow the same procedure for building a training image as in [this notebook](https://github.com/hsl89/amazon-sagemaker-examples/blob/sagemaker-fundamentals/sagemaker-fundamentals/create-training-job/create_training_job.ipynb). We will refer to this image as `example-image`. Please go through that notebook if you are not familiar with `CreateTrainingJob` API.",
"_____no_output_____"
]
],
[
[
"# create a repo in your ECR\n\necr = boto3.client(\"ecr\")\n\ntry:\n # The repository might already exist\n # in your ECR\n cr_res = ecr.create_repository(repositoryName=\"example-image\")\n pp.pprint(cr_res)\nexcept Exception as e:\n print(e)",
"_____no_output_____"
],
[
"%%sh\n# build the image\ncd container/\n\n# tag it as example-image:latest\ndocker build -t example-image:latest .\n \n# test the container\npython local_test/test_container.py\n\naccount=$(aws sts get-caller-identity --query Account | sed -e 's/^\"//' -e 's/\"$//')\nregion=$(aws configure get region)\necr_account=${account}.dkr.ecr.${region}.amazonaws.com\n\n# Give docker your ECR login password\naws ecr get-login-password --region $region | docker login --username AWS --password-stdin $ecr_account\n\n# Fullname of the repo\nfullname=$ecr_account/example-image:latest\n\n# Tag the image with the fullname\ndocker tag example-image:latest $fullname\n\n# Push to ECR\ndocker push $fullname",
"_____no_output_____"
]
],
[
[
"## Create a VPC\n\nYou can think of Amazon VPC as the traditional network in a data center in the cloud. \n\nThe following are the key concepts for VPCs: \n* Virtual private cloud (VPC) — A virtual network dedicated to your AWS account.\n* Subnet — A range of IP addresses in your VPC.\n* Route table — A set of rules, called routes, that are used to determine where network traffic is directed.\n* Internet gateway — A gateway that you attach to your VPC to enable communication between resources in your VPC and the internet.\n* VPC endpoint — Enables you to privately connect your VPC to supported AWS services and VPC endpoint services powered by PrivateLink without requiring an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. Instances in your VPC do not require public IP addresses to communicate with resources in the service. Traffic between your VPC and the other service does not leave the Amazon network. For more information, see AWS PrivateLink and VPC endpoints.\n* CIDR block —Classless Inter-Domain Routing. An internet protocol address allocation and route aggregation methodology. For more information, see [Classless Inter-Domain Routing](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) in Wikipedia.\n\nAll of these concepts are explained in the [official docs](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). \n",
"_____no_output_____"
]
],
[
[
"# Create a VPC in your default region\n\nec2 = boto3.client(\"ec2\")\n\nvpc_res = ec2.create_vpc(\n CidrBlock=\"10.0.0.0/20\", # 2^(32 - 20) = 4906 private ipv4 addrs\n AmazonProvidedIpv6CidrBlock=False,\n DryRun=False,\n TagSpecifications=[\n {\n \"ResourceType\": \"vpc\",\n \"Tags\": [\n {\"Key\": \"Name\", \"Value\": \"hello-world\"},\n ],\n },\n ],\n)\n\npp.pprint(vpc_res)",
"_____no_output_____"
],
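[
"# Assumed helper (not in the original notebook): number of addresses in a CIDR block.\nprefix = 20\nprint(2 ** (32 - prefix))  # 4096 addresses for a /20 (and 16 for the /28 subnet created below)",
"_____no_output_____"
],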
[
"# inspect this VPC in details\n\nvpc_des = ec2.describe_vpcs(VpcIds=[vpc_res[\"Vpc\"][\"VpcId\"]])\npp.pprint(vpc_des[\"Vpcs\"])",
"_____no_output_____"
]
],
[
[
"## Create a subnet\nThe VPC you just created has the capacity to host 4906 compute instances. Think of the VPC as you just created as the entire data center for your organization. Of course, you did not spin up any instances yet, so you are not billed for 4906 instances (rest assured). Suppose you are running a real data center, part of your cluster might be pubic facing (for example, machines that host your frontend applications), part of your cluster might be insulated from the internet and is only accessible from other machines in your data center (for example, your backend or database servers). You can define the scope of your cluster (public / private) via **subnet**. Using subnet, you can define which part of your VPC (via its CIDR block) are public and which part are private. \n\nIf want to run a SageMaker training job in network isolation mode, then you will need to pass a private subnet id to the `CreateTrainingJob` API. SageMaker service will then start instances in the private subnet that run your training container. \n\nSo first off, let's create a private subnet. A subnet is defined within an availability zone, whereas a VPC is defined within a region. ",
"_____no_output_____"
]
],
[
[
"# create subnet and associate it with route table\n\n\ndef get_first_availability_zone():\n region_name = boto3.Session().region_name\n avz_res = ec2.describe_availability_zones(\n Filters=[{\"Name\": \"region-name\", \"Values\": [region_name]}],\n AllAvailabilityZones=True,\n )\n\n for az in avz_res[\"AvailabilityZones\"]:\n if az[\"ZoneType\"] == \"availability-zone\":\n return az\n else:\n return None\n\n\ndef create_subnet(vpc_id, cidr_block, dry_run):\n \"\"\"Create a subnet in the first availability zone in your current region\"\"\"\n az = get_first_availability_zone()\n if az is not None:\n subnet_res = ec2.create_subnet(\n AvailabilityZone=az[\"ZoneName\"], VpcId=vpc_id, CidrBlock=cidr_block, DryRun=dry_run\n )\n return subnet_res\n else:\n raise \"No availability zone\"\n\n\nsn_res = create_subnet(\n vpc_id=vpc_res[\"Vpc\"][\"VpcId\"],\n cidr_block=\"10.0.0.0/28\", # I want 2 ^ (32 - 28) private ipv4 in this subnet\n dry_run=False,\n)\n\npp.pprint(sn_res)",
"_____no_output_____"
]
],
[
[
"## Create a security group\nA [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) is another layer of security configuration for instances running in your VPC. It acts as a firewall for your instance that controls its inbound and outbound calls. You need a security group for a SageMaker training job, because in complicated training job that involves distributed training, you need a security group configuration that allows traffics between instances that runs the training job. For the purpose of this notebook, the default setting of a security group (deny all inbound traffic; allow all outbound traffic) is enough. For more complicated training job, you will need to configure the security group accordingly. This will be discussed in more advanced notebooks for `CreateTrainingJob`.",
"_____no_output_____"
]
],
[
[
"# create a security group\n\nsg_res = ec2.create_security_group(\n Description=\"security group for SageMaker instances\",\n GroupName=\"sagemaker-private\",\n VpcId=vpc_res[\"Vpc\"][\"VpcId\"],\n TagSpecifications=[\n {\n \"ResourceType\": \"security-group\",\n \"Tags\": [\n {\n \"Key\": \"Service\", # Tag the sec gp by service, this can be used to filter sec gps\n \"Value\": \"SageMaker\",\n }\n ],\n }\n ],\n)\n\npp.pprint(sg_res)",
"_____no_output_____"
],
[
"# inspect the security group in detail\n\nec2.describe_security_groups(GroupIds=[sg_res[\"GroupId\"]])",
"_____no_output_____"
]
],
[
[
"## Creat a training job\nNow let's create a training job within your private subnet you just created. First, let's download some helper functions for creating service role for SageMaker. ",
"_____no_output_____"
]
],
[
[
"%%bash\ncp ../execution-role/iam_helpers.py .",
"_____no_output_____"
],
[
"# set up service role for SageMaker\nfrom iam_helpers import create_execution_role\n\niam = boto3.client(\"iam\")\nsts = boto3.client(\"sts\")\ncaller = sts.get_caller_identity()\n\nif \":user/\" in caller[\"Arn\"]: # as IAM user\n # either paste in a role_arn with or create a new one and attach\n # AmazonSageMakerFullAccess\n role_name = \"example-sm\"\n role_arn = create_execution_role(role_name=role_name)[\"Role\"][\"Arn\"]\n iam.attach_role_policy(\n RoleName=role_name,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonSageMakerFullAccess\",\n )\nelif \"assumed-role\" in caller[\"Arn\"]: # on SageMaker infra\n role_arn = caller[\"Arn\"]\nelse:\n print(\"I assume you are on an EC2 instance launched with an IAM role\")\n role_arn = caller[\"Arn\"]",
"_____no_output_____"
],
[
"# some helpers\ndef current_time():\n ct = datetime.datetime.now()\n return str(ct.now()).replace(\":\", \"-\").replace(\" \", \"-\")[:19]\n\n\ndef account_id():\n return boto3.client(\"sts\").get_caller_identity()[\"Account\"]",
"_____no_output_____"
]
],
[
[
"To make this notebook self-contained, you will create a bucket and upload some data there to pass to training container as you did in the [basic create training job notebook](https://github.com/hsl89/amazon-sagemaker-examples/blob/sagemaker-fundamentals/sagemaker-fundamentals/create-training-job/create_training_job.ipynb). But you don't have to do so, if you already have a bucket that SageMaker service can access (i.e. a bucket with bucket name containing `sagemaker`, see `AmazonSageMakerFullAccessPolicy`), then you can use that bucket as well. ",
"_____no_output_____"
]
],
[
[
"# create a bucket for SageMaker in your region\n\n\ndef create_bucket():\n \"\"\"Create an S3 bucket that is intended to be used for short term\"\"\"\n bucket = f\"sagemaker-{current_time()}\"\n\n region_name = boto3.Session().region_name\n create_bucket_config = {}\n if region_name != \"us-east-1\":\n # us-east-1 is the default region for S3 bucket\n # specify LocationConstraint if your VPC is not\n # in us-east-1\n create_bucket_config[\"LocationConstraint\"] = region_name\n\n boto3.client(\"s3\").create_bucket(Bucket=bucket, CreateBucketConfiguration=create_bucket_config)\n return bucket\n\n\n# replace it with your own SageMaker-accessible bucket\n# if you don't want to create a new one\n\nbucket = create_bucket()",
"_____no_output_____"
],
[
"# upload some mock data to your bucket\nimport os\n\ns3 = boto3.client(\"s3\")\ninput_prefix = \"input_data\"\n\nfor fname in os.listdir(\"data\"):\n with open(os.path.join(\"data\", fname), \"rb\") as f:\n key = input_prefix + fname\n s3.upload_fileobj(f, bucket, key)",
"_____no_output_____"
]
],
[
[
"Now, you will configure the training job. ",
"_____no_output_____"
]
],
[
[
"sm_cli = boto3.client(\"sagemaker\")\n\n\n# name training job\ntraining_job_name = \"example-training-job-{}\".format(current_time())\n\ndata_path = \"s3://\" + bucket + \"/\" + input_prefix\n\n# location that SageMaker saves the model artifacts\noutput_prefix = \"output/\"\noutput_path = \"s3://\" + bucket + \"/\" + output_prefix\n\n# ECR URI of your image\nregion = boto3.Session().region_name\naccount = account_id()\nimage_uri = \"{}.dkr.ecr.{}.amazonaws.com/example-image:latest\".format(account, region)\n\nalgorithm_specification = {\n \"TrainingImage\": image_uri,\n \"TrainingInputMode\": \"File\",\n}\n\n\ninput_data_config = [\n {\n \"ChannelName\": \"train\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": data_path,\n \"S3DataDistributionType\": \"FullyReplicated\",\n }\n },\n },\n {\n \"ChannelName\": \"test\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": data_path,\n \"S3DataDistributionType\": \"FullyReplicated\",\n }\n },\n },\n]\n\n\nvpc_config = {\n # security groups need to be configured to communicate\n # with each other for distributed training job\n \"SecurityGroupIds\": [sg_res[\"GroupId\"]],\n \"Subnets\": [sn_res[\"Subnet\"][\"SubnetId\"]],\n}\n\noutput_data_config = {\"S3OutputPath\": output_path}\n\nresource_config = {\"InstanceType\": \"ml.m5.large\", \"InstanceCount\": 1, \"VolumeSizeInGB\": 5}\n\nstopping_condition = {\n \"MaxRuntimeInSeconds\": 120,\n}\n\nenable_network_isolation = True",
"_____no_output_____"
],
[
"ct_res = sm_cli.create_training_job(\n TrainingJobName=training_job_name,\n AlgorithmSpecification=algorithm_specification,\n RoleArn=role_arn,\n InputDataConfig=input_data_config,\n OutputDataConfig=output_data_config,\n VpcConfig=vpc_config,\n ResourceConfig=resource_config,\n StoppingCondition=stopping_condition,\n EnableNetworkIsolation=enable_network_isolation,\n EnableManagedSpotTraining=False,\n)",
"_____no_output_____"
]
],
[
[
"The training job is expected to fail, because the subnet you created is isolated from the Internet and you have not created any mechanism for it to access your the data in your S3 bucket. ",
"_____no_output_____"
]
],
[
[
"# see the training job to fail\nstopped = False\nwhile not stopped:\n tj_state = sm_cli.describe_training_job(TrainingJobName=training_job_name)\n\n if tj_state[\"TrainingJobStatus\"] in [\"Completed\", \"Stopped\", \"Failed\"]:\n stopped = True\n else:\n print(\"Training in progress\")\n time.sleep(30)\n\nif tj_state[\"TrainingJobStatus\"] == \"Failed\":\n print(\"Training job failed \")\n print(\"Failed Reason: {}\".format(tj_state[\"FailureReason\"]))\nelse:\n print(\"Training job completed\")",
"_____no_output_____"
]
],
[
[
"## Add a VPC endpoint\n\nA VPC endpoint enables you to privately connect your VPC to supported AWS services and VPC endpoint services powered by PrivateLink without requiring an internet gateway, NAT device, VPN connection, or AWS Direct Connect connection. Instances in your VPC do not require public IP addresses to communicate with resources in the service. **Traffic between your VPC and the other service does not leave the Amazon network**. For more information, see [AWS PrivateLink and VPC endpoints](https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-overview.html). \n\nThere are three types of VPC endpoints as of March 2021.\n\nA **Gateway** endpoint serves as a target for a route in your route table for traffic destined for the AWS service. You can specify an endpoint policy to attach to the endpoint, which will control access to the service from your VPC. You can also specify the VPC route tables that use the endpoint.\n\nAn **Interface** endpoint is a network interface in your subnet that serves as an endpoint for communicating with the specified service. You can specify the subnets in which to create an endpoint, and the security groups to associate with the endpoint network interface.\n\nA **GatewayLoadBalancer** endpoint is a network interface in your subnet that serves an endpoint for communicating with a Gateway Load Balancer that you've configured as a VPC endpoint service.\n\n---\nOnly Gateway endpoint is a viable option for SageMaker service. So you will add a Gateway endpoint here. A Gateway endpoint needs to be added to a route table, so you will need to create a route table and associated it with your subnet first.",
"_____no_output_____"
]
],
[
[
"# Create a route table\nrt_res = ec2.create_route_table(\n VpcId=vpc_res[\"Vpc\"][\"VpcId\"],\n TagSpecifications=[\n {\"ResourceType\": \"route-table\", \"Tags\": [{\"Key\": \"Service\", \"Value\": \"SageMaker\"}]}\n ],\n)\n\npp.pprint(rt_res)",
"_____no_output_____"
],
[
"# Associate the route table with the subnet\n\nass_rt_res = ec2.associate_route_table(\n RouteTableId=rt_res[\"RouteTable\"][\"RouteTableId\"], SubnetId=sn_res[\"Subnet\"][\"SubnetId\"]\n)\n\npp.pprint(ass_rt_res)",
"_____no_output_____"
]
],
[
[
"Next, let's check service name of S3 bucket. ",
"_____no_output_____"
]
],
[
[
"# Check out service name for S3\nservices = ec2.describe_vpc_endpoint_services()\nfor s in services[\"ServiceNames\"]:\n if \"s3\" in s:\n print(s)",
"_____no_output_____"
],
[
"# Create a gateway endpoint\n\nregion_name = boto3.Session().region_name\n\niep_res = ec2.create_vpc_endpoint(\n VpcEndpointType=\"Gateway\",\n VpcId=vpc_res[\"Vpc\"][\"VpcId\"],\n ServiceName=f\"com.amazonaws.{region_name}.s3\", # return of previous cell\n RouteTableIds=[rt_res[\"RouteTable\"][\"RouteTableId\"]],\n # you don't need to add a tag, it is only\n # used as a convenient way to filter through your\n # endpoints in the future\n TagSpecifications=[\n {\"ResourceType\": \"vpc-endpoint\", \"Tags\": [{\"Key\": \"Service\", \"Value\": \"SageMaker\"}]}\n ],\n)\n\npp.pprint(iep_res)",
"_____no_output_____"
]
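,
[
"# Assumed verification step (not in the original notebook): confirm the endpoint exists in the VPC.\nver = ec2.describe_vpc_endpoints(Filters=[{'Name': 'vpc-id', 'Values': [vpc_res['Vpc']['VpcId']]}])\npp.pprint([(ep['VpcEndpointId'], ep['State']) for ep in ver['VpcEndpoints']])",
"_____no_output_____"
]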
],
[
[
"Now you have added a Gateway endpoint to the route table of the subnet. This endpoint allows the subnet to talk to your S3 bucket **privately**. The traffic between the subnet and your S3 bucket does not leave AWS network. Let's create another training job to verify that the training container can access the data in your S3 bucket. ",
"_____no_output_____"
]
],
[
[
"training_job_name = \"example-training-job-{}\".format(current_time())\n\nct_res = sm_cli.create_training_job(\n TrainingJobName=training_job_name,\n AlgorithmSpecification=algorithm_specification,\n RoleArn=role_arn,\n InputDataConfig=input_data_config,\n OutputDataConfig=output_data_config,\n VpcConfig=vpc_config,\n ResourceConfig=resource_config,\n StoppingCondition=stopping_condition,\n EnableNetworkIsolation=enable_network_isolation,\n EnableManagedSpotTraining=False,\n)",
"_____no_output_____"
],
[
"# watch to to succeed\n\nstopped = False\nwhile not stopped:\n tj_state = sm_cli.describe_training_job(TrainingJobName=training_job_name)\n\n if tj_state[\"TrainingJobStatus\"] in [\"Completed\", \"Stopped\", \"Failed\"]:\n stopped = True\n else:\n print(\"Training in progress\")\n time.sleep(30)\n\nif tj_state[\"TrainingJobStatus\"] == \"Failed\":\n print(\"Training job failed \")\n print(\"Failed Reason: {}\".format(tj_state[\"FailureReason\"]))\nelse:\n print(\"Training job completed\")",
"_____no_output_____"
]
],
[
[
"## Review\n\nLet's review what you did in this notebook: you have created \n- a VPC\n- a subnet inside the VPC\n- a security group inside the VPC\n\nThe VPC is isolated from the Internet, because you did not add an Internet Gateway to it. \nYou created a training job in the subnet. The traffic in and out the SageMaker Instance running your training container is controlled by the security group permissions. You verified that this training job failed, because SageMaker cannot download data from your S3 bucket. \n\nNext, you added \n- a route table to your subnet\n- an S3 Gateway Endpoint to the route table\n\nThen you verified that once you added the S3 Gateway Endpoint to your VPC, the same training job can go through. ",
"_____no_output_____"
],
[
"## Practical considerations\nIf you are an ML practioner, then most likely you will not need to touch VPC, because the network admin in your organization should have configured the VPC, subnet, security group, route table and VPC endpoints for you. The reason we discussed VPC configuration in this notebook is to get you familiar with the basic concepts of network engineering, so that when something goes wrong, you can message your network admin with more precise questions or requests. \n\nOne common situation is that your org owns a VPC has has both public and private subnet. You are configuring a SageMaker training job on an EC2 / Notebook Instance / Studio in the public subnet and you want the training job to be executed in the private subnet. In that case, all you need to to is to pass the subnet id and security group id to the `CreateTrainingJob` API and set the `EnableNetworkIsolation` flag to `True`. ",
"_____no_output_____"
],
[
"## Clean up \nNow, let's tear down all resources you created in this notebook. ",
"_____no_output_____"
]
],
[
[
"# delete the entire VPC and its associated resources\n# adapted from https://gist.github.com/alberto-morales/b6d7719763f483185db27289d51f8ec5\n\n\ndef vpc_cleanup(vpcid):\n \"\"\"Remove VPC from AWS\n Set your region/access-key/secret-key from env variables or boto config.\n :param vpcid: id of vpc to delete\n \"\"\"\n if not vpcid:\n return\n print(\"Removing VPC ({}) from AWS\".format(vpcid))\n ec2 = boto3.resource(\"ec2\")\n ec2client = ec2.meta.client\n vpc = ec2.Vpc(vpcid)\n # detach default dhcp_options if associated with the vpc\n dhcp_options_default = ec2.DhcpOptions(\"default\")\n if dhcp_options_default:\n dhcp_options_default.associate_with_vpc(VpcId=vpc.id)\n # detach and delete all gateways associated with the vpc\n for gw in vpc.internet_gateways.all():\n vpc.detach_internet_gateway(InternetGatewayId=gw.id)\n gw.delete()\n\n # delete any instances\n for subnet in vpc.subnets.all():\n for instance in subnet.instances.all():\n instance.terminate()\n\n # delte all subnets\n for subnet in vpc.subnets.all():\n for interface in subnet.network_interfaces.all():\n interface.delete()\n subnet.delete()\n\n # delete all route table associations\n for rt in vpc.route_tables.all():\n for rta in rt.associations:\n if not rta.main:\n rta.delete()\n\n try:\n rt.delete()\n except Exception as e:\n pass\n\n # delete our endpoints\n for ep in ec2client.describe_vpc_endpoints(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpcid]}])[\n \"VpcEndpoints\"\n ]:\n ec2client.delete_vpc_endpoints(VpcEndpointIds=[ep[\"VpcEndpointId\"]])\n # delete our security groups\n for sg in vpc.security_groups.all():\n if sg.group_name != \"default\":\n sg.delete()\n # delete any vpc peering connections\n for vpcpeer in ec2client.describe_vpc_peering_connections(\n Filters=[{\"Name\": \"requester-vpc-info.vpc-id\", \"Values\": [vpcid]}]\n )[\"VpcPeeringConnections\"]:\n ec2.VpcPeeringConnection(vpcpeer[\"VpcPeeringConnectionId\"]).delete()\n # delete non-default network acls\n for netacl in vpc.network_acls.all():\n if not netacl.is_default:\n netacl.delete()\n\n # finally, delete the vpc\n ec2client.delete_vpc(VpcId=vpcid)\n return\n\n\nvpc_cleanup(vpc_res[\"Vpc\"][\"VpcId\"])",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
4a1127619176793faf1b644da2b932250075193f
| 32,680 |
ipynb
|
Jupyter Notebook
|
marlowe-cli/lectures/05-marlowe-cli-pab.ipynb
|
input-output-hk/marlowe-cardano
|
452632c2bd2c7368e676ef8f176ba2c916afa0f3
|
[
"Apache-2.0"
] | 26 |
2021-11-01T20:55:57.000Z
|
2022-03-31T14:54:08.000Z
|
marlowe-cli/lectures/05-marlowe-cli-pab.ipynb
|
input-output-hk/marlowe-cardano
|
452632c2bd2c7368e676ef8f176ba2c916afa0f3
|
[
"Apache-2.0"
] | 33 |
2021-11-04T23:43:24.000Z
|
2022-03-24T22:54:10.000Z
|
marlowe-cli/lectures/05-marlowe-cli-pab.ipynb
|
input-output-hk/marlowe-cardano
|
452632c2bd2c7368e676ef8f176ba2c916afa0f3
|
[
"Apache-2.0"
] | 9 |
2021-11-01T20:56:09.000Z
|
2022-02-28T03:04:10.000Z
| 36.926554 | 734 | 0.58724 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a1131db876a76820c7b1059b34a6e5e387e2824
| 11,041 |
ipynb
|
Jupyter Notebook
|
notebooks/user/jtrauer/coxs_bazar_check_inputs.ipynb
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
notebooks/user/jtrauer/coxs_bazar_check_inputs.ipynb
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
notebooks/user/jtrauer/coxs_bazar_check_inputs.ipynb
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | 1 |
2019-10-22T04:47:34.000Z
|
2019-10-22T04:47:34.000Z
| 35.16242 | 136 | 0.602935 |
[
[
[
"import matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.dates as mdates\nfrom datetime import date\nfrom datetime import datetime\nimport numpy as np\n\nfrom summer.utils import ref_times_to_dti\n\nfrom autumn.tools.inputs.demography.queries import get_population_by_agegroup\nfrom autumn.models.covid_19.detection import create_cdr_function\nfrom autumn.tools.utils.utils import apply_moving_average\nfrom autumn.tools.curve.scale_up import scale_up_function\nfrom autumn.tools.project import get_project\nfrom autumn.settings import Region, Models\nfrom autumn.models.covid_19.constants import AGEGROUP_STRATA, BASE_DATETIME\nfrom autumn.models.covid_19.mixing_matrix.macrodistancing import weight_mobility_data\nfrom autumn.tools.plots.utils import REF_DATE\nfrom autumn.tools import inputs\nfrom autumn.tools.inputs.database import get_input_db\nfrom autumn.tools.utils.display import pretty_print\nfrom autumn.tools.inputs.social_mixing.build_synthetic_matrices import build_synthetic_matrices\nfrom autumn.models.covid_19.detection import get_testing_numbers_for_region",
"_____no_output_____"
],
[
"age_integers = [int(group) for group in AGEGROUP_STRATA]\nmodel = Models.SM_SIR\nregion = Region.COXS_BAZAR",
"_____no_output_____"
]
],
[
[
"## Population",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1, 1, figsize=(10, 6))\nproject = get_project(model, region)\ntotal_pops = inputs.get_population_by_agegroup(\n AGEGROUP_STRATA, \n project.param_set.baseline[\"country\"][\"iso3\"],\n \"FDMN\", \n year=project.param_set.baseline[\"population\"][\"year\"]\n)\nprint(f\"total modelled population of {region} is: {round(sum(total_pops) / 1e3, 3)} thousand\")\nax.bar(age_integers, total_pops, width=4)\nax.set_title(region)\nax.set_ylabel(\"population\")\nax.set_xlabel(\"starting age of age bracket\")\nfig.suptitle(\"population distribution by age\")",
"_____no_output_____"
]
],
[
[
"## Mobility\n### Mobility is only available at the national level, not for Cox's Bazar or FDMNs",
"_____no_output_____"
]
],
[
[
"print(\"Whether the mobility effects are actually turned on at all:\")\nproject.param_set.baseline[\"is_dynamic_mixing_matrix\"]",
"_____no_output_____"
],
[
"y_upper = 2.\n\n# Collate data together\ninput_db = get_input_db()\nmob_df = input_db.query(\"mobility\", conditions={\"iso3\": \"BGD\"})\ntimes = [datetime.strptime(i, \"%Y-%m-%d\") for i in mob_df[\"date\"]]\ngoogle_mob_df = weight_mobility_data(mob_df, project.param_set.baseline[\"mobility\"][\"google_mobility_locations\"])\n\n# Get plots ready\nmob_fig, mob_axes = plt.subplots(1, 2, figsize=(12, 6))\nplot_left_date = date(2020, 1, 1)\nplot_right_date = times[-1] # Not sure why this is necessary\n\n# Plot raw mobility data\nax = mob_axes[0]\nfor mobility_domain in [\"grocery_and_pharmacy\", \"residential\", \"parks\", \"retail_and_recreation\", \"transit_stations\"]:\n ax.plot(times, mob_df[mobility_domain], label=mobility_domain)\nax.set_ylim((0., y_upper))\nax.tick_params(axis=\"x\", labelrotation=45)\nax.set_title(\"raw Google mobility domains\")\nax.legend(loc=\"lower right\")\nax.set_xlim(left=plot_left_date, right=plot_right_date)\n\n# Plot processed mobility data\nax = mob_axes[1]\nfor location in list(project.param_set.baseline[\"mobility\"][\"google_mobility_locations\"].keys()):\n ax.plot(times, google_mob_df[location], label=location)\nax.tick_params(axis=\"x\", labelrotation=45)\nax.set_ylim((0., y_upper))\nax.legend(loc=\"lower left\")\nax.set_title(\"mobility as implemented in the model\")\nmob_fig.tight_layout(w_pad=1.5, h_pad=3.5)\nax.set_xlim(left=plot_left_date, right=plot_right_date)\n",
"_____no_output_____"
]
],
[
[
"## Mixing matrix\n### Check how mixing matrix is specified for each region",
"_____no_output_____"
]
],
[
[
"print(f\"Modelled country: {project.param_set.baseline['country']['iso3']}\")\nprint(f\"Modelled sub-region: {project.param_set.baseline['population']['region']}\")\nprint(f\"Proxy country: {project.param_set.baseline['ref_mixing_iso3']}\")\nprint(\"Always age-adjusted under SM-SIR code\")",
"_____no_output_____"
]
],
[
[
"### Display the matrix and the matrix components",
"_____no_output_____"
]
],
[
[
"agegroup_types = {\n \"base age groups\": AGEGROUP_STRATA,\n \"modelled age groups\": project.param_set.baseline[\"age_groups\"],\n}\n\nfor title, agegroups in agegroup_types.items():\n\n mixing_matrix = build_synthetic_matrices(\n project.param_set.baseline[\"country\"][\"iso3\"],\n project.param_set.baseline[\"ref_mixing_iso3\"],\n agegroups,\n True,\n project.param_set.baseline[\"population\"][\"region\"]\n )\n\n fig = plt.figure(figsize=(12, 8))\n positions = [1, 2, 3, 5, 6]\n for i_loc, location in zip(positions, mixing_matrix.keys()):\n ax = fig.add_subplot(2, 3, i_loc)\n ax.imshow(\n np.flipud(np.transpose(mixing_matrix[location])), \n cmap=cm.hot, \n vmin=0,\n vmax=mixing_matrix[location].max(), \n origin=\"lower\"\n )\n ax.set_title(location.replace(\"_\", \" \"))\n ax.set_xticks([])\n ax.set_yticks([])\n fig.suptitle(title)",
"_____no_output_____"
]
],
[
[
"## Case detection",
"_____no_output_____"
]
],
[
[
"# Get the CDR function of tests\ncdr_from_tests_func = create_cdr_function(\n project.param_set.baseline[\"testing_to_detection\"][\"assumed_tests_parameter\"],\n project.param_set.baseline[\"testing_to_detection\"][\"assumed_cdr_parameter\"],\n)\n\n# Get the denominator population\ntesting_pops = get_population_by_agegroup(\n project.param_set.baseline[\"age_groups\"],\n project.param_set.baseline[\"country\"][\"iso3\"],\n project.param_set.baseline[\"population\"][\"region\"]\n)\n\n# Process the data\ntest_times, test_values = get_testing_numbers_for_region(\"BGD\", \"FDMN\")\ntest_dates = ref_times_to_dti(BASE_DATETIME, [int(time) for time in test_times])\nper_capita_tests = [i_tests / sum(testing_pops) for i_tests in test_values]\ndummy_tests = np.linspace(0, max(per_capita_tests), 200)\nif project.param_set.baseline[\"testing_to_detection\"][\"assumed_tests_parameter\"]:\n smoothed_per_capita_tests = apply_moving_average(\n per_capita_tests, \n project.param_set.baseline[\"testing_to_detection\"][\"smoothing_period\"]\n )\nelse:\n smoothed_per_capita_tests = per_capita_tests\ncdr_function_of_time = scale_up_function(\n test_times,\n [cdr_from_tests_func(test_rate) for test_rate in smoothed_per_capita_tests],\n smoothness=0.2, method=4, bound_low=0.,\n) \n\n# Plot\nfig, axes = plt.subplots(2, 2, figsize=(12, 8))\nfig.tight_layout(w_pad=1.5, h_pad=5)\n\ndef sort_axis_dates(ax):\n axis.tick_params(axis=\"x\", labelrotation=45)\n axis.set_xlim(left=plot_left_date, right=plot_right_date)\n\n# Plot daily number of tests\naxis = axes[0, 0]\naxis.plot(test_dates, test_values, marker=\"o\")\naxis.set_title(\"daily testing numbers\")\nsort_axis_dates(axis)\n\n# Plot daily number of tests\naxis = axes[0, 1]\naxis.plot(test_dates, per_capita_tests, label=\"raw\")\naxis.plot(test_dates, smoothed_per_capita_tests, label=\"smoothed\")\naxis.set_title(\"daily per capita testing rate\")\nsort_axis_dates(axis)\naxis.legend()\n\n# Plot relationship of daily tests to CDR proportion\naxis = axes[1, 0]\naxis.plot(dummy_tests, cdr_from_tests_func(dummy_tests))\naxis.scatter(per_capita_tests, [cdr_from_tests_func(i_tests) for i_tests in per_capita_tests], color=\"r\")\naxis.set_ylabel(\"case detection proportion\")\naxis.set_xlabel(\"per capita testing rate\")\naxis.set_title(\"daily per capita tests to CDR relationship\")\naxis.set_ylim(top=1.)\n\n# Plot CDR values\naxis = axes[1, 1]\naxis.scatter(test_dates, [cdr_from_tests_func(i_test_rate) for i_test_rate in smoothed_per_capita_tests], color=\"r\")\naxis.plot(test_dates, [cdr_function_of_time(time) for time in test_times])\naxis.set_title(\"Final case detection rate\")\naxis.set_ylabel(\"proportion\")\nsort_axis_dates(axis)\n\nfig.tight_layout()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a113ad29c1cf241176e0ccd00e573c9a33c40e8
| 100,245 |
ipynb
|
Jupyter Notebook
|
notebooks/.ipynb_checkpoints/run_DA_best_CAE-checkpoint.ipynb
|
scheng1992/Data_Assimilation
|
b4d43895229205ee2cd16b15ee20beccb33b71d6
|
[
"MIT"
] | 1 |
2021-11-25T12:46:48.000Z
|
2021-11-25T12:46:48.000Z
|
notebooks/.ipynb_checkpoints/run_DA_best_CAE-checkpoint.ipynb
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | null | null | null |
notebooks/.ipynb_checkpoints/run_DA_best_CAE-checkpoint.ipynb
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | 2 |
2021-03-02T13:29:34.000Z
|
2022-03-12T11:01:08.000Z
| 285.598291 | 83,732 | 0.890927 |
[
[
[
"import os\nimport sys\nimport utils\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n\n#import pipeline\nparent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\nsys.path.append(parent_dir) #to import pipeline\nimport pipeline\n\n\n",
"_____no_output_____"
]
],
[
[
"## Get best model",
"_____no_output_____"
]
],
[
[
"###CHANGE THIS FILE TO THE SUBDIRECTORY OF INTEREST:\nexp_dir_base = \"/data/home/jfm1118/DA/experiments\"\n##############\n\nresults = utils.extract_res_from_files(exp_dir_base)\ndf_res = utils.create_res_df(results, True)\n\n\n#get best model(s)\nnmodels = 6\n\nbest = df_res.loc[df_res[\"valid_loss\"].nsmallest(nmodels).index]\npaths = list(best.path.values)\n\n#plot traing:\nres_best = utils.extract_res_from_files(paths)\nutils.plot_results_loss_epochs(res_best)\n\nbest",
"70 experiments conducted\n6 experiments conducted\n(2, 3)\n"
],
[
"## best model is top middle (?) (final model). Index = 1\nidx = 1 ##UPDATE THIS\nfp_base = res_best[idx][\"path\"]\nprint(os.listdir(fp_base))\nmodel_fp = fp_base + \"/24.pth\" ##UPDATE THIS\nsettings = res_best[idx][\"settings\"]\n",
"['24.pth', 'test.csv', '20.pth', 'train.csv', '15.pth', '0.pth', '10.pth', 'settings.txt', '5.pth']\n"
]
],
[
[
"### Perform DA",
"_____no_output_____"
]
],
[
[
"from pipeline import DAPipeline\n\nsettings.OBS_FRAC = 0.01 #1 % of observations\nsettings.OBS_VARIANCE = 0.01\nsettings.AE_MODEL_FP = model_fp\nda = DAPipeline(settings)\nw_opt = da.Var_DA_routine(settings)\n",
"(2475, 247520)\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a114ce0c71ca9d8e81bb8508f87f261bbbebee3
| 140,355 |
ipynb
|
Jupyter Notebook
|
1. ANN/1.1.2 MLP and MNIST.ipynb
|
zhoubaohang/deep-learning-keras-tensorflow
|
0855de36f868e8177819ecdc7f8471c3309be369
|
[
"MIT"
] | null | null | null |
1. ANN/1.1.2 MLP and MNIST.ipynb
|
zhoubaohang/deep-learning-keras-tensorflow
|
0855de36f868e8177819ecdc7f8471c3309be369
|
[
"MIT"
] | null | null | null |
1. ANN/1.1.2 MLP and MNIST.ipynb
|
zhoubaohang/deep-learning-keras-tensorflow
|
0855de36f868e8177819ecdc7f8471c3309be369
|
[
"MIT"
] | null | null | null | 98.563904 | 35,726 | 0.784945 |
[
[
[
"##### (exceprt from Python Machine Learning Essentials, Supplementary Materials)",
"_____no_output_____"
],
[
"## Sections",
"_____no_output_____"
],
[
"- [Classifying handwritten digits](#Classifying-handwritten-digits)\n - [Obtaining the MNIST dataset](#Obtaining-the-MNIST-dataset)\n - [Implementing a multi-layer perceptron](#Implementing-a-multi-layer-perceptron)\n- [Training an artificial neural network](#Training-an-artificial-neural-network)\n- [Debugging neural networks with gradient checking](#Debugging-neural-networks-with-gradient-checking)",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"# Classifying handwritten digits",
"_____no_output_____"
],
[
"## Obtaining the MNIST dataset",
"_____no_output_____"
],
[
"[[back to top](#Sections)]",
"_____no_output_____"
],
[
"The MNIST dataset is publicly available at http://yann.lecun.com/exdb/mnist/ and consists of the following four parts:\n\n- Training set images: train-images-idx3-ubyte.gz (9.9 MB, 47 MB unzipped, 60,000 samples)\n- Training set labels: train-labels-idx1-ubyte.gz (29 KB, 60 KB unzipped, 60,000 labels)\n- Test set images: t10k-images-idx3-ubyte.gz (1.6 MB, 7.8 MB, 10,000 samples)\n- Test set labels: t10k-labels-idx1-ubyte.gz (5 KB, 10 KB unzipped, 10,000 labels)\n\nIn this section, we will only be working with a subset of MNIST, thus, we only need to download the training set images and training set labels. After downloading the files, I recommend unzipping the files using the Unix/Linux gzip tool from the terminal for efficiency, e.g., using the command \n\n gzip *ubyte.gz -d\n \nin your local MNIST download directory, or, using your favorite unzipping tool if you are working with a machine running on Microsoft Windows. The images are stored in byte form, and using the following function, we will read them into NumPy arrays that we will use to train our MLP.\n",
"_____no_output_____"
],
[
"### Get MNIST Dataset\n\n**Note**: The following commands will work on Linux/Unix (e.g. Mac OSX) Platforms",
"_____no_output_____"
]
],
[
[
"!mkdir -p ../data/mnist",
"_____no_output_____"
],
[
"!curl http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz --output ../data/mnist/train-images-idx3-ubyte.gz",
"_____no_output_____"
],
[
"!curl http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz --output ../data/mnist/train-labels-idx1-ubyte.gz",
"_____no_output_____"
],
[
"!curl http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz --output ../data/mnist/t10k-images-idx3-ubyte.gz",
"_____no_output_____"
],
[
"!curl http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz --output ../data/mnist/t10k-labels-idx1-ubyte.gz",
"_____no_output_____"
]
],
[
[
"### Load MNIST Data",
"_____no_output_____"
]
],
[
[
"import os\nimport struct\nimport numpy as np\n \ndef load_mnist(path, kind='train'):\n \"\"\"Load MNIST data from `path`\"\"\"\n labels_path = os.path.join(path, \n '%s-labels-idx1-ubyte' \n % kind)\n images_path = os.path.join(path, \n '%s-images-idx3-ubyte' \n % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', \n lbpath.read(8))\n labels = np.fromfile(lbpath, \n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\", \n imgpath.read(16))\n images = np.fromfile(imgpath, \n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels",
"_____no_output_____"
],
[
"X_train, y_train = load_mnist('data/mnist', kind='train')\nprint('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))",
"Rows: 60000, columns: 784\n"
],
[
"X_test, y_test = load_mnist('data/mnist', kind='t10k')\nprint('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))",
"Rows: 10000, columns: 784\n"
]
],
[
[
"Visualize the first digit of each class:",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n\nfig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)\nax = ax.flatten()\nfor i in range(10):\n img = X_train[y_train == i][0].reshape(28, 28)\n ax[i].imshow(img, cmap='Greys', interpolation='nearest')\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n# plt.savefig('./figures/mnist_all.png', dpi=300)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Visualize 25 different versions of \"7\":",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)\nax = ax.flatten()\nfor i in range(25):\n img = X_train[y_train == 7][i].reshape(28, 28)\n ax[i].imshow(img, cmap='Greys', interpolation='nearest')\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n# plt.savefig('./figures/mnist_7.png', dpi=300)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Uncomment the following lines to optionally save the data in CSV format. \nHowever, note that those CSV files will take up a substantial amount of storage space:\n\n- train_img.csv 1.1 GB (gigabytes)\n- train_labels.csv 1.4 MB (megabytes)\n- test_img.csv 187.0 MB\n- test_labels 144 KB (kilobytes)\n",
"_____no_output_____"
]
],
[
[
"#np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')\n#np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')\nX_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')\ny_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')\n\n#np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')\n#np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')\nX_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')\ny_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')\n",
"_____no_output_____"
]
],
[
[
"<br>\n<br>",
"_____no_output_____"
],
[
"## Implementing a multi-layer perceptron",
"_____no_output_____"
],
[
"[[back to top](#Sections)]",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom scipy.special import expit\nimport sys\n\n\nclass NeuralNetMLP(object):\n \"\"\" Feedforward neural network / Multi-layer perceptron classifier.\n\n Parameters\n ------------\n n_output : int\n Number of output units, should be equal to the\n number of unique class labels.\n\n n_features : int\n Number of features (dimensions) in the target dataset.\n Should be equal to the number of columns in the X array.\n\n n_hidden : int (default: 30)\n Number of hidden units.\n\n l1 : float (default: 0.0)\n Lambda value for L1-regularization.\n No regularization if l1=0.0 (default)\n\n l2 : float (default: 0.0)\n Lambda value for L2-regularization.\n No regularization if l2=0.0 (default)\n\n epochs : int (default: 500)\n Number of passes over the training set.\n\n eta : float (default: 0.001)\n Learning rate.\n\n alpha : float (default: 0.0)\n Momentum constant. Factor multiplied with the\n gradient of the previous epoch t-1 to improve\n learning speed\n w(t) := w(t) - (grad(t) + alpha*grad(t-1))\n \n decrease_const : float (default: 0.0)\n Decrease constant. Shrinks the learning rate\n after each epoch via eta / (1 + epoch*decrease_const)\n\n shuffle : bool (default: False)\n Shuffles training data every epoch if True to prevent circles.\n\n minibatches : int (default: 1)\n Divides training data into k minibatches for efficiency.\n Normal gradient descent learning if k=1 (default).\n\n random_state : int (default: None)\n Set random state for shuffling and initializing the weights.\n\n Attributes\n -----------\n cost_ : list\n Sum of squared errors after each epoch.\n\n \"\"\"\n def __init__(self, n_output, n_features, n_hidden=30,\n l1=0.0, l2=0.0, epochs=500, eta=0.001, \n alpha=0.0, decrease_const=0.0, shuffle=True, \n minibatches=1, random_state=None):\n\n np.random.seed(random_state)\n self.n_output = n_output\n self.n_features = n_features\n self.n_hidden = n_hidden\n self.w1, self.w2 = self._initialize_weights()\n self.l1 = l1\n self.l2 = l2\n self.epochs = epochs\n self.eta = eta\n self.alpha = alpha\n self.decrease_const = decrease_const\n self.shuffle = shuffle\n self.minibatches = minibatches\n\n def _encode_labels(self, y, k):\n \"\"\"Encode labels into one-hot representation\n\n Parameters\n ------------\n y : array, shape = [n_samples]\n Target values.\n\n Returns\n -----------\n onehot : array, shape = (n_labels, n_samples)\n\n \"\"\"\n onehot = np.zeros((k, y.shape[0]))\n for idx, val in enumerate(y):\n onehot[val, idx] = 1.0\n return onehot\n\n def _initialize_weights(self):\n \"\"\"Initialize weights with small random numbers.\"\"\"\n w1 = np.random.uniform(-1.0, 1.0, size=self.n_hidden*(self.n_features + 1))\n w1 = w1.reshape(self.n_hidden, self.n_features + 1)\n w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden + 1))\n w2 = w2.reshape(self.n_output, self.n_hidden + 1)\n return w1, w2\n\n def _sigmoid(self, z):\n \"\"\"Compute logistic function (sigmoid)\n\n Uses scipy.special.expit to avoid overflow\n error for very small input values z.\n\n \"\"\"\n # return 1.0 / (1.0 + np.exp(-z))\n return expit(z)\n\n def _sigmoid_gradient(self, z):\n \"\"\"Compute gradient of the logistic function\"\"\"\n sg = self._sigmoid(z)\n return sg * (1 - sg)\n\n def _add_bias_unit(self, X, how='column'):\n \"\"\"Add bias unit (column or row of 1s) to array at index 0\"\"\"\n if how == 'column':\n X_new = np.ones((X.shape[0], X.shape[1]+1))\n X_new[:, 1:] = X\n elif how == 'row':\n X_new = np.ones((X.shape[0]+1, X.shape[1]))\n X_new[1:, :] = X\n else:\n 
raise AttributeError('`how` must be `column` or `row`')\n return X_new\n\n def _feedforward(self, X, w1, w2):\n \"\"\"Compute feedforward step\n\n Parameters\n -----------\n X : array, shape = [n_samples, n_features]\n Input layer with original features.\n\n w1 : array, shape = [n_hidden_units, n_features]\n Weight matrix for input layer -> hidden layer.\n\n w2 : array, shape = [n_output_units, n_hidden_units]\n Weight matrix for hidden layer -> output layer.\n\n Returns\n ----------\n a1 : array, shape = [n_samples, n_features+1]\n Input values with bias unit.\n\n z2 : array, shape = [n_hidden, n_samples]\n Net input of hidden layer.\n\n a2 : array, shape = [n_hidden+1, n_samples]\n Activation of hidden layer.\n\n z3 : array, shape = [n_output_units, n_samples]\n Net input of output layer.\n\n a3 : array, shape = [n_output_units, n_samples]\n Activation of output layer.\n\n \"\"\"\n a1 = self._add_bias_unit(X, how='column')\n z2 = w1.dot(a1.T)\n a2 = self._sigmoid(z2)\n a2 = self._add_bias_unit(a2, how='row')\n z3 = w2.dot(a2)\n a3 = self._sigmoid(z3)\n return a1, z2, a2, z3, a3\n\n def _L2_reg(self, lambda_, w1, w2):\n \"\"\"Compute L2-regularization cost\"\"\"\n return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))\n\n def _L1_reg(self, lambda_, w1, w2):\n \"\"\"Compute L1-regularization cost\"\"\"\n return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() + np.abs(w2[:, 1:]).sum())\n\n def _get_cost(self, y_enc, output, w1, w2):\n \"\"\"Compute cost function.\n\n y_enc : array, shape = (n_labels, n_samples)\n one-hot encoded class labels.\n\n output : array, shape = [n_output_units, n_samples]\n Activation of the output layer (feedforward)\n\n w1 : array, shape = [n_hidden_units, n_features]\n Weight matrix for input layer -> hidden layer.\n\n w2 : array, shape = [n_output_units, n_hidden_units]\n Weight matrix for hidden layer -> output layer.\n\n Returns\n ---------\n cost : float\n Regularized cost.\n\n \"\"\"\n term1 = -y_enc * (np.log(output))\n term2 = (1 - y_enc) * np.log(1 - output)\n cost = np.sum(term1 - term2)\n L1_term = self._L1_reg(self.l1, w1, w2)\n L2_term = self._L2_reg(self.l2, w1, w2)\n cost = cost + L1_term + L2_term\n return cost\n\n def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):\n \"\"\" Compute gradient step using backpropagation.\n\n Parameters\n ------------\n a1 : array, shape = [n_samples, n_features+1]\n Input values with bias unit.\n\n a2 : array, shape = [n_hidden+1, n_samples]\n Activation of hidden layer.\n\n a3 : array, shape = [n_output_units, n_samples]\n Activation of output layer.\n\n z2 : array, shape = [n_hidden, n_samples]\n Net input of hidden layer.\n\n y_enc : array, shape = (n_labels, n_samples)\n one-hot encoded class labels.\n\n w1 : array, shape = [n_hidden_units, n_features]\n Weight matrix for input layer -> hidden layer.\n\n w2 : array, shape = [n_output_units, n_hidden_units]\n Weight matrix for hidden layer -> output layer.\n\n Returns\n ---------\n\n grad1 : array, shape = [n_hidden_units, n_features]\n Gradient of the weight matrix w1.\n\n grad2 : array, shape = [n_output_units, n_hidden_units]\n Gradient of the weight matrix w2.\n\n \"\"\"\n # backpropagation\n sigma3 = a3 - y_enc\n z2 = self._add_bias_unit(z2, how='row')\n sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)\n sigma2 = sigma2[1:, :]\n grad1 = sigma2.dot(a1)\n grad2 = sigma3.dot(a2.T)\n\n # regularize\n grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))\n grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))\n\n return grad1, grad2\n\n def 
predict(self, X):\n \"\"\"Predict class labels\n\n Parameters\n -----------\n X : array, shape = [n_samples, n_features]\n Input layer with original features.\n\n Returns:\n ----------\n y_pred : array, shape = [n_samples]\n Predicted class labels.\n\n \"\"\"\n if len(X.shape) != 2:\n raise AttributeError('X must be a [n_samples, n_features] array.\\n'\n 'Use X[:,None] for 1-feature classification,'\n '\\nor X[[i]] for 1-sample classification')\n\n a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)\n y_pred = np.argmax(z3, axis=0)\n return y_pred\n\n def fit(self, X, y, print_progress=False):\n \"\"\" Learn weights from training data.\n\n Parameters\n -----------\n X : array, shape = [n_samples, n_features]\n Input layer with original features.\n\n y : array, shape = [n_samples]\n Target class labels.\n\n print_progress : bool (default: False)\n Prints progress as the number of epochs\n to stderr.\n\n Returns:\n ----------\n self\n\n \"\"\"\n self.cost_ = []\n X_data, y_data = X.copy(), y.copy()\n y_enc = self._encode_labels(y, self.n_output)\n\n delta_w1_prev = np.zeros(self.w1.shape)\n delta_w2_prev = np.zeros(self.w2.shape)\n\n for i in range(self.epochs):\n \n # adaptive learning rate\n self.eta /= (1 + self.decrease_const*i)\n\n if print_progress:\n sys.stderr.write('\\rEpoch: %d/%d' % (i+1, self.epochs))\n sys.stderr.flush()\n\n if self.shuffle:\n idx = np.random.permutation(y_data.shape[0])\n X_data, y_data = X_data[idx], y_data[idx]\n\n mini = np.array_split(range(y_data.shape[0]), self.minibatches)\n for idx in mini:\n\n # feedforward\n a1, z2, a2, z3, a3 = self._feedforward(X[idx], self.w1, self.w2)\n cost = self._get_cost(y_enc=y_enc[:, idx],\n output=a3,\n w1=self.w1,\n w2=self.w2)\n self.cost_.append(cost)\n\n # compute gradient via backpropagation\n grad1, grad2 = self._get_gradient(a1=a1, a2=a2,\n a3=a3, z2=z2,\n y_enc=y_enc[:, idx],\n w1=self.w1,\n w2=self.w2)\n\n delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2\n self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))\n self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))\n delta_w1_prev, delta_w2_prev = delta_w1, delta_w2\n\n return self",
"_____no_output_____"
]
],
[
[
"<br>\n<br>",
"_____no_output_____"
],
[
"## Training an artificial neural network",
"_____no_output_____"
],
[
"[[back to top](#Sections)]",
"_____no_output_____"
]
],
[
[
"nn = NeuralNetMLP(n_output=10, \n n_features=X_train.shape[1], \n n_hidden=50, \n l2=0.1, \n l1=0.0, \n epochs=1000, \n eta=0.001,\n alpha=0.001,\n decrease_const=0.00001,\n minibatches=50, \n random_state=1)",
"_____no_output_____"
],
[
"nn.fit(X_train, y_train, print_progress=True)",
"Epoch: 1000/1000"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.plot(range(len(nn.cost_)), nn.cost_)\nplt.ylim([0, 2000])\nplt.ylabel('Cost')\nplt.xlabel('Epochs * 50')\nplt.tight_layout()\n# plt.savefig('./figures/cost.png', dpi=300)\nplt.show()",
"_____no_output_____"
],
[
"batches = np.array_split(range(len(nn.cost_)), 1000)\ncost_ary = np.array(nn.cost_)\ncost_avgs = [np.mean(cost_ary[i]) for i in batches]",
"_____no_output_____"
],
[
"plt.plot(range(len(cost_avgs)), cost_avgs, color='red')\nplt.ylim([0, 2000])\nplt.ylabel('Cost')\nplt.xlabel('Epochs')\nplt.tight_layout()\nplt.savefig('./figures/cost2.png', dpi=300)\nplt.show()",
"_____no_output_____"
],
[
"y_train_pred = nn.predict(X_train)\nacc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]\nprint('Training accuracy: %.2f%%' % (acc * 100))",
"Training accuracy: 97.74%\n"
],
[
"y_test_pred = nn.predict(X_test)\nacc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]\nprint('Training accuracy: %.2f%%' % (acc * 100))",
"Training accuracy: 96.18%\n"
],
[
"miscl_img = X_test[y_test != y_test_pred][:25]\ncorrect_lab = y_test[y_test != y_test_pred][:25]\nmiscl_lab= y_test_pred[y_test != y_test_pred][:25]\n\nfig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)\nax = ax.flatten()\nfor i in range(25):\n img = miscl_img[i].reshape(28, 28)\n ax[i].imshow(img, cmap='Greys', interpolation='nearest')\n ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n# plt.savefig('./figures/mnist_miscl.png', dpi=300)\nplt.show()",
"_____no_output_____"
]
],
[
[
"<br>\n<br>",
"_____no_output_____"
],
[
"# Debugging neural networks with gradient checking",
"_____no_output_____"
],
[
"[[back to top](#Sections)]",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom scipy.special import expit\nimport sys\n\n\nclass MLPGradientCheck(object):\n \"\"\" Feedforward neural network / Multi-layer perceptron classifier.\n\n Parameters\n ------------\n n_output : int\n Number of output units, should be equal to the\n number of unique class labels.\n\n n_features : int\n Number of features (dimensions) in the target dataset.\n Should be equal to the number of columns in the X array.\n\n n_hidden : int (default: 30)\n Number of hidden units.\n\n l1 : float (default: 0.0)\n Lambda value for L1-regularization.\n No regularization if l1=0.0 (default)\n\n l2 : float (default: 0.0)\n Lambda value for L2-regularization.\n No regularization if l2=0.0 (default)\n\n epochs : int (default: 500)\n Number of passes over the training set.\n\n eta : float (default: 0.001)\n Learning rate.\n\n alpha : float (default: 0.0)\n Momentum constant. Factor multiplied with the\n gradient of the previous epoch t-1 to improve\n learning speed\n w(t) := w(t) - (grad(t) + alpha*grad(t-1))\n \n decrease_const : float (default: 0.0)\n Decrease constant. Shrinks the learning rate\n after each epoch via eta / (1 + epoch*decrease_const)\n\n shuffle : bool (default: False)\n Shuffles training data every epoch if True to prevent circles.\n\n minibatches : int (default: 1)\n Divides training data into k minibatches for efficiency.\n Normal gradient descent learning if k=1 (default).\n\n random_state : int (default: None)\n Set random state for shuffling and initializing the weights.\n\n Attributes\n -----------\n cost_ : list\n Sum of squared errors after each epoch.\n\n \"\"\"\n def __init__(self, n_output, n_features, n_hidden=30,\n l1=0.0, l2=0.0, epochs=500, eta=0.001, \n alpha=0.0, decrease_const=0.0, shuffle=True, \n minibatches=1, random_state=None):\n\n np.random.seed(random_state)\n self.n_output = n_output\n self.n_features = n_features\n self.n_hidden = n_hidden\n self.w1, self.w2 = self._initialize_weights()\n self.l1 = l1\n self.l2 = l2\n self.epochs = epochs\n self.eta = eta\n self.alpha = alpha\n self.decrease_const = decrease_const\n self.shuffle = shuffle\n self.minibatches = minibatches\n\n def _encode_labels(self, y, k):\n \"\"\"Encode labels into one-hot representation\n\n Parameters\n ------------\n y : array, shape = [n_samples]\n Target values.\n\n Returns\n -----------\n onehot : array, shape = (n_labels, n_samples)\n\n \"\"\"\n onehot = np.zeros((k, y.shape[0]))\n for idx, val in enumerate(y):\n onehot[val, idx] = 1.0\n return onehot\n\n def _initialize_weights(self):\n \"\"\"Initialize weights with small random numbers.\"\"\"\n w1 = np.random.uniform(-1.0, 1.0, size=self.n_hidden*(self.n_features + 1))\n w1 = w1.reshape(self.n_hidden, self.n_features + 1)\n w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden + 1))\n w2 = w2.reshape(self.n_output, self.n_hidden + 1)\n return w1, w2\n\n def _sigmoid(self, z):\n \"\"\"Compute logistic function (sigmoid)\n\n Uses scipy.special.expit to avoid overflow\n error for very small input values z.\n\n \"\"\"\n # return 1.0 / (1.0 + np.exp(-z))\n return expit(z)\n\n def _sigmoid_gradient(self, z):\n \"\"\"Compute gradient of the logistic function\"\"\"\n sg = self._sigmoid(z)\n return sg * (1 - sg)\n\n def _add_bias_unit(self, X, how='column'):\n \"\"\"Add bias unit (column or row of 1s) to array at index 0\"\"\"\n if how == 'column':\n X_new = np.ones((X.shape[0], X.shape[1]+1))\n X_new[:, 1:] = X\n elif how == 'row':\n X_new = np.ones((X.shape[0]+1, X.shape[1]))\n X_new[1:, :] = X\n else:\n 
raise AttributeError('`how` must be `column` or `row`')\n return X_new\n\n def _feedforward(self, X, w1, w2):\n \"\"\"Compute feedforward step\n\n Parameters\n -----------\n X : array, shape = [n_samples, n_features]\n Input layer with original features.\n\n w1 : array, shape = [n_hidden_units, n_features]\n Weight matrix for input layer -> hidden layer.\n\n w2 : array, shape = [n_output_units, n_hidden_units]\n Weight matrix for hidden layer -> output layer.\n\n Returns\n ----------\n a1 : array, shape = [n_samples, n_features+1]\n Input values with bias unit.\n\n z2 : array, shape = [n_hidden, n_samples]\n Net input of hidden layer.\n\n a2 : array, shape = [n_hidden+1, n_samples]\n Activation of hidden layer.\n\n z3 : array, shape = [n_output_units, n_samples]\n Net input of output layer.\n\n a3 : array, shape = [n_output_units, n_samples]\n Activation of output layer.\n\n \"\"\"\n a1 = self._add_bias_unit(X, how='column')\n z2 = w1.dot(a1.T)\n a2 = self._sigmoid(z2)\n a2 = self._add_bias_unit(a2, how='row')\n z3 = w2.dot(a2)\n a3 = self._sigmoid(z3)\n return a1, z2, a2, z3, a3\n\n def _L2_reg(self, lambda_, w1, w2):\n \"\"\"Compute L2-regularization cost\"\"\"\n return (lambda_/2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))\n\n def _L1_reg(self, lambda_, w1, w2):\n \"\"\"Compute L1-regularization cost\"\"\"\n return (lambda_/2.0) * (np.abs(w1[:, 1:]).sum() + np.abs(w2[:, 1:]).sum())\n\n def _get_cost(self, y_enc, output, w1, w2):\n \"\"\"Compute cost function.\n\n y_enc : array, shape = (n_labels, n_samples)\n one-hot encoded class labels.\n\n output : array, shape = [n_output_units, n_samples]\n Activation of the output layer (feedforward)\n\n w1 : array, shape = [n_hidden_units, n_features]\n Weight matrix for input layer -> hidden layer.\n\n w2 : array, shape = [n_output_units, n_hidden_units]\n Weight matrix for hidden layer -> output layer.\n\n Returns\n ---------\n cost : float\n Regularized cost.\n\n \"\"\"\n term1 = -y_enc * (np.log(output))\n term2 = (1 - y_enc) * np.log(1 - output)\n cost = np.sum(term1 - term2)\n L1_term = self._L1_reg(self.l1, w1, w2)\n L2_term = self._L2_reg(self.l2, w1, w2)\n cost = cost + L1_term + L2_term\n return cost\n\n def _get_gradient(self, a1, a2, a3, z2, y_enc, w1, w2):\n \"\"\" Compute gradient step using backpropagation.\n\n Parameters\n ------------\n a1 : array, shape = [n_samples, n_features+1]\n Input values with bias unit.\n\n a2 : array, shape = [n_hidden+1, n_samples]\n Activation of hidden layer.\n\n a3 : array, shape = [n_output_units, n_samples]\n Activation of output layer.\n\n z2 : array, shape = [n_hidden, n_samples]\n Net input of hidden layer.\n\n y_enc : array, shape = (n_labels, n_samples)\n one-hot encoded class labels.\n\n w1 : array, shape = [n_hidden_units, n_features]\n Weight matrix for input layer -> hidden layer.\n\n w2 : array, shape = [n_output_units, n_hidden_units]\n Weight matrix for hidden layer -> output layer.\n\n Returns\n ---------\n\n grad1 : array, shape = [n_hidden_units, n_features]\n Gradient of the weight matrix w1.\n\n grad2 : array, shape = [n_output_units, n_hidden_units]\n Gradient of the weight matrix w2.\n\n \"\"\"\n # backpropagation\n sigma3 = a3 - y_enc\n z2 = self._add_bias_unit(z2, how='row')\n sigma2 = w2.T.dot(sigma3) * self._sigmoid_gradient(z2)\n sigma2 = sigma2[1:, :]\n grad1 = sigma2.dot(a1)\n grad2 = sigma3.dot(a2.T)\n\n # regularize\n grad1[:, 1:] += (w1[:, 1:] * (self.l1 + self.l2))\n grad2[:, 1:] += (w2[:, 1:] * (self.l1 + self.l2))\n\n return grad1, grad2\n\n def 
_gradient_checking(self, X, y_enc, w1, w2, epsilon, grad1, grad2):\n \"\"\" Apply gradient checking (for debugging only)\n\n Returns\n ---------\n relative_error : float\n Relative error between the numerically\n approximated gradients and the backpropagated gradients.\n\n \"\"\"\n num_grad1 = np.zeros(np.shape(w1))\n epsilon_ary1 = np.zeros(np.shape(w1))\n for i in range(w1.shape[0]):\n for j in range(w1.shape[1]):\n epsilon_ary1[i, j] = epsilon\n a1, z2, a2, z3, a3 = self._feedforward(X, w1 - epsilon_ary1, w2)\n cost1 = self._get_cost(y_enc, a3, w1-epsilon_ary1, w2)\n a1, z2, a2, z3, a3 = self._feedforward(X, w1 + epsilon_ary1, w2)\n cost2 = self._get_cost(y_enc, a3, w1 + epsilon_ary1, w2)\n num_grad1[i, j] = (cost2 - cost1) / (2 * epsilon)\n epsilon_ary1[i, j] = 0\n\n num_grad2 = np.zeros(np.shape(w2))\n epsilon_ary2 = np.zeros(np.shape(w2))\n for i in range(w2.shape[0]):\n for j in range(w2.shape[1]):\n epsilon_ary2[i, j] = epsilon\n a1, z2, a2, z3, a3 = self._feedforward(X, w1, w2 - epsilon_ary2)\n cost1 = self._get_cost(y_enc, a3, w1, w2 - epsilon_ary2)\n a1, z2, a2, z3, a3 = self._feedforward(X, w1, w2 + epsilon_ary2)\n cost2 = self._get_cost(y_enc, a3, w1, w2 + epsilon_ary2)\n num_grad2[i, j] = (cost2 - cost1) / (2 * epsilon)\n epsilon_ary2[i, j] = 0\n\n num_grad = np.hstack((num_grad1.flatten(), num_grad2.flatten()))\n grad = np.hstack((grad1.flatten(), grad2.flatten()))\n norm1 = np.linalg.norm(num_grad - grad)\n norm2 = np.linalg.norm(num_grad)\n norm3 = np.linalg.norm(grad)\n relative_error = norm1 / (norm2 + norm3)\n return relative_error\n\n def predict(self, X):\n \"\"\"Predict class labels\n\n Parameters\n -----------\n X : array, shape = [n_samples, n_features]\n Input layer with original features.\n\n Returns:\n ----------\n y_pred : array, shape = [n_samples]\n Predicted class labels.\n\n \"\"\"\n if len(X.shape) != 2:\n raise AttributeError('X must be a [n_samples, n_features] array.\\n'\n 'Use X[:,None] for 1-feature classification,'\n '\\nor X[[i]] for 1-sample classification')\n\n a1, z2, a2, z3, a3 = self._feedforward(X, self.w1, self.w2)\n y_pred = np.argmax(z3, axis=0)\n return y_pred\n\n def fit(self, X, y, print_progress=False):\n \"\"\" Learn weights from training data.\n\n Parameters\n -----------\n X : array, shape = [n_samples, n_features]\n Input layer with original features.\n\n y : array, shape = [n_samples]\n Target class labels.\n\n print_progress : bool (default: False)\n Prints progress as the number of epochs\n to stderr.\n\n Returns:\n ----------\n self\n\n \"\"\"\n self.cost_ = []\n X_data, y_data = X.copy(), y.copy()\n y_enc = self._encode_labels(y, self.n_output)\n\n delta_w1_prev = np.zeros(self.w1.shape)\n delta_w2_prev = np.zeros(self.w2.shape)\n\n for i in range(self.epochs):\n \n # adaptive learning rate\n self.eta /= (1 + self.decrease_const*i)\n\n if print_progress:\n sys.stderr.write('\\rEpoch: %d/%d' % (i+1, self.epochs))\n sys.stderr.flush()\n\n if self.shuffle:\n idx = np.random.permutation(y_data.shape[0])\n X_data, y_data = X_data[idx], y_data[idx]\n\n mini = np.array_split(range(y_data.shape[0]), self.minibatches)\n for idx in mini:\n\n # feedforward\n a1, z2, a2, z3, a3 = self._feedforward(X[idx], self.w1, self.w2)\n cost = self._get_cost(y_enc=y_enc[:, idx],\n output=a3,\n w1=self.w1,\n w2=self.w2)\n self.cost_.append(cost)\n\n # compute gradient via backpropagation\n grad1, grad2 = self._get_gradient(a1=a1, a2=a2,\n a3=a3, z2=z2,\n y_enc=y_enc[:, idx],\n w1=self.w1,\n w2=self.w2)\n \n ## start gradient checking\n grad_diff = 
self._gradient_checking(X=X[idx], y_enc=y_enc[:, idx],\n w1=self.w1, w2=self.w2,\n epsilon=1e-5,\n grad1=grad1, grad2=grad2)\n \n if grad_diff <= 1e-7:\n print('Ok: %s' % grad_diff)\n elif grad_diff <= 1e-4:\n print('Warning: %s' % grad_diff)\n else:\n print('PROBLEM: %s' % grad_diff)\n\n # update weights; [alpha * delta_w_prev] for momentum learning\n delta_w1, delta_w2 = self.eta * grad1, self.eta * grad2\n self.w1 -= (delta_w1 + (self.alpha * delta_w1_prev))\n self.w2 -= (delta_w2 + (self.alpha * delta_w2_prev))\n delta_w1_prev, delta_w2_prev = delta_w1, delta_w2\n\n return self",
"_____no_output_____"
],
[
"nn_check = MLPGradientCheck(n_output=10, \n n_features=X_train.shape[1], \n n_hidden=10, \n l2=0.0, \n l1=0.0, \n epochs=10, \n eta=0.001,\n alpha=0.0,\n decrease_const=0.0,\n minibatches=1, \n random_state=1)",
"_____no_output_____"
],
[
"nn_check.fit(X_train[:5], y_train[:5], print_progress=False)",
"Ok: 2.56712936241e-10\nOk: 2.94603251069e-10\nOk: 2.37615620231e-10\nOk: 2.43469423226e-10\nOk: 3.37872073158e-10\nOk: 3.63466384861e-10\nOk: 2.22472120785e-10\nOk: 2.33163708438e-10\nOk: 3.44653686551e-10\nOk: 2.17161707211e-10\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a114fe51cae4ac51f0004a068c1117fceab4b7d
| 2,696 |
ipynb
|
Jupyter Notebook
|
cc_cet/hst_ddt/notes.ipynb
|
davidjwilson/pceb
|
259cf4b18b51b7163d6ce84ab150c5f65f8cfdec
|
[
"MIT"
] | null | null | null |
cc_cet/hst_ddt/notes.ipynb
|
davidjwilson/pceb
|
259cf4b18b51b7163d6ce84ab150c5f65f8cfdec
|
[
"MIT"
] | null | null | null |
cc_cet/hst_ddt/notes.ipynb
|
davidjwilson/pceb
|
259cf4b18b51b7163d6ce84ab150c5f65f8cfdec
|
[
"MIT"
] | null | null | null | 18.340136 | 62 | 0.501113 |
[
[
[
"Notes for an hst ddt proposal",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport os\nimport glob\nfrom astropy.table import Table\nfrom astropy.io import ascii\nimport astropy.units as u\nimport astropy.constants as const\nfrom astropy.convolution import convolve, Box1DKernel\nfrom scipy.optimize import leastsq\nfrom scipy.interpolate import interp1d\nfrom astropy.modeling import models, fitting\n\n#matplotlib set up\n%matplotlib inline\nfrom matplotlib import rcParams\nrcParams[\"figure.figsize\"] = (14, 5)\nrcParams[\"font.size\"] = 20",
"_____no_output_____"
],
[
"1800*5 ",
"_____no_output_____"
],
[
"5*1.5",
"_____no_output_____"
],
[
"52*60",
"_____no_output_____"
],
[
"2200*u.s.to(u.min)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a115a1fc09aacb4db7a169dc4e24986460279c7
| 96,441 |
ipynb
|
Jupyter Notebook
|
EDA/Notebooks/Titanic EDA Chennai.ipynb
|
Aujasvi-Moudgil/EDA-Miscellaneous-Data
|
7c0fbd745918fec316fda2e5a0392a5f63fec24a
|
[
"MIT"
] | null | null | null |
EDA/Notebooks/Titanic EDA Chennai.ipynb
|
Aujasvi-Moudgil/EDA-Miscellaneous-Data
|
7c0fbd745918fec316fda2e5a0392a5f63fec24a
|
[
"MIT"
] | null | null | null |
EDA/Notebooks/Titanic EDA Chennai.ipynb
|
Aujasvi-Moudgil/EDA-Miscellaneous-Data
|
7c0fbd745918fec316fda2e5a0392a5f63fec24a
|
[
"MIT"
] | null | null | null | 54.702779 | 26,172 | 0.638484 |
[
[
[
"import pandas as pd\nimport numpy as np\nfrom pandas import Series, DataFrame\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"t = sns.load_dataset(\"titanic\") ",
"_____no_output_____"
],
[
"t.head()",
"_____no_output_____"
],
[
"t.describe()",
"_____no_output_____"
],
[
"df1 = DataFrame(t.dtypes)\ndf1",
"_____no_output_____"
],
[
"# identify missing values of the 11 columns,add the stats to the datadict\ndf1['MissingVal'] = t.isnull().sum()\ndf1",
"_____no_output_____"
],
[
"# Identify number of unique values, For object nunique will the number of levels\n# Add the stats the data dict\ndf1['NUnique']=t.nunique()\ndf1",
"_____no_output_____"
],
[
"# Identify the count for each variable, add the stats to datadict\ndf1['Count']=t.count()\ndf1",
"_____no_output_____"
],
[
"# rename the 0 column\ndf1 = df1.rename(columns={0:'DataType'})\ndf1",
"_____no_output_____"
],
[
"# get discripte statistcs on \"object\" datatypes\nt.describe(include=['object'])",
"_____no_output_____"
],
[
"# get discriptive statistcs on \"number\" datatypes\nt.describe(include=['number'])",
"_____no_output_____"
],
[
"# get discripte statistcs on \"int\" datatypes\nt.describe(include=['int'])",
"_____no_output_____"
],
[
"t.alive.value_counts(normalize=True)",
"_____no_output_____"
]
],
[
[
"only 38% of the passengers were survived, where as a majority 61% the passenger did not survive the disaster",
"_____no_output_____"
]
],
[
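[
"# Quick check of the claim above (hedged sketch, assuming `t` is the seaborn\n# titanic frame loaded earlier): overall survival rate\nt['survived'].mean()",
"_____no_output_____"
],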
[
"t.columns",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(2, 3, figsize=(16, 10))\nsns.countplot('survived',data=t,ax=axes[0,0])\nsns.countplot('pclass',data=t,ax=axes[0,1])\nsns.countplot('sex',data=t,ax=axes[0,2])\nsns.countplot('sibsp',data=t,ax=axes[1,0])\nsns.countplot('parch',data=t,ax=axes[1,1])\nsns.countplot('embarked',data=t,ax=axes[1,2])",
"_____no_output_____"
]
],
[
[
"1) We can clearly see that male survial rates is around 20% where as female survial rate is about 75% which suggests that gender has a strong relationship with the survival rates.\n\n2) There is also a clear relationship between Pclass and the survival by referring to first plot below. Passengers on Pclass1 had a better survial rate of approx 60% whereas passengers on pclass3 had the worst survial rate of approx 22%",
"_____no_output_____"
]
],
[
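[
"# Backing the two observations above with numbers (hedged sketch, assuming `t`\n# from earlier): survival rates by sex and by passenger class\nprint(t.groupby('sex')['survived'].mean())\nprint(t.groupby('pclass')['survived'].mean())",
"_____no_output_____"
],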
[
"sns.boxplot(x=\"survived\", y=\"age\", data=t)",
"_____no_output_____"
],
[
"t.fare.mean()",
"_____no_output_____"
],
[
"t.fare.median()",
"_____no_output_____"
],
[
"# impute the missing Fare values with the mean Fare value\nt.fare.fillna(t.fare.mean(),inplace=True)",
"_____no_output_____"
],
[
"t.isnull().sum() #Check for the nulls in Fare",
"_____no_output_____"
],
[
"print(\"Skewness: %f\" % t['fare'].skew())",
"Skewness: 4.787317\n"
],
[
"print(\"Kurtosis: %f\" % t['fare'].kurt())",
"Kurtosis: 33.398141\n"
],
[
"sns.distplot(t['fare'],bins=100);",
"/Users/aniruddhakalbande/anaconda2/lib/python2.7/site-packages/matplotlib/axes/_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n"
],
[
"g = sns.catplot(x=\"who\", y=\"survived\", col=\"class\",data=titanic, kind=\"bar\",saturation=.5, ci=None, aspect=.6)",
"_____no_output_____"
],
[
"ser1=Series({'a':1,'b':2,'c':3})",
"_____no_output_____"
],
[
"ser1",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"raw",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a1164a6202de79d2775ebc2cb3df138832e7fe2
| 33,620 |
ipynb
|
Jupyter Notebook
|
00_starter.ipynb
|
iyaja/ought
|
0b75b6237584f249f70a9e7e6686a3bf7aaae717
|
[
"Apache-2.0"
] | null | null | null |
00_starter.ipynb
|
iyaja/ought
|
0b75b6237584f249f70a9e7e6686a3bf7aaae717
|
[
"Apache-2.0"
] | null | null | null |
00_starter.ipynb
|
iyaja/ought
|
0b75b6237584f249f70a9e7e6686a3bf7aaae717
|
[
"Apache-2.0"
] | null | null | null | 49.368576 | 1,623 | 0.611749 |
[
[
[
"# default_exp starter",
"_____no_output_____"
]
],
[
[
"# Starter Code\n\n> Utility functions for binary classification on scientific paper abstracts using GPT-2",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
]
],
[
[
"## GPT-2 Prompt Manipulation",
"_____no_output_____"
],
[
"The simplest approach is to to feed in a small number of trainging examples into the prompt directly for zero-shot classification.",
"_____no_output_____"
],
[
"### Setup",
"_____no_output_____"
]
],
[
[
"#export\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel\nimport torch\nimport json\nimport pandas as pd",
"_____no_output_____"
],
[
"tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\ntokenizer.pad_token = tokenizer.eos_token\nmodel = GPT2LMHeadModel.from_pretrained('gpt2')\nmodel.eval().cuda()",
"_____no_output_____"
]
],
[
[
"### Utilities",
"_____no_output_____"
]
],
[
[
"#export\ndef generate(prompt, max_length=5, stop_token=None):\n input_ids = tokenizer.encode(prompt, return_tensors=\"pt\")\n generated_text_ids = model.generate(input_ids=input_ids.cuda(), max_length=max_length+len(input_ids[0]), do_sample=False)\n generated_text = tokenizer.decode(generated_text_ids[0], clean_up_tokenization_spaces=True)\n post_prompt_text = generated_text[len(tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)):]\n return prompt + post_prompt_text[:post_prompt_text.find(stop_token) if stop_token else None]",
"_____no_output_____"
],
[
"# Note that the logits are shifted over 1 to the left, since HuggingFace doesn't give a logit for the first token\ndef get_logits_and_tokens(text):\n input_ids = tokenizer.encode(text, return_tensors=\"pt\")\n tokens = [tokenizer.decode([input_id]) for input_id in input_ids[0]]\n output = model(input_ids.cuda())\n return output.logits[0][:-1], tokens",
"_____no_output_____"
],
[
"EXAMPLE_PROMPT = \"\"\"Horrible: negative\nGreat: positive\nBad:\"\"\"\n\ngenerated_text = generate(EXAMPLE_PROMPT, stop_token=\"\\n\")\ngenerated_text",
"Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
],
[
"logits, tokens = get_logits_and_tokens(generated_text)\nlast_token_probs = torch.softmax(logits[-1], dim=0)\nnegative_prob = last_token_probs[tokenizer.encode(\" negative\")[0]]\npositive_prob = last_token_probs[tokenizer.encode(\" positive\")[0]]\n\nprint(f\"tokens: {tokens}\\nnegative prob: {negative_prob}\\npositive prob: {positive_prob}\")",
"tokens: ['Hor', 'rible', ':', ' negative', '\\n', 'Great', ':', ' positive', '\\n', 'Bad', ':', ' negative']\nnegative prob: 0.7252255082130432\npositive prob: 0.11788686364889145\n"
]
],
[
[
"### Load Data",
"_____no_output_____"
],
[
"Define helper function to load text from `.jsonl` files.",
"_____no_output_____"
]
],
[
[
"#export\ndef load_jsonl(filename):\n f = open(filename)\n return [json.loads(line) for line in f.read().splitlines()]",
"_____no_output_____"
],
[
"train_examples = load_jsonl(\"data/train.jsonl\")\ntrain_examples[-1]",
"_____no_output_____"
],
[
"#export\ndef render_example(example):\n title = example[\"text\"].split(\".\")[0].strip()\n abstract = example[\"text\"][len(title)+1:].strip()\n return f'TITLE: {title}\\nABSTRACT: {abstract}\\nLABEL: {\"AI\" if example[\"label\"] == \"True\" else \"NOT AI\"}'",
"_____no_output_____"
],
[
"#export\ndef render_end_example(example):\n title = example[\"text\"].split(\".\")[0].strip()\n abstract = example[\"text\"][len(title)+1:].strip()\n return f\"TITLE: {title}\\nABSTRACT: {abstract}\\nLABEL:\"",
"_____no_output_____"
],
[
"#export\ndef make_prompt(instructions, train_examples, end_example):\n rendered_train_examples = \"\\n\\n--\\n\\n\".join([render_example(example) for example in train_examples])\n return f\"\"\"{instructions}\n\n{rendered_train_examples}\n\n--\n\n{render_end_example(end_example)}\"\"\"",
"_____no_output_____"
],
[
"INSTRUCTIONS = \"Classify the following examples based on whether they are AI-relevant or not:\"\n\nprompt = make_prompt(INSTRUCTIONS, train_examples[:4], train_examples[4])\nprint(prompt)",
"Classify the following examples based on whether they are AI-relevant or not:\n\nTITLE: thermodynamic analysis of quantum error correcting engines\nABSTRACT: quantum error correcting codes can be cast in a way which is strikingly similar to a quantum heat engine undergoing an otto cycle. in this paper we strengthen this connection further by carrying out a complete assessment of the thermodynamic properties of strokes operator based error correcting codes. this includes an expression for the entropy production in the cycle which as we show contains clear contributions stemming from the different sources of irreversibility. to illustrate our results we study a classical qubit error correcting code well suited for incoherent states and the qubit shor code capable of handling fully quantum states. we show that the work cost associated with the correction gate is directly associated with the heat introduced by the error. moreover the work cost associated with encoding decoding quantum information is always positive a fact which is related to the intrinsic irreversibility introduced by the noise. finally we find that correcting the coherent and thus genuinely quantum part of a quantum state introduces substantial modifications related to the hadamard gates required to encode and decode coherences.\nLABEL: NOT AI\n\n--\n\nTITLE: nlo qcd corrections to wzjj production at the lhc\nABSTRACT: we present a summary of the first calculation of nlo qcd corrections to wzjj production with leptonic decays at the lhc. our results show that the next to leading order corrections reduce significantly the scale uncertainties.\nLABEL: NOT AI\n\n--\n\nTITLE: asymptotics for lipschitz percolation above tilted planes\nABSTRACT: we consider lipschitz percolation in dimensions above planes tilted by an angle along one or several coordinate axes. in particular we are interested in the asymptotics of the critical probability as as well as our principal results show that the convergence of the critical probability to is polynomial as and in addition we identify the correct order of this polynomial convergence and in we also obtain the correct prefactor.\nLABEL: NOT AI\n\n--\n\nTITLE: the colored jones polynomials for bridge links\nABSTRACT: kuperberg introduced web spaces for some lie algebras which are generalizations of the kauffman bracket skein module on a disk with marked points. we derive some formulas for and clasped web spaces by graphical calculus using skein theory. these formulas are colored version of skein relations twist formulas and bubble skein expansion formulas. we calculate the and colored jones polynomials of bridge knots and links explicitly using twist formulas.\nLABEL: NOT AI\n\n--\n\nTITLE: population mixtures and searches of lensed and extended quasars across photometric surveys\nABSTRACT: wide field photometric surveys enable searches of rare yet interesting objects such as strongly lensed quasars or quasars with a bright host galaxy. past searches for lensed quasars based on their optical and near infrared properties have relied on photometric cuts and spectroscopic pre selection as in the sloan quasar lens search or neural networks applied to photometric samples. these methods rely on cuts in morphology and colours with the risk of losing many interesting objects due to scatter in their population properties restrictive training sets systematic uncertainties in catalog based magnitudes and survey to survey photometric variations. 
here we explore the performance of a gaussian mixture model to separate point like quasars quasars with an extended host and strongly lensed quasars using griz psf and model magnitudes and wise w1 w2. the choice of optical magnitudes is due to their presence in all current and upcoming releases of wide field surveys whereas uv information is not always available. we then assess the contamination from blue galaxies and the role of additional features such as w3 magnitudes or psf model terms as morphological information. as a demonstration we conduct a search in a random of the sdss footprint and we provide the catalog of the sdss object with the highest `lens score in our selection that survive visual inspection and are spectroscopically confirmed to host active nuclei. we inspect archival data and find images of objects in the hubble legacy archive including known lenses. the code and materials are available to facilitate follow up.\nLABEL:\n"
],
[
"generated_text = generate(prompt, stop_token=\"\\n\")\nprint(generated_text)",
"Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
]
],
[
[
"## Extra Helper Function",
"_____no_output_____"
],
[
"These are some extra utility functions that were not defined in the original starter code, but were still useful.",
"_____no_output_____"
]
],
[
[
"train = load_jsonl(\"data/train.jsonl\")\ndf = pd.DataFrame(train)",
"_____no_output_____"
]
],
[
[
"Collect some random samples across classes. This should be flexible enough to generalize beyong the `'AI'` and `'Not AI'` labels.",
"_____no_output_____"
]
],
[
[
"df.label.unique()",
"_____no_output_____"
],
[
"samples_per_label = 2\nsamples = []\nfor label in df['label'].unique():\n group = df[df.label == label]\n idxs = group.index[:samples_per_label]\n samples += [train[idx] for idx in idxs]\nsamples",
"_____no_output_____"
],
[
"#export\ndef uniform_samples(json='data/train.jsonl', n_samples=2):\n superset = load_jsonl(json)\n df = pd.DataFrame(superset)\n samples = []\n for label in df['label'].unique():\n group = df[df.label == label]\n idxs = group.index[:n_samples]\n samples += [superset[idx] for idx in idxs]\n return samples",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a117eb18763b906be3949d4d36f5c834e48d18a
| 36,280 |
ipynb
|
Jupyter Notebook
|
1. Beginner/Pytorch7_2_RNN_Basic.ipynb
|
gjustin40/Pytorch
|
b099fe7975490790f5179f2733a3fc546b5b3d46
|
[
"MIT"
] | null | null | null |
1. Beginner/Pytorch7_2_RNN_Basic.ipynb
|
gjustin40/Pytorch
|
b099fe7975490790f5179f2733a3fc546b5b3d46
|
[
"MIT"
] | 1 |
2022-03-12T01:01:53.000Z
|
2022-03-12T01:01:53.000Z
|
1. Beginner/Pytorch7_2_RNN_Basic.ipynb
|
gjustin40/Pytorch
|
b099fe7975490790f5179f2733a3fc546b5b3d46
|
[
"MIT"
] | 1 |
2020-06-06T09:09:39.000Z
|
2020-06-06T09:09:39.000Z
| 40.311111 | 11,206 | 0.615987 |
[
[
[
"# Master Pytorch 7 : RNN Basic\n- RNN의 기초에 대해 알아보자",
"_____no_output_____"
],
[
"# RNN one Cell process",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\n\n# hello에 있는 각각의 문자들을 원핫벡터로 변환\nh = [1, 0, 0, 0]\ne = [0, 1, 0, 0]\nl = [0, 0, 1, 0]\no = [0, 0, 0, 1]\n\n# RNN의 한 cell 정의 후 출력\ncell = nn.RNN(input_size = 4, hidden_size = 2, batch_first = True)\ncell\n\nhidden = torch.randn(1, 1, 2) # (num_layers * num_direction) X batch_size X hidden_size\nhidden\n\ninputs = torch.Tensor([h, e, l, l, o])\ninputs\n\nfor p in cell.parameters():\n print(p)\n\nfor c in inputs:\n c = c.view(1, 1, -1) # input : (batch_size X seq_len X input_size) if batch_first = True\n out, hidden = cell(c, hidden)\n print(c.size(), out.size())",
"Parameter containing:\ntensor([[-0.1846, -0.0750, -0.2294, 0.3287],\n [ 0.5021, 0.3439, 0.2003, -0.0298]], requires_grad=True)\nParameter containing:\ntensor([[0.0927, 0.0048],\n [0.1965, 0.1792]], requires_grad=True)\nParameter containing:\ntensor([-0.5593, -0.4830], requires_grad=True)\nParameter containing:\ntensor([ 0.6956, -0.3588], requires_grad=True)\ntorch.Size([1, 1, 4]) torch.Size([1, 1, 2])\ntorch.Size([1, 1, 4]) torch.Size([1, 1, 2])\ntorch.Size([1, 1, 4]) torch.Size([1, 1, 2])\ntorch.Size([1, 1, 4]) torch.Size([1, 1, 2])\ntorch.Size([1, 1, 4]) torch.Size([1, 1, 2])\n"
],
[
"# 위 작업은 한 번에 가능하다.\n# input : (Batch_size X seq_len X input_size) if batch_first == True\ninputs = inputs.view(1, 5, -1) # Batch X seq_len X input_size\nout, hidden = cell(inputs, hidden)\nprint(out.size(), hidden.size())\n# sequence length = 5로 나오는데, 이것은 곧 hidden이 5개 이어져있다는 뜻",
"torch.Size([1, 5, 2]) torch.Size([1, 1, 2])\n"
],
[
"hidden = torch.randn(1, 3, 2)\n\n# cell 한개 RNN : input_dim(4) -> output_dim(2) / seqence = 5, batch = 3\ninputs = torch.Tensor([[h,e,l,l,o],\n [e,o,l,l,l],\n [l,l,e,e,l]])\ninputs\n\n# input : (batch, seq_len, input_size) when batch_first = True\n# B X S X I\nout, hidden = cell(inputs, hidden)\nprint(inputs.size(), out.size())\nout",
"torch.Size([3, 5, 4]) torch.Size([3, 5, 2])\n"
]
],
[
[
"# RNN Example",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\n\ntorch.manual_seed(777)\n\nidx2char = ['h', 'i', 'e', 'l', 'o']\n\n# hihell -> ihello로 가르쳐보자\nx_data = [0, 1, 0, 2, 3, 3] # hihell\none_hot_lookup = [[1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1]]\n\ny_data = [1, 0, 2, 3, 3, 4] # ihello\nx_one_hot = [one_hot_lookup[x] for x in x_data]\nx_one_hot # hihell에 대한 one-hot 벡터",
"_____no_output_____"
],
[
"# 데이터 준비\ninputs = torch.Tensor(x_one_hot)\nlabels = torch.LongTensor(y_data).view(6,1)\nprint(inputs)\nprint(labels)",
"tensor([[1., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0.],\n [1., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0.],\n [0., 0., 0., 1., 0.],\n [0., 0., 0., 1., 0.]])\ntensor([[1],\n [0],\n [2],\n [3],\n [3],\n [4]])\n"
],
[
"class_n = 5\ninput_size = 5 # one-hot 벡터의 사이즈\nhidden_size = 5 # hidden 레이어의 사이즈\nbatch_size = 1 # 한 문장\nsequence_length = 1 # 한 글자당 하나씩\nlayer_n = 1 # one-layer RNN\n\nclass Model(nn.Module):\n \n def __init__(self):\n super(Model, self).__init__()\n \n self.rnn = nn.RNN(input_size = input_size, hidden_size = hidden_size, batch_first = True)\n \n def forward(self, hidden, x):\n \n x = x.view(batch_size, sequence_length, input_size) # Reshape input(batch_first = True)\n \n out, hidden = self.rnn(x, hidden)\n return hidden, out.view(-1, class_n)\n \n def init_hidden(self):\n \n return torch.zeros(layer_n, batch_size, hidden_size)\n \nmodel = Model()\nprint(model)",
"Model(\n (rnn): RNN(5, 5, batch_first=True)\n)\n"
],
[
"import torch.optim as optim\nimport sys\n\nloss_function = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr = 0.001)\n\nepoch_n = 100\n\n\n\nfor epoch in range(1, epoch_n+1):\n \n optimizer.zero_grad()\n loss = 0\n hidden = model.init_hidden() # 초기값 설정(h0)\n \n sys.stdout.write(\"predicted string: \")\n for input, label in zip(inputs, labels):\n # print(input.size(), label.size())\n hidden, output = model(hidden, input)\n val, idx = output.max(1) # 최대값(val)과 index(idx) 출력\n sys.stdout.write(idx2char[idx.data[0]])\n loss += loss_function(output, label)\n \n \n print(' Epoch : %d/100, Loss : %1.3f' %(epoch, loss.data[0])) \n \n loss.backward()\n optimizer.step()\n",
"_____no_output_____"
]
],
[
[
"# 스스로 해보기\n- \"hihello\"에서 \"hihell\"를 입력값으로 한 후 'o'를 예측하자",
"_____no_output_____"
],
[
"# 데이터 생성",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\n\nidx2char = ['h', 'i', 'e', 'l', 'o']\n\nh = [1,0,0,0,0]\ni = [0,1,0,0,0]\ne = [0,0,1,0,0]\nl = [0,0,0,1,0]\no = [0,0,0,0,1]\n\nx_data = [h,i,h,e,l,l]\ny_data = [i,h,e,l,l,o]\n\ninputs = torch.Tensor(x_data)\nlabels = torch.LongTensor([1, 0, 2, 3, 3, 4]).view(6, 1) # index2char\n\nprint(inputs)\nprint(labels)",
"tensor([[1., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0.],\n [1., 0., 0., 0., 0.],\n [0., 0., 1., 0., 0.],\n [0., 0., 0., 1., 0.],\n [0., 0., 0., 1., 0.]])\ntensor([[1],\n [0],\n [2],\n [3],\n [3],\n [4]])\n"
],
[
"class_n = 5\ninput_size = 5\nhidden_size = 5\nbatch_size = 1\nseq_n = 1\nlayer_n = 1\n\nclass Model(nn.Module):\n \n def __init__(self):\n super(Model, self).__init__()\n \n self.rnn = nn.RNN(input_size = input_size, hidden_size = hidden_size,\n batch_first = True)\n \n def forward(self, x, hidden):\n \n x = x.view(batch_size, seq_n, input_size)\n out, hidden = self.rnn(x, hidden)\n \n return out.view(-1, class_n), hidden\n \n def init_hidden(self):\n \n return torch.zeros(layer_n, batch_size, hidden_size)\n\nmodel = Model()\nprint(model)",
"Model(\n (rnn): RNN(5, 5, batch_first=True)\n)\n"
],
[
"import torch.optim as optim\n\nloss_function = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr = 0.1)\n\nepoch_n = 100\nloss_list = []\nfor epoch in range(1, epoch_n + 1):\n \n optimizer.zero_grad()\n hidden = model.init_hidden()\n loss = 0\n \n for input, label in zip(inputs, labels):\n output, hidden = model(input, hidden)\n \n val, idx = output.max(1)\n if epoch % 10 == 0:\n print(idx2char[idx.data[0]], end = '')\n \n loss += loss_function(output, label)\n loss.backward()\n optimizer.step()\n if epoch % 10 == 0:\n print(' Epoch : %d/100, Loss : %1.3f' %(epoch, loss))\n loss_list.append(loss)",
"ehllll Epoch : 10/100, Loss : 7.883\nihelll Epoch : 20/100, Loss : 6.278\nihello Epoch : 30/100, Loss : 5.075\nihello Epoch : 40/100, Loss : 4.319\nihello Epoch : 50/100, Loss : 3.864\nihello Epoch : 60/100, Loss : 3.590\nihello Epoch : 70/100, Loss : 3.418\nihello Epoch : 80/100, Loss : 3.305\nihello Epoch : 90/100, Loss : 3.226\nihello Epoch : 100/100, Loss : 3.168\n"
]
],
[
[
"# Loss 시각화",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.plot(loss_list)",
"_____no_output_____"
]
],
[
[
"# 한글 버젼\n- \"내가 사랑하는 사람\"에서 마지막 '사람' 맞추기",
"_____no_output_____"
]
],
[
[
"from konlpy.tag import Okt\nimport torch\n\n# POS 태깅 함수\nokt = Okt()\n\n# POS 태깅 적용\nmy_string = '내가 사랑하는 사람'\ntokens = okt.pos(my_string)\n\n# 사용된 모든 글자 모음\nletters = [val for val, tag in tokens] + [' ']\nprint(letters)\n\n# 글자를 Tensor로 변환하는 사전 생성\nletter2tensor = {}\nfor i, val in enumerate(letters):\n tensor = torch.zeros(1, len(letters))\n tensor[0][i] = 1\n letter2tensor[val] = tensor\n\nletter2tensor",
"['내', '가', '사랑', '하는', '사람', ' ']\n"
],
[
"x_data = ['내', '가', ' ', '사랑', '하는', ' '] # 입력값\ny_data = [1, 5, 2, 3, 5, 4] # 출력값, letters의 index값으로 label 생성(CorssEntropyLoss 함수 쓸 때 index값으로 넣어줌)\n\nlabels = torch.LongTensor(y_data).view(6, 1)\ninputs = torch.zeros(len(x_data), len(letters)) # x_data를 Tensor로 변환(위에 만든 사전 이용해서)\nfor i, val in enumerate(x_data):\n inputs[i] = letter2tensor[val]\n\nprint(inputs)\nprint(labels)",
"tensor([[1., 0., 0., 0., 0., 0.],\n [0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 1.],\n [0., 0., 1., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0.],\n [0., 0., 0., 0., 0., 1.]])\ntensor([[1],\n [5],\n [2],\n [3],\n [5],\n [4]])\n"
],
[
"import torch.nn as nn\n\nclass_n = 6 # 분류되는 개수, 우리가 가지고 있는 letters의 개수가 6개이기 때문에 class는 6(6개 중 하나로 인식돼야하니까)\ninput_size = 6 # input의 길이, inputs = (1 X 1 X 6)\nhidden_size = 6 # hidden layer의 size\nbatch_size = 1 \nseq_n = 1\nlayer_n = 1\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n \n self.rnn = nn.RNN(input_size = input_size, hidden_size = hidden_size,\n batch_first = True)\n \n def forward(self, x, hidden):\n \n x = x.view(batch_size, seq_n, input_size)\n \n output, hidden = self.rnn(x, hidden)\n \n return output.view(-1, class_n), hidden\n \n def init_hidden(self):\n \n return torch.zeros(layer_n, batch_size, hidden_size)\n \nmodel = Model()\nprint(model) ",
"Model(\n (rnn): RNN(6, 6, batch_first=True)\n)\n"
],
[
"import torch.optim as optim\n\nloss_function = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr = 0.1)\n\nepoch_n = 30\n\nfor epoch in range(epoch_n):\n optimizer.zero_grad()\n hidden = model.init_hidden()\n loss = 0\n \n for input, label in zip(inputs, labels):\n output, hidden = model(input, hidden)\n loss += loss_function(output, label)\n \n val, idx = output.max(1)\n print(letters[idx.data[0]], end = '')\n \n loss.backward()\n optimizer.step()\n \n print('\\tEpoch ; %d/100, Loss : %1.3f' %(epoch, loss))\n loss = 0",
"가가 가가가\tEpoch ; 0/100, Loss : 11.046\n가 \tEpoch ; 1/100, Loss : 9.934\n가 \tEpoch ; 2/100, Loss : 9.249\n가 \tEpoch ; 3/100, Loss : 8.801\n가 \tEpoch ; 4/100, Loss : 8.479\n가 \tEpoch ; 5/100, Loss : 8.221\n가 \tEpoch ; 6/100, Loss : 7.995\n가 하는 \tEpoch ; 7/100, Loss : 7.785\n가 하는 \tEpoch ; 8/100, Loss : 7.583\n가 사랑하는 사람\tEpoch ; 9/100, Loss : 7.385\n가 사랑하는 사람\tEpoch ; 10/100, Loss : 7.192\n가 사랑하는 사람\tEpoch ; 11/100, Loss : 7.005\n가 사랑하는 사람\tEpoch ; 12/100, Loss : 6.825\n가 사랑하는 사람\tEpoch ; 13/100, Loss : 6.655\n가 사랑하는 사람\tEpoch ; 14/100, Loss : 6.495\n가 사랑하는 사람\tEpoch ; 15/100, Loss : 6.346\n가 사랑하는 사람\tEpoch ; 16/100, Loss : 6.207\n가 사랑하는 사람\tEpoch ; 17/100, Loss : 6.079\n가 사랑하는 사람\tEpoch ; 18/100, Loss : 5.961\n가 사랑하는 사람\tEpoch ; 19/100, Loss : 5.850\n가 사랑하는 사람\tEpoch ; 20/100, Loss : 5.748\n가 사랑하는 사람\tEpoch ; 21/100, Loss : 5.652\n가 사랑하는 사람\tEpoch ; 22/100, Loss : 5.562\n가 사랑하는 사람\tEpoch ; 23/100, Loss : 5.477\n가 사랑하는 사람\tEpoch ; 24/100, Loss : 5.397\n가 사랑하는 사람\tEpoch ; 25/100, Loss : 5.321\n가 사랑하는 사람\tEpoch ; 26/100, Loss : 5.249\n가 사랑하는 사람\tEpoch ; 27/100, Loss : 5.180\n가 사랑하는 사람\tEpoch ; 28/100, Loss : 5.114\n가 사랑하는 사람\tEpoch ; 29/100, Loss : 5.051\n"
]
],
[
[
"# 새로 배운 것\n\n## 1. nn.rnn(batch_first = True)\n - Pytorch Document를 보면 inputs의 shape는 (batch_size, seq_n, input_size)로 되어있다.\n - 하지만 batch_size을 먼저 쓰고 싶다면 batch_first = True 옵션을 추가해주면 된다.\n - nn.rnn(input_size, hidden_size, batch_fisrt = True) -> inputs의 shape = (batch_size, seq_n, input_size)\n - 참고 : https://github.com/yunjey/pytorch-tutorial/issues/122",
"_____no_output_____"
],
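[
"# A minimal sketch (added for illustration, not in the original notebook):\n# with batch_first=True, nn.RNN expects input shaped (batch, seq_len, input_size);\n# with the default batch_first=False it expects (seq_len, batch, input_size).\nimport torch\nimport torch.nn as nn\n\nrnn_bf = nn.RNN(input_size=4, hidden_size=2, batch_first=True)\nx = torch.randn(3, 5, 4)            # (batch=3, seq_len=5, input_size=4)\nout, h = rnn_bf(x)\nprint(out.size(), h.size())         # torch.Size([3, 5, 2]) torch.Size([1, 3, 2])",
"_____no_output_____"
],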
[
"# 오류\n\n1. untimeError: invalid argument 2: size '[1 x 1 x 6]' is invalid for input with 4 elements at ..\\aten\\src\\TH\\THStorage.cpp:84\n - output size와 hidden size의 크기가 같아야 하나? 그런 것 같다.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a1184ecf6862f64457f809bf7fb0fc9d8a8e251
| 2,206 |
ipynb
|
Jupyter Notebook
|
is_vs_==.ipynb
|
MaiaNgo/python-zerotomastery
|
6b37021af531b9adc029f1dd20b4aa0be3c6a800
|
[
"Apache-2.0"
] | null | null | null |
is_vs_==.ipynb
|
MaiaNgo/python-zerotomastery
|
6b37021af531b9adc029f1dd20b4aa0be3c6a800
|
[
"Apache-2.0"
] | null | null | null |
is_vs_==.ipynb
|
MaiaNgo/python-zerotomastery
|
6b37021af531b9adc029f1dd20b4aa0be3c6a800
|
[
"Apache-2.0"
] | null | null | null | 17.648 | 50 | 0.443336 |
[
[
[
"## is VS == ",
"_____no_output_____"
]
],
[
[
"print(True == 1)\nprint('' == 1)\nprint([] == 1)\nprint(10 == 10.0)\nprint([] == [])\nprint([1,2,3] == [1,2,3])",
"True\nFalse\nFalse\nTrue\nTrue\nTrue\n"
],
[
"print(True is 1)\nprint('' is 1)\nprint([] is 1)\nprint(10 is 10.0)\nprint([] is [])\nprint([1,2,3] is [1,2,3])",
"False\nFalse\nFalse\nFalse\nFalse\nFalse\n"
],
[
"print(True is True)\nprint('' is '')\nprint([] is [])\nprint(10 is 10)\nprint([] is [])\nprint([1,2,3] is [1,2,3])",
"True\nTrue\nFalse\nTrue\nFalse\nFalse\n"
],
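[
"# Added illustration (not in the original notebook): `is` tests object identity,\n# i.e. whether two names point to the same object (same id()), while `==` tests\n# value equality. CPython interns small integers, which is why `10 is 10` printed\n# True above, whereas every `[]` literal builds a fresh list, so `[] is []` is False.\nx = []\ny = []\nprint(id(x) == id(y))   # False: two distinct list objects\nprint(x is y)           # False: the same identity check",
"_____no_output_____"
],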
[
"a = [1,2,3]\nb = [1,2,3]\nprint(a == b)\nprint(a is b)",
"True\nFalse\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a11856f17b146a9860d64bc4cad4c0444156982
| 163,718 |
ipynb
|
Jupyter Notebook
|
P1.ipynb
|
yuwang/CarND-LaneLines-P1
|
f550388b84de057abede9b73e5de951f5884b0ae
|
[
"MIT"
] | null | null | null |
P1.ipynb
|
yuwang/CarND-LaneLines-P1
|
f550388b84de057abede9b73e5de951f5884b0ae
|
[
"MIT"
] | null | null | null |
P1.ipynb
|
yuwang/CarND-LaneLines-P1
|
f550388b84de057abede9b73e5de951f5884b0ae
|
[
"MIT"
] | null | null | null | 196.069461 | 119,954 | 0.864596 |
[
[
[
"# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---",
"_____no_output_____"
],
[
"**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>",
"_____no_output_____"
],
[
"**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** ",
"_____no_output_____"
],
[
"## Import Packages",
"_____no_output_____"
]
],
[
[
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Read in an Image",
"_____no_output_____"
]
],
[
[
"#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')",
"This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n"
]
],
[
[
"## Ideas for Lane Detection Pipeline",
"_____no_output_____"
],
[
"**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images\n`cv2.cvtColor()` to grayscale or change color\n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**",
"_____no_output_____"
],
[
"## Helper Functions",
"_____no_output_____"
],
[
"Below are some helper functions to help get you started. They should look familiar from the lesson!",
"_____no_output_____"
]
],
[
[
"import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. \n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)",
"_____no_output_____"
]
],
[
[
"## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**",
"_____no_output_____"
]
],
[
[
"import os\nos.listdir(\"test_images/\")",
"_____no_output_____"
]
],
[
[
"## Build a Lane Finding Pipeline\n\n",
"_____no_output_____"
],
[
"Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.",
"_____no_output_____"
],
[
"## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**",
"_____no_output_____"
]
],
[
[
"# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n# importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport math\nimport os\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n%matplotlib inline\n\n######## Helper functions #######\ndef grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n '''Updated method to use cv2.addWeight() to overlay lines'''\n img = np.copy(img)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n if x1!=0 and x2!=0:\n cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)\n\n img = weighted_img(line_img, img, 1, 0.9, 0)\n \n return img\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n return cv2.addWeighted(initial_img, α, img, β, γ)\n\n# For debugging purpose only\ndef show_image(img, title=\"\"): \n plt.figure()\n plt.title(title)\n plt.imshow(img)\n\ndef single_lines(initial_img, lines):\n '''Given lines returned from hough transform, return a single left averaging the lines on the left side, and \n a single right line averaging the lines on the right side'''\n left_line_x = []\n left_line_y = []\n right_line_x = []\n right_line_y = []\n # sort the lines\n for line in lines:\n for x1,y1,x2,y2 in line:\n slope = (y2 - y1)/(x2 - x1)\n if math.fabs(slope) < 0.5:\n continue\n if slope <=0:\n left_line_x.extend([x1, x2])\n left_line_y.extend([y1, y2])\n else: \n right_line_x.extend([x1, x2])\n right_line_y.extend([y1, y2])\n\n # handle case where there is no lines. 
\n if not left_line_x:\n left_line_x.append(0)\n if not left_line_y:\n left_line_y.append(0)\n if not right_line_x:\n right_line_x.append(0)\n if not right_line_y:\n right_line_y.append(0)\n \n min_y = int(initial_img.shape[0] * (3/5)) # reasonable vison range\n max_y = int(initial_img.shape[0]) # bottom of the image\n\n fit_left = np.poly1d(np.polyfit(left_line_y, left_line_x, deg=1))\n left_x_start = int(fit_left(max_y)) if not math.isnan(fit_left(max_y)) else 0\n left_x_end = int(fit_left(min_y)) if not math.isnan(fit_left(min_y)) else 0\n \n fit_right = np.poly1d(np.polyfit(right_line_y, right_line_x, deg=1))\n right_x_start = int(fit_right(max_y)) if not math.isnan(fit_right(max_y)) else 0\n right_x_end = int(fit_right(min_y)) if not math.isnan(fit_right(min_y)) else 0\n \n return [[[left_x_start, max_y, left_x_end, min_y],\n [right_x_start, max_y, right_x_end, min_y]]]\n\n###### main functions #######\ndef process_image(image):\n gray_image = grayscale(image)\n \n kernel_size = 5\n blur_gray_image = gaussian_blur(gray_image, kernel_size)\n \n low_threshold = 100\n high_threshold = 200\n edges = canny(blur_gray_image, low_threshold, high_threshold)\n\n imshape = image.shape\n vertices = np.array([[(0,imshape[0]),(imshape[1]/2,imshape[0]/2),(imshape[1],imshape[0])]], dtype=np.int32)\n masked_edges = region_of_interest(edges, vertices)\n\n rho = 6\n theta = np.pi/60\n threshold = 160\n min_line_length = 40\n max_line_gap = 25\n lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)\n final_single_lines = single_lines(image, lines)\n\n combo = draw_lines(image, final_single_lines, thickness=8)\n return combo\n\ndef process_all_test_images():\n path = \"test_images/\"\n output_path_name = \"test_images_output/\"\n os.makedirs(output_path_name, exist_ok=True)\n \n for fileName in os.listdir(path):\n print(\"processing:\", fileName)\n combo = process_image(mpimg.imread(os.path.join(path, fileName)))\n show_image(combo, fileName)\n mpimg.imsave(output_path_name + fileName, combo)\n\nwhite_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n# clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)\n\nyellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)\n\nchallenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to 
try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)\n",
"_____no_output_____"
],
[
"# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML",
"_____no_output_____"
],
[
"def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n\n return result",
"_____no_output_____"
]
],
[
[
"Let's try the one with the solid white lane on the right first ...",
"_____no_output_____"
]
],
[
[
"white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)",
"_____no_output_____"
]
],
[
[
"Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.",
"_____no_output_____"
]
],
[
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))",
"_____no_output_____"
]
],
[
[
"## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**",
"_____no_output_____"
],
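[
"# Sketch of the averaging/extrapolation idea described above (illustrative only;\n# the pipeline cell's single_lines() already implements it for this submission):\n# 1) split the Hough segments by slope sign (left lane < 0, right lane > 0),\n# 2) fit one line per side with np.polyfit(y, x, deg=1),\n# 3) evaluate each fit at the image bottom and at the top of the region of interest.\ndef average_extrapolate(xs, ys, y_bottom, y_top):\n    fit = np.poly1d(np.polyfit(ys, xs, deg=1))  # x as a function of y\n    return int(fit(y_bottom)), int(fit(y_top))",
"_____no_output_____"
],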
[
"Now for the one with the solid yellow lane on the left. This one's more tricky!",
"_____no_output_____"
]
],
[
[
"yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)",
"_____no_output_____"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))",
"_____no_output_____"
]
],
[
[
"## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n",
"_____no_output_____"
],
[
"## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!",
"_____no_output_____"
]
],
[
[
"challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)",
"_____no_output_____"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
4a119205a6dcfcc35c1d117131944a70280eb6f8
| 3,646 |
ipynb
|
Jupyter Notebook
|
scripts/onnx/notebooks/classification/vgg13.ipynb
|
stephenkl/gluon-cv
|
e5b13e4e95ceb2f604c9b142a67a9b48a0aff407
|
[
"Apache-2.0"
] | 5,447 |
2018-04-25T18:02:51.000Z
|
2022-03-31T00:59:49.000Z
|
scripts/onnx/notebooks/classification/vgg13.ipynb
|
stephenkl/gluon-cv
|
e5b13e4e95ceb2f604c9b142a67a9b48a0aff407
|
[
"Apache-2.0"
] | 1,566 |
2018-04-25T21:14:04.000Z
|
2022-03-31T06:42:42.000Z
|
scripts/onnx/notebooks/classification/vgg13.ipynb
|
stephenkl/gluon-cv
|
e5b13e4e95ceb2f604c9b142a67a9b48a0aff407
|
[
"Apache-2.0"
] | 1,345 |
2018-04-25T18:44:13.000Z
|
2022-03-30T19:32:53.000Z
| 25.496503 | 249 | 0.53977 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a11948d7073d38f72160f68e322324322b635cd
| 92,977 |
ipynb
|
Jupyter Notebook
|
TD/SARSA Solution.ipynb
|
tarzan212/reinforcement-learning
|
843c2c4894dff2b7ac100e5fcaec06dbc282dfd0
|
[
"MIT"
] | 1 |
2019-03-12T23:42:54.000Z
|
2019-03-12T23:42:54.000Z
|
TD/SARSA Solution.ipynb
|
naifmeh/reinforcement-learning
|
843c2c4894dff2b7ac100e5fcaec06dbc282dfd0
|
[
"MIT"
] | null | null | null |
TD/SARSA Solution.ipynb
|
naifmeh/reinforcement-learning
|
843c2c4894dff2b7ac100e5fcaec06dbc282dfd0
|
[
"MIT"
] | 1 |
2018-10-14T14:36:31.000Z
|
2018-10-14T14:36:31.000Z
| 384.202479 | 31,608 | 0.919797 |
[
[
[
"# Algorithm used : \n\n",
"_____no_output_____"
]
],
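[
"(Added note: the \"Algorithm used\" cell above presumably displayed a pseudocode figure that is not preserved in this dump. For reference, the on-policy TD update that the sarsa() implementation below performs is:)\n\n$$Q(S_t, A_t) \\\\leftarrow Q(S_t, A_t) + \\\\alpha \\\\big[ R_{t+1} + \\\\gamma Q(S_{t+1}, A_{t+1}) - Q(S_t, A_t) \\\\big]$$",
"_____no_output_____"
]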
[
[
"%matplotlib inline\n\nimport gym\nimport itertools\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \n\nfrom collections import defaultdict\nfrom lib.envs.windy_gridworld import WindyGridworldEnv\nfrom lib import plotting\n\nmatplotlib.style.use('ggplot')",
"_____no_output_____"
],
[
"env = WindyGridworldEnv()",
"_____no_output_____"
],
[
"def make_epsilon_greedy_policy(Q, epsilon, nA):\n \"\"\"\n Creates an epsilon-greedy policy based on a given Q-function and epsilon.\n \n Args:\n Q: A dictionary that maps from state -> action-values.\n Each value is a numpy array of length nA (see below)\n epsilon: The probability to select a random action . float between 0 and 1.\n nA: Number of actions in the environment.\n \n Returns:\n A function that takes the observation as an argument and returns\n the probabilities for each action in the form of a numpy array of length nA.\n \n \"\"\"\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"_____no_output_____"
],
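[
"# Hypothetical sanity check (added for illustration, not part of the original\n# solution): with epsilon=0.1 and nA=4, each action gets 0.1/4 = 0.025 base\n# probability and the greedy action gets an extra 0.9, i.e. 0.925.\nQ_demo = defaultdict(lambda: np.zeros(4))\nQ_demo[0][2] = 1.0                          # make action 2 greedy in state 0\npolicy_demo = make_epsilon_greedy_policy(Q_demo, epsilon=0.1, nA=4)\nprint(policy_demo(0))                       # -> [0.025 0.025 0.925 0.025]",
"_____no_output_____"
],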
[
"def sarsa(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):\n \"\"\"\n SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.\n \n Args:\n env: OpenAI environment.\n num_episodes: Number of episodes to run for.\n discount_factor: Gamma discount factor.\n alpha: TD learning rate.\n epsilon: Chance the sample a random action. Float betwen 0 and 1.\n \n Returns:\n A tuple (Q, stats).\n Q is the optimal action-value function, a dictionary mapping state -> action values.\n stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n \"\"\"\n \n # The final action-value function.\n # A nested dictionary that maps state -> (action -> action-value).\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n # The policy we're following\n policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)\n \n for i_episode in range(num_episodes):\n # Print out which episode we're on, useful for debugging.\n if (i_episode + 1) % 100 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode + 1, num_episodes), end=\"\")\n sys.stdout.flush()\n \n # Reset the environment and pick the first action\n state = env.reset()\n #Chaque action est modelisé par un numero, on va mettre une prob\n #qui suit le epsilon greedy pour choisir l'action a prendre selon \n #l'etat.\n action_probs = policy(state)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n \n # One step in the environment\n for t in itertools.count():\n # Take a step\n next_state, reward, done, _ = env.step(action)\n \n # Pick the next action\n next_action_probs = policy(next_state)\n next_action = np.random.choice(np.arange(len(next_action_probs)), p=next_action_probs)\n \n # Update statistics\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n \n # TD Update\n td_target = reward + discount_factor * Q[next_state][next_action]\n td_delta = td_target - Q[state][action]\n Q[state][action] += alpha * td_delta\n \n if done:\n break\n \n action = next_action\n state = next_state \n \n return Q, stats",
"_____no_output_____"
],
[
"Q, stats = sarsa(env, 200)",
"Episode 200/200."
],
[
"plotting.plot_episode_stats(stats)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a119680f6324e38cd55e07cf8297e864917efce
| 22,176 |
ipynb
|
Jupyter Notebook
|
docs/notebooks/01_references.ipynb
|
simbilod/gdsfactory
|
4d76db32674c3edb4d16260e3177ee29ef9ce11d
|
[
"MIT"
] | null | null | null |
docs/notebooks/01_references.ipynb
|
simbilod/gdsfactory
|
4d76db32674c3edb4d16260e3177ee29ef9ce11d
|
[
"MIT"
] | null | null | null |
docs/notebooks/01_references.ipynb
|
simbilod/gdsfactory
|
4d76db32674c3edb4d16260e3177ee29ef9ce11d
|
[
"MIT"
] | null | null | null | 25.876313 | 520 | 0.551136 |
[
[
[
"# References and ports\n\nGDS allows defining the component once in memory and reference to that structure in other components.",
"_____no_output_____"
],
[
"As you build complex components you can include references to other simpler components. Adding a reference is like having a pointer to a component.\n\nThe GDSII specification allows the use of references, and similarly gdsfactory uses them (with the `add_ref()` function). So what is a reference? Simply put: **A reference does not contain any geometry. It only *points* to an existing geometry**.\n\nSay you have a ridiculously large polygon with 100 billion vertices that you call BigPolygon. It's huge, and you need to use it in your design 250 times. Well, a single copy of BigPolygon takes up 1MB of memory, so you don't want to make 250 copies of it. You can instead *references* the polygon 250 times. Each reference only uses a few bytes of memory -- it only needs to know the memory address of BigPolygon and a few other things. This way, you can keep one copy of BigPolygon and use it again and again.\n\nLet's start by making a blank geometry (`Component`) then adding a single polygon to it.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport gdsfactory as gf\n\ngf.config.set_plot_options(show_subports=False)\n\n# Create a blank Component\np = gf.Component(\"component_with_polygon\")\n\n# Add a polygon\nxpts = [0, 0, 5, 6, 9, 12]\nypts = [0, 1, 1, 2, 2, 0]\np.add_polygon([xpts, ypts], layer=(2, 0))\n\n# plot the Component with the polygon in it\np",
"_____no_output_____"
]
],
[
[
"Now, you want to reuse this polygon repeatedly without creating multiple copies of it.\n\nTo do so, you need to make a second blank `Component`, this time called `c`.\n\nIn this new Component you *reference* our Component `p` which contains our polygon.",
"_____no_output_____"
]
],
[
[
"c = gf.Component(\"Component_with_references\") # Create a new blank Component\npoly_ref = c.add_ref(p) # Reference the Component \"p\" that has the polygon in it\nc",
"_____no_output_____"
]
],
[
[
"you just made a copy of your polygon -- but remember, you didn't actually\nmake a second polygon, you just made a reference (aka pointer) to the original\npolygon. Let's add two more references to `c`:",
"_____no_output_____"
]
],
[
[
"poly_ref2 = c.add_ref(p) # Reference the Component \"p\" that has the polygon in it\npoly_ref3 = c.add_ref(p) # Reference the Component \"p\" that has the polygon in it\nc",
"_____no_output_____"
]
],
[
[
"Now you have 3x polygons all on top of each other. Again, this would appear\nuseless, except that you can manipulate each reference indepedently. Notice that\nwhen you called `c.add_ref(p)` above, we saved the result to a new variable each\ntime (`poly_ref`, `poly_ref2`, and `poly_ref3`)? You can use those variables to\nreposition the references.",
"_____no_output_____"
]
],
[
[
"poly_ref2.rotate(15) # Rotate the 2nd reference we made 15 degrees\npoly_ref3.rotate(30) # Rotate the 3rd reference we made 30 degrees\nc",
"_____no_output_____"
]
],
[
[
"Now you're getting somewhere! You've only had to make the polygon once, but you're\nable to reuse it as many times as you want.\n\n## Modifying the referenced geometry\n\nWhat happens when you change the original geometry that the reference points to? In your case, your references in\n`c` all point to the Component `p` that with the original polygon. Let's try\nadding a second polygon to `p`.\n\nFirst you add the second polygon and make sure `P` looks like you expect:",
"_____no_output_____"
]
],
[
[
"# Add a 2nd polygon to \"p\"\nxpts = [14, 14, 16, 16]\nypts = [0, 2, 2, 0]\np.add_polygon([xpts, ypts], layer=(1, 0))\np",
"_____no_output_____"
]
],
[
[
"That looks good. Now let's find out what happened to `c` that contains the\nthree references. Keep in mind that you have not modified `c` or executed any\nfunctions/operations on `c` -- all you have done is modify `p`.",
"_____no_output_____"
]
],
[
[
"c",
"_____no_output_____"
]
],
[
[
" **When you modify the original geometry, all of the\nreferences automatically reflect the modifications.** This is very powerful,\nbecause you can use this to make very complicated designs from relatively simple\nelements in a computation- and memory-efficienct way.\n\nLet's try making references a level deeper by referencing `c`. Note here we use\nthe `<<` operator to add the references -- this is just shorthand, and is\nexactly equivalent to using `add_ref()`",
"_____no_output_____"
]
],
[
[
"c2 = gf.Component() # Create a new blank Component\nd_ref1 = c2.add_ref(c) # Reference the Component \"c\" that 3 references in it\nd_ref2 = c2 << c # Use the \"<<\" operator to create a 2nd reference to c\nd_ref3 = c2 << c # Use the \"<<\" operator to create a 3rd reference to c\n\nd_ref1.move([20, 0])\nd_ref2.move([40, 0])\n\nc2",
"_____no_output_____"
]
],
[
[
"As you've seen you have two ways to add a reference to our component:\n\n1. create the reference and add it to the component",
"_____no_output_____"
]
],
[
[
"c = gf.Component(\"reference_sample\")\nw = gf.components.straight(width=0.6)\nwr = w.ref()\nc.add(wr)\nc",
"_____no_output_____"
]
],
[
[
"2. or do it in a single line",
"_____no_output_____"
]
],
[
[
"c = gf.Component(\"reference_sample_shorter_syntax\")\nwr = c << gf.components.straight(width=0.6)\nc",
"_____no_output_____"
]
],
[
[
"in both cases you can move the reference `wr` after created",
"_____no_output_____"
]
],
[
[
"import gdsfactory as gf\n\nc = gf.Component(\"two_references\")\nwr1 = c << gf.components.straight(width=0.6)\nwr2 = c << gf.components.straight(width=0.6)\nwr2.movey(10)\nc.add_ports(wr1.get_ports_list(), prefix=\"top_\")\nc.add_ports(wr2.get_ports_list(), prefix=\"bot_\")",
"_____no_output_____"
],
[
"c.ports",
"_____no_output_____"
]
],
[
[
"You can also auto_rename ports using gdsfactory default convention, where ports are numbered clockwise starting from the bottom left",
"_____no_output_____"
]
],
[
[
"c.auto_rename_ports()",
"_____no_output_____"
],
[
"c.ports",
"_____no_output_____"
],
[
"c",
"_____no_output_____"
]
],
[
[
"## Arrays of references\n\nIn GDS, there's a type of structure called a \"CellArray\" which takes a cell and repeats it NxM times on a fixed grid spacing. For convenience, `Component` includes this functionality with the add_array() function.\nNote that CellArrays are not compatible with ports (since there is no way to access/modify individual elements in a GDS cellarray)\n\ngdsfactory also provides with more flexible arrangement options if desired, see for example `grid()` and `packer()`.\n\nAs well as `gf.components.array`\n\nLet's make a new Component and put a big array of our Component `c` in it:",
"_____no_output_____"
]
],
[
[
"c3 = gf.Component() # Create a new blank Component\naref = c3.add_array(\n c, columns=6, rows=3, spacing=[20, 15]\n) # Reference the Component \"c\" 3 references in it with a 3 rows, 6 columns array\nc3",
"_____no_output_____"
]
],
[
[
"CellArrays don't have ports and there is no way to access/modify individual elements in a GDS cellarray.\n\ngdsfactory provides you with similar functions in `gf.components.array` and `gf.components.array_2d`",
"_____no_output_____"
]
],
[
[
"c4 = gf.Component() # Create a new blank Component\naref = c4 << gf.components.array(component=c, columns=3, rows=2)\nc4.add_ports(aref.get_ports_list())\nc4",
"_____no_output_____"
],
[
"gf.components.array?",
"_____no_output_____"
]
],
[
[
"You can also create an array of references for periodic structures. Lets create a [Distributed Bragg Reflector](https://picwriter.readthedocs.io/en/latest/components/dbr.html)\n",
"_____no_output_____"
]
],
[
[
"import gdsfactory as gf\n\n\[email protected]\ndef dbr_period(w1=0.5, w2=0.6, l1=0.2, l2=0.4, straight=gf.components.straight):\n \"\"\"Return one DBR period.\"\"\"\n c = gf.Component()\n r1 = c << straight(length=l1, width=w1)\n r2 = c << straight(length=l2, width=w2)\n r2.connect(port=\"o1\", destination=r1.ports[\"o2\"])\n c.add_port(\"o1\", port=r1.ports[\"o1\"])\n c.add_port(\"o2\", port=r2.ports[\"o2\"])\n return c\n\n\nl1 = 0.2\nl2 = 0.4\nn = 3\nperiod = dbr_period(l1=l1, l2=l2)\nperiod",
"_____no_output_____"
],
[
"dbr = gf.Component(\"DBR\")\ndbr.add_array(period, columns=n, rows=1, spacing=(l1 + l2, 100))\ndbr",
"_____no_output_____"
]
],
[
[
"Finally we need to add ports to the new component",
"_____no_output_____"
]
],
[
[
"p0 = dbr.add_port(\"o1\", port=period.ports[\"o1\"])\np1 = dbr.add_port(\"o2\", port=period.ports[\"o2\"])\n\np1.midpoint = [(l1 + l2) * n, 0]\ndbr",
"_____no_output_____"
]
],
[
[
"## Connect references\n\nWe have seen that once you create a reference you can manipulate the reference to move it to a location. Here we are going to connect that reference to a port. Remeber that we follow that a certain reference `source` connects to a `destination` port",
"_____no_output_____"
]
],
[
[
"bend = gf.components.bend_circular()\nbend",
"_____no_output_____"
],
[
"c = gf.Component(\"sample_reference_connect\")\n\nmmi = c << gf.components.mmi1x2()\nb = c << gf.components.bend_circular()\nb.connect(\"o1\", destination=mmi.ports[\"o2\"])\n\nc.add_port(\"o1\", port=mmi.ports[\"o1\"])\nc.add_port(\"o2\", port=b.ports[\"o2\"])\nc.add_port(\"o3\", port=mmi.ports[\"o3\"])\nc",
"_____no_output_____"
]
],
[
[
"## Port naming\n\nYou have the freedom to name the ports as you want, and you can use `gf.port.auto_rename_ports(prefix='o')` to rename them later on.\n\nHere is the default naming convention.\n\nPorts are numbered clock-wise starting from the bottom left corner\n\nOptical ports have `o` prefix and Electrical ports `e` prefix\n\nThe port naming comes in most cases from the `gdsfactory.cross_section`. For example\n\n- `gdsfactory.cross_section.strip` has ports `o1` for input and `o2` for output\n- `gdsfactory.cross_section.metal1` has ports `e1` for input and `e2` for output",
"_____no_output_____"
]
],
[
[
"import gdsfactory as gf\n\nsize = 4\nc = gf.components.nxn(west=2, south=2, north=2, east=2, xsize=size, ysize=size)\nc",
"_____no_output_____"
],
[
"c = gf.components.straight_heater_metal(length=30)\nc",
"_____no_output_____"
],
[
"c.ports",
"_____no_output_____"
]
],
[
[
"You can get the optical ports by `layer`",
"_____no_output_____"
]
],
[
[
"c.get_ports_dict(layer=(1, 0))",
"_____no_output_____"
]
],
[
[
"or by `width`",
"_____no_output_____"
]
],
[
[
"c.get_ports_dict(width=0.5)",
"_____no_output_____"
],
[
"c0 = gf.components.straight_heater_metal()\nc0.ports",
"_____no_output_____"
],
[
"c1 = c0.copy()\nc1.auto_rename_ports_layer_orientation()\nc1.ports",
"_____no_output_____"
],
[
"c2 = c0.copy()\nc2.auto_rename_ports()\nc2.ports",
"_____no_output_____"
]
],
[
[
"You can also rename them with a different port naming convention\n\n- prefix: add `e` for electrical `o` for optical\n- clockwise\n- counter-clockwise\n- orientation `E` East, `W` West, `N` North, `S` South\n\n\nHere is the default one we use (clockwise starting from bottom left west facing port)\n\n```\n 3 4\n |___|_\n 2 -| |- 5\n | |\n 1 -|______|- 6\n | |\n 8 7\n\n```",
"_____no_output_____"
]
],
[
[
"import gdsfactory as gf\n\nc = gf.Component(\"demo_ports\")\nnxn = gf.components.nxn(west=2, north=2, east=2, south=2, xsize=4, ysize=4)\nref = c.add_ref(nxn)\nc.add_ports(ref.ports)\nc",
"_____no_output_____"
],
[
"ref.get_ports_list() # by default returns ports clockwise starting from bottom left west facing port",
"_____no_output_____"
],
[
"c.auto_rename_ports()\nc",
"_____no_output_____"
]
],
[
[
"You can also get the ports counter-clockwise\n\n```\n 4 3\n |___|_\n 5 -| |- 2\n | |\n 6 -|______|- 1\n | |\n 7 8\n\n```",
"_____no_output_____"
]
],
[
[
"c.auto_rename_ports_counter_clockwise()\nc",
"_____no_output_____"
],
[
"c.get_ports_list(clockwise=False)",
"_____no_output_____"
],
[
"c.ports_layer",
"_____no_output_____"
],
[
"c.port_by_orientation_cw(\"W0\")",
"_____no_output_____"
],
[
"c.port_by_orientation_ccw(\"W1\")",
"_____no_output_____"
]
],
[
[
"Lets extend the East facing ports (orientation = 0 deg)",
"_____no_output_____"
]
],
[
[
"import gdsfactory as gf\n\nnxn = gf.components.nxn(\n west=2,\n north=2,\n east=2,\n south=2,\n cross_section=gf.cross_section.strip,\n xsize=4,\n ysize=4,\n)\nc = gf.components.extension.extend_ports(component=nxn, orientation=0)\nc",
"_____no_output_____"
],
[
"c.ports",
"_____no_output_____"
]
],
[
[
"## pins\n\nYou can add pins (port markers) to each port. Each foundry PDK does this differently, so gdsfactory supports all of them.\n\n- square with port inside the component\n- square centered (half inside, half outside component)\n- triangular\n- path (SiEPIC)\n\n\nby default Component.show() will add triangular pins, so you can see the direction of the port in Klayout.",
"_____no_output_____"
]
],
[
[
"gf.components.mmi1x2(decorator=gf.add_pins.add_pins)",
"_____no_output_____"
],
[
"gf.components.mmi1x2(decorator=gf.add_pins.add_pins_triangle)",
"_____no_output_____"
]
],
[
[
"## component_sequence\n\nWhen you have repetitive connections you can describe the connectivity as an ASCII map",
"_____no_output_____"
]
],
[
[
"import gdsfactory as gf",
"_____no_output_____"
],
[
"bend180 = gf.components.bend_circular180()\nwg_pin = gf.components.straight_pin(length=40)\nwg = gf.components.straight()\n\n# Define a map between symbols and (component, input port, output port)\nsymbol_to_component = {\n \"D\": (bend180, \"o1\", \"o2\"),\n \"C\": (bend180, \"o2\", \"o1\"),\n \"P\": (wg_pin, \"o1\", \"o2\"),\n \"-\": (wg, \"o1\", \"o2\"),\n}\n\n# Generate a sequence\n# This is simply a chain of characters. Each of them represents a component\n# with a given input and and a given output\n\nsequence = \"DC-P-P-P-P-CD\"\ncomponent = gf.components.component_sequence(\n sequence=sequence, symbol_to_component=symbol_to_component\n)\ncomponent.name = \"component_sequence\"\ncomponent",
"_____no_output_____"
]
],
[
[
"As the sequence is defined as a string you can use the string operations to easily build complex sequences",
"_____no_output_____"
],
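[
"# Illustrative sketch (this cell is not in the original notebook): since the\n# sequence is a plain Python string, ordinary string operations compose it.\nsequence2 = \"DC\" + \"-P\" * 6 + \"-CD\"  # a longer chain with 6 pin sections\ncomponent2 = gf.components.component_sequence(\n    sequence=sequence2, symbol_to_component=symbol_to_component\n)\ncomponent2.name = \"component_sequence_longer\"\ncomponent2",
"_____no_output_____"
]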
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a11a569ed5493d9a51e1f5819f1cad97363e314
| 188,316 |
ipynb
|
Jupyter Notebook
|
Legacy_code/Jupyter_legacy/FullPreprocessing-regression.ipynb
|
YohannFaure/recnn
|
9d7e9df9c3bdcdac3369f20178f98f59a05773a6
|
[
"BSD-3-Clause"
] | null | null | null |
Legacy_code/Jupyter_legacy/FullPreprocessing-regression.ipynb
|
YohannFaure/recnn
|
9d7e9df9c3bdcdac3369f20178f98f59a05773a6
|
[
"BSD-3-Clause"
] | 1 |
2018-06-26T12:42:06.000Z
|
2018-06-26T12:42:06.000Z
|
Legacy_code/Jupyter_legacy/FullPreprocessing-regression.ipynb
|
YohannFaure/recnn
|
9d7e9df9c3bdcdac3369f20178f98f59a05773a6
|
[
"BSD-3-Clause"
] | 1 |
2018-06-15T13:34:49.000Z
|
2018-06-15T13:34:49.000Z
| 64.824785 | 1,210 | 0.390609 |
[
[
[
"# Processing training data from raw files",
"_____no_output_____"
],
[
"# Notebook n°1",
"_____no_output_____"
]
],
[
[
"basepath = '/data/conda/recnn/data'",
"_____no_output_____"
],
[
"### Importing usefull packages ###\n%load_ext cython\nimport sys\nimport copy\nimport numpy as np\nimport multiprocessing as mp\nfrom functools import partial\nfrom rootpy.vector import LorentzVector\nsys.path.append(\"..\")\n\n### Importing preprocessing functions ###\nfrom recnn.preprocessing import _pt\nfrom recnn.preprocessing import randomize\nfrom recnn.preprocessing import multithreadmap\nfrom recnn.preprocessing import sequentialize_by_pt\n",
"The cython extension is already loaded. To reload it, use:\n %reload_ext cython\n"
],
[
"%%cython -f -+ -I/usr/local/include --link-args=-Wl,-rpath,/usr/local/lib -lm -L/usr/local/lib -lfastjettools -lfastjet -lfastjetplugins -lsiscone_spherical -lsiscone\nimport numpy as np\ncimport numpy as np\nnp.import_array()\n\nfrom libcpp.pair cimport pair\nfrom libcpp.vector cimport vector\n\ncdef extern from \"/home/yohann/Desktop/stage/recnn/notebooks/fj.cc\":\n void fj(vector[double]& a, \n vector[vector[int]]& trees, \n vector[vector[double]]& contents, \n vector[double]& masses, \n vector[double]& pts, \n double R, int jet_algorithm)\n \ncpdef cluster(np.ndarray[np.double_t, ndim=2, mode=\"c\"] a, \n R=1.0, jet_algorithm=0):\n cdef vector[double] v\n cdef vector[vector[int]] trees\n cdef vector[vector[double]] contents\n cdef vector[double] masses\n cdef vector[double] pts \n for value in a.ravel():\n v.push_back(value)\n \n fj(v, trees, contents, masses, pts, R=R, jet_algorithm=jet_algorithm)\n jets = []\n \n for tree, content, mass, pt in zip(trees, contents, masses, pts):\n tree = np.array(tree).reshape(-1, 2)\n content = np.array(content).reshape(-1, 4)\n jets.append((tree, content, mass, pt))\n \n return jets",
"cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\nIn file included from /opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h:1816:0,\n from /opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h:18,\n from /opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h:4,\n from /home/yohann/.cache/ipython/cython/_cython_magic_40b19f283acd648836ea3ae4f6e83c85.cpp:535:\n/opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h:15:2: warning: #warning \"Using deprecated NumPy API, disable it by \" \"#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\" [-Wcpp]\n #warning \"Using deprecated NumPy API, disable it by \" \\\n ^\n"
],
[
"def cast(event, soft=0):\n \"\"\"\n Converts an envent into a list of p4, usable by fastjet\n \"\"\"\n a = np.zeros((len(event)+soft, 4))\n for i, p in enumerate(event):\n a[i, 3] = p[0]\n a[i, 0] = p[1]\n a[i, 1] = p[2]\n a[i, 2] = p[3]\n \n ### Robustness check : sprinkling soft particles ###\n for i in range(len(event), len(event)+soft):\n v = LorentzVector()\n v.set_pt_eta_phi_m(10e-5, np.random.rand() * 10 - 5, np.random.rand() * 2 * np.pi, 0.0)\n a[i, 0] = v.px\n a[i, 1] = v.py\n a[i, 2] = v.pz\n a[i, 3] = v.e\n \n return(a)\n",
"_____no_output_____"
],
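[
"# Hypothetical smoke test (added for illustration, not in the original notebook):\n# build a tiny fake event of (E, px, py, pz) rows, cast it to the (px, py, pz, E)\n# layout fastjet expects, and cluster with anti-kt (jet_algorithm=1), as the\n# jet-dictionary builder in the next cell does.\nfake_event = np.array([[100., 10., 0., 99.],\n                       [ 80., -8., 1., 79.]])\njets = cluster(cast(fake_event), R=1.0, jet_algorithm=1)\ntree, content, mass, pt = jets[0]\nprint(tree.shape, content.shape, mass, pt)",
"_____no_output_____"
],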
[
"def ff(e):\n \"\"\"\n create the Jet dictionary stucture from fastjet\n \"\"\"\n ye=e[-1]\n e=e[0]\n t=cast(e, soft=0)\n tree, content, mass, pt = cluster(t, jet_algorithm=1)[0] # dump highest pt jet only\n jet = {}\n \n jet[\"root_id\"] = 0\n jet[\"tree\"] = tree # tree structure, tree[i] constains [left son, right son] of subjet i\n jet[\"content\"] = content # list of every p4 of every subjet used to create the full jet\n jet[\"mass\"] = mass\n jet[\"pt\"] = pt\n jet[\"energy\"] = content[0, 3]\n\n px = content[0, 0]\n py = content[0, 1]\n pz = content[0, 2]\n p = (content[0, 0:3] ** 2).sum() ** 0.5\n eta = 0.5 * (np.log(p + pz) - np.log(p - pz))\n phi = np.arctan2(py, px)\n \n jet[\"eta\"] = eta\n jet[\"phi\"] = phi\n jet[\"genpt\"] = ye\n \n return(jet)",
"_____no_output_____"
],
[
"### Loading and \"jetting\" data with ff ###\n\nsignallist = ['/BackgroundJEC.npy']\n\n\nsignal = []\n\nfor path_file in signallist:\n events = np.array(np.load(basepath+path_file))\n signal = signal + multithreadmap(ff, events)",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
],
[
"signal[0]",
"_____no_output_____"
]
],
[
[
"# Notebook n°2",
"_____no_output_____"
],
[
"## W vs QCD",
"_____no_output_____"
]
],
[
[
"### creating files to be preprocessed ###\ndef extractgenpt(e):\n return(e[\"genpt\"])\n\nprint(len(signal))\n\nX = np.array(signal)\ny = np.array(multithreadmap(extractgenpt,X))",
"535216\n"
]
],
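[
[
"Hedged check (not part of the original notebook): a quick look at the regression target `y` (the generated pT stored as `genpt` by `ff`), to confirm shapes and range before preprocessing.",
"_____no_output_____"
],
[
"### Hedged sanity check on the regression target ###\nprint(X.shape, y.shape)\nprint(y.min(), y.mean(), y.max())",
"_____no_output_____"
]
],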
[
[
"# Notebook n°3",
"_____no_output_____"
],
[
"### preprocessing function",
"_____no_output_____"
]
],
[
[
"%%cython -f -+ -I/usr/local/include --link-args=-Wl,-rpath,/usr/local/lib -lm -L/usr/local/lib -lfastjettools -lfastjet -lfastjetplugins -lsiscone_spherical -lsiscone\nimport numpy as np\ncimport numpy as np\nnp.import_array()\n\nfrom libcpp.pair cimport pair\nfrom libcpp.vector cimport vector\n\ncdef extern from \"/home/yohann/Desktop/stage/recnn/notebooks/fj.cc\":\n void fj(vector[double]& a, \n vector[vector[int]]& trees, \n vector[vector[double]]& contents, \n vector[double]& masses, \n vector[double]& pts, \n double R, int jet_algorithm)\n \ncpdef cluster(np.ndarray[np.double_t, ndim=2, mode=\"c\"] a, \n R=0.3, jet_algorithm=0):\n cdef vector[double] v\n cdef vector[vector[int]] trees\n cdef vector[vector[double]] contents\n cdef vector[double] masses\n cdef vector[double] pts \n for value in a.ravel():\n v.push_back(value)\n \n fj(v, trees, contents, masses, pts, R=R, jet_algorithm=jet_algorithm)\n jets = []\n \n for tree, content, mass, pt in zip(trees, contents, masses, pts):\n tree = np.array(tree).reshape(-1, 2)\n content = np.array(content).reshape(-1, 4)\n jets.append((tree, content, mass, pt))\n \n return jets",
"cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid for C/ObjC but not for C++\nIn file included from /opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/ndarraytypes.h:1816:0,\n from /opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/ndarrayobject.h:18,\n from /opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/arrayobject.h:4,\n from /home/yohann/.cache/ipython/cython/_cython_magic_2dc99a72b660134dd64116f5b0daad65.cpp:535:\n/opt/deeplearning/anaconda/lib/python2.7/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h:15:2: warning: #warning \"Using deprecated NumPy API, disable it by \" \"#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\" [-Wcpp]\n #warning \"Using deprecated NumPy API, disable it by \" \\\n ^\n"
],
[
"def preprocess(jet, output=\"kt\", colinear_splits=0, trimming=0.0):\n \"\"\"\n preprocesses the data to make it usable by the recnn\n Preprocessing algorithm:\n 1. j = the highest pt anti-kt jet (R=1)\n 2. run kt (R=0.3) on the constituents c of j, resulting in subjets sj1, sj2, ..., sjN\n 3. phi = sj1.phi(); for all c, do c.rotate_z(-phi)\n 4. bv = sj1.boost_vector(); bv.set_perp(0); for all c, do c.boost(-bv)\n 5. deltaz = sj1.pz - sj2.pz; deltay = sj1.py - sj2.py; alpha = -atan2(deltaz, deltay); for all c, do c.rotate_x(alpha)\n 6. if sj3.pz < 0: for all c, do c.set_pz(-c.pz)\n 7. finally recluster all transformed constituents c into a single jet\n \"\"\"\n jet = copy.deepcopy(jet)\n constituents = jet[\"content\"][jet[\"tree\"][:, 0] == -1]\n genpt=jet[\"genpt\"]\n ### Robustness check : Colinear splits ###\n for i in range(colinear_splits):\n j = np.argmax([_pt(c) for c in constituents])\n v = LorentzVector(constituents[j])\n eps = np.random.rand()\n \n p1 = LorentzVector()\n p2 = LorentzVector()\n p1.set_pt_eta_phi_m(v.pt() * eps, v.eta(), v.phi(), v.m() * eps ** 0.5)\n p2.set_pt_eta_phi_m(v.pt() * (1. - eps), v.eta(), v.phi(), 0.0)\n\n constituents[j][0] = p1.px\n constituents[j][1] = p1.py\n constituents[j][2] = p1.pz\n constituents[j][3] = p1.e\n \n constituents = np.vstack([constituents, \n np.array([[p2.px, p2.py, p2.pz, p2.e]])])\n\n ### run kt (R=0.3) on the constituents c of j, resulting in subjets sj1, sj2, ..., sjN ###\n subjets = cluster(constituents, R=0.3, jet_algorithm=0)\n \n ### trimming ###\n if trimming > 0.0:\n subjets = [(tree, content, mass, pt) for tree, content, mass, pt in subjets if pt > trimming * jet[\"pt\"]]\n else:\n subjets = [(tree, content, mass, pt) for tree, content, mass, pt in subjets]\n\n ### Rot phi ###\n # phi = sj1.phi()\n # for all c, do c.rotate_z(-phi)\n v = subjets[0][1][0]\n v = LorentzVector(v)\n\n phi = v.phi()\n \n for _, content, _, _ in subjets:\n for i in range(len(content)):\n v = LorentzVector(content[i])\n v.rotate_z(-phi)\n content[i, 0] = v[0]\n content[i, 1] = v[1]\n content[i, 2] = v[2]\n content[i, 3] = v[3]\n\n ### boost ###\n # bv = sj1.boost_vector()\n # bv.set_perp(0)\n # for all c, do c.boost(-bv)\n v = subjets[0][1][0]\n v = LorentzVector(v)\n bv = v.boost_vector()\n bv.set_perp(0)\n for _, content, _, _ in subjets:\n for i in range(len(content)):\n v = LorentzVector(content[i])\n v.boost(-bv)\n content[i, 0] = v[0]\n content[i, 1] = v[1]\n content[i, 2] = v[2]\n content[i, 3] = v[3]\n \n ### Rot alpha ###\n # deltaz = sj1.pz - sj2.pz\n # deltay = sj1.py - sj2.py\n # alpha = -atan2(deltaz, deltay)\n # for all c, do c.rotate_x(alpha)\n if len(subjets) >= 2:\n deltaz = subjets[0][1][0, 2] - subjets[1][1][0, 2]\n deltay = subjets[0][1][0, 1] - subjets[1][1][0, 1]\n alpha = -np.arctan2(deltaz, deltay)\n for _, content, _, _ in subjets:\n for i in range(len(content)):\n v = LorentzVector(content[i])\n v.rotate_x(alpha)\n content[i, 0] = v[0]\n content[i, 1] = v[1]\n content[i, 2] = v[2]\n content[i, 3] = v[3]\n \n ### flip if necessary ###\n # if sj3.pz < 0: for all c, do c.set_pz(-c.pz)\n if len(subjets) >= 3 and subjets[2][1][0, 2] < 0:\n for _, content, _, _ in subjets:\n for i in range(len(content)):\n content[i, 2] *= -1.0\n \n ### finally recluster all transformed constituents c into a single jet ###\n constituents = []\n \n for tree, content, _, _ in subjets:\n constituents.append(content[tree[:, 0] == -1])\n \n constituents = np.vstack(constituents)\n\n if output == \"anti-kt\":\n subjets = cluster(constituents, 
R=100., jet_algorithm=1)\n elif output == \"kt\":\n subjets = cluster(constituents, R=100., jet_algorithm=0)\n elif output == \"cambridge\":\n subjets = cluster(constituents, R=100., jet_algorithm=2)\n else:\n raise\n \n jet[\"tree\"] = subjets[0][0]\n jet[\"content\"] = subjets[0][1]\n v = LorentzVector(jet[\"content\"][0])\n jet[\"phi\"] = v.phi()\n jet[\"eta\"] = v.eta()\n jet[\"energy\"] = v.E()\n jet[\"mass\"] = v.m()\n jet[\"pt\"] = v.pt()\n jet[\"root_id\"] = 0\n jet[\"genpt\"] = genpt\n return(jet)",
"_____no_output_____"
]
],
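[
[
"Hedged example (not part of the original notebook): applying `preprocess` to the first jet, to check that the kinematics are recomputed after the reorientation steps; the printed fields are the keys set by `preprocess` itself.",
"_____no_output_____"
],
[
"### Hedged demo of the preprocessing on one jet ###\npj = preprocess(signal[0], output=\"kt\")\nprint(pj[\"pt\"], pj[\"mass\"], pj[\"genpt\"])\nprint(pj[\"tree\"].shape, pj[\"content\"].shape)",
"_____no_output_____"
]
],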
[
[
"### Convert data",
"_____no_output_____"
]
],
[
[
"f = basepath+'/npyfilesregression/subjet_oriented_'\n\n### eliminate single particles ###\ni=0\nwhile i < (len(y)):\n if X[i]['tree'].shape == (1, 2):\n X,y=np.delete(X,i),np.delete(y,i)\n else :\n i+=1",
"_____no_output_____"
],
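[
"Hedged alternative (not part of the original notebook): the `np.delete` loop above is quadratic in the number of jets; a boolean mask does the same filtering in one pass. Running it after the loop is a harmless no-op, since all single-particle jets are already gone.",
"_____no_output_____"
],
[
"### Hedged vectorized variant of the single-particle filter ###\nmask = np.array([x['tree'].shape != (1, 2) for x in X])\nX, y = X[mask], y[mask]",
"_____no_output_____"
],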
[
"### Save all versions of the dataset ###\n\n### anti-kt ###\n\n#random permutation\nflush = np.random.permutation(len(X))\nX_,y_ = np.copy(X[flush]),np.copy(y[flush])\n\n#preprocess\nX_ = multithreadmap(preprocess,X_,output='anti-kt')\n\n#separate training and testing data\n\n\n#saving\nnp.save(f+\"anti-kt_train.npy\",np.array([X_, y_]))\n\n\n\n",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
],
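[
"Hedged sketch (not part of the original notebook): the \"no train/test split\" note in the cell above. If an 80/20 split were wanted before saving, it could look like the commented sketch below; the *_test.npy filename is hypothetical.",
"_____no_output_____"
],
[
"### Hedged 80/20 train/test split sketch (the test filename is hypothetical) ###\n# n_test = len(X_) // 5\n# np.save(f+\"anti-kt_test.npy\",  np.array([X_[:n_test], y_[:n_test]]))\n# np.save(f+\"anti-kt_train.npy\", np.array([X_[n_test:], y_[n_test:]]))",
"_____no_output_____"
],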
[
"### kt ###\nflush = np.random.permutation(len(X))\nX_,y_ = np.copy(X[flush]),np.copy(y[flush])\n\nX_ = multithreadmap(preprocess,X_,output='kt')\n\nnp.save(f+\"kt_train.npy\", np.array([X_, y_]))",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
],
[
"### cambridge ###\nflush = np.random.permutation(len(X))\nX_,y_ = np.copy(X[flush]),np.copy(y[flush])\n\nX_ = multithreadmap(preprocess,X_,output='cambridge')\n\nnp.save(f+\"cambridge_train.npy\", np.array([X_, y_]))",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
],
[
"### random tree ###\nflush = np.random.permutation(len(X))\nX_,y_ = np.copy(X[flush]),np.copy(y[flush])\n\nX_=multithreadmap(randomize,multithreadmap(preprocess,X_,output=\"anti-kt\"))\n\nnp.save(f+\"random_train.npy\", np.array([X_, y_]))",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
],
[
"### seq by pt ###\nflush = np.random.permutation(len(X))\nX_,y_ = np.copy(X[flush]),np.copy(y[flush])\n\nX_=multithreadmap(sequentialize_by_pt,multithreadmap(preprocess,X_,output=\"anti-kt\"),reverse=False)\n\nnp.save(f+\"seqpt_train.npy\", np.array([X_, y_]))",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
],
[
"### seq by pt reversed ###\nflush = np.random.permutation(len(X))\nX_,y_ = np.copy(X[flush]),np.copy(y[flush])\n\nX_=multithreadmap(sequentialize_by_pt,multithreadmap(preprocess,X_,output=\"anti-kt\"),reverse=True)\n\nnp.save(f+\"seqpt_reversed_train.npy\", np.array([X_, y_]))",
"#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. 
Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n#--------------------------------------------------------------------------\n# FastJet release 3.0.6\n# M. Cacciari, G.P. Salam and G. Soyez \n# A software package for jet finding and analysis at colliders \n# http://fastjet.fr \n#\t \n# Please cite EPJC72(2012)1896 [arXiv:1111.6097] if you use this package\n# for scientific work and optionally PLB641(2006)57 [hep-ph/0512210]. \n# \n# FastJet is provided without warranty under the terms of the GNU GPLv2.\n# It uses T. Chan's closest pair algorithm, S. Fortune's Voronoi code\n# and 3rd party plugin jet algorithms. See COPYING file for details.\n#--------------------------------------------------------------------------\n"
]
],
[
[
"# Verification of the formating",
"_____no_output_____"
]
],
[
[
"### Load data to check ###\nfd = f+\"anti-kt_test.npy\"\nX, y = np.load(fd)\n\n### import plt and set options ###\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nplt.rcParams[\"figure.figsize\"] = (7,6)",
"_____no_output_____"
],
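[
"### sanity check of the loaded arrays ###\n# Added sketch, not part of the original analysis: a minimal, hypothetical check\n# that the arrays loaded above look sensible. It assumes X holds one jet\n# dictionary per event and y the matching 0/1 signal labels, as in the cells below.\nprint(\"number of jets:\", len(X))\nprint(\"signal fraction:\", np.mean(y))",
"_____no_output_____"
],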
[
"### Check for signal ###\n\na1 = []\nw1=[]\nfor i,j in enumerate(X):\n constituents = j[\"content\"][j[\"tree\"][:, 0] == -1]\n# if len(constituents)>1:\n# constituents = np.delete(constituents,0,0)\n if y[i]==1:\n a1.append(np.array([[LorentzVector(c).eta(), \n LorentzVector(c).phi()] for c in constituents]))\n w1.append([LorentzVector(c).pt() for c in constituents])\nw1 = [item for sublist in w1 for item in sublist]\n\nw1=100*np.array(w1)/sum(w1)\na1 = np.vstack(a1)",
"_____no_output_____"
],
[
"plt.close()\nt=plt.hist2d(a1[:, 0], a1[:, 1], range=[(-0.5,0.5), (-0.5,0.5)], \n bins=200, cmap=plt.cm.jet,weights=w1,norm=LogNorm())\ncbar = plt.colorbar()\nplt.xlabel(r'$\\eta$')\nplt.ylabel(r'$\\varphi$')\ncbar.set_label(r'% of p$_t$')\n#plt.savefig('tau_pfd_log_bis.png',dpi=600, transparent=True)\nplt.show()",
"_____no_output_____"
],
[
"### For background ###\n\na = []\nw=[]\nfor i,j in enumerate(X):\n constituents = j[\"content\"][j[\"tree\"][:, 0] == -1]\n# if len(constituents)>1:\n# constituents = np.delete(constituents,0,0)\n if y[i]==0:\n a.append(np.array([[LorentzVector(c).eta(), \n LorentzVector(c).phi()] for c in constituents]))\n w.append([LorentzVector(c).pt() for c in constituents])\nw = [item for sublist in w for item in sublist]\n\nw=100*np.array(w)/sum(w)\na = np.vstack(a)",
"_____no_output_____"
],
[
"plt.close()\nt=plt.hist2d(a[:, 0], a[:, 1], range=[(-0.5,0.5), (-0.5,0.5)], \n bins=200, cmap=plt.cm.jet, weights=w,norm=LogNorm())\ncbar = plt.colorbar()\nplt.xlabel(r'$\\eta$')\nplt.ylabel(r'$\\varphi$')\ncbar.set_label(r'% of p$_t$')\n#plt.savefig('non_tau_pfd_log_bis.png',dpi=600, transparent=True)\nplt.show()",
"_____no_output_____"
],
[
"### few taus plotting ###\n\na = []\nw=[]\n\nnjets = 10\ni0=2000\n\ni1=i0+njets\n\nfor i,j in enumerate(X[i0:i1]):\n constituents = j[\"content\"][j[\"tree\"][:, 0] == -1]\n if y[i+i0]==1:\n a.append(np.array([[LorentzVector(c).eta(), \n LorentzVector(c).phi()] for c in constituents]))\n w.append([LorentzVector(c).pt() for c in constituents])\n\n\nfor i in range(len(a)):\n plt.scatter(a[i][:,0],a[i][:,1],s=w[i]*100)\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a11ad73ac5cc81196108f4b46f95cd1d445944f
| 39,240 |
ipynb
|
Jupyter Notebook
|
Project_Plagiarism_Detection/1_Data_Exploration.ipynb
|
Inventrohyder/ML_SageMaker_Studies
|
db31d116d80e35107d18cb87504c9f6bcb1f430c
|
[
"MIT"
] | null | null | null |
Project_Plagiarism_Detection/1_Data_Exploration.ipynb
|
Inventrohyder/ML_SageMaker_Studies
|
db31d116d80e35107d18cb87504c9f6bcb1f430c
|
[
"MIT"
] | null | null | null |
Project_Plagiarism_Detection/1_Data_Exploration.ipynb
|
Inventrohyder/ML_SageMaker_Studies
|
db31d116d80e35107d18cb87504c9f6bcb1f430c
|
[
"MIT"
] | null | null | null | 40.621118 | 4,384 | 0.538812 |
[
[
[
"# Plagiarism Text Data\n\nIn this project, you will be tasked with building a plagiarism detector that examines a text file and performs binary classification; labeling that file as either plagiarized or not, depending on how similar the text file is when compared to a provided source text. \n\nThe first step in working with any dataset is loading the data in and noting what information is included in the dataset. This is an important step in eventually working with this data, and knowing what kinds of features you have to work with as you transform and group the data!\n\nSo, this notebook is all about exploring the data and noting patterns about the features you are given and the distribution of data. \n\n> There are not any exercises or questions in this notebook, it is only meant for exploration. This notebook will note be required in your final project submission.\n\n---",
"_____no_output_____"
],
[
"## Read in the Data\n\nThe cell below will download the necessary data and extract the files into the folder `data/`.\n\nThis data is a slightly modified version of a dataset created by Paul Clough (Information Studies) and Mark Stevenson (Computer Science), at the University of Sheffield. You can read all about the data collection and corpus, at [their university webpage](https://ir.shef.ac.uk/cloughie/resources/plagiarism_corpus.html). \n\n> **Citation for data**: Clough, P. and Stevenson, M. Developing A Corpus of Plagiarised Short Answers, Language Resources and Evaluation: Special Issue on Plagiarism and Authorship Analysis, In Press. [Download]",
"_____no_output_____"
]
],
[
[
"!wget https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip\n!unzip data",
"--2020-03-16 15:53:14-- https://s3.amazonaws.com/video.udacity-data.com/topher/2019/January/5c4147f9_data/data.zip\nResolving s3.amazonaws.com (s3.amazonaws.com)... 54.231.81.67\nConnecting to s3.amazonaws.com (s3.amazonaws.com)|54.231.81.67|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 113826 (111K) [application/zip]\nSaving to: ‘data.zip’\n\ndata.zip 100%[===================>] 111.16K --.-KB/s in 0.003s \n\n2020-03-16 15:53:14 (32.8 MB/s) - ‘data.zip’ saved [113826/113826]\n\nArchive: data.zip\n creating: data/\n inflating: data/.DS_Store \n creating: __MACOSX/\n creating: __MACOSX/data/\n inflating: __MACOSX/data/._.DS_Store \n inflating: data/file_information.csv \n inflating: __MACOSX/data/._file_information.csv \n inflating: data/g0pA_taska.txt \n inflating: __MACOSX/data/._g0pA_taska.txt \n inflating: data/g0pA_taskb.txt \n inflating: __MACOSX/data/._g0pA_taskb.txt \n inflating: data/g0pA_taskc.txt \n inflating: __MACOSX/data/._g0pA_taskc.txt \n inflating: data/g0pA_taskd.txt \n inflating: __MACOSX/data/._g0pA_taskd.txt \n inflating: data/g0pA_taske.txt \n inflating: __MACOSX/data/._g0pA_taske.txt \n inflating: data/g0pB_taska.txt \n inflating: __MACOSX/data/._g0pB_taska.txt \n inflating: data/g0pB_taskb.txt \n inflating: __MACOSX/data/._g0pB_taskb.txt \n inflating: data/g0pB_taskc.txt \n inflating: __MACOSX/data/._g0pB_taskc.txt \n inflating: data/g0pB_taskd.txt \n inflating: __MACOSX/data/._g0pB_taskd.txt \n inflating: data/g0pB_taske.txt \n inflating: __MACOSX/data/._g0pB_taske.txt \n inflating: data/g0pC_taska.txt \n inflating: __MACOSX/data/._g0pC_taska.txt \n inflating: data/g0pC_taskb.txt \n inflating: __MACOSX/data/._g0pC_taskb.txt \n inflating: data/g0pC_taskc.txt \n inflating: __MACOSX/data/._g0pC_taskc.txt \n inflating: data/g0pC_taskd.txt \n inflating: __MACOSX/data/._g0pC_taskd.txt \n inflating: data/g0pC_taske.txt \n inflating: __MACOSX/data/._g0pC_taske.txt \n inflating: data/g0pD_taska.txt \n inflating: __MACOSX/data/._g0pD_taska.txt \n inflating: data/g0pD_taskb.txt \n inflating: __MACOSX/data/._g0pD_taskb.txt \n inflating: data/g0pD_taskc.txt \n inflating: __MACOSX/data/._g0pD_taskc.txt \n inflating: data/g0pD_taskd.txt \n inflating: __MACOSX/data/._g0pD_taskd.txt \n inflating: data/g0pD_taske.txt \n inflating: __MACOSX/data/._g0pD_taske.txt \n inflating: data/g0pE_taska.txt \n inflating: __MACOSX/data/._g0pE_taska.txt \n inflating: data/g0pE_taskb.txt \n inflating: __MACOSX/data/._g0pE_taskb.txt \n inflating: data/g0pE_taskc.txt \n inflating: __MACOSX/data/._g0pE_taskc.txt \n inflating: data/g0pE_taskd.txt \n inflating: __MACOSX/data/._g0pE_taskd.txt \n inflating: data/g0pE_taske.txt \n inflating: __MACOSX/data/._g0pE_taske.txt \n inflating: data/g1pA_taska.txt \n inflating: __MACOSX/data/._g1pA_taska.txt \n inflating: data/g1pA_taskb.txt \n inflating: __MACOSX/data/._g1pA_taskb.txt \n inflating: data/g1pA_taskc.txt \n inflating: __MACOSX/data/._g1pA_taskc.txt \n inflating: data/g1pA_taskd.txt \n inflating: __MACOSX/data/._g1pA_taskd.txt \n inflating: data/g1pA_taske.txt \n inflating: __MACOSX/data/._g1pA_taske.txt \n inflating: data/g1pB_taska.txt \n inflating: __MACOSX/data/._g1pB_taska.txt \n inflating: data/g1pB_taskb.txt \n inflating: __MACOSX/data/._g1pB_taskb.txt \n inflating: data/g1pB_taskc.txt \n inflating: __MACOSX/data/._g1pB_taskc.txt \n inflating: data/g1pB_taskd.txt \n inflating: __MACOSX/data/._g1pB_taskd.txt \n inflating: data/g1pB_taske.txt \n inflating: __MACOSX/data/._g1pB_taske.txt \n inflating: 
data/g1pD_taska.txt \n inflating: data/g1pD_taskb.txt \n inflating: data/g1pD_taskc.txt \n inflating: data/g1pD_taskd.txt \n inflating: data/g1pD_taske.txt \n inflating: data/g2pA_taska.txt \n inflating: data/g2pA_taskb.txt \n inflating: data/g2pA_taskc.txt \n inflating: data/g2pA_taskd.txt \n inflating: data/g2pA_taske.txt \n inflating: data/g2pB_taska.txt \n inflating: data/g2pB_taskb.txt \n inflating: data/g2pB_taskc.txt \n inflating: data/g2pB_taskd.txt \n inflating: data/g2pB_taske.txt \n inflating: data/g2pC_taska.txt \n inflating: data/g2pC_taskb.txt \n inflating: data/g2pC_taskc.txt \n inflating: data/g2pC_taskd.txt \n inflating: data/g2pC_taske.txt \n inflating: data/g2pE_taska.txt \n inflating: data/g2pE_taskb.txt \n inflating: data/g2pE_taskc.txt \n inflating: data/g2pE_taskd.txt \n inflating: data/g2pE_taske.txt \n inflating: data/g3pA_taska.txt \n inflating: data/g3pA_taskb.txt \n inflating: data/g3pA_taskc.txt \n inflating: data/g3pA_taskd.txt \n inflating: data/g3pA_taske.txt \n inflating: data/g3pB_taska.txt \n inflating: data/g3pB_taskb.txt \n inflating: data/g3pB_taskc.txt \n inflating: data/g3pB_taskd.txt \n inflating: data/g3pB_taske.txt \n inflating: data/g3pC_taska.txt \n inflating: data/g3pC_taskb.txt \n inflating: data/g3pC_taskc.txt \n inflating: data/g3pC_taskd.txt \n inflating: data/g3pC_taske.txt \n inflating: data/g4pB_taska.txt \n inflating: data/g4pB_taskb.txt \n inflating: data/g4pB_taskc.txt \n inflating: data/g4pB_taskd.txt \n inflating: data/g4pB_taske.txt \n inflating: data/g4pC_taska.txt \n inflating: data/g4pC_taskb.txt \n inflating: data/g4pC_taskc.txt \n inflating: data/g4pC_taskd.txt \n inflating: data/g4pC_taske.txt \n inflating: data/g4pD_taska.txt \n inflating: data/g4pD_taskb.txt \n inflating: data/g4pD_taskc.txt \n inflating: data/g4pD_taskd.txt \n inflating: data/g4pD_taske.txt \n inflating: data/g4pE_taska.txt \n inflating: data/g4pE_taskb.txt \n inflating: data/g4pE_taskc.txt \n inflating: data/g4pE_taskd.txt \n inflating: data/g4pE_taske.txt \n inflating: data/orig_taska.txt \n inflating: data/orig_taskb.txt \n inflating: data/orig_taskc.txt \n inflating: data/orig_taskd.txt \n inflating: data/orig_taske.txt \n inflating: data/test_info.csv \n"
],
[
"# import libraries\nimport pandas as pd\nimport numpy as np\nimport os",
"_____no_output_____"
]
],
[
[
"This plagiarism dataset is made of multiple text files; each of these files has characteristics that are is summarized in a `.csv` file named `file_information.csv`, which we can read in using `pandas`.",
"_____no_output_____"
]
],
[
[
"csv_file = 'data/file_information.csv'\nplagiarism_df = pd.read_csv(csv_file)\n\n# print out the first few rows of data info\nplagiarism_df.head(10)",
"_____no_output_____"
]
],
[
[
"## Types of Plagiarism\n\nEach text file is associated with one **Task** (task A-E) and one **Category** of plagiarism, which you can see in the above DataFrame.\n\n### Five task types, A-E\n\nEach text file contains an answer to one short question; these questions are labeled as tasks A-E.\n* Each task, A-E, is about a topic that might be included in the Computer Science curriculum that was created by the authors of this dataset. \n * For example, Task A asks the question: \"What is inheritance in object oriented programming?\"\n\n### Four categories of plagiarism \n\nEach text file has an associated plagiarism label/category:\n\n1. `cut`: An answer is plagiarized; it is copy-pasted directly from the relevant Wikipedia source text.\n2. `light`: An answer is plagiarized; it is based on the Wikipedia source text and includes some copying and paraphrasing.\n3. `heavy`: An answer is plagiarized; it is based on the Wikipedia source text but expressed using different words and structure. Since this doesn't copy directly from a source text, this will likely be the most challenging kind of plagiarism to detect.\n4. `non`: An answer is not plagiarized; the Wikipedia source text is not used to create this answer.\n5. `orig`: This is a specific category for the original, Wikipedia source text. We will use these files only for comparison purposes.\n\n> So, out of the submitted files, the only category that does not contain any plagiarism is `non`.\n\nIn the next cell, print out some statistics about the data.",
"_____no_output_____"
]
],
[
[
"# print out some stats about the data\nprint('Number of files: ', plagiarism_df.shape[0]) # .shape[0] gives the rows \n# .unique() gives unique items in a specified column\nprint('Number of unique tasks/question types (A-E): ', (len(plagiarism_df['Task'].unique())))\nprint('Unique plagiarism categories: ', (plagiarism_df['Category'].unique()))",
"Number of files: 100\nNumber of unique tasks/question types (A-E): 5\nUnique plagiarism categories: ['non' 'cut' 'light' 'heavy' 'orig']\n"
]
],
[
[
"You should see the number of text files in the dataset as well as some characteristics about the `Task` and `Category` columns. **Note that the file count of 100 *includes* the 5 _original_ wikipedia files for tasks A-E.** If you take a look at the files in the `data` directory, you'll notice that the original, source texts start with the filename `orig_` as opposed to `g` for \"group.\" \n\n> So, in total there are 100 files, 95 of which are answers (submitted by people) and 5 of which are the original, Wikipedia source texts.\n\nYour end goal will be to use this information to classify any given answer text into one of two categories, plagiarized or not-plagiarized.",
"_____no_output_____"
],
[
"### Distribution of Data\n\nNext, let's look at the distribution of data. In this course, we've talked about traits like class imbalance that can inform how you develop an algorithm. So, here, we'll ask: **How evenly is our data distributed among different tasks and plagiarism levels?**\n\nBelow, you should notice two things:\n* Our dataset is quite small, especially with respect to examples of varying plagiarism levels.\n* The data is distributed fairly evenly across task and plagiarism types.",
"_____no_output_____"
]
],
[
[
"# Show counts by different tasks and amounts of plagiarism\n\n# group and count by task\ncounts_per_task=plagiarism_df.groupby(['Task']).size().reset_index(name=\"Counts\")\nprint(\"\\nTask:\")\ndisplay(counts_per_task)\n\n# group by plagiarism level\ncounts_per_category=plagiarism_df.groupby(['Category']).size().reset_index(name=\"Counts\")\nprint(\"\\nPlagiarism Levels:\")\ndisplay(counts_per_category)\n\n# group by task AND plagiarism level\ncounts_task_and_plagiarism=plagiarism_df.groupby(['Task', 'Category']).size().reset_index(name=\"Counts\")\nprint(\"\\nTask & Plagiarism Level Combos :\")\ndisplay(counts_task_and_plagiarism)",
"\nTask:\n"
]
],
[
[
"It may also be helpful to look at this last DataFrame, graphically.\n\nBelow, you can see that the counts follow a pattern broken down by task. Each task has one source text (original) and the highest number on `non` plagiarized cases.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n% matplotlib inline\n\n# counts\ngroup = ['Task', 'Category']\ncounts = plagiarism_df.groupby(group).size().reset_index(name=\"Counts\")\n\nplt.figure(figsize=(8,5))\nplt.bar(range(len(counts)), counts['Counts'], color = 'blue')",
"_____no_output_____"
]
],
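[
[
"Before moving on, it is worth seeing how these five categories collapse into the binary target the classifier will eventually predict. The next cell is an added, hypothetical sketch (not part of the original project files): it assumes the `plagiarism_df` loaded above, maps every plagiarized category (`cut`, `light`, `heavy`) to 1 and `non` to 0, and flags the `orig` source texts with -1 so they can be filtered out later.",
"_____no_output_____"
]
],
[
[
"# Added sketch: collapse the categories into a binary plagiarism label.\n# 'Class' and the -1 flag for source texts are hypothetical conventions.\nlabel_map = {'non': 0, 'cut': 1, 'light': 1, 'heavy': 1, 'orig': -1}\nplagiarism_df['Class'] = plagiarism_df['Category'].map(label_map)\nplagiarism_df['Class'].value_counts()",
"_____no_output_____"
]
],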
[
[
"## Up Next\n\nThis notebook is just about data loading and exploration, and you do not need to include it in your final project submission. \n\nIn the next few notebooks, you'll use this data to train a complete plagiarism classifier. You'll be tasked with extracting meaningful features from the text data, reading in answers to different tasks and comparing them to the original Wikipedia source text. You'll engineer similarity features that will help identify cases of plagiarism. Then, you'll use these features to train and deploy a classification model in a SageMaker notebook instance. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a11afd6f24ba2573965da6792814cd9613fed68
| 190,026 |
ipynb
|
Jupyter Notebook
|
image-classification/dlnd_image_classification.ipynb
|
wanesta/udacity
|
d6e62bb80ecda283ec5f3b4e23814c763da9e907
|
[
"MIT"
] | null | null | null |
image-classification/dlnd_image_classification.ipynb
|
wanesta/udacity
|
d6e62bb80ecda283ec5f3b4e23814c763da9e907
|
[
"MIT"
] | null | null | null |
image-classification/dlnd_image_classification.ipynb
|
wanesta/udacity
|
d6e62bb80ecda283ec5f3b4e23814c763da9e907
|
[
"MIT"
] | null | null | null | 83.896689 | 62,054 | 0.733931 |
[
[
[
"# 图像分类\n\n在此项目中,你将对 [CIFAR-10 数据集](https://www.cs.toronto.edu/~kriz/cifar.html) 中的图片进行分类。该数据集包含飞机、猫狗和其他物体。你需要预处理这些图片,然后用所有样本训练一个卷积神经网络。图片需要标准化(normalized),标签需要采用 one-hot 编码。你需要应用所学的知识构建卷积的、最大池化(max pooling)、丢弃(dropout)和完全连接(fully connected)的层。最后,你需要在样本图片上看到神经网络的预测结果。\n\n\n## 获取数据\n\n请运行以下单元,以下载 [CIFAR-10 数据集(Python版)](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz)。\n",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nfrom urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport problem_unittests as tests\nimport tarfile\n\ncifar10_dataset_folder_path = 'cifar-10-batches-py'\n\n# Use Floyd's cifar-10 dataset if present\nfloyd_cifar10_location = '/input/cifar-10/python.tar.gz'\nif isfile(floyd_cifar10_location):\n tar_gz_path = floyd_cifar10_location\nelse:\n tar_gz_path = 'cifar-10-python.tar.gz'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(tar_gz_path):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:\n urlretrieve(\n 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tar_gz_path,\n pbar.hook)\n\nif not isdir(cifar10_dataset_folder_path):\n with tarfile.open(tar_gz_path) as tar:\n tar.extractall()\n tar.close()\n\n\ntests.test_folder_path(cifar10_dataset_folder_path)",
"All files found!\n"
]
],
[
[
"## 探索数据\n\n该数据集分成了几部分/批次(batches),以免你的机器在计算时内存不足。CIFAR-10 数据集包含 5 个部分,名称分别为 `data_batch_1`、`data_batch_2`,以此类推。每个部分都包含以下某个类别的标签和图片:\n\n* 飞机\n* 汽车\n* 鸟类\n* 猫\n* 鹿\n* 狗\n* 青蛙\n* 马\n* 船只\n* 卡车\n\n了解数据集也是对数据进行预测的必经步骤。你可以通过更改 `batch_id` 和 `sample_id` 探索下面的代码单元。`batch_id` 是数据集一个部分的 ID(1 到 5)。`sample_id` 是该部分中图片和标签对(label pair)的 ID。\n\n问问你自己:“可能的标签有哪些?”、“图片数据的值范围是多少?”、“标签是按顺序排列,还是随机排列的?”。思考类似的问题,有助于你预处理数据,并使预测结果更准确。\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\nimport numpy as np\n\n# Explore the dataset\nbatch_id = 1\nsample_id = 50\nhelper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)",
"\nStats of batch 1:\nSamples: 10000\nLabel Counts: {0: 1005, 1: 974, 2: 1032, 3: 1016, 4: 999, 5: 937, 6: 1030, 7: 1001, 8: 1025, 9: 981}\nFirst 20 Labels: [6, 9, 9, 4, 1, 1, 2, 7, 8, 3, 4, 7, 7, 2, 9, 9, 9, 3, 2, 6]\n\nExample of Image 50:\nImage - Min Value: 8 Max Value: 243\nImage - Shape: (32, 32, 3)\nLabel - Label Id: 9 Name: truck\n"
]
],
[
[
"## 实现预处理函数\n\n### 标准化\n\n在下面的单元中,实现 `normalize` 函数,传入图片数据 `x`,并返回标准化 Numpy 数组。值应该在 0 到 1 的范围内(含 0 和 1)。返回对象应该和 `x` 的形状一样。\n",
"_____no_output_____"
]
],
[
[
"def normalize(x):\n \"\"\"\n Normalize a list of sample image data in the range of 0 to 1\n : x: List of image data. The image shape is (32, 32, 3)\n : return: Numpy array of normalize data\n \"\"\"\n # TODO: Implement Function\n return (x / 255)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_normalize(normalize)",
"Tests Passed\n"
]
],
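[
[
"A quick, optional sanity check (an added sketch, not one of the project's unit tests): feed `normalize` a random batch of fake pixel values and confirm that the shape is preserved and all values land in [0, 1].",
"_____no_output_____"
]
],
[
[
"# Added sanity check for normalize(); random integers stand in for pixels.\nimport numpy as np\n\nfake_batch = np.random.randint(0, 256, size=(4, 32, 32, 3))\nnormed = normalize(fake_batch)\nassert normed.shape == fake_batch.shape\nassert normed.min() >= 0.0 and normed.max() <= 1.0\nprint('normalize() keeps the shape and maps values into [0, 1]')",
"_____no_output_____"
]
],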
[
[
"### One-hot 编码\n\n和之前的代码单元一样,你将为预处理实现一个函数。这次,你将实现 `one_hot_encode` 函数。输入,也就是 `x`,是一个标签列表。实现该函数,以返回为 one_hot 编码的 Numpy 数组的标签列表。标签的可能值为 0 到 9。每次调用 `one_hot_encode` 时,对于每个值,one_hot 编码函数应该返回相同的编码。确保将编码映射保存到该函数外面。\n\n提示:不要重复发明轮子。\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn import preprocessing\n\ndef one_hot_encode(x):\n \"\"\"\n One hot encode a list of sample labels. Return a one-hot encoded vector for each label.\n : x: List of sample Labels\n : return: Numpy array of one-hot encoded labels\n \"\"\"\n # TODO: Implement Function\n return np.eye(10)[x]\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_one_hot_encode(one_hot_encode)",
"Tests Passed\n"
]
],
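[
[
"Another optional check (an added sketch, not from the original notebook): the encoding should be reversible with `np.argmax`, and calling `one_hot_encode` twice on the same labels should return identical arrays.",
"_____no_output_____"
]
],
[
[
"# Added round-trip check for one_hot_encode().\ndemo_labels = [0, 3, 9, 3]\nencoded = one_hot_encode(demo_labels)\nprint(encoded.shape) # (4, 10)\nprint(np.argmax(encoded, axis=1)) # recovers [0 3 9 3]\nassert np.array_equal(encoded, one_hot_encode(demo_labels)) # same encoding on every call",
"_____no_output_____"
]
],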
[
[
"### 随机化数据\n\n之前探索数据时,你已经了解到,样本的顺序是随机的。再随机化一次也不会有什么关系,但是对于这个数据集没有必要。\n",
"_____no_output_____"
],
[
"## 预处理所有数据并保存\n\n运行下方的代码单元,将预处理所有 CIFAR-10 数据,并保存到文件中。下面的代码还使用了 10% 的训练数据,用来验证。\n",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)",
"_____no_output_____"
]
],
[
[
"# 检查点\n\n这是你的第一个检查点。如果你什么时候决定再回到该记事本,或需要重新启动该记事本,你可以从这里开始。预处理的数据已保存到本地。\n",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport pickle\nimport problem_unittests as tests\nimport helper\n\n# Load the Preprocessed Validation data\nvalid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))",
"_____no_output_____"
]
],
[
[
"## 构建网络\n\n对于该神经网络,你需要将每层都构建为一个函数。你看到的大部分代码都位于函数外面。要更全面地测试你的代码,我们需要你将每层放入一个函数中。这样使我们能够提供更好的反馈,并使用我们的统一测试检测简单的错误,然后再提交项目。\n\n>**注意**:如果你觉得每周很难抽出足够的时间学习这门课程,我们为此项目提供了一个小捷径。对于接下来的几个问题,你可以使用 [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) 或 [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) 程序包中的类来构建每个层级,但是“卷积和最大池化层级”部分的层级除外。TF Layers 和 Keras 及 TFLearn 层级类似,因此很容易学会。\n\n>但是,如果你想充分利用这门课程,请尝试自己解决所有问题,不使用 TF Layers 程序包中的任何类。你依然可以使用其他程序包中的类,这些类和你在 TF Layers 中的类名称是一样的!例如,你可以使用 TF Neural Network 版本的 `conv2d` 类 [tf.nn.conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d),而不是 TF Layers 版本的 `conv2d` 类 [tf.layers.conv2d](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d)。\n\n我们开始吧!\n\n\n### 输入\n\n神经网络需要读取图片数据、one-hot 编码标签和丢弃保留概率(dropout keep probability)。请实现以下函数:\n\n* 实现 `neural_net_image_input`\n * 返回 [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)\n * 使用 `image_shape` 设置形状,部分大小设为 `None`\n * 使用 [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) 中的 TensorFlow `name` 参数对 TensorFlow 占位符 \"x\" 命名\n* 实现 `neural_net_label_input`\n * 返回 [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)\n * 使用 `n_classes` 设置形状,部分大小设为 `None`\n * 使用 [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) 中的 TensorFlow `name` 参数对 TensorFlow 占位符 \"y\" 命名\n* 实现 `neural_net_keep_prob_input`\n * 返回 [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder),用于丢弃保留概率\n * 使用 [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) 中的 TensorFlow `name` 参数对 TensorFlow 占位符 \"keep_prob\" 命名\n\n这些名称将在项目结束时,用于加载保存的模型。\n\n注意:TensorFlow 中的 `None` 表示形状可以是动态大小。",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\ndef neural_net_image_input(image_shape):\n \"\"\"\n Return a Tensor for a batch of image input\n : image_shape: Shape of the images\n : return: Tensor for image input.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder(tf.float32, shape = (None, *image_shape), name = \"x\")\n\n\ndef neural_net_label_input(n_classes):\n \"\"\"\n Return a Tensor for a batch of label input\n : n_classes: Number of classes\n : return: Tensor for label input.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder(tf.int8, shape = (None, n_classes), name = \"y\")\n\n\ndef neural_net_keep_prob_input():\n \"\"\"\n Return a Tensor for keep probability\n : return: Tensor for keep probability.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder(tf.float32, shape = None, name = \"keep_prob\")\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntf.reset_default_graph()\ntests.test_nn_image_inputs(neural_net_image_input)\ntests.test_nn_label_inputs(neural_net_label_input)\ntests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)",
"Image Input Tests Passed.\nLabel Input Tests Passed.\nKeep Prob Tests Passed.\n"
]
],
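[
[
"To see why the `name` arguments matter, the added cell below builds the placeholders in a throwaway graph and prints their names and shapes. These are exactly the names (`x:0`, `y:0`, `keep_prob:0`) used at the end of the project to pull the tensors back out of the restored model.",
"_____no_output_____"
]
],
[
[
"# Added illustration: named placeholders can be looked up again by name.\ntf.reset_default_graph()\ndemo_x = neural_net_image_input((32, 32, 3))\ndemo_y = neural_net_label_input(10)\ndemo_kp = neural_net_keep_prob_input()\nprint(demo_x.name, demo_x.get_shape().as_list()) # x:0 [None, 32, 32, 3]\nprint(demo_y.name, demo_y.get_shape().as_list()) # y:0 [None, 10]\nprint(demo_kp.name) # keep_prob:0\nassert tf.get_default_graph().get_tensor_by_name('x:0') is demo_x",
"_____no_output_____"
]
],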
[
[
"### 卷积和最大池化层\n\n卷积层级适合处理图片。对于此代码单元,你应该实现函数 `conv2d_maxpool` 以便应用卷积然后进行最大池化:\n\n* 使用 `conv_ksize`、`conv_num_outputs` 和 `x_tensor` 的形状创建权重(weight)和偏置(bias)。\n* 使用权重和 `conv_strides` 对 `x_tensor` 应用卷积。\n * 建议使用我们建议的间距(padding),当然也可以使用任何其他间距。\n* 添加偏置\n* 向卷积中添加非线性激活(nonlinear activation)\n* 使用 `pool_ksize` 和 `pool_strides` 应用最大池化\n * 建议使用我们建议的间距(padding),当然也可以使用任何其他间距。\n\n**注意**:对于**此层**,**请勿使用** [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) 或 [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers),但是仍然可以使用 TensorFlow 的 [Neural Network](https://www.tensorflow.org/api_docs/python/tf/nn) 包。对于所有**其他层**,你依然可以使用快捷方法。\n",
"_____no_output_____"
]
],
[
[
"def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):\n \"\"\"\n Apply convolution then max pooling to x_tensor\n :param x_tensor: TensorFlow Tensor\n :param conv_num_outputs: Number of outputs for the convolutional layer\n :param conv_ksize: kernal size 2-D Tuple for the convolutional layer\n :param conv_strides: Stride 2-D Tuple for convolution\n :param pool_ksize: kernal size 2-D Tuple for pool\n :param pool_strides: Stride 2-D Tuple for pool\n : return: A tensor that represents convolution and max pooling of x_tensor\n \"\"\"\n # TODO: Implement Function\n input_chanel = int(x_tensor.shape[3])\n output_chanel = conv_num_outputs\n weight_shape = (*conv_ksize,input_chanel,output_chanel) # * \n weight = tf.Variable(tf.random_normal(weight_shape, stddev = 0.1)) #权重\n bias = tf.Variable(tf.zeros(output_chanel)) #设置偏置项\n l_active = tf.nn.conv2d(x_tensor, weight, (1, *conv_strides, 1), 'SAME')\n l_active = tf.nn.bias_add(l_active,bias)\n #active_layers = tf.nn.relu(tf.add(tf.matmul(features,label),bias)) #ReLu\n mx_layer = tf.nn.relu(l_active)\n \n return tf.nn.max_pool(mx_layer, (1, *pool_ksize, 1), (1, *pool_strides, 1), 'VALID')\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_con_pool(conv2d_maxpool)",
"Tests Passed\n"
]
],
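[
[
"The added cell below traces shapes through `conv2d_maxpool` on a throwaway placeholder: with `'SAME'` padding and stride 1 the convolution keeps the 32x32 spatial size, and the 2x2, stride-2 max pool then halves it to 16x16.",
"_____no_output_____"
]
],
[
[
"# Added shape walkthrough for conv2d_maxpool().\ntf.reset_default_graph()\ndemo_images = tf.placeholder(tf.float32, (None, 32, 32, 3))\ndemo_out = conv2d_maxpool(demo_images, 10, (3, 3), (1, 1), (2, 2), (2, 2))\nprint(demo_out.get_shape().as_list()) # [None, 16, 16, 10]",
"_____no_output_____"
]
],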
[
[
"### 扁平化层\n\n实现 `flatten` 函数,将 `x_tensor` 的维度从四维张量(4-D tensor)变成二维张量。输出应该是形状(*部分大小(Batch Size)*,*扁平化图片大小(Flattened Image Size)*)。快捷方法:对于此层,你可以使用 [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) 或 [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) 包中的类。如果你想要更大挑战,可以仅使用其他 TensorFlow 程序包。\n",
"_____no_output_____"
]
],
[
[
"from functools import reduce\nfrom operator import mul\n\ndef flatten(x_tensor):\n \"\"\"\n Flatten x_tensor to (Batch Size, Flattened Image Size)\n : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.\n : return: A tensor of size (Batch Size, Flattened Image Size).\n \"\"\"\n # TODO: Implement Function\n _, *image_size = x_tensor.get_shape().as_list()\n #print(*image_size)\n return tf.reshape(x_tensor, (-1, reduce(mul, image_size)))\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_flatten(flatten)",
"Tests Passed\n"
]
],
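[
[
"The added cell below confirms what `flatten` does to such feature maps: a `(None, 16, 16, 10)` tensor becomes `(None, 2560)`, since 16 * 16 * 10 = 2560.",
"_____no_output_____"
]
],
[
[
"# Added shape check for flatten().\ntf.reset_default_graph()\ndemo_maps = tf.placeholder(tf.float32, (None, 16, 16, 10))\nprint(flatten(demo_maps).get_shape().as_list()) # [None, 2560]",
"_____no_output_____"
]
],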
[
[
"### 完全连接的层\n\n实现 `fully_conn` 函数,以向 `x_tensor` 应用完全连接的层级,形状为(*部分大小(Batch Size)*,*num_outputs*)。快捷方法:对于此层,你可以使用 [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) 或 [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) 包中的类。如果你想要更大挑战,可以仅使用其他 TensorFlow 程序包。",
"_____no_output_____"
]
],
[
[
"def fully_conn(x_tensor, num_outputs):\n \"\"\"\n Apply a fully connected layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n # TODO: Implement Function\n num_input = x_tensor.get_shape().as_list()[1]\n weight_shape = (num_input, num_outputs)\n #print(weight_shape)\n weight = tf.Variable(tf.truncated_normal(weight_shape, stddev = 0.1))\n \n bias = tf.Variable(tf.zeros(num_outputs))\n \n activation = tf.nn.bias_add(tf.matmul(x_tensor, weight), bias)\n return tf.nn.relu(activation)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_fully_conn(fully_conn)",
"Tests Passed\n"
]
],
[
[
"### 输出层\n\n实现 `output` 函数,向 x_tensor 应用完全连接的层级,形状为(*部分大小(Batch Size)*,*num_outputs*)。快捷方法:对于此层,你可以使用 [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) 或 [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) 包中的类。如果你想要更大挑战,可以仅使用其他 TensorFlow 程序包。\n\n**注意**:该层级不应应用 Activation、softmax 或交叉熵(cross entropy)。",
"_____no_output_____"
]
],
[
[
"def output(x_tensor, num_outputs):\n \"\"\"\n Apply a output layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n # TODO: Implement Function\n num_input = x_tensor.get_shape().as_list()[1] #not 0\n weight_shape = (num_input, num_outputs) \n weight = tf.Variable(tf.truncated_normal(weight_shape, stddev = 0.1))\n \n bias = tf.Variable(tf.zeros(num_outputs))\n \n return tf.nn.bias_add(tf.matmul(x_tensor,weight),bias)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_output(output)",
"Tests Passed\n"
]
],
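[
[
"Note the one difference between `fully_conn` and `output`: the output layer returns raw, linear logits with no ReLU, because `tf.nn.softmax_cross_entropy_with_logits` applies the softmax itself during training. The added sketch below makes the contrast visible on a throwaway tensor.",
"_____no_output_____"
]
],
[
[
"# Added contrast: fully_conn ends in a ReLU, output stays linear (raw logits).\ntf.reset_default_graph()\ndemo_flat = tf.placeholder(tf.float32, (None, 8))\nprint(fully_conn(demo_flat, 4).op.type) # Relu\nprint(output(demo_flat, 4).op.type) # BiasAdd, i.e. no activation",
"_____no_output_____"
]
],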
[
[
"### 创建卷积模型\n\n实现函数 `conv_net`, 创建卷积神经网络模型。该函数传入一批图片 `x`,并输出对数(logits)。使用你在上方创建的层创建此模型:\n\n* 应用 1、2 或 3 个卷积和最大池化层(Convolution and Max Pool layers)\n* 应用一个扁平层(Flatten Layer)\n* 应用 1、2 或 3 个完全连接层(Fully Connected Layers)\n* 应用一个输出层(Output Layer)\n* 返回输出\n* 使用 `keep_prob` 向模型中的一个或多个层应用 [TensorFlow 的 Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout)",
"_____no_output_____"
]
],
[
[
"def conv_net(x, keep_prob):\n \"\"\"\n Create a convolutional neural network model\n : x: Placeholder tensor that holds image data.\n : keep_prob: Placeholder tensor that hold dropout keep probability.\n : return: Tensor that represents logits\n \"\"\"\n # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers\n # Play around with different number of outputs, kernel size and stride\n # Function Definition from Above:\n # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)\n x = conv2d_maxpool(x, 64, (3, 3), (1, 1), (2, 2), (2, 2))\n x = tf.nn.dropout(x, keep_prob)\n \n x = conv2d_maxpool(x, 128, (3, 3), (1, 1), (2, 2), (2, 2))\n x = tf.nn.dropout(x, keep_prob)\n # x has shape (batch, 8, 8, 128)\n x = conv2d_maxpool(x, 256, (3, 3), (1, 1), (2, 2), (2, 2))\n x = tf.nn.dropout(x, keep_prob)\n \n # TODO: Apply a Flatten Layer\n # Function Definition from Above:\n # flatten(x_tensor)\n x = flatten(x)\n\n # TODO: Apply 1, 2, or 3 Fully Connected Layers\n # Play around with different number of outputs\n # Function Definition from Above:\n # fully_conn(x_tensor, num_outputs)\n x = fully_conn(x, 512)\n x = tf.nn.dropout(x, keep_prob)\n \n # TODO: Apply an Output Layer\n # Set this to the number of classes\n # Function Definition from Above:\n # output(x_tensor, num_outputs)\n \n \n # TODO: return output\n return output(x, 10)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\n##############################\n## Build the Neural Network ##\n##############################\n\n# Remove previous weights, bias, inputs, etc..\ntf.reset_default_graph()\n\n# Inputs\nx = neural_net_image_input((32, 32, 3))\ny = neural_net_label_input(10)\nkeep_prob = neural_net_keep_prob_input()\n\n# Model\nlogits = conv_net(x, keep_prob)\n\n# Name logits Tensor, so that is can be loaded from disk after training\nlogits = tf.identity(logits, name='logits')\n\n# Loss and Optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\noptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n# Accuracy\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\ntests.test_conv_net(conv_net)",
"Neural Network Built!\n"
]
],
[
[
"## 训练神经网络\n\n### 单次优化\n\n实现函数 `train_neural_network` 以进行单次优化(single optimization)。该优化应该使用 `optimizer` 优化 `session`,其中 `feed_dict` 具有以下参数:\n\n* `x` 表示图片输入\n* `y` 表示标签\n* `keep_prob` 表示丢弃的保留率\n\n每个部分都会调用该函数,所以 `tf.global_variables_initializer()` 已经被调用。\n\n注意:不需要返回任何内容。该函数只是用来优化神经网络。\n",
"_____no_output_____"
]
],
[
[
"def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n \"\"\"\n Optimize the session on a batch of images and labels\n : session: Current TensorFlow session\n : optimizer: TensorFlow optimizer function\n : keep_probability: keep probability\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n \"\"\"\n # TODO: Implement Function\n session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_train_nn(train_neural_network)",
"Tests Passed\n"
]
],
[
[
"### 显示数据\n\n实现函数 `print_stats` 以输出损失和验证准确率。使用全局变量 `valid_features` 和 `valid_labels` 计算验证准确率。使用保留率 `1.0` 计算损失和验证准确率(loss and validation accuracy)。\n",
"_____no_output_____"
]
],
[
[
"def print_stats(session, feature_batch, label_batch, cost, accuracy):\n \"\"\"\n Print information about loss and validation accuracy\n : session: Current TensorFlow session\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n : cost: TensorFlow cost function\n : accuracy: TensorFlow accuracy function\n \"\"\"\n # TODO: Implement Function\n global valid_features, valid_labels\n validation_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.0})\n loss = session.run( cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})\n \n prt = 'Loss: {:.4f} Accuracy: {:.4f}'\n print(prt.format(loss, validation_accuracy, prec=3))",
"_____no_output_____"
]
],
[
[
"### 超参数\n\n调试以下超参数:\n* 设置 `epochs` 表示神经网络停止学习或开始过拟合的迭代次数\n* 设置 `batch_size`,表示机器内存允许的部分最大体积。大部分人设为以下常见内存大小:\n\n * 64\n * 128\n * 256\n * ...\n* 设置 `keep_probability` 表示使用丢弃时保留节点的概率",
"_____no_output_____"
]
],
[
[
"# TODO: Tune Parameters\nepochs = 200\nbatch_size = 128\nkeep_probability = 0.5",
"_____no_output_____"
]
],
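[
[
"One detail worth pausing on: `keep_probability = 0.5` is only fed while training, whereas `print_stats` and the final test feed `keep_prob: 1.0`. The added sketch below shows TensorFlow's inverted-dropout behaviour, in which surviving activations are scaled by `1 / keep_prob` so that expected magnitudes match at evaluation time.",
"_____no_output_____"
]
],
[
[
"# Added illustration of tf.nn.dropout semantics; it adds a few throwaway ops\n# but does not touch the model's variables built above.\ndemo_ones = tf.ones([6])\ndemo_kp = tf.placeholder(tf.float32)\ndemo_drop = tf.nn.dropout(demo_ones, demo_kp)\nwith tf.Session() as demo_sess:\n    print(demo_sess.run(demo_drop, {demo_kp: 1.0})) # all ones: the evaluation setting\n    print(demo_sess.run(demo_drop, {demo_kp: 0.5})) # about half zeroed, survivors scaled to 2.0",
"_____no_output_____"
]
],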
[
[
"### 在单个 CIFAR-10 部分上训练\n\n我们先用单个部分,而不是用所有的 CIFAR-10 批次训练神经网络。这样可以节省时间,并对模型进行迭代,以提高准确率。最终验证准确率达到 50% 或以上之后,在下一部分对所有数据运行模型。\n",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nprint('Checking the Training on a Single Batch...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n batch_i = 1\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)",
"Checking the Training on a Single Batch...\nEpoch 1, CIFAR-10 Batch 1: Loss: 2.2983 Accuracy: 0.1052\nEpoch 2, CIFAR-10 Batch 1: Loss: 2.3019 Accuracy: 0.1010\nEpoch 3, CIFAR-10 Batch 1: Loss: 2.2950 Accuracy: 0.1258\nEpoch 4, CIFAR-10 Batch 1: Loss: 2.2787 Accuracy: 0.1510\nEpoch 5, CIFAR-10 Batch 1: Loss: 2.2601 Accuracy: 0.1592\nEpoch 6, CIFAR-10 Batch 1: Loss: 2.2481 Accuracy: 0.1556\nEpoch 7, CIFAR-10 Batch 1: Loss: 2.2328 Accuracy: 0.1614\nEpoch 8, CIFAR-10 Batch 1: Loss: 2.1974 Accuracy: 0.1950\nEpoch 9, CIFAR-10 Batch 1: Loss: 2.1336 Accuracy: 0.2274\nEpoch 10, CIFAR-10 Batch 1: Loss: 2.0589 Accuracy: 0.2510\nEpoch 11, CIFAR-10 Batch 1: Loss: 1.9907 Accuracy: 0.3046\nEpoch 12, CIFAR-10 Batch 1: Loss: 2.0030 Accuracy: 0.3036\nEpoch 13, CIFAR-10 Batch 1: Loss: 1.9698 Accuracy: 0.3454\nEpoch 14, CIFAR-10 Batch 1: Loss: 1.9381 Accuracy: 0.3614\nEpoch 15, CIFAR-10 Batch 1: Loss: 1.9321 Accuracy: 0.3576\nEpoch 16, CIFAR-10 Batch 1: Loss: 1.9110 Accuracy: 0.3924\nEpoch 17, CIFAR-10 Batch 1: Loss: 1.8847 Accuracy: 0.3914\nEpoch 18, CIFAR-10 Batch 1: Loss: 1.8655 Accuracy: 0.4000\nEpoch 19, CIFAR-10 Batch 1: Loss: 1.7937 Accuracy: 0.4150\nEpoch 20, CIFAR-10 Batch 1: Loss: 1.8148 Accuracy: 0.4054\nEpoch 21, CIFAR-10 Batch 1: Loss: 1.8180 Accuracy: 0.4088\nEpoch 22, CIFAR-10 Batch 1: Loss: 1.7589 Accuracy: 0.4220\nEpoch 23, CIFAR-10 Batch 1: Loss: 1.7052 Accuracy: 0.4270\nEpoch 24, CIFAR-10 Batch 1: Loss: 1.6943 Accuracy: 0.4394\nEpoch 25, CIFAR-10 Batch 1: Loss: 1.7342 Accuracy: 0.4196\nEpoch 26, CIFAR-10 Batch 1: Loss: 1.6755 Accuracy: 0.4378\nEpoch 27, CIFAR-10 Batch 1: Loss: 1.7083 Accuracy: 0.4226\nEpoch 28, CIFAR-10 Batch 1: Loss: 1.6642 Accuracy: 0.4306\nEpoch 29, CIFAR-10 Batch 1: Loss: 1.6127 Accuracy: 0.4432\nEpoch 30, CIFAR-10 Batch 1: Loss: 1.6448 Accuracy: 0.4346\nEpoch 31, CIFAR-10 Batch 1: Loss: 1.7516 Accuracy: 0.4092\nEpoch 32, CIFAR-10 Batch 1: Loss: 1.8120 Accuracy: 0.3852\nEpoch 33, CIFAR-10 Batch 1: Loss: 1.6668 Accuracy: 0.4274\nEpoch 34, CIFAR-10 Batch 1: Loss: 1.6681 Accuracy: 0.4178\nEpoch 35, CIFAR-10 Batch 1: Loss: 1.4836 Accuracy: 0.4710\nEpoch 36, CIFAR-10 Batch 1: Loss: 1.5404 Accuracy: 0.4686\nEpoch 37, CIFAR-10 Batch 1: Loss: 1.5020 Accuracy: 0.4682\nEpoch 38, CIFAR-10 Batch 1: Loss: 1.6325 Accuracy: 0.4268\nEpoch 39, CIFAR-10 Batch 1: Loss: 1.5342 Accuracy: 0.4504\nEpoch 40, CIFAR-10 Batch 1: Loss: 1.4532 Accuracy: 0.4696\nEpoch 41, CIFAR-10 Batch 1: Loss: 1.4693 Accuracy: 0.4648\nEpoch 42, CIFAR-10 Batch 1: Loss: 1.4926 Accuracy: 0.4594\nEpoch 43, CIFAR-10 Batch 1: Loss: 1.4819 Accuracy: 0.4456\nEpoch 44, CIFAR-10 Batch 1: Loss: 1.3472 Accuracy: 0.4768\nEpoch 45, CIFAR-10 Batch 1: Loss: 1.4583 Accuracy: 0.4568\nEpoch 46, CIFAR-10 Batch 1: Loss: 1.3864 Accuracy: 0.4916\nEpoch 47, CIFAR-10 Batch 1: Loss: 1.4450 Accuracy: 0.4776\nEpoch 48, CIFAR-10 Batch 1: Loss: 1.4338 Accuracy: 0.4794\nEpoch 49, CIFAR-10 Batch 1: Loss: 1.4163 Accuracy: 0.4852\nEpoch 50, CIFAR-10 Batch 1: Loss: 1.5015 Accuracy: 0.4518\nEpoch 51, CIFAR-10 Batch 1: Loss: 1.2733 Accuracy: 0.4982\nEpoch 52, CIFAR-10 Batch 1: Loss: 1.2747 Accuracy: 0.5010\nEpoch 53, CIFAR-10 Batch 1: Loss: 1.2742 Accuracy: 0.5102\nEpoch 54, CIFAR-10 Batch 1: Loss: 1.1779 Accuracy: 0.5264\nEpoch 55, CIFAR-10 Batch 1: Loss: 1.2011 Accuracy: 0.5122\nEpoch 56, CIFAR-10 Batch 1: Loss: 1.1687 Accuracy: 0.5296\nEpoch 57, CIFAR-10 Batch 1: Loss: 1.2115 Accuracy: 0.5148\nEpoch 58, CIFAR-10 Batch 1: Loss: 1.1978 Accuracy: 0.5310\nEpoch 59, CIFAR-10 Batch 1: Loss: 1.0567 Accuracy: 0.5438\nEpoch 60, CIFAR-10 Batch 1: Loss: 
1.0710 Accuracy: 0.5416\nEpoch 61, CIFAR-10 Batch 1: Loss: 1.3034 Accuracy: 0.5246\nEpoch 62, CIFAR-10 Batch 1: Loss: 1.2312 Accuracy: 0.5232\nEpoch 63, CIFAR-10 Batch 1: Loss: 0.8969 Accuracy: 0.5768\nEpoch 64, CIFAR-10 Batch 1: Loss: 1.1836 Accuracy: 0.5162\nEpoch 65, CIFAR-10 Batch 1: Loss: 0.9282 Accuracy: 0.5576\nEpoch 66, CIFAR-10 Batch 1: Loss: 0.9694 Accuracy: 0.5752\nEpoch 67, CIFAR-10 Batch 1: Loss: 0.9271 Accuracy: 0.5564\nEpoch 68, CIFAR-10 Batch 1: Loss: 1.0297 Accuracy: 0.5514\nEpoch 69, CIFAR-10 Batch 1: Loss: 0.9722 Accuracy: 0.5600\nEpoch 70, CIFAR-10 Batch 1: Loss: 0.8621 Accuracy: 0.5584\nEpoch 71, CIFAR-10 Batch 1: Loss: 0.7735 Accuracy: 0.5928\nEpoch 72, CIFAR-10 Batch 1: Loss: 0.7675 Accuracy: 0.5842\nEpoch 73, CIFAR-10 Batch 1: Loss: 0.7573 Accuracy: 0.5896\nEpoch 74, CIFAR-10 Batch 1: Loss: 0.8836 Accuracy: 0.5774\nEpoch 75, CIFAR-10 Batch 1: Loss: 0.7967 Accuracy: 0.5888\nEpoch 76, CIFAR-10 Batch 1: Loss: 0.8046 Accuracy: 0.5856\nEpoch 77, CIFAR-10 Batch 1: Loss: 0.8143 Accuracy: 0.5876\nEpoch 78, CIFAR-10 Batch 1: Loss: 0.6007 Accuracy: 0.6284\nEpoch 79, CIFAR-10 Batch 1: Loss: 0.7138 Accuracy: 0.6140\nEpoch 80, CIFAR-10 Batch 1: Loss: 0.7523 Accuracy: 0.5996\nEpoch 81, CIFAR-10 Batch 1: Loss: 0.5827 Accuracy: 0.6090\nEpoch 82, CIFAR-10 Batch 1: Loss: 0.5288 Accuracy: 0.6216\nEpoch 83, CIFAR-10 Batch 1: Loss: 0.6867 Accuracy: 0.5948\nEpoch 84, CIFAR-10 Batch 1: Loss: 0.5612 Accuracy: 0.6026\nEpoch 85, CIFAR-10 Batch 1: Loss: 0.5291 Accuracy: 0.6164\nEpoch 86, CIFAR-10 Batch 1: Loss: 0.5034 Accuracy: 0.6268\nEpoch 87, CIFAR-10 Batch 1: Loss: 0.4220 Accuracy: 0.6418\nEpoch 88, CIFAR-10 Batch 1: Loss: 0.4642 Accuracy: 0.6306\nEpoch 89, CIFAR-10 Batch 1: Loss: 0.4608 Accuracy: 0.6152\nEpoch 90, CIFAR-10 Batch 1: Loss: 0.4452 Accuracy: 0.6202\nEpoch 91, CIFAR-10 Batch 1: Loss: 0.4382 Accuracy: 0.6230\nEpoch 92, CIFAR-10 Batch 1: Loss: 0.4110 Accuracy: 0.6286\nEpoch 93, CIFAR-10 Batch 1: Loss: 0.3510 Accuracy: 0.6536\nEpoch 94, CIFAR-10 Batch 1: Loss: 0.5068 Accuracy: 0.6160\nEpoch 95, CIFAR-10 Batch 1: Loss: 0.3317 Accuracy: 0.6320\nEpoch 96, CIFAR-10 Batch 1: Loss: 0.3361 Accuracy: 0.6398\nEpoch 97, CIFAR-10 Batch 1: Loss: 0.3171 Accuracy: 0.6428\nEpoch 98, CIFAR-10 Batch 1: Loss: 0.3584 Accuracy: 0.6360\nEpoch 99, CIFAR-10 Batch 1: Loss: 0.3089 Accuracy: 0.6298\nEpoch 100, CIFAR-10 Batch 1: Loss: 0.2543 Accuracy: 0.6514\nEpoch 101, CIFAR-10 Batch 1: Loss: 0.3009 Accuracy: 0.6462\nEpoch 102, CIFAR-10 Batch 1: Loss: 0.2655 Accuracy: 0.6338\nEpoch 103, CIFAR-10 Batch 1: Loss: 0.3240 Accuracy: 0.6238\nEpoch 104, CIFAR-10 Batch 1: Loss: 0.2384 Accuracy: 0.6342\nEpoch 105, CIFAR-10 Batch 1: Loss: 0.2554 Accuracy: 0.6394\nEpoch 106, CIFAR-10 Batch 1: Loss: 0.2115 Accuracy: 0.6658\nEpoch 107, CIFAR-10 Batch 1: Loss: 0.1919 Accuracy: 0.6430\nEpoch 108, CIFAR-10 Batch 1: Loss: 0.1703 Accuracy: 0.6618\nEpoch 109, CIFAR-10 Batch 1: Loss: 0.1701 Accuracy: 0.6584\nEpoch 110, CIFAR-10 Batch 1: Loss: 0.2421 Accuracy: 0.6316\nEpoch 111, CIFAR-10 Batch 1: Loss: 0.1909 Accuracy: 0.6492\nEpoch 112, CIFAR-10 Batch 1: Loss: 0.1853 Accuracy: 0.6550\nEpoch 113, CIFAR-10 Batch 1: Loss: 0.1865 Accuracy: 0.6508\nEpoch 114, CIFAR-10 Batch 1: Loss: 0.1740 Accuracy: 0.6566\nEpoch 115, CIFAR-10 Batch 1: Loss: 0.1679 Accuracy: 0.6458\nEpoch 116, CIFAR-10 Batch 1: Loss: 0.1648 Accuracy: 0.6430\nEpoch 117, CIFAR-10 Batch 1: Loss: 0.1855 Accuracy: 0.6450\nEpoch 118, CIFAR-10 Batch 1: Loss: 0.1670 Accuracy: 0.6528\nEpoch 119, CIFAR-10 Batch 1: Loss: 0.1661 Accuracy: 0.6412\nEpoch 120, CIFAR-10 Batch 1: 
Loss: 0.1051 Accuracy: 0.6566\nEpoch 121, CIFAR-10 Batch 1: Loss: 0.0815 Accuracy: 0.6694\nEpoch 122, CIFAR-10 Batch 1: Loss: 0.0835 Accuracy: 0.6744\nEpoch 123, CIFAR-10 Batch 1: Loss: 0.0970 Accuracy: 0.6672\nEpoch 124, CIFAR-10 Batch 1: Loss: 0.0846 Accuracy: 0.6546\nEpoch 125, CIFAR-10 Batch 1: Loss: 0.0916 Accuracy: 0.6660\nEpoch 126, CIFAR-10 Batch 1: Loss: 0.0668 Accuracy: 0.6756\nEpoch 127, CIFAR-10 Batch 1: Loss: 0.0661 Accuracy: 0.6726\nEpoch 128, CIFAR-10 Batch 1: Loss: 0.0693 Accuracy: 0.6738\nEpoch 129, CIFAR-10 Batch 1: Loss: 0.0782 Accuracy: 0.6628\nEpoch 130, CIFAR-10 Batch 1: Loss: 0.0725 Accuracy: 0.6670\nEpoch 131, CIFAR-10 Batch 1: Loss: 0.0957 Accuracy: 0.6616\nEpoch 132, CIFAR-10 Batch 1: Loss: 0.0959 Accuracy: 0.6536\nEpoch 133, CIFAR-10 Batch 1: Loss: 0.0861 Accuracy: 0.6546\nEpoch 134, CIFAR-10 Batch 1: Loss: 0.0524 Accuracy: 0.6670\nEpoch 135, CIFAR-10 Batch 1: Loss: 0.0457 Accuracy: 0.6680\nEpoch 136, CIFAR-10 Batch 1: Loss: 0.0494 Accuracy: 0.6748\nEpoch 137, CIFAR-10 Batch 1: Loss: 0.0328 Accuracy: 0.6736\nEpoch 138, CIFAR-10 Batch 1: Loss: 0.0367 Accuracy: 0.6800\nEpoch 139, CIFAR-10 Batch 1: Loss: 0.0377 Accuracy: 0.6724\nEpoch 140, CIFAR-10 Batch 1: Loss: 0.0390 Accuracy: 0.6774\nEpoch 141, CIFAR-10 Batch 1: Loss: 0.0469 Accuracy: 0.6624\nEpoch 142, CIFAR-10 Batch 1: Loss: 0.0420 Accuracy: 0.6754\nEpoch 143, CIFAR-10 Batch 1: Loss: 0.0444 Accuracy: 0.6684\nEpoch 144, CIFAR-10 Batch 1: Loss: 0.0311 Accuracy: 0.6678\nEpoch 145, CIFAR-10 Batch 1: Loss: 0.0244 Accuracy: 0.6870\nEpoch 146, CIFAR-10 Batch 1: Loss: 0.0256 Accuracy: 0.6916\nEpoch 147, CIFAR-10 Batch 1: Loss: 0.0382 Accuracy: 0.6788\nEpoch 148, CIFAR-10 Batch 1: Loss: 0.0364 Accuracy: 0.6724\nEpoch 149, CIFAR-10 Batch 1: Loss: 0.0314 Accuracy: 0.6722\nEpoch 150, CIFAR-10 Batch 1: Loss: 0.0238 Accuracy: 0.6872\nEpoch 151, CIFAR-10 Batch 1: Loss: 0.0274 Accuracy: 0.6614\nEpoch 152, CIFAR-10 Batch 1: Loss: 0.0197 Accuracy: 0.6724\nEpoch 153, CIFAR-10 Batch 1: Loss: 0.0162 Accuracy: 0.6890\nEpoch 154, CIFAR-10 Batch 1: Loss: 0.0305 Accuracy: 0.6794\nEpoch 155, CIFAR-10 Batch 1: Loss: 0.0187 Accuracy: 0.6912\nEpoch 156, CIFAR-10 Batch 1: Loss: 0.0214 Accuracy: 0.6702\nEpoch 157, CIFAR-10 Batch 1: Loss: 0.0166 Accuracy: 0.6830\nEpoch 158, CIFAR-10 Batch 1: Loss: 0.0180 Accuracy: 0.6758\nEpoch 159, CIFAR-10 Batch 1: Loss: 0.0201 Accuracy: 0.6864\nEpoch 160, CIFAR-10 Batch 1: Loss: 0.0141 Accuracy: 0.6844\nEpoch 161, CIFAR-10 Batch 1: Loss: 0.0152 Accuracy: 0.6800\nEpoch 162, CIFAR-10 Batch 1: Loss: 0.0171 Accuracy: 0.6790\nEpoch 163, CIFAR-10 Batch 1: Loss: 0.0108 Accuracy: 0.6830\nEpoch 164, CIFAR-10 Batch 1: Loss: 0.0092 Accuracy: 0.6878\nEpoch 165, CIFAR-10 Batch 1: Loss: 0.0300 Accuracy: 0.6662\nEpoch 166, CIFAR-10 Batch 1: Loss: 0.0240 Accuracy: 0.6798\nEpoch 167, CIFAR-10 Batch 1: Loss: 0.0102 Accuracy: 0.6732\nEpoch 168, CIFAR-10 Batch 1: Loss: 0.0129 Accuracy: 0.6688\nEpoch 169, CIFAR-10 Batch 1: Loss: 0.0084 Accuracy: 0.6810\nEpoch 170, CIFAR-10 Batch 1: Loss: 0.0082 Accuracy: 0.6846\nEpoch 171, CIFAR-10 Batch 1: Loss: 0.0064 Accuracy: 0.6870\nEpoch 172, CIFAR-10 Batch 1: Loss: 0.0122 Accuracy: 0.6616\nEpoch 173, CIFAR-10 Batch 1: Loss: 0.0074 Accuracy: 0.6762\nEpoch 174, CIFAR-10 Batch 1: Loss: 0.0214 Accuracy: 0.6730\nEpoch 175, CIFAR-10 Batch 1: Loss: 0.0078 Accuracy: 0.6768\nEpoch 176, CIFAR-10 Batch 1: Loss: 0.0061 Accuracy: 0.6896\nEpoch 177, CIFAR-10 Batch 1: Loss: 0.0217 Accuracy: 0.6620\nEpoch 178, CIFAR-10 Batch 1: Loss: 0.0059 Accuracy: 0.6882\nEpoch 179, CIFAR-10 Batch 1: Loss: 0.0048 
Accuracy: 0.6914\nEpoch 180, CIFAR-10 Batch 1: Loss: 0.0081 Accuracy: 0.6790\nEpoch 181, CIFAR-10 Batch 1: Loss: 0.0058 Accuracy: 0.6862\nEpoch 182, CIFAR-10 Batch 1: Loss: 0.0070 Accuracy: 0.6808\nEpoch 183, CIFAR-10 Batch 1: Loss: 0.0163 Accuracy: 0.6722\nEpoch 184, CIFAR-10 Batch 1: Loss: 0.0093 Accuracy: 0.6856\nEpoch 185, CIFAR-10 Batch 1: Loss: 0.0096 Accuracy: 0.6850\nEpoch 186, CIFAR-10 Batch 1: Loss: 0.0100 Accuracy: 0.6802\nEpoch 187, CIFAR-10 Batch 1: Loss: 0.0064 Accuracy: 0.6758\nEpoch 188, CIFAR-10 Batch 1: Loss: 0.0148 Accuracy: 0.6520\nEpoch 189, CIFAR-10 Batch 1: Loss: 0.0065 Accuracy: 0.6796\nEpoch 190, CIFAR-10 Batch 1: Loss: 0.0063 Accuracy: 0.6796\nEpoch 191, CIFAR-10 Batch 1: Loss: 0.0041 Accuracy: 0.6740\nEpoch 192, CIFAR-10 Batch 1: Loss: 0.0039 Accuracy: 0.6714\nEpoch 193, CIFAR-10 Batch 1: Loss: 0.0027 Accuracy: 0.6938\nEpoch 194, CIFAR-10 Batch 1: Loss: 0.0022 Accuracy: 0.6938\nEpoch 195, CIFAR-10 Batch 1: Loss: 0.0029 Accuracy: 0.6778\nEpoch 196, CIFAR-10 Batch 1: Loss: 0.0093 Accuracy: 0.6772\nEpoch 197, CIFAR-10 Batch 1: Loss: 0.0029 Accuracy: 0.6734\nEpoch 198, CIFAR-10 Batch 1: Loss: 0.0023 Accuracy: 0.6838\nEpoch 199, CIFAR-10 Batch 1: Loss: 0.0034 Accuracy: 0.6754\nEpoch 200, CIFAR-10 Batch 1: Loss: 0.0028 Accuracy: 0.6836\n"
]
],
[
[
"### 完全训练模型\n\n现在,单个 CIFAR-10 部分的准确率已经不错了,试试所有五个部分吧。",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_model_path = './image_classification'\n\nprint('Training...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n # Loop over all batches\n n_batches = 5\n for batch_i in range(1, n_batches + 1):\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)\n \n # Save Model\n saver = tf.train.Saver()\n save_path = saver.save(sess, save_model_path)",
"Training...\nEpoch 1, CIFAR-10 Batch 1: Loss: 2.2990 Accuracy: 0.1242\nEpoch 1, CIFAR-10 Batch 2: Loss: 2.2989 Accuracy: 0.0984\nEpoch 1, CIFAR-10 Batch 3: Loss: 2.2903 Accuracy: 0.1134\nEpoch 1, CIFAR-10 Batch 4: Loss: 2.2772 Accuracy: 0.1192\nEpoch 1, CIFAR-10 Batch 5: Loss: 2.2432 Accuracy: 0.1762\nEpoch 2, CIFAR-10 Batch 1: Loss: 2.1821 Accuracy: 0.2398\nEpoch 2, CIFAR-10 Batch 2: Loss: 2.0755 Accuracy: 0.2804\nEpoch 2, CIFAR-10 Batch 3: Loss: 1.9589 Accuracy: 0.2936\nEpoch 2, CIFAR-10 Batch 4: Loss: 1.9475 Accuracy: 0.3336\nEpoch 2, CIFAR-10 Batch 5: Loss: 1.9736 Accuracy: 0.3324\nEpoch 3, CIFAR-10 Batch 1: Loss: 2.0157 Accuracy: 0.3342\nEpoch 3, CIFAR-10 Batch 2: Loss: 1.8046 Accuracy: 0.3512\nEpoch 3, CIFAR-10 Batch 3: Loss: 1.7284 Accuracy: 0.3564\nEpoch 3, CIFAR-10 Batch 4: Loss: 1.7664 Accuracy: 0.3980\nEpoch 3, CIFAR-10 Batch 5: Loss: 1.7433 Accuracy: 0.3826\nEpoch 4, CIFAR-10 Batch 1: Loss: 1.8219 Accuracy: 0.4198\nEpoch 4, CIFAR-10 Batch 2: Loss: 1.6349 Accuracy: 0.4172\nEpoch 4, CIFAR-10 Batch 3: Loss: 1.5965 Accuracy: 0.4204\nEpoch 4, CIFAR-10 Batch 4: Loss: 1.6699 Accuracy: 0.4382\nEpoch 4, CIFAR-10 Batch 5: Loss: 1.6740 Accuracy: 0.4078\nEpoch 5, CIFAR-10 Batch 1: Loss: 1.7249 Accuracy: 0.4488\nEpoch 5, CIFAR-10 Batch 2: Loss: 1.4835 Accuracy: 0.4432\nEpoch 5, CIFAR-10 Batch 3: Loss: 1.4859 Accuracy: 0.4434\nEpoch 5, CIFAR-10 Batch 4: Loss: 1.6320 Accuracy: 0.4500\nEpoch 5, CIFAR-10 Batch 5: Loss: 1.5858 Accuracy: 0.4222\nEpoch 6, CIFAR-10 Batch 1: Loss: 1.6674 Accuracy: 0.4660\nEpoch 6, CIFAR-10 Batch 2: Loss: 1.5061 Accuracy: 0.4328\nEpoch 6, CIFAR-10 Batch 3: Loss: 1.4265 Accuracy: 0.4772\nEpoch 6, CIFAR-10 Batch 4: Loss: 1.5376 Accuracy: 0.4732\nEpoch 6, CIFAR-10 Batch 5: Loss: 1.4722 Accuracy: 0.4830\nEpoch 7, CIFAR-10 Batch 1: Loss: 1.5917 Accuracy: 0.4764\nEpoch 7, CIFAR-10 Batch 2: Loss: 1.4341 Accuracy: 0.4738\nEpoch 7, CIFAR-10 Batch 3: Loss: 1.3711 Accuracy: 0.4870\nEpoch 7, CIFAR-10 Batch 4: Loss: 1.4752 Accuracy: 0.4874\nEpoch 7, CIFAR-10 Batch 5: Loss: 1.4793 Accuracy: 0.4506\nEpoch 8, CIFAR-10 Batch 1: Loss: 1.5289 Accuracy: 0.5098\nEpoch 8, CIFAR-10 Batch 2: Loss: 1.4208 Accuracy: 0.5026\nEpoch 8, CIFAR-10 Batch 3: Loss: 1.3289 Accuracy: 0.5000\nEpoch 8, CIFAR-10 Batch 4: Loss: 1.4045 Accuracy: 0.5242\nEpoch 8, CIFAR-10 Batch 5: Loss: 1.4499 Accuracy: 0.4628\nEpoch 9, CIFAR-10 Batch 1: Loss: 1.4232 Accuracy: 0.5146\nEpoch 9, CIFAR-10 Batch 2: Loss: 1.3775 Accuracy: 0.5368\nEpoch 9, CIFAR-10 Batch 3: Loss: 1.3581 Accuracy: 0.5020\nEpoch 9, CIFAR-10 Batch 4: Loss: 1.3345 Accuracy: 0.5460\nEpoch 9, CIFAR-10 Batch 5: Loss: 1.3752 Accuracy: 0.5066\nEpoch 10, CIFAR-10 Batch 1: Loss: 1.3309 Accuracy: 0.5494\nEpoch 10, CIFAR-10 Batch 2: Loss: 1.3696 Accuracy: 0.5344\nEpoch 10, CIFAR-10 Batch 3: Loss: 1.2076 Accuracy: 0.5432\nEpoch 10, CIFAR-10 Batch 4: Loss: 1.2601 Accuracy: 0.5446\nEpoch 10, CIFAR-10 Batch 5: Loss: 1.3157 Accuracy: 0.5092\nEpoch 11, CIFAR-10 Batch 1: Loss: 1.2238 Accuracy: 0.5454\nEpoch 11, CIFAR-10 Batch 2: Loss: 1.4382 Accuracy: 0.5516\nEpoch 11, CIFAR-10 Batch 3: Loss: 1.2283 Accuracy: 0.5364\nEpoch 11, CIFAR-10 Batch 4: Loss: 1.1790 Accuracy: 0.5680\nEpoch 11, CIFAR-10 Batch 5: Loss: 1.2405 Accuracy: 0.5320\nEpoch 12, CIFAR-10 Batch 1: Loss: 1.1388 Accuracy: 0.5820\nEpoch 12, CIFAR-10 Batch 2: Loss: 1.3224 Accuracy: 0.5634\nEpoch 12, CIFAR-10 Batch 3: Loss: 1.1503 Accuracy: 0.5740\nEpoch 12, CIFAR-10 Batch 4: Loss: 1.1259 Accuracy: 0.5864\nEpoch 12, CIFAR-10 Batch 5: Loss: 1.2610 Accuracy: 0.5458\nEpoch 13, CIFAR-10 Batch 1: Loss: 1.1514 
Accuracy: 0.5936\nEpoch 13, CIFAR-10 Batch 2: Loss: 1.2045 Accuracy: 0.6064\nEpoch 13, CIFAR-10 Batch 3: Loss: 1.1842 Accuracy: 0.5526\nEpoch 13, CIFAR-10 Batch 4: Loss: 1.0902 Accuracy: 0.5976\nEpoch 13, CIFAR-10 Batch 5: Loss: 1.2523 Accuracy: 0.5404\nEpoch 14, CIFAR-10 Batch 1: Loss: 1.0472 Accuracy: 0.6164\nEpoch 14, CIFAR-10 Batch 2: Loss: 1.1703 Accuracy: 0.6236\nEpoch 14, CIFAR-10 Batch 3: Loss: 0.9812 Accuracy: 0.6112\nEpoch 14, CIFAR-10 Batch 4: Loss: 1.0638 Accuracy: 0.5832\nEpoch 14, CIFAR-10 Batch 5: Loss: 1.1103 Accuracy: 0.5834\nEpoch 15, CIFAR-10 Batch 1: Loss: 1.0332 Accuracy: 0.6114\nEpoch 15, CIFAR-10 Batch 2: Loss: 1.1341 Accuracy: 0.6414\nEpoch 15, CIFAR-10 Batch 3: Loss: 1.0505 Accuracy: 0.5838\nEpoch 15, CIFAR-10 Batch 4: Loss: 0.9442 Accuracy: 0.6464\nEpoch 15, CIFAR-10 Batch 5: Loss: 1.1141 Accuracy: 0.5910\nEpoch 16, CIFAR-10 Batch 1: Loss: 1.0301 Accuracy: 0.6044\nEpoch 16, CIFAR-10 Batch 2: Loss: 1.1813 Accuracy: 0.6204\nEpoch 16, CIFAR-10 Batch 3: Loss: 1.0379 Accuracy: 0.5800\nEpoch 16, CIFAR-10 Batch 4: Loss: 0.9038 Accuracy: 0.6382\nEpoch 16, CIFAR-10 Batch 5: Loss: 1.1423 Accuracy: 0.6112\nEpoch 17, CIFAR-10 Batch 1: Loss: 0.9734 Accuracy: 0.6424\nEpoch 17, CIFAR-10 Batch 2: Loss: 1.0955 Accuracy: 0.6250\nEpoch 17, CIFAR-10 Batch 3: Loss: 0.9875 Accuracy: 0.5926\nEpoch 17, CIFAR-10 Batch 4: Loss: 0.8259 Accuracy: 0.6594\nEpoch 17, CIFAR-10 Batch 5: Loss: 1.0233 Accuracy: 0.6412\nEpoch 18, CIFAR-10 Batch 1: Loss: 0.8530 Accuracy: 0.6746\nEpoch 18, CIFAR-10 Batch 2: Loss: 1.0345 Accuracy: 0.6548\nEpoch 18, CIFAR-10 Batch 3: Loss: 0.8824 Accuracy: 0.6372\nEpoch 18, CIFAR-10 Batch 4: Loss: 0.8165 Accuracy: 0.6660\nEpoch 18, CIFAR-10 Batch 5: Loss: 0.9477 Accuracy: 0.6742\nEpoch 19, CIFAR-10 Batch 1: Loss: 0.8394 Accuracy: 0.6670\nEpoch 19, CIFAR-10 Batch 2: Loss: 1.0045 Accuracy: 0.6578\nEpoch 19, CIFAR-10 Batch 3: Loss: 0.7900 Accuracy: 0.6604\nEpoch 19, CIFAR-10 Batch 4: Loss: 0.7782 Accuracy: 0.6710\nEpoch 19, CIFAR-10 Batch 5: Loss: 0.9517 Accuracy: 0.6470\nEpoch 20, CIFAR-10 Batch 1: Loss: 0.7991 Accuracy: 0.6802\nEpoch 20, CIFAR-10 Batch 2: Loss: 1.0704 Accuracy: 0.6352\nEpoch 20, CIFAR-10 Batch 3: Loss: 0.8044 Accuracy: 0.6554\nEpoch 20, CIFAR-10 Batch 4: Loss: 0.8120 Accuracy: 0.6718\nEpoch 20, CIFAR-10 Batch 5: Loss: 0.9089 Accuracy: 0.6626\nEpoch 21, CIFAR-10 Batch 1: Loss: 0.7858 Accuracy: 0.6850\nEpoch 21, CIFAR-10 Batch 2: Loss: 1.0655 Accuracy: 0.6426\nEpoch 21, CIFAR-10 Batch 3: Loss: 0.7813 Accuracy: 0.6518\nEpoch 21, CIFAR-10 Batch 4: Loss: 0.8061 Accuracy: 0.6624\nEpoch 21, CIFAR-10 Batch 5: Loss: 0.8049 Accuracy: 0.6740\nEpoch 22, CIFAR-10 Batch 1: Loss: 0.7169 Accuracy: 0.6954\nEpoch 22, CIFAR-10 Batch 2: Loss: 0.9672 Accuracy: 0.6764\nEpoch 22, CIFAR-10 Batch 3: Loss: 0.7923 Accuracy: 0.6416\nEpoch 22, CIFAR-10 Batch 4: Loss: 0.7833 Accuracy: 0.6720\nEpoch 22, CIFAR-10 Batch 5: Loss: 0.8444 Accuracy: 0.6804\nEpoch 23, CIFAR-10 Batch 1: Loss: 0.7841 Accuracy: 0.6918\nEpoch 23, CIFAR-10 Batch 2: Loss: 0.8397 Accuracy: 0.7058\nEpoch 23, CIFAR-10 Batch 3: Loss: 0.7241 Accuracy: 0.6434\nEpoch 23, CIFAR-10 Batch 4: Loss: 0.7098 Accuracy: 0.6898\nEpoch 23, CIFAR-10 Batch 5: Loss: 0.8419 Accuracy: 0.6642\nEpoch 24, CIFAR-10 Batch 1: Loss: 0.7428 Accuracy: 0.6838\nEpoch 24, CIFAR-10 Batch 2: Loss: 0.7919 Accuracy: 0.6878\nEpoch 24, CIFAR-10 Batch 3: Loss: 0.6420 Accuracy: 0.6858\nEpoch 24, CIFAR-10 Batch 4: Loss: 0.6830 Accuracy: 0.6962\nEpoch 24, CIFAR-10 Batch 5: Loss: 0.8711 Accuracy: 0.6720\nEpoch 25, CIFAR-10 Batch 1: Loss: 0.6751 Accuracy: 
0.7030\nEpoch 25, CIFAR-10 Batch 2: Loss: 0.7409 Accuracy: 0.6998\nEpoch 25, CIFAR-10 Batch 3: Loss: 0.5714 Accuracy: 0.6954\nEpoch 25, CIFAR-10 Batch 4: Loss: 0.5789 Accuracy: 0.7102\nEpoch 25, CIFAR-10 Batch 5: Loss: 0.6612 Accuracy: 0.7132\nEpoch 26, CIFAR-10 Batch 1: Loss: 0.6890 Accuracy: 0.6960\nEpoch 26, CIFAR-10 Batch 2: Loss: 0.7506 Accuracy: 0.7046\nEpoch 26, CIFAR-10 Batch 3: Loss: 0.7047 Accuracy: 0.6746\nEpoch 26, CIFAR-10 Batch 4: Loss: 0.7435 Accuracy: 0.6876\nEpoch 26, CIFAR-10 Batch 5: Loss: 0.7175 Accuracy: 0.6966\nEpoch 27, CIFAR-10 Batch 1: Loss: 0.6820 Accuracy: 0.7110\nEpoch 27, CIFAR-10 Batch 2: Loss: 0.7786 Accuracy: 0.6828\nEpoch 27, CIFAR-10 Batch 3: Loss: 0.5677 Accuracy: 0.6962\nEpoch 27, CIFAR-10 Batch 4: Loss: 0.5545 Accuracy: 0.7230\nEpoch 27, CIFAR-10 Batch 5: Loss: 0.7726 Accuracy: 0.7030\nEpoch 28, CIFAR-10 Batch 1: Loss: 0.7231 Accuracy: 0.7082\nEpoch 28, CIFAR-10 Batch 2: Loss: 0.7387 Accuracy: 0.6892\nEpoch 28, CIFAR-10 Batch 3: Loss: 0.5264 Accuracy: 0.6916\nEpoch 28, CIFAR-10 Batch 4: Loss: 0.5427 Accuracy: 0.7216\nEpoch 28, CIFAR-10 Batch 5: Loss: 0.6609 Accuracy: 0.7050\nEpoch 29, CIFAR-10 Batch 1: Loss: 0.6431 Accuracy: 0.7190\nEpoch 29, CIFAR-10 Batch 2: Loss: 0.7013 Accuracy: 0.7066\nEpoch 29, CIFAR-10 Batch 3: Loss: 0.4793 Accuracy: 0.7118\nEpoch 29, CIFAR-10 Batch 4: Loss: 0.6576 Accuracy: 0.6996\nEpoch 29, CIFAR-10 Batch 5: Loss: 0.7519 Accuracy: 0.6874\nEpoch 30, CIFAR-10 Batch 1: Loss: 0.6193 Accuracy: 0.7420\nEpoch 30, CIFAR-10 Batch 2: Loss: 0.6538 Accuracy: 0.7138\nEpoch 30, CIFAR-10 Batch 3: Loss: 0.4199 Accuracy: 0.7212\nEpoch 30, CIFAR-10 Batch 4: Loss: 0.6472 Accuracy: 0.7076\nEpoch 30, CIFAR-10 Batch 5: Loss: 0.6747 Accuracy: 0.6896\nEpoch 31, CIFAR-10 Batch 1: Loss: 0.5582 Accuracy: 0.7334\nEpoch 31, CIFAR-10 Batch 2: Loss: 0.6682 Accuracy: 0.6976\nEpoch 31, CIFAR-10 Batch 3: Loss: 0.5242 Accuracy: 0.7138\nEpoch 31, CIFAR-10 Batch 4: Loss: 0.5034 Accuracy: 0.7270\nEpoch 31, CIFAR-10 Batch 5: Loss: 0.5787 Accuracy: 0.7188\nEpoch 32, CIFAR-10 Batch 1: Loss: 0.5440 Accuracy: 0.7352\nEpoch 32, CIFAR-10 Batch 2: Loss: 0.6663 Accuracy: 0.6984\nEpoch 32, CIFAR-10 Batch 3: Loss: 0.4486 Accuracy: 0.7248\nEpoch 32, CIFAR-10 Batch 4: Loss: 0.5507 Accuracy: 0.7260\nEpoch 32, CIFAR-10 Batch 5: Loss: 0.5716 Accuracy: 0.7218\nEpoch 33, CIFAR-10 Batch 1: Loss: 0.4697 Accuracy: 0.7300\nEpoch 33, CIFAR-10 Batch 2: Loss: 0.5284 Accuracy: 0.7210\nEpoch 33, CIFAR-10 Batch 3: Loss: 0.5077 Accuracy: 0.7036\nEpoch 33, CIFAR-10 Batch 4: Loss: 0.5529 Accuracy: 0.7162\nEpoch 33, CIFAR-10 Batch 5: Loss: 0.5547 Accuracy: 0.7324\nEpoch 34, CIFAR-10 Batch 1: Loss: 0.4196 Accuracy: 0.7514\nEpoch 34, CIFAR-10 Batch 2: Loss: 0.5479 Accuracy: 0.7204\nEpoch 34, CIFAR-10 Batch 3: Loss: 0.4248 Accuracy: 0.7364\nEpoch 34, CIFAR-10 Batch 4: Loss: 0.4809 Accuracy: 0.7402\nEpoch 34, CIFAR-10 Batch 5: Loss: 0.5629 Accuracy: 0.7230\nEpoch 35, CIFAR-10 Batch 1: Loss: 0.5213 Accuracy: 0.7484\nEpoch 35, CIFAR-10 Batch 2: Loss: 0.7084 Accuracy: 0.6712\nEpoch 35, CIFAR-10 Batch 3: Loss: 0.3471 Accuracy: 0.7396\nEpoch 35, CIFAR-10 Batch 4: Loss: 0.4446 Accuracy: 0.7326\nEpoch 35, CIFAR-10 Batch 5: Loss: 0.4609 Accuracy: 0.7348\nEpoch 36, CIFAR-10 Batch 1: Loss: 0.4922 Accuracy: 0.7350\nEpoch 36, CIFAR-10 Batch 2: Loss: 0.6410 Accuracy: 0.7082\nEpoch 36, CIFAR-10 Batch 3: Loss: 0.3765 Accuracy: 0.7278\nEpoch 36, CIFAR-10 Batch 4: Loss: 0.4533 Accuracy: 0.7218\nEpoch 36, CIFAR-10 Batch 5: Loss: 0.4645 Accuracy: 0.7250\nEpoch 37, CIFAR-10 Batch 1: Loss: 0.4627 Accuracy: 0.7498\nEpoch 
37, CIFAR-10 Batch 2: Loss: 0.5271 Accuracy: 0.7338\nEpoch 37, CIFAR-10 Batch 3: Loss: 0.3728 Accuracy: 0.7374\nEpoch 37, CIFAR-10 Batch 4: Loss: 0.4523 Accuracy: 0.7340\nEpoch 37, CIFAR-10 Batch 5: Loss: 0.4630 Accuracy: 0.7520\nEpoch 38, CIFAR-10 Batch 1: Loss: 0.5189 Accuracy: 0.7442\nEpoch 38, CIFAR-10 Batch 2: Loss: 0.5561 Accuracy: 0.7230\nEpoch 38, CIFAR-10 Batch 3: Loss: 0.2987 Accuracy: 0.7396\nEpoch 38, CIFAR-10 Batch 4: Loss: 0.4000 Accuracy: 0.7484\nEpoch 38, CIFAR-10 Batch 5: Loss: 0.4616 Accuracy: 0.7384\nEpoch 39, CIFAR-10 Batch 1: Loss: 0.4557 Accuracy: 0.7560\nEpoch 39, CIFAR-10 Batch 2: Loss: 0.5801 Accuracy: 0.7286\nEpoch 39, CIFAR-10 Batch 3: Loss: 0.3125 Accuracy: 0.7412\nEpoch 39, CIFAR-10 Batch 4: Loss: 0.4242 Accuracy: 0.7276\nEpoch 39, CIFAR-10 Batch 5: Loss: 0.4778 Accuracy: 0.7144\nEpoch 40, CIFAR-10 Batch 1: Loss: 0.4739 Accuracy: 0.7380\nEpoch 40, CIFAR-10 Batch 2: Loss: 0.4849 Accuracy: 0.7390\nEpoch 40, CIFAR-10 Batch 3: Loss: 0.3120 Accuracy: 0.7432\nEpoch 40, CIFAR-10 Batch 4: Loss: 0.4082 Accuracy: 0.7340\nEpoch 40, CIFAR-10 Batch 5: Loss: 0.5077 Accuracy: 0.7244\nEpoch 41, CIFAR-10 Batch 1: Loss: 0.4157 Accuracy: 0.7522\nEpoch 41, CIFAR-10 Batch 2: Loss: 0.4557 Accuracy: 0.7448\nEpoch 41, CIFAR-10 Batch 3: Loss: 0.3861 Accuracy: 0.7230\nEpoch 41, CIFAR-10 Batch 4: Loss: 0.3625 Accuracy: 0.7510\nEpoch 41, CIFAR-10 Batch 5: Loss: 0.4289 Accuracy: 0.7454\nEpoch 42, CIFAR-10 Batch 1: Loss: 0.4259 Accuracy: 0.7476\nEpoch 42, CIFAR-10 Batch 2: Loss: 0.4318 Accuracy: 0.7506\nEpoch 42, CIFAR-10 Batch 3: Loss: 0.3366 Accuracy: 0.7372\nEpoch 42, CIFAR-10 Batch 4: Loss: 0.4306 Accuracy: 0.7356\nEpoch 42, CIFAR-10 Batch 5: Loss: 0.3897 Accuracy: 0.7408\nEpoch 43, CIFAR-10 Batch 1: Loss: 0.3964 Accuracy: 0.7608\nEpoch 43, CIFAR-10 Batch 2: Loss: 0.4090 Accuracy: 0.7450\nEpoch 43, CIFAR-10 Batch 3: Loss: 0.2613 Accuracy: 0.7630\nEpoch 43, CIFAR-10 Batch 4: Loss: 0.4524 Accuracy: 0.7304\nEpoch 43, CIFAR-10 Batch 5: Loss: 0.3883 Accuracy: 0.7356\nEpoch 44, CIFAR-10 Batch 1: Loss: 0.3773 Accuracy: 0.7708\nEpoch 44, CIFAR-10 Batch 2: Loss: 0.4500 Accuracy: 0.7426\nEpoch 44, CIFAR-10 Batch 3: Loss: 0.2481 Accuracy: 0.7578\nEpoch 44, CIFAR-10 Batch 4: Loss: 0.3320 Accuracy: 0.7530\nEpoch 44, CIFAR-10 Batch 5: Loss: 0.4072 Accuracy: 0.7472\nEpoch 45, CIFAR-10 Batch 1: Loss: 0.3763 Accuracy: 0.7614\nEpoch 45, CIFAR-10 Batch 2: Loss: 0.4845 Accuracy: 0.7414\nEpoch 45, CIFAR-10 Batch 3: Loss: 0.2826 Accuracy: 0.7508\nEpoch 45, CIFAR-10 Batch 4: Loss: 0.3281 Accuracy: 0.7436\nEpoch 45, CIFAR-10 Batch 5: Loss: 0.3487 Accuracy: 0.7534\nEpoch 46, CIFAR-10 Batch 1: Loss: 0.4031 Accuracy: 0.7546\nEpoch 46, CIFAR-10 Batch 2: Loss: 0.3633 Accuracy: 0.7678\nEpoch 46, CIFAR-10 Batch 3: Loss: 0.2409 Accuracy: 0.7632\nEpoch 46, CIFAR-10 Batch 4: Loss: 0.3666 Accuracy: 0.7542\nEpoch 46, CIFAR-10 Batch 5: Loss: 0.3941 Accuracy: 0.7300\nEpoch 47, CIFAR-10 Batch 1: Loss: 0.3484 Accuracy: 0.7542\nEpoch 47, CIFAR-10 Batch 2: Loss: 0.4104 Accuracy: 0.7450\nEpoch 47, CIFAR-10 Batch 3: Loss: 0.2377 Accuracy: 0.7626\nEpoch 47, CIFAR-10 Batch 4: Loss: 0.3378 Accuracy: 0.7440\nEpoch 47, CIFAR-10 Batch 5: Loss: 0.3510 Accuracy: 0.7476\nEpoch 48, CIFAR-10 Batch 1: Loss: 0.4207 Accuracy: 0.7376\nEpoch 48, CIFAR-10 Batch 2: Loss: 0.4389 Accuracy: 0.7316\nEpoch 48, CIFAR-10 Batch 3: Loss: 0.3238 Accuracy: 0.7228\nEpoch 48, CIFAR-10 Batch 4: Loss: 0.3445 Accuracy: 0.7444\nEpoch 48, CIFAR-10 Batch 5: Loss: 0.3402 Accuracy: 0.7372\nEpoch 49, CIFAR-10 Batch 1: Loss: 0.3516 Accuracy: 0.7594\nEpoch 49, CIFAR-10 
Batch 2: Loss: 0.3818 Accuracy: 0.7540\nEpoch 49, CIFAR-10 Batch 3: Loss: 0.2065 Accuracy: 0.7682\nEpoch 49, CIFAR-10 Batch 4: Loss: 0.3164 Accuracy: 0.7456\nEpoch 49, CIFAR-10 Batch 5: Loss: 0.2689 Accuracy: 0.7660\nEpoch 50, CIFAR-10 Batch 1: Loss: 0.3109 Accuracy: 0.7694\nEpoch 50, CIFAR-10 Batch 2: Loss: 0.4700 Accuracy: 0.7272\nEpoch 50, CIFAR-10 Batch 3: Loss: 0.2431 Accuracy: 0.7674\nEpoch 50, CIFAR-10 Batch 4: Loss: 0.2287 Accuracy: 0.7774\nEpoch 50, CIFAR-10 Batch 5: Loss: 0.3236 Accuracy: 0.7576\nEpoch 51, CIFAR-10 Batch 1: Loss: 0.3493 Accuracy: 0.7778\nEpoch 51, CIFAR-10 Batch 2: Loss: 0.3794 Accuracy: 0.7658\nEpoch 51, CIFAR-10 Batch 3: Loss: 0.2293 Accuracy: 0.7552\nEpoch 51, CIFAR-10 Batch 4: Loss: 0.2817 Accuracy: 0.7592\nEpoch 51, CIFAR-10 Batch 5: Loss: 0.3450 Accuracy: 0.7548\nEpoch 52, CIFAR-10 Batch 1: Loss: 0.3077 Accuracy: 0.7844\nEpoch 52, CIFAR-10 Batch 2: Loss: 0.3874 Accuracy: 0.7544\nEpoch 52, CIFAR-10 Batch 3: Loss: 0.2020 Accuracy: 0.7696\nEpoch 52, CIFAR-10 Batch 4: Loss: 0.2275 Accuracy: 0.7654\nEpoch 52, CIFAR-10 Batch 5: Loss: 0.2890 Accuracy: 0.7626\nEpoch 53, CIFAR-10 Batch 1: Loss: 0.2955 Accuracy: 0.7866\nEpoch 53, CIFAR-10 Batch 2: Loss: 0.4292 Accuracy: 0.7440\nEpoch 53, CIFAR-10 Batch 3: Loss: 0.1879 Accuracy: 0.7640\nEpoch 53, CIFAR-10 Batch 4: Loss: 0.2703 Accuracy: 0.7720\nEpoch 53, CIFAR-10 Batch 5: Loss: 0.3677 Accuracy: 0.7470\nEpoch 54, CIFAR-10 Batch 1: Loss: 0.2827 Accuracy: 0.7698\nEpoch 54, CIFAR-10 Batch 2: Loss: 0.2753 Accuracy: 0.7724\nEpoch 54, CIFAR-10 Batch 3: Loss: 0.1960 Accuracy: 0.7746\nEpoch 54, CIFAR-10 Batch 4: Loss: 0.2482 Accuracy: 0.7790\nEpoch 54, CIFAR-10 Batch 5: Loss: 0.2988 Accuracy: 0.7722\nEpoch 55, CIFAR-10 Batch 1: Loss: 0.2741 Accuracy: 0.7806\nEpoch 55, CIFAR-10 Batch 2: Loss: 0.2970 Accuracy: 0.7642\nEpoch 55, CIFAR-10 Batch 3: Loss: 0.2392 Accuracy: 0.7692\nEpoch 55, CIFAR-10 Batch 4: Loss: 0.1716 Accuracy: 0.7882\nEpoch 55, CIFAR-10 Batch 5: Loss: 0.2274 Accuracy: 0.7718\nEpoch 56, CIFAR-10 Batch 1: Loss: 0.2951 Accuracy: 0.7694\n"
]
],
[
[
"# 检查点\n\n模型已保存到本地。\n\n## 测试模型\n\n利用测试数据集测试你的模型。这将是最终的准确率。你的准确率应该高于 50%。如果没达到,请继续调整模型结构和参数。",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport tensorflow as tf\nimport pickle\nimport helper\nimport random\n\n# Set batch size if not already set\ntry:\n if batch_size:\n pass\nexcept NameError:\n batch_size = 64\n\nsave_model_path = './image_classification'\nn_samples = 4\ntop_n_predictions = 3\n\ndef test_model():\n \"\"\"\n Test the saved model against the test dataset\n \"\"\"\n\n test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))\n loaded_graph = tf.Graph()\n\n with tf.Session(graph=loaded_graph) as sess:\n # Load model\n loader = tf.train.import_meta_graph(save_model_path + '.meta')\n loader.restore(sess, save_model_path)\n\n # Get Tensors from loaded model\n loaded_x = loaded_graph.get_tensor_by_name('x:0')\n loaded_y = loaded_graph.get_tensor_by_name('y:0')\n loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n loaded_logits = loaded_graph.get_tensor_by_name('logits:0')\n loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')\n \n # Get accuracy in batches for memory limitations\n test_batch_acc_total = 0\n test_batch_count = 0\n \n for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):\n test_batch_acc_total += sess.run(\n loaded_acc,\n feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})\n test_batch_count += 1\n\n print('Testing Accuracy: {}\\n'.format(test_batch_acc_total/test_batch_count))\n\n # Print Random Samples\n random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))\n random_test_predictions = sess.run(\n tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),\n feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})\n helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)\n\n\ntest_model()",
"INFO:tensorflow:Restoring parameters from ./image_classification\nTesting Accuracy: 0.7875199044585988\n\n"
]
],
[
[
"## 为何准确率只有50-80%?\n\n你可能想问,为何准确率不能更高了?首先,对于简单的 CNN 网络来说,50% 已经不低了。纯粹猜测的准确率为10%。但是,你可能注意到有人的准确率[远远超过 80%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130)。这是因为我们还没有介绍所有的神经网络知识。我们还需要掌握一些其他技巧。\n\n## 提交项目\n\n提交项目时,确保先运行所有单元,然后再保存记事本。将 notebook 文件另存为“dlnd_image_classification.ipynb”,再在目录 \"File\" -> \"Download as\" 另存为 HTML 格式。请在提交的项目中包含 “helper.py” 和 “problem_unittests.py” 文件。\n",
"_____no_output_____"
]
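One of those extra tricks is data augmentation. A minimal sketch (my addition, not part of the original project code; it assumes the `(N, 32, 32, 3)` float batches used throughout this notebook) that randomly flips images left-to-right before each training step:

```python
import numpy as np

def augment_batch(features, rng=None):
    """Randomly flip roughly half the images in a batch left-to-right."""
    if rng is None:
        rng = np.random.default_rng(0)
    features = features.copy()
    flip = rng.random(len(features)) < 0.5
    features[flip] = features[flip][:, :, ::-1, :]  # reverse the width axis
    return features
```

Flipped digits or street signs would change their meaning, but for CIFAR-10's object classes a horizontal flip preserves the label, which is why it is such a cheap way to enlarge the training set.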
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a11b59738239dfc28257222471c90dccdab5570
| 1,875 |
ipynb
|
Jupyter Notebook
|
tarea10.ipynb
|
humbertoguell/daa2020_1
|
14a628002c86917f9954ab157bc653c7f4e281ac
|
[
"MIT"
] | null | null | null |
tarea10.ipynb
|
humbertoguell/daa2020_1
|
14a628002c86917f9954ab157bc653c7f4e281ac
|
[
"MIT"
] | null | null | null |
tarea10.ipynb
|
humbertoguell/daa2020_1
|
14a628002c86917f9954ab157bc653c7f4e281ac
|
[
"MIT"
] | null | null | null | 28.846154 | 229 | 0.4784 |
[
[
[
"<a href=\"https://colab.research.google.com/github/humbertoguell/daa2020_1/blob/master/tarea10.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"class NodoArbol:\r\n def __init__( self , value , left = None , right = None ):\r\n self.data = value\r\n self.left = left\r\n self.right = right\r\n\r\narbol = NodoArbol(\"0\",NodoArbol(\"54\",NodoArbol(\"4\",NodoArbol(\"14\"))\r\n,NodoArbol(\"11\",NodoArbol(\"7\",NodoArbol(\"10\")),NodoArbol(\"20\",NodoArbol(\"12\")))))\r\n \r\n \r\nauxiliar = arbol\r\nwhile auxiliar.left != None:\r\n auxiliar = auxiliar.left\r\n\r\nprint(\"la ultima hoja del arbol es \", auxiliar.data)\r\n",
"la ultima hoja del arbol es 14\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a11c50741c11eac44073c40d874f78c2e996cc7
| 222,290 |
ipynb
|
Jupyter Notebook
|
notebooks/Deprecated/SimGenerator_v0.ipynb
|
tdps/DeepBench
|
f02df8da6d4d629eb2c8e7f585f4f1eaa4ec24f9
|
[
"Apache-2.0"
] | 1 |
2020-02-27T16:15:04.000Z
|
2020-02-27T16:15:04.000Z
|
notebooks/Deprecated/SimGenerator_v0.ipynb
|
tdps/DeepBench
|
f02df8da6d4d629eb2c8e7f585f4f1eaa4ec24f9
|
[
"Apache-2.0"
] | 2 |
2020-02-27T16:50:56.000Z
|
2020-03-10T14:48:56.000Z
|
notebooks/Deprecated/SimGenerator_v0.ipynb
|
tdps/DeepBench
|
f02df8da6d4d629eb2c8e7f585f4f1eaa4ec24f9
|
[
"Apache-2.0"
] | 2 |
2020-07-23T21:33:02.000Z
|
2022-03-12T08:10:41.000Z
| 199.54219 | 4,976 | 0.892082 |
[
[
[
"# Deprecated",
"_____no_output_____"
],
[
"# packages: random\nimport random\n\n# packages: data structure\nimport numpy as np\nimport pandas as pd\nimport astropy.io as io\n\n# packages: image generation and plot generation\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"# pandas\n# https://pandas.pydata.org/pandas-docs/stable/tutorials.html\n# https://pandas.pydata.org/pandas-docs/stable/10min.html\n\n# ascii:io\n# http://docs.astropy.org/en/stable/io/ascii/\n\n# matplotlib\n# https://nickcharlton.net/posts/drawing-animating-shapes-matplotlib.html",
"_____no_output_____"
],
[
"# numpy: empty canvas\ndef empty_canvas(image_side_length=100):\n\n return np.indices((image_side_length, image_side_length))\n \n \n# scikit learn: circle\ndef circle_sk(canvas, x_center=50, y_center=50, radius=30):\n\n y, x = canvas\n circle = (x - x_center)**2 + (y - y_center)**2 < radius**2\n img = circle.astype(float)\n \n return img\n\n\n# scikit learn: rectangle\ndef rect_sk(canvas, x_center=50, y_center=50, radius=30):\n\n y, x = canvas\n rect = (x < x_center + radius) & (x > x_center - radius) & (y < y_center + radius) & (y > y_center - radius)\n img = rect.astype(float)\n \n return img\n \n \n# scikit learn: rectangle\n#def triangle_sk(canvas, x_center=50, y_center=50, radius=30):\n\n# y, x = canvas\n# rect = (x < x_center + radius) & (x > x_center - radius) & (y < y_center + radius) & (y > y_center - radius)\n# img = rect.astype(float)\n \n# return img\n \n \n# plot for SPI package\ndef plot_spi(img):\n plt.axes() \n plt.imshow(img)\n plt.clf()\n \n \n# matplotlib pyplot\ndef circle_plt(x_center=0, y_center=0, radius=0.75, fc='r', show=False):\n\n plt.axes()\n\n circle = plt.Circle((x_center, y_center), radius=radius, fc=fc)\n plt.gca().add_patch(circle)\n plt.axis('scaled')\n imgplot = plt.imshow(img)\n imgplot = plt.savefig(\"test3.png\", dpi = (200))\n #imgplot = plt.imshow()\n \n if show:\n plt.show()\n\n# test each individual function\ndef test_individual():\n #circle()\n img = circle_sk()\n plot_spi(img)\n #star()\n return\n\n# generate one image data set\ndef generate_dataset(nb_obj, \n image_side_length=100, \n index_start=0, \n shape='rect',\n x_min=32,\n x_max=32,\n y_min=32,\n y_max=32,\n radius_min=10,\n radius_max=10,\n show_plot=False, \n verbose=False):\n\n # initiate image values\n fac = -1.0\n #x_center_list = np.random.uniform(0 + fac* radius_max, image_side_length + fac* radius_max, nb_obj)\n #y_center_list = np.random.uniform(0 + fac* radius_max, image_side_length + fac* radius_max, nb_obj)\n x_center_list = np.random.uniform(x_min, x_max, nb_obj)\n y_center_list = np.random.uniform(y_min, y_max, nb_obj)\n\n radius_list = np.random.uniform(radius_min, radius_max, nb_obj)\n print('x ranges', min(x_center_list), max(x_center_list))\n print('y ranges', min(y_center_list), max(y_center_list))\n column_names = ['ident', 'x_center', 'y_center', 'radius', 'shape']\n \n # create empty data structures\n tab_list = np.empty((nb_obj, len(column_names)))\n img_list = np.empty((nb_obj, image_side_length, image_side_length))\n \n # create empty canvas for a single image\n canvas = empty_canvas(image_side_length=image_side_length)\n \n # loop over objects\n icount = 0\n for i_obj in np.arange(nb_obj):\n \n # draw object properties from list\n x_center = x_center_list[i_obj]\n y_center = y_center_list[i_obj] \n radius = radius_list[i_obj]\n \n # identification value\n ident = int(index_start + i_obj)\n \n # create object\n if shape == 'rect':\n img = rect_sk(canvas, x_center=x_center, y_center=y_center, radius=radius)\n shape_num = 0\n elif shape == 'circ':\n img = circle_sk(canvas, x_center=x_center, y_center=y_center, radius=radius)\n shape_num = 1 \n\n # add tabular data to data list structure\n tab_list[i_obj] = [ident, x_center, y_center, radius, int(shape_num)]\n \n # add image data to image list structure\n img_list[i_obj] = img\n\n # plot image\n if show_plot and icount <20:\n icount+=1\n plt.figure()\n plt.axes() \n plt.imshow(img)\n \n # Data Frame: Tabular Data for Objects\n tab_list = pd.DataFrame(tab_list,columns=column_names)\n \n # verbose\n if verbose:\n 
print(tab_list[0:10])\n print(img_list[0:10])\n \n return tab_list, img_list\n\n\n# save data\ndef save_data(f_data_list, f_img_list, data_list, img_list, verbose=False):\n\n # Pandas Data Frame for tabular data: save to file\n data_list.to_csv(f_data_list)\n\n # Numpy Array for image data: save to file\n np.save(f_img_list, img_list)\n \n # verbose\n if verbose:\n print(f_data_list)\n print(f_img_list)\n \n return\n \n\n# combine data sets\ndef combine_data(frames, data_type='tab'): \n \n if data_type=='tab':\n data = pd.concat(frames)\n elif data_type=='img':\n data = np.concatenate(frames)\n \n return data\n\n\n# randomize data \ndef randomize_data(tab, img, seed=5, verbose=False):\n \n if verbose:\n print('Before:', tab)\n \n # create randomized indices\n random.seed(seed) \n nb_tab = len(tab)\n ind_random = np.arange(nb_tab) \n random.shuffle(ind_random)\n \n # re-order data based on randomized indices\n tab = tab.iloc[ind_random]\n img = img[ind_random]\n \n if verbose:\n print('After:', tab)\n\n return tab, img\n\n\n# split data \ndef split_data(nb_train, nb_valid, nb_test, tab, img, printcheck=0):\n\n \n ind_start_train = 0\n ind_end_train = ind_start_valid = ind_start_train + nb_train \n ind_end_valid = ind_start_test = ind_start_valid + nb_valid\n ind_end_test = ind_start_test + nb_test\n \n if printcheck > 0:\n print(tab[0:printcheck])\n print(ind_start_train, ind_end_train)\n # good place for unit test\n \n # split data in train, valid, test\n tab_train = tab[ind_start_train: ind_end_train]\n img_train = img[ind_start_train: ind_end_train]\n\n tab_valid = tab[ind_start_valid: ind_end_valid]\n img_valid = img[ind_start_valid: ind_end_valid]\n\n tab_test = tab[ind_start_test: ind_end_test]\n img_test = img[ind_start_test: ind_end_test]\n \n return tab_train, tab_valid, tab_test, img_train, img_valid, img_test",
"_____no_output_____"
],
[
"# Generate Data Parameters\nnb_obj = 5000\n#seed = 47283\nimage_side_length = 64\nx_min, x_max = 10, 54\ny_min, y_max = 10, 54\nradius_min, radius_max = 4,30\nshow_plot = True\n\n# Generate Data\ntab_a, img_a = generate_dataset(nb_obj, image_side_length=image_side_length, radius_min=radius_min, radius_max=radius_max, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, shape='rect', show_plot=show_plot)\ntab_b, img_b = generate_dataset(nb_obj, image_side_length=image_side_length, radius_min=radius_min, radius_max=radius_max, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max, shape='circ', show_plot=show_plot, index_start=nb_obj)\n\n# combine data\ntab = combine_data([tab_a, tab_b])\nimg = combine_data([img_a, img_b], data_type='img')\n\n# randomize data\ntab, img = randomize_data(tab, img, verbose=True)\nprint('range', np.min(img), np.max(img))\n\n# save data\nf_tab = 'test_generate_pipeline_circle_data.csv'\nf_img = 'test_generate_pipeline_circle_image.npy'\nsave_data(f_tab, f_img, tab, img, verbose=False )\n",
"x ranges 10.006670793022877 53.997868245890025\ny ranges 10.012215820298476 53.995160518827696\nx ranges 10.003455246206755 53.99969589009031\ny ranges 10.021885568801949 53.99680971642177\n"
]
],
[
[
"# Example: read data file and prepare data for network",
"_____no_output_____"
]
],
[
[
"# read data from file\ndata_list = pd.read_csv(f_tab)\nimg_list = np.load(f_img)\nprint('range', np.min(img), np.max(img))",
"range 0.0 1.0\n"
],
[
"# Training parameters\nbatch_size = 20\nnum_classes = 2\nepochs = 5\ntrain_me = True\n\nnb_train = 1000\nnb_valid = 100\nnb_test = 1000\n\nimg_rows = img_cols = img_list.shape[1]",
"_____no_output_____"
],
[
"# Prepare data\n\n# ... split data\noutput = split_data(nb_train, nb_valid, nb_test, tab, img, printcheck=0)\ny_train_temp, y_valid_temp, y_test_temp, x_train, x_valid, x_test = output\nprint(np.min(x_train), np.max(x_train))\n\n\n# ... identify value to train on\ny_train = y_train_temp['shape'].values\ny_valid = y_valid_temp['shape'].values\ny_test = y_test_temp['shape'].values\n\n\nprint(\"X train, valid, test shapes:\", \"\\n\", x_train.shape,\"\\n\", x_valid.shape,\"\\n\", x_test.shape)\nprint(\"y train, valid, test shapes:\", \"\\n\", y_train.shape,\"\\n\", y_valid.shape,\"\\n\", y_test.shape)",
"0.0 1.0\nX train, valid, test shapes: \n (1000, 64, 64) \n (100, 64, 64) \n (1000, 64, 64)\ny train, valid, test shapes: \n (1000,) \n (100,) \n (1000,)\n"
],
[
"''' MY DATA\n'''\n\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_valid = x_valid.reshape(x_valid.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_valid = x_valid.reshape(x_valid.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n\nx_train = x_train.astype('float32')\nx_valid = x_valid.astype('float32')\nx_test = x_test.astype('float32')\nprint('range', np.min(x_train), np.max(x_train))\n\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_valid.shape[0], 'valid samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_valid = keras.utils.to_categorical(y_valid, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n",
"range 0.0 1.0\nx_train shape: (1000, 64, 64, 1)\n1000 train samples\n100 valid samples\n1000 test samples\n"
],
[
"# create model\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(2, 2), activation='relu', input_shape=input_shape))\n#model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.3))\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(num_classes, activation='softmax'))\n\n# compile model\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])\nif train_me:\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_valid, y_valid))\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n",
"Train on 1000 samples, validate on 100 samples\nEpoch 1/5\n1000/1000 [==============================] - 3s 3ms/step - loss: 0.7142 - acc: 0.5270 - val_loss: 0.6753 - val_acc: 0.5100\nEpoch 2/5\n1000/1000 [==============================] - 2s 2ms/step - loss: 0.6608 - acc: 0.6060 - val_loss: 0.6310 - val_acc: 0.6700\nEpoch 3/5\n1000/1000 [==============================] - 2s 2ms/step - loss: 0.5702 - acc: 0.7430 - val_loss: 0.5467 - val_acc: 0.7800\nEpoch 4/5\n1000/1000 [==============================] - 2s 2ms/step - loss: 0.3856 - acc: 0.8790 - val_loss: 0.3321 - val_acc: 0.9300\nEpoch 5/5\n1000/1000 [==============================] - 2s 2ms/step - loss: 0.1981 - acc: 0.9510 - val_loss: 0.2033 - val_acc: 0.9300\nTest loss: 0.19812715935707093\nTest accuracy: 0.931\n"
],
[
"a = np.array([1.])\nb = a.astype('float32')\nprint(a, b)",
"_____no_output_____"
],
[
"import PIL.ImageDraw as ImageDraw,PIL.Image as Image, PIL.ImageShow as ImageShow \nim = Image.new(\"RGB\", (400,300))\ndraw = ImageDraw.Draw(im)\n\ndraw.arc((100,100,300,200),0,270,fill=255)\n\nim.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a11ccac3919e6c6aa54386b9ec7d580e26fdaa8
| 323,563 |
ipynb
|
Jupyter Notebook
|
content/labs/lab06/notebook/cs109b_lab6_CNN2_2020.ipynb
|
jlopezra/2020-CS109B
|
530b2fd9f3f225e8fe4ea38bdc42fbe0ebdea98e
|
[
"MIT"
] | null | null | null |
content/labs/lab06/notebook/cs109b_lab6_CNN2_2020.ipynb
|
jlopezra/2020-CS109B
|
530b2fd9f3f225e8fe4ea38bdc42fbe0ebdea98e
|
[
"MIT"
] | null | null | null |
content/labs/lab06/notebook/cs109b_lab6_CNN2_2020.ipynb
|
jlopezra/2020-CS109B
|
530b2fd9f3f225e8fe4ea38bdc42fbe0ebdea98e
|
[
"MIT"
] | null | null | null | 167.910223 | 70,356 | 0.887833 |
[
[
[
"# <img style=\"float: left; padding-right: 10px; width: 45px\" src=\"https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png\"> CS-109B Introduction to Data Science\n## Lab 6: Convolutional Neural Networks 2\n\n**Harvard University**<br>\n**Spring 2020**<br>\n**Instructors:** Mark Glickman, Pavlos Protopapas, and Chris Tanner<br>\n**Lab Instructors:** Chris Tanner and Eleni Angelaki Kaxiras<br>\n**Content:** Eleni Angelaki Kaxiras, Cedric Flamant, Pavlos Protopapas\n\n---",
"_____no_output_____"
]
],
[
[
"# RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES\nimport requests\nfrom IPython.core.display import HTML\nstyles = requests.get(\"https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css\").text\nHTML(styles)",
"_____no_output_____"
]
],
[
[
"## Learning Goals\n\nIn this lab we will continue with Convolutional Neural Networks (CNNs), will look into the `tf.data` interface which enables us to build complex input pipelines for our data. We will also touch upon visualization techniques to peak into our CNN's hidden layers.\n\nBy the end of this lab, you should be able to:\n\n- know how a CNN works from start to finish\n- use `tf.data.Dataset` to import and, if needed, transform, your data for feeding into the network. Transformations might include normalization, scaling, tilting, resizing, or applying other data augmentation techniques.\n- understand how `saliency maps` are implemented with code.",
"_____no_output_____"
],
[
"<a id=top></a> \n\n## Table of Contents\n\n1. **Part 1**: [Beginning-to-end Convolutional Neural Networks](#part1).\n2. **Part 2**: [Image Pipelines with `tf.data.Dataset`](#part2). \n3. **Part 3**: [Hidden Layer Visualization, Saliency Maps](#part3).",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom scipy.optimize import minimize\nfrom sklearn.utils import shuffle\n\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (5,5)\n%matplotlib inline",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, Conv2D, Conv1D, MaxPooling2D, MaxPooling1D,\\\n Dropout, Flatten, Activation, Input\nfrom tensorflow.keras.optimizers import Adam, SGD, RMSprop\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.metrics import AUC, Precision, Recall, FalsePositives, \\\n FalseNegatives, TruePositives, TrueNegatives\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.regularizers import l2",
"_____no_output_____"
],
[
"from __future__ import absolute_import, division, print_function, unicode_literals\ntf.keras.backend.clear_session() # For easy reset of notebook state.\nprint(tf.__version__) # You should see a > 2.0.0 here!\nfrom tf_keras_vis.utils import print_gpus\nprint_gpus()",
"2.1.0\n0 GPUs\n"
],
[
"## Additional Packages required if you don't already have them\n# While in your conda environment,\n\n# imageio\n# Install using \"conda install imageio\"\n# pillow\n# Install using \"conda install pillow\"\n# tensorflow-datasets\n# Install using \"conda install tensorflow-datasets\"\n# tf-keras-vis\n# Install using \"pip install tf-keras-vis\"\n# tensorflow-addons\n# Install using \"pip install tensorflow-addons\"",
"_____no_output_____"
],
[
"from tf_keras_vis.saliency import Saliency\nfrom tf_keras_vis.utils import normalize\nimport tf_keras_vis.utils as utils\nfrom matplotlib import cm\nfrom tf_keras_vis.gradcam import Gradcam",
"_____no_output_____"
],
[
"np.random.seed(109)\ntf.random.set_seed(109)",
"_____no_output_____"
]
],
[
[
"## Part 0: Running on SEAS JupyterHub\n\n**PLEASE READ**: [Instructions for Using SEAS JupyterHub](https://canvas.harvard.edu/courses/65462/pages/instructions-for-using-seas-jupyterhub?module_item_id=638544)\n\nSEAS and FAS are providing you with a platform in AWS to use for the class (accessible from the 'Jupyter' menu link in Canvas). These are AWS p2 instances with a GPU, 10GB of disk space, and 61 GB of RAM, for faster training for your networks. Most of the libraries such as keras, tensorflow, pandas, etc. are pre-installed. If a library is missing you may install it via the Terminal.\n\n**NOTE: The AWS platform is funded by SEAS and FAS for the purposes of the class. It is FREE for you - not running against your personal AWS credit. For this reason you are only allowed to use it for purposes related to this course, and with prudence.**\n\n**Help us keep this service: Make sure you stop your instance as soon as you do not need it. Your instance will terminate after 30 min of inactivity.**\n\n\n*source: CS231n Stanford, Google Cloud Tutorial*",
"_____no_output_____"
],
[
"<a id=part1></a>\n\n## Part 1: Beginning-to-end Convolutional Neural Networks\n\n\n\n*image [source](http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/)*\n<BR><BR>\nWe will go through the various steps of training a CNN, including:\n- difference between cross-validation and validation\n- specifying a loss, metrics, and an optimizer,\n- performing validation,\n- using callbacks, specifically `EarlyStopping`, which stops the training when training is no longer improving the validation metrics,\n- learning rate significance\n<BR><BR>\n<div class=\"exercise\" style=\"background-color:#b3e6ff\"><b>Table Exercise</b>: Use the whiteboard next to your table to draw a CNN from start to finish as per the instructions. We will then draw it together in class.</div> ",
"_____no_output_____"
],
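Before the lab works through these pieces one at a time, here is how they typically fit together in Keras (an illustrative sketch only; the model, data, and hyperparameters actually used in the lab come later):

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(loss='sparse_categorical_crossentropy',    # the loss
              optimizer=tf.keras.optimizers.Adam(1e-3),  # the optimizer; the learning rate matters
              metrics=['accuracy'])                      # the metrics
stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=2)
# model.fit(train_ds, validation_data=val_ds, epochs=20, callbacks=[stop])
```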
[
"<a id=part2></a> [Back to Table of Contents](#top)\n\n## Part 2: Image Preprocessing: Using `tf.data.Dataset`",
"_____no_output_____"
]
],
[
[
"import tensorflow_addons as tfa\nimport tensorflow_datasets as tfds",
"_____no_output_____"
]
],
[
[
" `tf.data` API in `tensorflow` enables you to build complex **input pipelines** from simple, reusable pieces. For example, the pipeline for an image model might aggregate data from files in a distributed file system, apply random perturbations to each image, and merge randomly selected images into a batch for training. \n\nThe pipeline for a text model might involve extracting symbols from raw text data, converting them to embedding identifiers with a lookup table, and batching together sequences of different lengths. The `tf.data API` makes it possible to handle large amounts of data, read from different data formats, and perform complex transformations.\n\nThe `tf.data API` introduces a `tf.data.Dataset` that represents a sequence of **elements**, consistινγ of one or more **components**. For example, in an image pipeline, an element might be a single training example, with a pair of tensor components representing the image and its label.\n\nTo create an input pipeline, you must start with a data **source**. For example, to construct a Dataset from data in memory, you can use `tf.data.Dataset.from_tensors()` or `tf.data.Dataset.from_tensor_slices()`. Alternatively, if your input data is stored in a file in the recommended TFRecord format, you can use `tf.data.TFRecordDataset()`.\n\nThe Dataset object is a Python iterable. You may view its elements using a for loop:",
"_____no_output_____"
]
],
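A quick way to see the difference between the two in-memory constructors mentioned above (a small sketch added for illustration):

```python
import tensorflow as tf

data = [[1, 2], [3, 4]]
ds_whole = tf.data.Dataset.from_tensors(data)        # one element: the whole 2x2 tensor
ds_rows = tf.data.Dataset.from_tensor_slices(data)   # two elements: one per row
print(len(list(ds_whole)), len(list(ds_rows)))       # -> 1 2
```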
[
[
"dataset = tf.data.Dataset.from_tensor_slices(tf.random.uniform([4, 10], minval=1, maxval=10, dtype=tf.int32))\n\nfor elem in dataset:\n print(elem.numpy())",
"[4 3 1 9 7 4 8 9 4 6]\n[9 6 2 2 6 4 7 2 9 8]\n[5 7 5 4 8 5 6 4 8 4]\n[6 2 2 2 6 6 4 2 2 2]\n"
]
],
[
[
"Once you have a Dataset object, you can **transform** it into a new Dataset by chaining method calls on the `tf.data.Dataset` object. For example, you can apply per-element transformations such as `Dataset.map()`, and multi-element transformations such as `Dataset.batch()`. See the [documentation](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) for `tf.data.Dataset` for a complete list of transformations.\n\nThe `map` function takes a function and returns a new and augmented dataset. ",
"_____no_output_____"
]
],
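`Dataset.batch()` works the same way; a tiny sketch (my addition) to complement the `map()` call that follows:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(10)
for batch in ds.batch(4):
    print(batch.numpy())  # [0 1 2 3], then [4 5 6 7], then [8 9]
```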
[
[
"dataset = dataset.map(lambda x: x*2) \nfor elem in dataset:\n print(elem.numpy())",
"[ 8 6 2 18 14 8 16 18 8 12]\n[18 12 4 4 12 8 14 4 18 16]\n[10 14 10 8 16 10 12 8 16 8]\n[12 4 4 4 12 12 8 4 4 4]\n"
]
],
[
[
"Datasets are powerful objects because they are effectively dictionaries that can store tensors and other data such as the response variable. We can also construct them by passing small sized `numpy` arrays, such as in the following example.\n\nTensorflow has a plethora of them:",
"_____no_output_____"
]
],
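Because elements can be dictionaries, one dataset can carry both images and labels under named keys, which is exactly the shape the SVHN dataset below arrives in. A minimal sketch with made-up shapes:

```python
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices(
    {"image": tf.zeros([4, 28, 28]), "label": tf.constant([0, 1, 2, 3])})
for example in ds.take(2):
    print(example["image"].shape, example["label"].numpy())
```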
[
[
"# uncomment to see available datasets\n#tfds.list_builders()",
"_____no_output_____"
]
],
[
[
"#### `mnist` dataset",
"_____no_output_____"
]
],
[
[
"# load mnist\n(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\nx_train.shape, y_train.shape",
"_____no_output_____"
],
[
"# take only 10 images for simplicity\ntrain_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntest_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))",
"_____no_output_____"
],
[
"# In case you want to retrieve the images/numpy arrays\nfor element in iter(train_dataset.take(1)):\n image = element[0].numpy()\n print(image.shape)\n print(image.shape)\n plt.figure()\n plt.imshow(image, cmap='gray')\n plt.show()",
"(28, 28)\n(28, 28)\n"
]
],
[
[
"Once you have your Model, you may pass a Dataset instance directly to the methods `fit()`, `evaluate()`, and `predict()`. The difference with the way we have been previously using these methods is that we are not passing the images and labels separately. They are now both in the Dataset object.\n\n```\nmodel.fit(train_dataset, epochs=3)\n\nmodel.evaluate(test_dataset)\n```",
"_____no_output_____"
],
[
"#### Data Augmentation",
"_____no_output_____"
]
],
[
[
"fig, axes = plt.subplots(1,6, figsize=(10,3))\nfor i, (image, label) in enumerate(train_dataset.take(4)):\n axes[i].imshow(image)\n axes[i].set_title(f'{label:.2f}')\nimage_flip_up = tf.image.flip_up_down(np.expand_dims(image, axis=2)).numpy()\nimage_rot_90 = tf.image.rot90(np.expand_dims(image, axis=2), k=1).numpy()\naxes[4].imshow(image_flip_up.reshape(28,-1))\naxes[4].set_title(f'{label:.2f}-flip')\naxes[5].imshow(image_rot_90.reshape(28,-1))\naxes[5].set_title(f'{label:.2f}-rot90')\nplt.show();",
"_____no_output_____"
]
],
[
[
"#### Note:\n\nThe tf.data API is a set of utilities in TensorFlow 2.0 for loading and preprocessing data in a way that's fast and scalable. You also have the option to use the `keras` [`ImageDataGenerator`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator), that accepts `numpy` arrays, instead of the Dataset. We think it's good for you to learn to use Datasets.\n\nAs a general rule, for input to NNs, Tensorflow recommends that you use `numpy` arrays if your data is small and fit in memory, and `tf.data.Datasets` otherwise.\n\n#### References:\n1. `tf.data.Dataset` [Documentation](https://www.tensorflow.org/api_docs/python/tf/data/Dataset).\n2. Import [`numpy` arrays in Tensorflow](https://www.tensorflow.org/tutorials/load_data/numpy)",
"_____no_output_____"
],
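For comparison, the `ImageDataGenerator` route looks like this (a hedged sketch; the `fit` call is commented out because it assumes the compiled `model` and the MNIST arrays defined elsewhere in this lab):

```python
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=10,
                             width_shift_range=0.1,
                             height_shift_range=0.1)
# model.fit(datagen.flow(x_train, y_train, batch_size=64), epochs=3)
```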
[
"### The Street View House Numbers (SVHN) Dataset\n\nWe will play with the SVHN real-world image dataset. It can be seen as similar in flavor to MNIST (e.g., the images are of small cropped digits), but incorporates an order of magnitude more labeled data (over 600,000 digit images) and comes from a significantly harder, unsolved, real world problem (recognizing digits and numbers in natural scene images). SVHN is obtained from house numbers in Google Street View images. \n\nAll digits have been resized to a fixed resolution of 32-by-32 pixels. The original character bounding boxes are extended in the appropriate dimension to become square windows, so that resizing them to 32-by-32 pixels does not introduce aspect ratio distortions. Nevertheless this preprocessing introduces some distracting digits to the sides of the digit of interest. Loading the .mat files creates 2 variables: X which is a 4-D matrix containing the images, and y which is a vector of class labels. To access the images, $X(:,:,:,i)$ gives the i-th 32-by-32 RGB image, with class label $y(i)$.\n\n\n\n*Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, Andrew Y. Ng Reading Digits in Natural Images with Unsupervised Feature Learning NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011.*",
"_____no_output_____"
]
],
[
[
"# Will take some time but will only load once\ntrain_svhn_cropped, test_svhn_cropped = tfds.load('svhn_cropped', split=['train', 'test'], shuffle_files=False)",
"_____no_output_____"
],
[
"isinstance(train_svhn_cropped, tf.data.Dataset)",
"_____no_output_____"
],
[
"# # convert to numpy if needed\nfeatures = next(iter(train_svhn_cropped))\nimages = features['image'].numpy()\nlabels = features['label'].numpy()\nimages.shape, labels.shape",
"_____no_output_____"
],
[
"for i, element in enumerate(train_svhn_cropped):\n if i==1: break;\n image = element['image'] \n label = element['label']\n print(label)",
"tf.Tensor(5, shape=(), dtype=int64)\n"
],
[
"# batch_size indicates that the dataset should be divided in batches \n# each consisting of 4 elements (a.k.a images and their labels)\n# take_size chooses a number of these batches, e.g. 3 of them for display\n\nbatch_size = 4\ntake_size = 3\n\n# Plot\nfig, axes = plt.subplots(take_size,batch_size, figsize=(10,10))\nfor i, element in enumerate(train_svhn_cropped.batch(batch_size).take(take_size)):\n for j in range(4):\n image = element['image'][j]\n label = element['label'][j]\n axes[i][j].imshow(image)\n axes[i][j].set_title(f'true label={label:d}')",
"_____no_output_____"
]
],
[
[
"Here we convert from a collection of dictionaries to a collection of tuples. We will still have a `tf.data.Dataset`",
"_____no_output_____"
]
],
[
[
"def normalize_image(img):\n return tf.cast(img, tf.float32)/255.\n\ndef normalize_dataset(element):\n img = element['image']\n lbl = element['label']\n return normalize_image(img), lbl",
"_____no_output_____"
],
[
"train_svhn = train_svhn_cropped.map(normalize_dataset)\ntest_svhn = test_svhn_cropped.map(normalize_dataset)",
"_____no_output_____"
],
[
"isinstance(train_svhn, tf.data.Dataset)",
"_____no_output_____"
]
],
[
[
"#### Define our CNN model ",
"_____no_output_____"
]
],
[
[
"n_filters = 16\ninput_shape = (32, 32, 3)\n\nsvhn_model = Sequential() \nsvhn_model.add(Conv2D(n_filters, (3, 3), activation='relu', input_shape=input_shape))\nsvhn_model.add(MaxPooling2D((2, 2)))\nsvhn_model.add(Conv2D(n_filters*2, (3, 3), activation='relu')) \nsvhn_model.add(MaxPooling2D((2, 2)))\nsvhn_model.add(Conv2D(n_filters*4, (3, 3), activation='relu'))\nsvhn_model.add(Flatten())\nsvhn_model.add(Dense(n_filters*2, activation='relu'))\nsvhn_model.add(Dense(10, activation='softmax'))\nsvhn_model.summary()",
"Model: \"sequential_5\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_15 (Conv2D) (None, 30, 30, 16) 448 \n_________________________________________________________________\nmax_pooling2d_10 (MaxPooling (None, 15, 15, 16) 0 \n_________________________________________________________________\nconv2d_16 (Conv2D) (None, 13, 13, 32) 4640 \n_________________________________________________________________\nmax_pooling2d_11 (MaxPooling (None, 6, 6, 32) 0 \n_________________________________________________________________\nconv2d_17 (Conv2D) (None, 4, 4, 64) 18496 \n_________________________________________________________________\nflatten_5 (Flatten) (None, 1024) 0 \n_________________________________________________________________\ndense_10 (Dense) (None, 32) 32800 \n_________________________________________________________________\ndense_11 (Dense) (None, 10) 330 \n=================================================================\nTotal params: 56,714\nTrainable params: 56,714\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"loss = keras.losses.sparse_categorical_crossentropy # we use this because we did not 1-hot encode the labels\noptimizer = Adam(lr=0.001)\nmetrics = ['accuracy'] \n\n# Compile model\nsvhn_model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics)",
"_____no_output_____"
]
],
[
[
"#### With Early Stopping",
"_____no_output_____"
]
],
[
[
"%%time\nbatch_size = 64\nepochs=15\n\ncallbacks = [ \n keras.callbacks.EarlyStopping(\n # Stop training when `val_accuracy` is no longer improving\n monitor='val_accuracy',\n # \"no longer improving\" being further defined as \"for at least 2 epochs\"\n patience=2,\n verbose=1)\n ]\n\nhistory = svhn_model.fit(train_svhn.batch(batch_size), #.take(50), # change 50 only\n epochs=epochs,\n callbacks=callbacks,\n validation_data=test_svhn.batch(batch_size)) #.take(50))",
"Epoch 1/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 1.0362 - accuracy: 0.6684 - val_loss: 0.6124 - val_accuracy: 0.8285\nEpoch 2/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 0.5177 - accuracy: 0.8515 - val_loss: 0.5254 - val_accuracy: 0.8519\nEpoch 3/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 0.4393 - accuracy: 0.8739 - val_loss: 0.4789 - val_accuracy: 0.8639\nEpoch 4/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 0.3956 - accuracy: 0.8865 - val_loss: 0.4440 - val_accuracy: 0.8750\nEpoch 5/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 0.3654 - accuracy: 0.8951 - val_loss: 0.4233 - val_accuracy: 0.8816\nEpoch 6/15\n1145/1145 [==============================] - 29s 25ms/step - loss: 0.3412 - accuracy: 0.9014 - val_loss: 0.4168 - val_accuracy: 0.8846\nEpoch 7/15\n1145/1145 [==============================] - 29s 25ms/step - loss: 0.3215 - accuracy: 0.9072 - val_loss: 0.4084 - val_accuracy: 0.8871\nEpoch 8/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 0.3055 - accuracy: 0.9124 - val_loss: 0.4026 - val_accuracy: 0.8888\nEpoch 9/15\n1145/1145 [==============================] - 31s 27ms/step - loss: 0.2916 - accuracy: 0.9163 - val_loss: 0.4100 - val_accuracy: 0.8887\nEpoch 10/15\n1145/1145 [==============================] - 30s 26ms/step - loss: 0.2780 - accuracy: 0.9200 - val_loss: 0.4217 - val_accuracy: 0.8861\nEpoch 00010: early stopping\nCPU times: user 20min 1s, sys: 8min 31s, total: 28min 33s\nWall time: 4min 58s\n"
],
[
"def print_history(history):\n fig, ax = plt.subplots(1, 1, figsize=(8,4))\n ax.plot((history.history['accuracy']), 'b', label='train')\n ax.plot((history.history['val_accuracy']), 'g' ,label='val')\n ax.set_xlabel(r'Epoch', fontsize=20)\n ax.set_ylabel(r'Accuracy', fontsize=20)\n ax.legend()\n ax.tick_params(labelsize=20)\n fig, ax = plt.subplots(1, 1, figsize=(8,4))\n ax.plot((history.history['loss']), 'b', label='train')\n ax.plot((history.history['val_loss']), 'g' ,label='val')\n ax.set_xlabel(r'Epoch', fontsize=20)\n ax.set_ylabel(r'Loss', fontsize=20)\n ax.legend()\n ax.tick_params(labelsize=20)\n plt.show();\n \nprint_history(history)",
"_____no_output_____"
],
[
"svhn_model.save('svhn_good.h5')",
"_____no_output_____"
]
],
[
[
"#### Too High Learning Rate",
"_____no_output_____"
]
],
[
[
"loss = keras.losses.sparse_categorical_crossentropy \noptimizer = Adam(lr=0.5) # really big learning rate\nmetrics = ['accuracy'] \n\n# Compile model\nsvhn_model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics)",
"_____no_output_____"
],
[
"%%time\nbatch_size = 64\nepochs=10\n\nhistory = svhn_model.fit(train_svhn.batch(batch_size), #.take(50), # change 50 to see the difference\n epochs=epochs,\n validation_data=test_svhn.batch(batch_size)) #.take(50))",
"Epoch 1/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 1518.9293 - accuracy: 0.1763 - val_loss: 2.2455 - val_accuracy: 0.1594\nEpoch 2/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2719 - accuracy: 0.1741 - val_loss: 2.2437 - val_accuracy: 0.1594\nEpoch 3/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2734 - accuracy: 0.1745 - val_loss: 2.2431 - val_accuracy: 0.1959\nEpoch 4/10\n1145/1145 [==============================] - 29s 26ms/step - loss: 2.2737 - accuracy: 0.1743 - val_loss: 2.2429 - val_accuracy: 0.1959\nEpoch 5/10\n1145/1145 [==============================] - 29s 26ms/step - loss: 2.2738 - accuracy: 0.1743 - val_loss: 2.2428 - val_accuracy: 0.1959\nEpoch 6/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2738 - accuracy: 0.1743 - val_loss: 2.2428 - val_accuracy: 0.1959\nEpoch 7/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2738 - accuracy: 0.1743 - val_loss: 2.2428 - val_accuracy: 0.1959\nEpoch 8/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2738 - accuracy: 0.1743 - val_loss: 2.2428 - val_accuracy: 0.1959\nEpoch 9/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2738 - accuracy: 0.1743 - val_loss: 2.2428 - val_accuracy: 0.1959\nEpoch 10/10\n1145/1145 [==============================] - 29s 25ms/step - loss: 2.2738 - accuracy: 0.1743 - val_loss: 2.2428 - val_accuracy: 0.1959\nCPU times: user 19min 22s, sys: 8min 17s, total: 27min 40s\nWall time: 4min 50s\n"
],
[
"print_history(history)\nfig.savefig('../images/train_high_lr.png')",
"_____no_output_____"
]
],
[
[
"#### Too Low Learning Rate\n\nExperiment with the learning rate using a small sample of the training set by using .take(num) which takes only `num` number of samples.\n```\nhistory = svhn_model.fit(train_svhn.batch(batch_size).take(50))\n```",
"_____no_output_____"
]
],
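Besides picking a fixed learning rate, you can also decay it during training. A minimal sketch with `LearningRateScheduler` (the halving interval is an arbitrary illustration, not a tuned value):

```python
import tensorflow as tf

def schedule(epoch, lr):
    # halve the learning rate every 3 epochs
    return lr * 0.5 if epoch > 0 and epoch % 3 == 0 else lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(schedule, verbose=1)
# pass callbacks=[lr_callback] to the model.fit(...) call above
```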
[
[
"#loss = keras.losses.categorical_crossentropy\nloss = keras.losses.sparse_categorical_crossentropy # we use this because we did not 1-hot encode the labels\noptimizer = Adam(lr=1e-5) # very low learning rate\nmetrics = ['accuracy'] \n\n# Compile model\nsvhn_model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics)",
"_____no_output_____"
],
[
"%%time\nbatch_size = 32\nepochs=10\n\nhistory = svhn_model.fit(train_svhn.batch(batch_size).take(50),\n epochs=epochs,\n validation_data=test_svhn.batch(batch_size)) #.take(50))",
"Epoch 1/10\n2290/2290 [==============================] - 37s 16ms/step - loss: 2.2603 - accuracy: 0.1707 - val_loss: 2.2314 - val_accuracy: 0.1957\nEpoch 2/10\n2290/2290 [==============================] - 34s 15ms/step - loss: 2.2295 - accuracy: 0.1894 - val_loss: 2.2119 - val_accuracy: 0.1970\nEpoch 3/10\n2290/2290 [==============================] - 35s 15ms/step - loss: 2.2046 - accuracy: 0.2012 - val_loss: 2.1738 - val_accuracy: 0.2342\nEpoch 4/10\n2290/2290 [==============================] - 35s 15ms/step - loss: 2.1504 - accuracy: 0.2458 - val_loss: 2.0987 - val_accuracy: 0.2948\nEpoch 5/10\n2290/2290 [==============================] - 36s 16ms/step - loss: 2.0492 - accuracy: 0.3008 - val_loss: 1.9756 - val_accuracy: 0.3434\nEpoch 6/10\n2290/2290 [==============================] - 37s 16ms/step - loss: 1.9201 - accuracy: 0.3507 - val_loss: 1.8509 - val_accuracy: 0.3832\nEpoch 7/10\n2290/2290 [==============================] - 38s 16ms/step - loss: 1.7967 - accuracy: 0.3975 - val_loss: 1.7373 - val_accuracy: 0.4274\nEpoch 8/10\n2290/2290 [==============================] - 35s 15ms/step - loss: 1.6818 - accuracy: 0.4490 - val_loss: 1.6338 - val_accuracy: 0.4714\nEpoch 9/10\n2290/2290 [==============================] - 34s 15ms/step - loss: 1.5778 - accuracy: 0.4939 - val_loss: 1.5412 - val_accuracy: 0.5111\nEpoch 10/10\n2290/2290 [==============================] - 35s 15ms/step - loss: 1.4837 - accuracy: 0.5307 - val_loss: 1.4577 - val_accuracy: 0.5436\nCPU times: user 20min 26s, sys: 9min 2s, total: 29min 28s\nWall time: 5min 56s\n"
],
[
"print_history(history)\nfig.savefig('../images/train_50.png')",
"_____no_output_____"
]
],
[
[
"#### Changing the batch size",
"_____no_output_____"
]
],
[
[
"#loss = keras.losses.categorical_crossentropy\nloss = keras.losses.sparse_categorical_crossentropy # we use this because we did not 1-hot encode the labels\noptimizer = Adam(lr=0.001)\nmetrics = ['accuracy'] \n\n# Compile model\nsvhn_model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics)",
"_____no_output_____"
],
[
"%%time\nbatch_size = 2\nepochs=5\n\nhistory = svhn_model.fit(train_svhn.batch(batch_size), \n epochs=epochs,\n validation_data=test_svhn.batch(batch_size)) ",
"Epoch 1/5\n36629/36629 [==============================] - 175s 5ms/step - loss: 0.8544 - accuracy: 0.7295 - val_loss: 0.5765 - val_accuracy: 0.8363\nEpoch 2/5\n36629/36629 [==============================] - 135s 4ms/step - loss: 0.5045 - accuracy: 0.8494 - val_loss: 0.5326 - val_accuracy: 0.8511\nEpoch 3/5\n36629/36629 [==============================] - 134s 4ms/step - loss: 0.4520 - accuracy: 0.8649 - val_loss: 0.5270 - val_accuracy: 0.8584\nEpoch 4/5\n36629/36629 [==============================] - 141s 4ms/step - loss: 0.4209 - accuracy: 0.8744 - val_loss: 0.5106 - val_accuracy: 0.8614\nEpoch 5/5\n36629/36629 [==============================] - 126s 3ms/step - loss: 0.4007 - accuracy: 0.8811 - val_loss: 0.5079 - val_accuracy: 0.8617\nCPU times: user 19min 36s, sys: 10min 1s, total: 29min 37s\nWall time: 11min 50s\n"
],
[
"print_history(history)",
"_____no_output_____"
]
],
[
[
"<a id=part3></a> [Back to Table of Contents](#top)\n## Part 3: Hidden Layer Visualization, Saliency Maps\n\n[Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps](https://arxiv.org/pdf/1312.6034.pdf)\n\nIt is often said that Deep Learning Models are black boxes. But we can peak into these boxes. ",
"_____no_output_____"
],
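Conceptually, a vanilla saliency map is just the gradient of the class score with respect to the input pixels. The lab uses the `tf-keras-vis` package below, but the bare idea fits in a few lines of TF2 (an illustrative sketch, not the package's implementation):

```python
import tensorflow as tf

def vanilla_saliency(model, image, class_idx):
    """Return |d(score of class_idx) / d(pixel)| for one image of shape (H, W, C)."""
    x = tf.Variable(image[tf.newaxis, ...], dtype=tf.float32)
    with tf.GradientTape() as tape:
        score = model(x)[0, class_idx]
    grads = tape.gradient(score, x)
    return tf.reduce_max(tf.abs(grads)[0], axis=-1)  # max over the channel axis
```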
[
"#### Let's train a small model on MNIST",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.datasets import mnist\n# load MNIST data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()",
"_____no_output_____"
],
[
"x_train.min(), x_train.max()",
"_____no_output_____"
],
[
"x_train = x_train.reshape((60000, 28, 28, 1)) # Reshape to get third dimension\nx_test = x_test.reshape((10000, 28, 28, 1)) \n\nx_train = x_train.astype('float32') / 255 # Normalize between 0 and 1\nx_test = x_test.astype('float32') / 255 \n\n# Convert labels to categorical data \ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)",
"_____no_output_____"
],
[
"x_train.min(), x_train.max()",
"_____no_output_____"
],
[
"# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data(\n# path='mnist.npz')\nx_train.shape",
"_____no_output_____"
],
[
"class_idx = 0\nindices = np.where(y_test[:, class_idx] == 1.)[0]\n\n# pick some random input from here.\nidx = indices[0]\nimg = x_test[idx]",
"_____no_output_____"
],
[
"# pick some random input from here.\nidx = indices[0]\n\n# Lets sanity check the picked image.\nfrom matplotlib import pyplot as plt\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (18, 6)\n\n#plt.imshow(test_images[idx][..., 0])\nimg = x_test[idx] * 255 \nimg = img.astype('float32')\nimg = np.squeeze(img) # trick to reduce img from (28,28,1) to (28,28)\nplt.imshow(img, cmap='gray');",
"_____no_output_____"
],
[
"input_shape=(28, 28, 1)\nnum_classes = 10\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax', name='preds'))\nmodel.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 24, 24, 64) 18496 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 12, 12, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 9216) 0 \n_________________________________________________________________\ndense (Dense) (None, 128) 1179776 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\npreds (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 1,199,882\nTrainable params: 1,199,882\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])",
"_____no_output_____"
],
[
"num_samples = x_train.shape[0]\nnum_samples",
"_____no_output_____"
],
[
"%%time\nbatch_size = 32\nepochs = 10\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_split=0.2,\n shuffle=True)",
"Train on 48000 samples, validate on 12000 samples\nEpoch 1/10\n48000/48000 [==============================] - 34s 706us/sample - loss: 0.2091 - accuracy: 0.9363 - val_loss: 0.0577 - val_accuracy: 0.9831\nEpoch 2/10\n48000/48000 [==============================] - 33s 691us/sample - loss: 0.0875 - accuracy: 0.9729 - val_loss: 0.0524 - val_accuracy: 0.9847\nEpoch 3/10\n48000/48000 [==============================] - 33s 691us/sample - loss: 0.0639 - accuracy: 0.9801 - val_loss: 0.0422 - val_accuracy: 0.9879\nEpoch 4/10\n48000/48000 [==============================] - 33s 690us/sample - loss: 0.0542 - accuracy: 0.9835 - val_loss: 0.0412 - val_accuracy: 0.9887\nEpoch 5/10\n48000/48000 [==============================] - 33s 694us/sample - loss: 0.0449 - accuracy: 0.9859 - val_loss: 0.0360 - val_accuracy: 0.9901\nEpoch 6/10\n48000/48000 [==============================] - 33s 694us/sample - loss: 0.0373 - accuracy: 0.9887 - val_loss: 0.0335 - val_accuracy: 0.9905\nEpoch 7/10\n48000/48000 [==============================] - 33s 693us/sample - loss: 0.0358 - accuracy: 0.9884 - val_loss: 0.0492 - val_accuracy: 0.9873\nEpoch 8/10\n48000/48000 [==============================] - 33s 694us/sample - loss: 0.0324 - accuracy: 0.9894 - val_loss: 0.0392 - val_accuracy: 0.9900\nEpoch 9/10\n48000/48000 [==============================] - 33s 695us/sample - loss: 0.0284 - accuracy: 0.9908 - val_loss: 0.0384 - val_accuracy: 0.9903\nEpoch 10/10\n48000/48000 [==============================] - 33s 695us/sample - loss: 0.0284 - accuracy: 0.9914 - val_loss: 0.0448 - val_accuracy: 0.9902\nWall time: 5min 33s\n"
],
[
"score = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])",
"Test loss: 0.030485706004477395\nTest accuracy: 0.9913\n"
]
],
[
[
"### Let's look at the layers with `tf.keras.viz` \n\nhttps://pypi.org/project/tf-keras-vis/\n\nAnd an example: https://github.com/keisen/tf-keras-vis/blob/master/examples/visualize_conv_filters.ipynb",
"_____no_output_____"
],
[
"We can identify layers by their layer id:",
"_____no_output_____"
]
],
[
[
"# Alternatively we can specify layer_id as -1 since it corresponds to the last layer.\nlayer_id = 0\nmodel.layers[layer_id].name, model.layers[-2].name",
"_____no_output_____"
]
],
[
[
"Or you may look at their output",
"_____no_output_____"
]
],
[
[
"output = [model.layers[layer_id].output]\noutput",
"_____no_output_____"
],
[
"# # You may also replace part of your NN with other parts,\n# # e.g. replace the activation function of the last layer\n# # with a linear one\n\n# model.layers[-1].activation = tf.keras.activations.linear",
"_____no_output_____"
]
],
[
[
"Generate Feature Maps",
"_____no_output_____"
]
],
[
[
"def get_feature_maps(model, layer_id, input_image):\n \"\"\"Returns intermediate output (activation map) from passing an image to the model\n \n Parameters:\n model (tf.keras.Model): Model to examine\n layer_id (int): Which layer's (from zero) output to return\n input_image (ndarray): The input image\n Returns:\n maps (List[ndarray]): Feature map stack output by the specified layer\n \"\"\"\n model_ = Model(inputs=[model.input], outputs=[model.layers[layer_id].output]) \n return model_.predict(np.expand_dims(input_image, axis=0))[0,:,:,:].transpose((2,0,1))",
"_____no_output_____"
],
[
"# Choose an arbitrary image\nimage_id = 67\nimg = x_test[image_id,:,:,:]\nimg.shape",
"_____no_output_____"
],
[
"img_to_show = np.squeeze(img)\nplt.imshow(img_to_show, cmap='gray')",
"_____no_output_____"
],
[
"# Was this successfully predicted?\nimg_batch = (np.expand_dims(img,0))\nprint(img_batch.shape)\npredictions_single = model.predict(img_batch)\nprint(f'Prediction is: {np.argmax(predictions_single[0])}') ",
"(1, 28, 28, 1)\nPrediction is: 4\n"
],
[
"# layer id should be for a Conv layer, a Flatten will not do\nmaps = get_feature_maps(model, layer_id, img)# [0:10]\nmaps.shape",
"_____no_output_____"
],
[
"# Plot just a subset\nmaps = get_feature_maps(model, layer_id, img)[0:10]\n\nfig, ax = plt.subplots()\nimg = np.squeeze(img)\nax.imshow(img + 0.5)\nlabel = y_test[image_id,:]\nlabel = int(np.where(label == 1.)[0])\n\nax.set_title(f'true label = {label}')\n\nf, ax = plt.subplots(3,3, figsize=(8,8))\nfor i, axis in enumerate(ax.ravel()):\n axis.imshow(maps[i], cmap='gray')",
"_____no_output_____"
]
],
[
[
"### `tf_keras_vis.gradcam.Gradcam`\n\n[Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization](https://arxiv.org/pdf/1610.02391.pdf)",
"_____no_output_____"
]
],
[
[
"#from tensorflow.keras import backend as K\n# Define modifier to replace a softmax function of the last layer to a linear function.\ndef model_modifier(m):\n m.layers[-1].activation = tf.keras.activations.linear",
"_____no_output_____"
],
[
"#img_batch = (np.expand_dims(img,0))\n# Define modifier to replace a softmax function of the last layer to a linear function.\ndef model_modifier(m):\n m.layers[-1].activation = tf.keras.activations.linear\n\n# Create Saliency object\nsaliency = Saliency(model, model_modifier)\n\n# Define loss function. Pass it the correct class label.\nloss = lambda output: tf.keras.backend.mean(output[:, tf.argmax(y_test[image_id])])",
"_____no_output_____"
],
[
"# Generate saliency map\nprint(img_batch.shape)",
"(1, 28, 28, 1)\n"
],
[
"saliency_map = saliency(loss, img_batch)\n\nsaliency_map = normalize(saliency_map)\n\nf, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) #, subplot_kw={'xticks': [], 'yticks': []})\nax[0].imshow(saliency_map[0], cmap='jet')\nax[1].imshow(img);",
"_____no_output_____"
],
[
"# from matplotlib import cm\n# from tf_keras_vis.gradcam import Gradcam\n\n# Create Gradcam object\ngradcam = Gradcam(model, model_modifier)\n\n# Generate heatmap with GradCAM\ncam = gradcam(loss, img_batch)\ncam = normalize(cam)\n\nf, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5),\n subplot_kw={'xticks': [], 'yticks': []})\nfor i in range(len(cam)):\n heatmap = np.uint8(cm.jet(cam[i])[..., :3] * 255)\n ax.imshow(img)\n ax.imshow(heatmap, cmap='jet', alpha=0.5)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a11d0ea09718f755ad1b2b4bfd63fcad176b67c
| 1,011 |
ipynb
|
Jupyter Notebook
|
Untitled4.ipynb
|
rahul263-stack/PROJECT-Dump
|
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
|
[
"MIT"
] | 1 |
2020-04-06T04:41:56.000Z
|
2020-04-06T04:41:56.000Z
|
.ipynb_checkpoints/Untitled4-checkpoint.ipynb
|
rahul263-stack/quarantine
|
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/Untitled4-checkpoint.ipynb
|
rahul263-stack/quarantine
|
d8b1cfe0da8cad9fe2f3bbd427334b979c7d2c09
|
[
"MIT"
] | null | null | null | 19.823529 | 48 | 0.484669 |
[
[
[
"def neural_network(input, weights):\n out = 0\n for i in range (len(input)):\n out += (input[i] * weights[i] )\n return out ",
"_____no_output_____"
],
[
"def ele_mul(scaler, vector):\n out = [0, 0, 0]\n for i in range (len(out)):\n out [i] = vecctor[i] * scaler",
"_____no_output_____"
]
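A quick sanity check of both helpers (the input and weight values are made-up illustration numbers, not from the original notebook):

```python
inputs = [8.5, 0.65, 1.2]
weights = [0.1, 0.2, 0.0]
print(neural_network(inputs, weights))  # 8.5*0.1 + 0.65*0.2 + 1.2*0.0 = 0.98
print(ele_mul(2.0, [1.0, 2.0, 3.0]))    # [2.0, 4.0, 6.0]
```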
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4a11d717edf6c77c739fc7794c572c032af5e9b3
| 845 |
ipynb
|
Jupyter Notebook
|
pset_pandas_ext/101problems/solutions/nb/p52.ipynb
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 5 |
2019-04-08T20:05:37.000Z
|
2019-12-04T20:48:45.000Z
|
pset_pandas_ext/101problems/solutions/nb/p52.ipynb
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 8 |
2019-04-15T15:16:05.000Z
|
2022-02-12T10:33:32.000Z
|
pset_pandas_ext/101problems/solutions/nb/p52.ipynb
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 2 |
2019-04-10T00:14:42.000Z
|
2020-02-26T20:35:21.000Z
| 22.236842 | 90 | 0.456805 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a11db575d1463c730ca77abaa92d70e5d1862be
| 717,378 |
ipynb
|
Jupyter Notebook
|
notebooks/examples/05_logical-operators.ipynb
|
gitter-badger/ntakt
|
d1256469d167b4a47dfa366cae456f5ce6414f91
|
[
"BSD-2-Clause"
] | null | null | null |
notebooks/examples/05_logical-operators.ipynb
|
gitter-badger/ntakt
|
d1256469d167b4a47dfa366cae456f5ce6414f91
|
[
"BSD-2-Clause"
] | null | null | null |
notebooks/examples/05_logical-operators.ipynb
|
gitter-badger/ntakt
|
d1256469d167b4a47dfa366cae456f5ce6414f91
|
[
"BSD-2-Clause"
] | null | null | null | 1,519.868644 | 165,470 | 0.681933 |
[
[
[
"# Logical Operators\n\nThis notebook demonstrates use of the `ge` infix function for the comparison of two tensors. In this particular example, the probablity density `f` at two-dimensional location `(x, y)` is compared against a value `u` drawn uniformly from the unit interval `[0, 1)`. If `f(x, y) >= u`, or `f(x, y) ge u`, the point `(x, y)` [is accepted, otherwise rejected](#Accept-or-Reject-Samples). Infix operators `eq` (`==`), `ge` (`>=`), `le` (`<=`), `gt` (`>`), `lt` (`<`) are implemented for these containers/tensors:\n```\nRandomAccessible\nRandomAccessibleInterval\nRealRandomAccessible\nRealRandomAccessibleRealInterval\n```\nand for scalars with any of these tensors.",
"_____no_output_____"
],
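The same accept/reject idea, sketched in a few lines of NumPy for readers less familiar with the Kotlin/ntakt syntax used below (the bounds and seed are arbitrary, and `f.max()` stands in for the density's supremum):

```python
import numpy as np

rng = np.random.default_rng(42)
xy = rng.uniform(-3.0, 3.0, size=(10_000, 2))            # candidate points
f = np.exp(-0.5 * (xy ** 2).sum(axis=1)) / (2 * np.pi)   # 2-D standard normal density
u = rng.uniform(0.0, 1.0, size=len(xy)) * f.max()        # uniform draws scaled to sup f
accepted = xy[f >= u]                                    # the `f ge u` comparison
print(len(accepted), "of", len(xy), "points accepted")
```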
[
"## Dependencies and Imports",
"_____no_output_____"
]
],
[
[
"// set up dependencies\n// use local maven repository; not yet deployed to remote maven repositories.\n@file:Repository(\"*mavenLocal\")\n@file:Repository(\"https://maven.scijava.org/content/groups/public\")\n@file:Repository(\"https://jitpack.io\")\n\n// uncomment to search in your local maven repo\n// requires installation into local maven repository (./gradlew build publishToMavenLocal)\n@file:DependsOn(\"org.ntakt:ntakt:0.1.0-SNAPSHOT\")\n\n// uncomment to search in jitpack (TODO)\n// @file:DependsOn(\"com.github.saalfeldlab:ntakt:<tbd>\")\n\n%use lets-plot",
"_____no_output_____"
],
[
"import kotlin.math.PI\nimport kotlin.math.pow\nimport kotlin.random.Random\nimport net.imglib2.RandomAccessibleInterval as RAI\nimport org.ntakt.*\nimport net.imglib2.type.numeric.real.DoubleType\nimport net.imglib2.view.Views",
"_____no_output_____"
]
],
[
[
"## Set up Data",
"_____no_output_____"
]
],
[
[
"val rng = Random(100)\nval dims = longArrayOf(600, 400)\nval mean = dims.map { it / 2.0 }.toDoubleArray()\nval sigma = dims.map { it / 10.0 }.map{ it * it }.toDoubleArray()\nval sigmaInverse = sigma.map { 1.0 / it }.toDoubleArray()\nval sigmaDeterminant = sigma.map { it * it }.sum()\nval twoPiPow = (2*PI).pow(2)\nval normalizationFactor = (twoPiPow * sigmaDeterminant).pow(-0.5)\nval exponent = ntakt.function(2, { 0.0.asType() }) { p, t -> \n val dx = p.getDoublePosition(0) - mean[0];\n val dy = p.getDoublePosition(1) - mean[1];\n t.set(-0.5 * (dx * dx * sigmaInverse[0] + dy * dy * sigmaInverse[1]))\n}\nval gaussianInfinite = exponent.exp() * normalizationFactor\nval gaussian = gaussianInfinite.rastered.interval(*dims)\nval uniform = ntakt.doubles(*dims) { rng.nextDouble() }",
"_____no_output_____"
]
],
[
[
"### Visualize Conditional Distributions",
"_____no_output_____"
]
],
[
[
"val bellCurvesAtY = \n Views.hyperSlice(gaussian, 1, 100L).flatIterable.map { it.realDouble } +\n Views.hyperSlice(gaussian, 1, 200L).flatIterable.map { it.realDouble } +\n Views.hyperSlice(gaussian, 1, 250L).flatIterable.map { it.realDouble }\nval dY = mapOf<String, Any>(\n \"X\" to DoubleArray(dims[0].toInt()) { it.toDouble() }.let { it + it + it },\n \"bell curve at y\" to bellCurvesAtY,\n \"y\" to Array(dims[0].toInt()) { \"100\" } + Array(dims[0].toInt()) { \"200\" } + Array(dims[0].toInt()) { \"250\" } \n)\nval p = lets_plot(dY) { x = \"X\"; color = \"y\" }\np + \n geom_line { y = \"bell curve at y\" } +\n ggsize(800, 500)",
"_____no_output_____"
],
[
"val bellCurvesAtX = \n Views.hyperSlice(gaussian, 0, 200L).flatIterable.map { it.realDouble } +\n Views.hyperSlice(gaussian, 0, 300L).flatIterable.map { it.realDouble } +\n Views.hyperSlice(gaussian, 0, 350L).flatIterable.map { it.realDouble }\nval dX = mapOf<String, Any>(\n \"X\" to DoubleArray(dims[1].toInt()) { it.toDouble() }.let { it + it + it },\n \"bell curve at x\" to bellCurvesAtX,\n \"x\" to Array(dims[1].toInt()) { \"200\" } + Array(dims[1].toInt()) { \"300\" } + Array(dims[1].toInt()) { \"350\" } \n)\nval p = lets_plot(dX) { x = \"X\"; color = \"x\" }\np + \n geom_line { y = \"bell curve at x\" } +\n ggsize(800, 500)",
"_____no_output_____"
]
],
[
[
"## Accept or Reject Samples",
"_____no_output_____"
]
],
[
[
"val sampled = gaussian ge uniform * normalizationFactor\nval points = sampled.where()",
"_____no_output_____"
]
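,
[
"// A minimal sketch, assuming the `lt` operator documented in the intro cell mirrors `ge`:\n// it should yield the complementary boolean mask, i.e. the rejected points.\n// `rejected` and `rejectedPoints` are illustrative names introduced here, not part of the original analysis.\nval rejected = gaussian lt uniform * normalizationFactor\nval rejectedPoints = rejected.where()",
"_____no_output_____"
]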
],
[
[
"### Visualize samples",
"_____no_output_____"
]
],
[
[
"val scatterData = mapOf<String, Any>(\n \"X\" to points.map { it.getDoublePosition(0) } + listOf(mean[0]),\n \"Y\" to points.map { it.getDoublePosition(1) } + listOf(mean[1]),\n \"label\" to Array(points.size) { \"sample\" } + arrayOf(\"mean\"),\n \"alpha\" to DoubleArray(points.size) { 0.1 } + doubleArrayOf(1.0)\n)\nlets_plot(scatterData) { x = \"X\"; y = \"Y\"; color = \"label\"; alpha = \"alpha\" } +\n geom_point(size = 3.0) +\n geom_density2d(color=\"black\") +\n ggsize(900, 600)",
"_____no_output_____"
],
[
"val sampleMean = Pair(\n points.map{ it.getDoublePosition(0) }.sum() / points.size,\n points.map{ it.getDoublePosition(1) }.sum() / points.size\n)\nval (meanX, meanY) = sampleMean\nsampleMean",
"_____no_output_____"
],
[
"val sampleVariance = points.fold(DoubleArray(3)) { m, p ->\n val dX = p.getDoublePosition(0) - meanX\n val dY = p.getDoublePosition(1) - meanY\n m[0] += dX*dX\n m[1] += dX*dY\n m[2] += dY*dY\n m \n}.map { it / (points.size - 1) }\nsampleVariance",
"_____no_output_____"
],
[
"sigma[0] to sigma[1]",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
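"code",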
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a11e2fd21100e60bd2d29527f8670d008acf300
| 1,549 |
ipynb
|
Jupyter Notebook
|
Untitled.ipynb
|
JEB12345/superball_cpg
|
20d7a80fe4af02b2d27e7fdf43636d39f3b1cdf4
|
[
"Apache-2.0"
] | null | null | null |
Untitled.ipynb
|
JEB12345/superball_cpg
|
20d7a80fe4af02b2d27e7fdf43636d39f3b1cdf4
|
[
"Apache-2.0"
] | null | null | null |
Untitled.ipynb
|
JEB12345/superball_cpg
|
20d7a80fe4af02b2d27e7fdf43636d39f3b1cdf4
|
[
"Apache-2.0"
] | null | null | null | 17.211111 | 72 | 0.500323 |
[
[
[
"%pylab\nimport numpy as np",
"Using matplotlib backend: TkAgg\nPopulating the interactive namespace from numpy and matplotlib\n"
],
[
"x = linspace(0,2*np.pi,num=1000)\nsine = np.sin(x)",
"_____no_output_____"
],
[
"plt.plot(sine)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a11eb8a166e8ce49a7f878b4da633bc3567900d
| 59,274 |
ipynb
|
Jupyter Notebook
|
String Alignment.ipynb
|
ReggieCarey/CMSC701
|
aef46634de2e2928f241a025ef65a3e0b021a266
|
[
"Apache-2.0"
] | null | null | null |
String Alignment.ipynb
|
ReggieCarey/CMSC701
|
aef46634de2e2928f241a025ef65a3e0b021a266
|
[
"Apache-2.0"
] | null | null | null |
String Alignment.ipynb
|
ReggieCarey/CMSC701
|
aef46634de2e2928f241a025ef65a3e0b021a266
|
[
"Apache-2.0"
] | null | null | null | 53.836512 | 1,010 | 0.543139 |
[
[
[
"# Global And Local Alignment",
"_____no_output_____"
]
],
[
[
"import sys\nimport numpy as np\nfrom collections import Counter\nimport re\nfrom collections import defaultdict\nfrom functools import partial",
"_____no_output_____"
]
],
[
[
"The score_function returns a score for a pair of characters. These may be the same different or one may be missing. The code supports three scoring frameworks, one where all match/mismatch/indel scores have fixed values, and one based on BLOSUM62 and the other based PAM250. Other scoring matrices can be incorporated.",
"_____no_output_____"
]
],
[
[
"def score_function(score_name=\"BASIC\", A=1, B=0, C=1):\n\n def score(a, b, indel, s):\n \"\"\"Return the match/mismatch/indel score for the symbols a, and b.\"\"\"\n return indel if '-' in [a,b] else s[a][b]\n \n if \"BLOSUM62\" == score_name:\n inputString = \"\"\"\n A C D E F G H I K L M N P Q R S T V W Y\n A 4 0 -2 -1 -2 0 -2 -1 -1 -1 -1 -2 -1 -1 -1 1 0 0 -3 -2\n C 0 9 -3 -4 -2 -3 -3 -1 -3 -1 -1 -3 -3 -3 -3 -1 -1 -1 -2 -2\n D -2 -3 6 2 -3 -1 -1 -3 -1 -4 -3 1 -1 0 -2 0 -1 -3 -4 -3\n E -1 -4 2 5 -3 -2 0 -3 1 -3 -2 0 -1 2 0 0 -1 -2 -3 -2\n F -2 -2 -3 -3 6 -3 -1 0 -3 0 0 -3 -4 -3 -3 -2 -2 -1 1 3\n G 0 -3 -1 -2 -3 6 -2 -4 -2 -4 -3 0 -2 -2 -2 0 -2 -3 -2 -3\n H -2 -3 -1 0 -1 -2 8 -3 -1 -3 -2 1 -2 0 0 -1 -2 -3 -2 2\n I -1 -1 -3 -3 0 -4 -3 4 -3 2 1 -3 -3 -3 -3 -2 -1 3 -3 -1\n K -1 -3 -1 1 -3 -2 -1 -3 5 -2 -1 0 -1 1 2 0 -1 -2 -3 -2\n L -1 -1 -4 -3 0 -4 -3 2 -2 4 2 -3 -3 -2 -2 -2 -1 1 -2 -1\n M -1 -1 -3 -2 0 -3 -2 1 -1 2 5 -2 -2 0 -1 -1 -1 1 -1 -1\n N -2 -3 1 0 -3 0 1 -3 0 -3 -2 6 -2 0 0 1 0 -3 -4 -2\n P -1 -3 -1 -1 -4 -2 -2 -3 -1 -3 -2 -2 7 -1 -2 -1 -1 -2 -4 -3\n Q -1 -3 0 2 -3 -2 0 -3 1 -2 0 0 -1 5 1 0 -1 -2 -2 -1\n R -1 -3 -2 0 -3 -2 0 -3 2 -2 -1 0 -2 1 5 -1 -1 -3 -3 -2\n S 1 -1 0 0 -2 0 -1 -2 0 -2 -1 1 -1 0 -1 4 1 -2 -3 -2\n T 0 -1 -1 -1 -2 -2 -2 -1 -1 -1 -1 0 -1 -1 -1 1 5 0 -2 -2\n V 0 -1 -3 -2 -1 -3 -3 3 -2 1 1 -3 -2 -2 -3 -2 0 4 -3 -1\n W -3 -2 -4 -3 1 -2 -2 -3 -3 -2 -1 -4 -4 -2 -3 -3 -2 -3 11 2\n Y -2 -2 -3 -2 3 -3 2 -1 -2 -1 -1 -2 -3 -1 -2 -2 -2 -1 2 7\n \"\"\"\n elif \"PAM250\" == score_name:\n inputString = \"\"\"\n A C D E F G H I K L M N P Q R S T V W Y\n A 2 -2 0 0 -3 1 -1 -1 -1 -2 -1 0 1 0 -2 1 1 0 -6 -3\n C -2 12 -5 -5 -4 -3 -3 -2 -5 -6 -5 -4 -3 -5 -4 0 -2 -2 -8 0\n D 0 -5 4 3 -6 1 1 -2 0 -4 -3 2 -1 2 -1 0 0 -2 -7 -4\n E 0 -5 3 4 -5 0 1 -2 0 -3 -2 1 -1 2 -1 0 0 -2 -7 -4\n F -3 -4 -6 -5 9 -5 -2 1 -5 2 0 -3 -5 -5 -4 -3 -3 -1 0 7\n G 1 -3 1 0 -5 5 -2 -3 -2 -4 -3 0 0 -1 -3 1 0 -1 -7 -5\n H -1 -3 1 1 -2 -2 6 -2 0 -2 -2 2 0 3 2 -1 -1 -2 -3 0\n I -1 -2 -2 -2 1 -3 -2 5 -2 2 2 -2 -2 -2 -2 -1 0 4 -5 -1\n K -1 -5 0 0 -5 -2 0 -2 5 -3 0 1 -1 1 3 0 0 -2 -3 -4\n L -2 -6 -4 -3 2 -4 -2 2 -3 6 4 -3 -3 -2 -3 -3 -2 2 -2 -1\n M -1 -5 -3 -2 0 -3 -2 2 0 4 6 -2 -2 -1 0 -2 -1 2 -4 -2\n N 0 -4 2 1 -3 0 2 -2 1 -3 -2 2 0 1 0 1 0 -2 -4 -2\n P 1 -3 -1 -1 -5 0 0 -2 -1 -3 -2 0 6 0 0 1 0 -1 -6 -5\n Q 0 -5 2 2 -5 -1 3 -2 1 -2 -1 1 0 4 1 -1 -1 -2 -5 -4\n R -2 -4 -1 -1 -4 -3 2 -2 3 -3 0 0 0 1 6 0 -1 -2 2 -4\n S 1 0 0 0 -3 1 -1 -1 0 -3 -2 1 1 -1 0 2 1 -1 -2 -3\n T 1 -2 0 0 -3 0 -1 0 0 -2 -1 0 0 -1 -1 1 3 0 -5 -3\n V 0 -2 -2 -2 -1 -1 -2 4 -2 2 2 -2 -1 -2 -2 -1 0 4 -6 -2\n W -6 -8 -7 -7 0 -7 -3 -5 -3 -2 -4 -4 -6 -5 2 -2 -5 -6 17 0\n Y -3 0 -4 -4 7 -5 0 -1 -4 -1 -2 -2 -5 -4 -4 -3 -3 -2 0 10\n \"\"\"\n elif \"BASIC\" == score_name:\n return lambda a, b: -C if '-' in [a,b] else A if a == b else -B\n else:\n raise ValueError(\"score_name constrained to ['BASIC', 'BLOSUM62', 'PAM250']\")\n\n # divide the inputString into lines where each line is a row from the tables above\n lines = inputString.strip().splitlines()\n \n # determine the order of the amino acids repesented in the table.\n amino_acids = \"\".join(re.split(\" +\", lines[0].strip()))\n \n # setup a dictionary of dictionaries to encode the matrix\n target = defaultdict(dict)\n \n # step through the remaining lines and add them to the target matrix\n for row in lines[1:]:\n \n # get the amino acid associated with this row and then the row values\n rowId, *values = re.split(\" +\", row.strip())\n \n # put the data into the dictionary of dictionaries\n for colId, 
value in zip(amino_acids, values):\n target[rowId][colId] = int(value)\n\n # return a function that returns a score for match/mismatch/indel based on data from the matrix \n # and the passed in indel score, C.\n return partial(score, indel=-C, s=target)",
"_____no_output_____"
]
],
[
[
"An internal function _sequence_similarity, computes the sequence similarity between sequences X and Y. It will compute either a global alignment or a local alignment.",
"_____no_output_____"
]
],
[
[
"def _sequence_similarity(X, Y, s, global_seq):\n \"\"\"\n Compute the sequence similarity between to sequences X and Y. We return a populated matrix representing\n the confusion between the two sequences as well as the optimal alignment data.\n \n This is our implementation of the Needleman-Wunsch Algorithm and the local alignment algorithm.\n \n Arguments\n ---------\n X: sequence to align\n Y: sequence to align\n s: a function taking a pair of parameters, the two characters to compare\n global_seq: \n \"\"\"\n def maximum(*args):\n \"\"\"\n Given an array of tuples return a tuple where the first element is a string consisting of all first\n elements in the array of tuples where the second element is maximal in the array. E.G.\n maximum([('a',5),('b',3),('c',6),('d',3),('e',6)]) = ('ce',6)\n \"\"\"\n m = max(map(lambda sv: sv[1], args))\n s = \"\".join(map(lambda sv: sv[0], filter(lambda sv: sv[1] == m, args)))\n return (s, m)\n def horzValue(i, j):\n \"\"\"\n Return the value of the horizontal path.\n We use the s function to return a similarity score which will be an indel score\n \"\"\"\n return mat[i, j-1][1] + s('-', X[j-1])\n def diagValue(i, j):\n \"\"\"\n Return the value of the diagonal path.\n We use the s function to return a similarity score which will be a match/mismatch score\n \"\"\"\n return mat[i-1, j-1][1] + s(Y[i-1], X[j-1])\n def vertValue(i, j):\n \"\"\"\n Return the value of the vertical path.\n We use the s function to return a similarity score which will be an indel score\n \"\"\"\n return mat[i-1, j][1] + s(Y[i-1], '-')\n\n m = len(Y) + 1\n n = len(X) + 1\n index = (-1, -1)\n maxval = -sys.maxsize\n minval = -sys.maxsize * global_seq\n mat = np.empty((m, n), dtype=tuple)\n for i in range(0, m):\n mat[i, 0] = maximum((\"\", minval), (\"v\", i * s('-', '-')))\n maxval, index = (mat[i, 0][1], (i, 0)) if mat[i, 0][1] > maxval else (maxval, index)\n for j in range(0, n):\n mat[0, j] = maximum((\"\", minval), (\"h\", j * s('-', '-')))\n maxval, index = (mat[0, j][1], (0, j)) if mat[0, j][1] > maxval else (maxval, index)\n for i in range(1, m):\n for j in range(1, n):\n mat[i, j] = maximum((\"\", minval), (\"h\", horzValue(i, j)), (\"d\", diagValue(i, j)), (\"v\", vertValue(i, j)))\n maxval, index = (mat[i, j][1], (i, j)) if mat[i, j][1] > maxval else (maxval, index)\n if global_seq:\n index = m-1, n-1\n maxval = mat[m-1, n-1][1]\n return (mat, maxval, index)\n\ndef global_sequence_similarity():\n return partial(_sequence_similarity, global_seq=True)\n\ndef local_sequence_similarity():\n return partial(_sequence_similarity, global_seq=False)",
"_____no_output_____"
]
],
[
[
"Traceback returns one of potentially many optimal alignments. Each point where a cell has more than\none maximal value, we have a bi or tri - furication leading to a doubling/tripling in the number of equivalent alignments observed along a particular path.",
"_____no_output_____"
]
],
[
[
"def global_traceback(mat, X, Y):\n \"\"\"\n Traceback returns one of potentially many optimal alignments. Each point where a cell has more than\n one maximal value, we have a bifurication leading to a doubling in the number of optimal alignments.\n \"\"\"\n n, m = mat.shape[1]-1, mat.shape[0]-1\n retval = (\"\",\"\")\n while m > 0 or n > 0:\n a, b = (\"\",\"\")\n if 'h' in mat[m, n][0]:\n a, b = X[n-1], \"-\"\n n = n - 1\n elif 'd' in mat[m, n][0]:\n a, b = X[n-1], Y[m-1]\n n, m = n - 1, m - 1\n elif 'v' in mat[m, n][0]:\n a, b = \"-\", Y[m-1]\n m = m - 1\n else:\n break\n retval = (a + retval[0], b + retval[1])\n return retval",
"_____no_output_____"
],
[
"def local_traceback(mat, X, Y):\n \"\"\"\n Traceback returns one of potentially many optimal alignments. Each point where a cell has more than\n one maximal value, we have a bifurication leading to a doubling in the number of optimal alignments.\n \"\"\"\n n, m = mat.shape[1]-1, mat.shape[0]-1\n retval = (\"\",\"\")\n while (m > 0 or n > 0) and mat[m, n][1] > 0:\n a, b = (\"\",\"\")\n if 'h' in mat[m, n][0]:\n a, b = X[n-1], \"-\"\n n = n - 1\n elif 'd' in mat[m, n][0]:\n a, b = X[n-1], Y[m-1]\n n, m = n - 1, m - 1\n elif 'v' in mat[m, n][0]:\n a, b = \"-\", Y[m-1]\n m = m - 1\n else:\n break\n retval = (a + retval[0], b + retval[1])\n return retval",
"_____no_output_____"
]
],
[
[
"Print a formatted result wraps sequences at <width> characters and identifies offsets",
"_____no_output_____"
]
],
[
[
"def print_formatted(traceback_results, width=100):\n \"\"\"\n Print a formatted result wraps sequences at <width> characters and identifies offsets\n \"\"\"\n \n def chunk(s, w):\n \"\"\"\n Break s into w width chunks and return a list. Used to print results\n \"\"\"\n retval = []\n start = 0\n while len(s):\n end = start + w\n retval.append(s[start:end])\n s = s[end:]\n return retval\n \n for (alignedX, alignedY) in traceback_results:\n s1 = 0\n s2 = 0\n print()\n for (A, B) in zip(chunk(alignedX, width), chunk(alignedY, width)):\n print(f'{s1:>5} {A}{\" \"*(width-len(A))} {(s1 + len(A) - 1):<5}')\n print(f\" {''.join(a if a==b else ' ' for a, b in zip(A, B))}{' '*(width-len(A))}\")\n print(f'{s2:>5} {B}{\" \"*(width-len(B))} {(s2 + len(B) - 1):<5}')\n print()\n s1 += len(A)\n s2 += len(B)",
"_____no_output_____"
]
],
[
[
"Try data from the class notes. Dont use BLOSUM62 and set indel=-1, match=1, mismatch=0",
"_____no_output_____"
]
],
[
[
"def trial_1():\n X, Y = \"ACGC\", \"GACTAC\"\n similarity = global_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"BASIC\", C=1))\n print(maxval,index)\n print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n \ntrial_1()",
"1 (6, 4)\n\n 0 -AC-GC 5 \n AC C \n 0 GACTAC 5 \n\n"
]
],
[
[
"Try data from Rosalind Question.",
"_____no_output_____"
]
],
[
[
"def trial_2():\n X, Y = \"MEANLY\", \"PLEASANTLY\"\n similarity = global_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"BLOSUM62\", C=5))\n print(maxval,index)\n print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n\ntrial_2()",
"8 (10, 6)\n\n 0 -ME--AN-LY 9 \n E AN LY \n 0 PLEASANTLY 9 \n\n"
]
],
[
[
"Try another data set from Rosalind Question.",
"_____no_output_____"
]
],
[
[
"def trial_3():\n X, Y = \"\"\"\\\nILYPRQSMICMSFCFWDMWKKDVPVVLMMFLERRQMQSVFSWLVTVKTDCGKGIYNHRKYLGLPTMTAGDWHWIKKQNDPHEW\\\nFQGRLETAWLHSTFLYWKYFECDAVKVCMDTFGLFGHCDWDQQIHTCTHENEPAIAFLDLYCRHSPMCDKLYPVWDMACQTCH\\\nFHHSWFCRNQEMWMKGDVDDWQWGYHYHTINSAQCNQWFKEICKDMGWDSVFPPRHNCQRHKKCMPALYAGIWMATDHACTFM\\\nVRLIYTENIAEWHQVYCYRSMNMFTCGNVCLRCKSWIFVKNYMMAPVVNDPMIEAFYKRCCILGKAWYDMWGICPVERKSHWE\\\nIYAKDLLSFESCCSQKKQNCYTDNWGLEYRLFFQSIQMNTDPHYCQTHVCWISAMFPIYSPFYTSGPKEFYMWLQARIDQNMH\\\nGHANHYVTSGNWDSVYTPEKRAGVFPVVVPVWYPPQMCNDYIKLTYECERFHVEGTFGCNRWDLGCRRYIIFQCPYCDTMKIC\\\nYVDQWRSIKEGQFRMSGYPNHGYWFVHDDHTNEWCNQPVLAKFVRSKIVAICKKSQTVFHYAYTPGYNATWPQTNVCERMYGP\\\nHDNLLNNQQNVTFWWKMVPNCGMQILISCHNKMKWPTSHYVFMRLKCMHVLMQMEYLDHFTGPGEGDFCRNMQPYMHQDLHWE\\\nGSMRAILEYQAEHHRRAFRAELCAQYDQEIILWSGGWGVQDCGFHANYDGSLQVVSGEPCSMWCTTVMQYYADCWEKCMFA\"\"\", \"\"\"\\\nILIPRQQMGCFPFPWHFDFCFWSAHHSLVVPLNPQMQTVFQNRGLDRVTVKTDCHDHRWKWIYNLGLPTMTAGDWHFIKKHVV\\\nRANNPHQWFQGRLTTAWLHSTFLYKKTEYCLVRHSNCCHCDWDQIIHTCAFIAFLDLYQRHWPMCDKLYCHFHHSWFCRNQEM\\\nSMDWNQWFPWDSVPRANCLEEGALIALYAGIWANSMKRDMKTDHACTVRLIYVCELHAWLKYCYTSINMLCGNVCLRCKSWIF\\\nVKLFYMYAPVVNTIEANSPHYYKRCCILGQGICPVERKSHCEIYAKDLLSFESCCSQKQNCYTDNWGLEYRLFFQHIQMECTD\\\nPHANRGWTSCQTAKYWHFNLDDRPPKEFYMWLQATPTDLCMYQHCLMFKIVKQNFRKQHGHANPAASTSGNWDSVYTPEKMAY\\\nKDWYVSHPPVDMRRNGSKMVPVWYPPGIWHWKQSYKLTYECFFTVPGRFHVEGTFGCNRWDHQPGTRRDRQANHQFQCPYSDT\\\nMAIWEHAYTYVDQWRSIKEGQMPMSGYPNHGQWNVHDDHTNEQERSPICNQPVLAKFVRSKNVSNHEICKKSQTVFHWACEAQ\\\nTNVCERMLNNQHVAVKRNVTFWWQMVPNCLWSCHNKMTWPTRPEQHRLFFVKMRLKCMHEYLDVAPSDFCRNMQAYMHSMRAI\\\nLEYQADFDLKRRLRAIAPMDLCAQYDQEIILWSGGYIYDQSLQVVSCEGCSYYADCYVKCINVKEKCMFA\"\"\"\n similarity = global_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"BLOSUM62\", C=5))\n print(maxval,index)\n print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n\ntrial_3()",
"1555 (734, 745)\n\n 0 ILYPRQSMICMSFCF-WD--MWKKDVPVVLMMFLERRQMQSVF-S-WL--VTVKTDCGKGIYNHR-K--Y-LGLPTMTAGDWHWIKK---Q-NDPHEWFQ 99 \n IL PRQ M C F D W V L QMQ VF L VTVKTDC HR K Y LGLPTMTAGDWH IKK N PH WFQ\n 0 ILIPRQQMGCFPFPWHFDFCFWSAHHSLVVP--LNP-QMQTVFQNRGLDRVTVKTDC----HDHRWKWIYNLGLPTMTAGDWHFIKKHVVRANNPHQWFQ 99 \n\n 100 GRLETAWLHSTFLYWKYFE-CDAVKVCMDTFGLFGHCDWDQQIHTCTHENEPAIAFLDLYCRHSPMCDKLYPVWDMACQTCHFHHSWFCRNQEMWMKGDV 199 \n GRL TAWLHSTFLY K E C V HCDWDQ IHTC IAFLDLY RH PMCDKLY C HFHHSWFCRNQEM M D \n 100 GRLTTAWLHSTFLY-KKTEYC-LVR---HS-NCC-HCDWDQIIHTCAF-----IAFLDLYQRHWPMCDKLY------C---HFHHSWFCRNQEMSM--D- 199 \n\n 200 DDWQWGYHYHTINSAQCNQWFKEICKDMGWDSVFPPRHNCQRHKKCMPALYAGIW-------MATDHACTFMVRLIYTENIAEWHQVYCYRSMNMFTCGN 299 \n W N Q WF WDSV P R NC ALYAGIW M TDHACT VRLIY W YCY S NM CGN\n 200 --W---------N--Q---WFP-------WDSV-P-RANCLE-EGALIALYAGIWANSMKRDMKTDHACT--VRLIYVCELHAWLK-YCYTSINML-CGN 299 \n\n 300 VCLRCKSWIFVK-NYMMAPVVN--DPMIEAFYKRCCILGKAWYDMWGICPVERKSHWEIYAKDLLSFESCCSQKKQNCYTDNWGLEYRLFFQSIQMN-TD 399 \n VCLRCKSWIFVK YM APVVN YKRCCILG GICPVERKSH EIYAKDLLSFESCCSQK QNCYTDNWGLEYRLFFQ IQM TD\n 300 VCLRCKSWIFVKLFYMYAPVVNTIEANSPHYYKRCCILGQ------GICPVERKSHCEIYAKDLLSFESCCSQK-QNCYTDNWGLEYRLFFQHIQMECTD 399 \n\n 400 PH----Y--CQTHVCW-ISA-MFPIYSPFY--TSG-PKE--FYM-WLQARI-DQN---MHGHANHYV-TSGNWDSVYTPEKRA--G--V-F-PV------ 499 \n PH CQT W P FY P Y L I QN HGHAN TSGNWDSVYTPEK A V PV \n 400 PHANRGWTSCQTAKYWHFNLDDRPP-KEFYMWLQATPTDLCMYQHCLMFKIVKQNFRKQHGHANPAASTSGNWDSVYTPEKMAYKDWYVSHPPVDMRRNG 499 \n\n 500 --VVPVWYPPQMCN---DYIKLTYEC----E-RFHVEGTFGCNRWD-L-GC-R-RYI--IFQCPYCDTMKI---C--YVDQWRSIKEGQFRMSGYPNHGY 599 \n VPVWYPP Y KLTYEC RFHVEGTFGCNRWD G R R FQCPY DTM I YVDQWRSIKEGQ MSGYPNHG \n 500 SKMVPVWYPPGIWHWKQSY-KLTYECFFTVPGRFHVEGTFGCNRWDHQPGTRRDRQANHQFQCPYSDTMAIWEHAYTYVDQWRSIKEGQMPMSGYPNHGQ 599 \n\n 600 WFVHDDHTNEW-----CNQPVLAKFVRSKIVA---ICKKSQTVFHYAYTPGYNATWPQTNVCERMYGPHDNLLNNQQNVTFWWKMVPNCGMQILISCHNK 699 \n W VHDDHTNE CNQPVLAKFVRSK V ICKKSQTVFH A A QTNVCERM NVTFWW MVPNC L SCHNK\n 600 WNVHDDHTNEQERSPICNQPVLAKFVRSKNVSNHEICKKSQTVFHWA-C---EA---QTNVCERMLN-NQHVAV-KRNVTFWWQMVPNC----LWSCHNK 699 \n\n 700 MKWPT---SHYV-F--MRLKCMHVLMQMEYLDHFTGPGEGDFCRNMQPYMHQDLHWEGSMRAILEYQAEHH-RRAFR--A--ELCAQYDQEIILWSGGWG 799 \n M WPT H F MRLKCMH EYLD P DFCRNMQ YMH SMRAILEYQA R R A LCAQYDQEIILWSGG \n 700 MTWPTRPEQHRLFFVKMRLKCMH-----EYLD--VAPS--DFCRNMQAYMH-------SMRAILEYQADFDLKRRLRAIAPMDLCAQYDQEIILWSGGY- 799 \n\n 800 VQDCGFHANYDGSLQVVSGEPCSMWCTTVMQYYADCWEKCMFA 842 \n YD SLQVVS E CS EKCMFA \n 800 I--------YDQSLQVVSCEGCSYYADCYVKCI-NVKEKCMFA 842 \n\n"
]
],
[
[
"#### Process a BIG dataset - warning can take some time to execute",
"_____no_output_____"
]
],
[
[
"def trial_4():\n [X, Y, *_] = open(\"rosalind_ba5e.txt\").read().split(\"\\n\")\n similarity = global_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"BLOSUM62\", C=5))\n print(maxval)\n print_formatted([global_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n \ntrial_4()",
"11229\n\n 0 RYYAPL---SR--FAYHSHIQGHDCIWLKFLRHFCPEDRVTL-A------MHMAAVARVMADR-NIK-FRKF-D--DLNGCHTVLKNPDLVSMMFYFIHR 99 \n RYYAPL FAYHSH QGHD IWLKFLRHFCPEDRVTL A MHMAA K RKF LNGCHTVLKNPD YF HR\n 0 RYYAPLAPCNQNAFAYHSHAQGHDNIWLKFLRHFCPEDRVTLCAGMPWMKMHMAAHMYPQPEAWTMKQIRKFWQHKNLNGCHTVLKNPD------YF-HR 99 \n\n 100 EGTFLMMVRGEKWIKSPFWFQHEGKICE------Y--NCFLHTNGAFYTIAACELKFLTQNNVPQKWSLDDGSKD-------MFQQDKNIHCPQFM-PWL 199 \n E TFL RG K PFWFQHEGKIC Y NCFLHTN AFYTI ELKF N PQKWSLDDGSKD MFQQDKNIHCPQFM \n 100 EQTFLVPCRG-KY-DAPFWFQHEGKICQPRAVMSYHGNCFLHTNTAFYTI---ELKF---N--PQKWSLDDGSKDFFEPPANMFQQDKNIHCPQFMINHC 199 \n\n 200 --VVMTV-FGGKDAFGHTM----Y-NP-EKIELIHFLDWQFWWWVEQAWKEVHKCDSWWIHPNMSDNLAQFPMGKKWSACHQKGRERRSTCYK---FKDW 299 \n T FGGKDAFGHTM NP KIE IHFLDWQF WW E WWIHPNMSDNLAQFPMGKK CHQ GRERRSTCYK KDW\n 200 QQEENTFNFGGKDAFGHTMLVVKQTNPLIKIERIHFLDWQFDWWEEFDECDIQSFTYWWIHPNMSDNLAQFPMGKK---CHQCGRERRSTCYKIIDMKDW 299 \n\n 300 NENFVTYGYIENWADFQWWCADLNITDWEICNPSDEMGINAIGAEYWNKWYAETRWFNVVMETWINGSARMSS-HDFCFDGPWDQNPNPERPVYYGMRPQ 399 \n ENF YGYI D W CADLNITDWEICNPSDEMGI AIGA YWNKWYAET N M HDFC DGPWDQNPNPERPVYYGMRPQ\n 300 HENFRAYGYIFQ-SD-AW-CADLNITDWEICNPSDEMGITAIGAGYWNKWYAETLQERTHFTGEFN-VV-METWHDFCPDGPWDQNPNPERPVYYGMRPQ 399 \n\n 400 WQGEMENRTPVSYMNAIKVMCNTSRLLLPLLSTIADNCVTSKQITPFIKIERVNHKHLDDKGRHAFIYFKNQLGMQDQPPEPVIQNR----EHIFVPVDP 499 \n QGEME RTPVSYMNA M LPLLSTIADNCVTS LD K RHA G DQPPE IQNR HIFVP D \n 400 IQGEME-RTPVSYMNANN-MLK--KVSLPLLSTIADNCVTSYE-D-WDQAQ-I----LD-KSRHADMSIYCS-G--DQPPEFRIQNRGYVMMHIFVPLDS 499 \n\n 500 -DKH-DMHDAKACERDMEHHNFSLCNTLSTHIYTMSCCPRHPVFQLMRE-H-PVNH---PAWYNAHLMKGMCCRLTRCQQDNKHLVWKYGWYPPNQF-N- 599 \n DKH AK D CN LSTHIYTMSC PRH FQLMRE PVNH AWYN HLM CCRLTRCQQDNKHLVWKYGWYPPNQF N \n 500 CDKHTEIRIAKK-DHDAKAQE-T-CNDLSTHIYTMSC-PRH--FQLMRETAVPVNHRERTAWYNFHLM---CCRLTRCQQDNKHLVWKYGWYPPNQFMNT 599 \n\n 600 ----TPWG------I-H-SISH-L---MWEDLCQVT--TDQCECHQPCQMG-KVFDEWVQDQFAHGWDCLHHVVPASYHHTYHNHMYYVYEYELQHSLDV 699 \n TPWG H ISH MWEDLCQVT Q HQ QM KVF E WDCLHHVVPASYHHTYH MY VYE EL HSLDV\n 600 CAYVTPWGRQRHTQMQHWDISHPFDHQMWEDLCQVTDHIQQFDMHQIWQMRLKVF-E-LNQMGGDMWDCLHHVVPASYHHTYH--MYCVYEMELFHSLDV 699 \n\n 700 CYLGYTEIKA-D-----D-F------FR-QKYYYISMSGYG----H--HVGPDDAAHARRVENWYEEMMDKIMNS------T-GLDEHRKN--S----CC 799 \n CYLGYTEI A D D KYYYISMS YG H HVGPDDAAHARRVENWY MMDKIMNS GLDEHRKN S CC\n 700 CYLGYTEIQADDSCTQWDHWCLLAMCYSGGKYYYISMSKYGEHKAHECHVGPDDAAHARRVENWYNNMMDKIMNSQLLAQEAHGLDEHRKNSFSQTNQCC 799 \n\n 800 PK--IRTDKEDAIRPYPHYYKHLNSLSMHHLRCRPNVYNKNLDECNETYIGMPDKIMLRFWINGTYA----CVKR-Y--NKMGSRKI-SKFRMYNPVFNY 899 \n PK IRTD EDAI P PHYYKHLNSLSMH N Y K D T IGMPDK MLRFWINGTYA C R NKMGSRKI NPVFNY\n 800 PKHSIRTDLEDAIAPSPHYYKHLNSLSMHY-----NKY-K-VDR---TSIGMPDKDMLRFWINGTYAMVKRCWRRAHQMNKMGSRKIYTRLNQ-NPVFNY 899 \n\n 900 NCCH---K--A-CSG------H-WIHWNWFYRPGCGWDK---I-EE--DREGLN-FKCYDIQEFDHDRKIPWELWLTANREDCAYEFRYLMQKMCFQ-PN 999 \n CCH A CSG WIHWNWF P W I G YD QEFDHDRKIPWELWLTANREDCA EFRY MQK Q P \n 900 GCCHSVFEAFARCSGMRWCCLRWWIHWNWF-EPKQIWENHTAIMPTLWNNAGVKAHSHYDMQEFDHDRKIPWELWLTANREDCAREFRYPMQKRP-QIPP 999 \n\n 1000 VGMCFKQFS----SINYIGWEP-TQTVNCA---AGEPMVKGNVWPECHVCCEYEGVLSYWAVPINMTRALKTKCLHDTATYYVMRSPTEQWRQDCSFWGY 1099 \n CFKQFS INYIGWEP A AGEPMVKGNVW V G SYWAVPINMTRALKTKCLHD ATYYVMRSPTEQ Q \n 1000 CMICFKQFSDTQPAINYIGWEPVNEVMYWAQTVAGEPMVKGNVW----V-----G--SYWAVPINMTRALKTKCLHDWATYYVMRSPTEQGYQKV-V-E- 1099 \n\n 1100 QYVHQVHHADCKQHAIPYKKHSGNLPTQMSYPTYRAVWVKTKECSTFGTRNMCILGANHMYTVQYEPWRWKPAYDAESVTCWWSIG--G-----GVENR- 1199 \n Q QVHHADCKQH I YK HS LPTQM V KTKECSTFGTR L YTVQYEPWRWKPAYDA SVT WWSIG VENR \n 1100 
QF--QVHHADCKQHIIDYKFHS--LPTQM-----K-V--KTKECSTFGTR----L-----YTVQYEPWRWKPAYDAQSVTGWWSIGKMDMEVQFPVENRN 1199 \n\n 1200 ILDSGHPIFDWDECCVIINHHACCYFQWPYQFVTNQYINTCTFMLEFMTASAPSKWNHL----G--FYKMKIRHESHLQ----G--GNNYATTPWQ--R- 1299 \n SGHPIFDWDE C HACCYFQWPYQF TNQYINTCTFMLEFMTASAP KWNHL FYKMKIRHESHLQ GN YATTPWQ \n 1200 KVHSGHPIFDWDESCKMFAMHACCYFQWPYQFGTNQYINTCTFMLEFMTASAPWKWNHLRRRTDIKFYKMKIRHESHLQILIEDCCGNFYATTPWQICNL 1299 \n\n 1300 LRLDT-CFTP---RTCAYMIRYITSFIVFTQVTHYGPHEFYKCLGCSIRTVDCWFRQACPHLGNYGMHQIQRQMMYCVSYHFPIMTHFWCIMPYDYNRLR 1399 \n LRLD F AY IRYITSFIVFTQVTHYGP LG SIR VDCWFRQ NYGMHQIQRQMMYCVSYHFPIMTHFWC PY RL \n 1300 LRLDEHFFVGILLHDNAYFIRYITSFIVFTQVTHYGP------LGRSIRSVDCWFRQ------NYGMHQIQRQMMYCVSYHFPIMTHFWC--PYQAFRLE 1399 \n\n 1400 CRTRATQAHWPVNGWNTNEAEWSLWRINFIFADAAQETAFAF-W-----NFIYCWCIAHGTFQVNLLQTISAANTLEHCCAWGVGCDGETMRMDRRLIVF 1499 \n R TQA WPV G NTNEAEWSL RINF ADA QETAFAF NFIYCW IA QTI A NTL W D ETMRM RRLIV \n 1400 TRGD-TQAKWPVYGANTNEAEWSL-RINFTLADADQETAFAFSFAVVQTNFIYCWEIAN-LYHP-M-QTIHARNTLTK---W----DLETMRMVRRLIVN 1499 \n\n 1500 PNSDEYGGMSGNKAHIETTKDNFRWCPPHTEEIRSYDEMVSNSFPCAITAWRLFFYVPGNNTHHAQAKYIFTQVVLHDGNGFQWGDKMEELGDMCRVMKG 1599 \n PN D YGGMSGNKA I TTKDNFRWCPPHTEEIRSYD VSN ITAWR V AKYIFTQVV HDGNGFQWGDKM L \n 1500 PNIDNYGGMSGNKAKIHTTKDNFRWCPPHTEEIRSYDNRVSN-----ITAWR---QVH-QQFLYV-AKYIFTQVVSHDGNGFQWGDKMP-L--Y--T-E- 1599 \n\n 1600 ILPWHQFKPCAEQHFSPKDESPQSHHWQPIACNWSKQDWGMLQWM-VRQFHWYN-Q----SHIGWCLIVDMWMRKQLTVSCRNEFDWLTGDMRVDYYEHG 1699 \n L WHQFKPCAEQHFSPKDESPQSHHW SKQDWGMLQW VRQFHWYN SH GWCL MWMRKQLTV CRNE DWLTGD Y \n 1600 -L-WHQFKPCAEQHFSPKDESPQSHHWVNTR-DASKQDWGMLQWAHVRQFHWYNCKDHCFSHQGWCL---MWMRKQLTVHCRNEADWLTGDL-T-Y-SNA 1699 \n\n 1700 WMFMMLWRTWAMHYGGMWPVAACIRKVKIDRQACCHNEYSVGIAHWWGTKECMDAVKAANYQRNFGQSIHYILYNDKEAQ-GLVDTFEHCNQYTV-KKKI 1799 \n W MHYGGMWPV I DRQA CH G GTK MDAV AAN QRNFGQSI YIL Q TFEHCNQYTV KKK \n 1700 -----IWM---MHYGGMWPV---I-----DRQATCH-----G-----GTKVSMDAVAAANKQRNFGQSIPYILQGLQTFQVTVLMTFEHCNQYTVKKKKL 1799 \n\n 1800 NCQVSQFMCGRI--L-QAHTLQSKD-SALNMVHDPDMKYPIVLIFSKHICNFISYPTMKY------EFNPITCSWMTW-RR--AKK---NCLHKLADRCL 1899 \n N CG I A NMVHDPDMKYPIVL FSKHICNFISYPTM Y E PITCS CLHKLADRCL\n 1800 NNKICYSNCGMIGFFWVSQFMCGRILCAHNMVHDPDMKYPIVLLFSKHICNFISYPTMCYMFRMGCEVTPITCSRLPHNKKGVSTPVVISCLHKLADRCL 1899 \n\n 1900 PRTTLRVDIMWYSTQSRGRCSHFEHNHWKKIQPEQFQCSWWDNSVDKQWPGD-E--MI-CANFTK-LQSRSLWCAQLFSTCHNTYLFVPFQPFNIINWLI 1999 \n PRTTLRVD W STQS GRCSH EH H W SVDKQWPGD E T Q WCAQLFSTCHNTYLFVP Q FNIINWLI\n 1900 PRTTLRVDKIWKSTQSYGRCSHTEHDHHHRM----I-LGW--KSVDKQWPGDAEAGKLQLMRSTSDIQTMG-WCAQLFSTCHNTYLFVPYQWFNIINWLI 1999 \n\n 2000 AIGSGHDPKITYDNRNCPCPTDVQTLMGWASYGDQWFKGCWY-K-SD----C----WNYNCTI----C---EA-FAWTRKTYLCFTNYAGHWKHEIPENQ 2099 \n AIGS HDPKITY NRNCPCPT VQTLM Y WFKGCWY SD C W CTI C EA FAWTRKTYLCFT H EIPENQ\n 2000 AIGSMHDPKITYWNRNCPCPTNVQTLMASGDY-TEWFKGCWYPQVSDIENKCFMSGWWFVCTICVPHCFGNEAEFAWTRKTYLCFT----H---EIPENQ 2099 \n\n 2100 RACDEIIKSPQTYGDKTRPKIDIWMKFENFCPDDGDWMHKRMLSKNVAV-RTPSGQAN-WTDSNAQLT--D---TEVNHTGKAIMYTPYHGLVHCGGQEY 2199 \n RACDEI TYGD P MKFENF D WM N AV PSGQAN N T VN IM TPYHG C G \n 2100 RACDEI-----TYGD---P-----MKFENF-KD---WM-P---DPNPAVNQMPSGQANHFNELNKNHTGWNKFHVQVNADAPPIM-TPYHGGGQCYGTPM 2199 \n\n 2200 GIKGQDMMTSPGQAYKPPIDVFIDTNETGTRQTINKGC-N-NR-KGRDDHKNPDIMLGMRMFD-DANSKPWFTMKATNHRCHAVISQTHRHGYS-KAPDR 2299 \n KGQD P NETGTRQTINK C RDD N DI LG RM K NHRCHAVISQTHRH YS R\n 2200 RVKGQDK-P-PIDVLRHMLICDSSSNETGTRQTINK-CPRITKIRSRDD--NIDI-LG-RMYSPKMGIKDKGLLETRNHRCHAVISQTHRHQYSMRKYNR 2299 \n\n 2300 KL-WKLQQG-EQM--TCVVFPY-NIDGEPFQQYCEFYWM---HMGFCIKGYNYRQTRYGPYLWQHNSITHCVPRCSYFDRSH-A-D----N--A-EQGLH 
2399 \n QG M TCVVFPY F QYCEFYWM MGFCIKGYNYRQ YGPYLWQHNSI CSYFDRSH D N EQGLH\n 2300 LFHYQVEQGWHEMPATCVVFPYPKLS-R-FPQYCEFYWMDDFEMGFCIKGYNYRQKLYGPYLWQHNSIKFVNLACSYFDRSHIVFDWNEVNKHGLEQGLH 2399 \n\n 2400 DCMIFSIAWTAQITSS------SFDEGHYSQC-HLT---NLKNKISTALWHPNIAPPKVNVNIFWPEMRQLGYMIVRIKNTDAEIWKKTEPVQGRCISVT 2499 \n DCMIFSIAWTAQITS SFDEGHY QC LT KISTA HPNIAPPKVNVNI WP R LGYMIVRIKN IW G T\n 2400 DCMIFSIAWTAQITSNLIVPKGSFDEGHYFQCMRLTVCIHMTTKISTA--HPNIAPPKVNVNIAWP--RHLGYMIVRIKN----IWWGFCS-NGKETEMT 2499 \n\n 2500 TEINPMMPE-FEVPEAKTDGEWGWNQTEGGTSDFSWSGRPCVSN-TFVEIMRKHNAHSW---CPNERPLDFKPEGQLFANNWSNHFWMIL-VVPWIQLPM 2599 \n PM FEVP AKTDGEWGWNQTEG S VSN T R W C LDFKPEGQLFA NHFWMI VVPWIQLPM\n 2500 -D--PMPKQWFEVPHAKTDGEWGWNQTEG-KCN-SMF---LVSNTTSCQVSR-NTWRPWIVLCGEIAHLDFKPEGQLFA---CNHFWMIVCVVPWIQLPM 2599 \n\n 2600 DGTCECYWYERRMGGSMYSSFQWVYEFFMDTEGTYF---TRFENLIYWKYDWLISPSAFTSF--NMTDKMVHYPPLHVRNGYCGKNRGEQWYYEKK---A 2699 \n DGT ECYWYER MGG WVYEFFM EGTY TRFENLIYWKYDWLISPSAFT NMTDKMVH NG KNRGEQWYYEKK A\n 2600 DGTWECYWYERTMGG-------WVYEFFMIFEGTYWDIRTRFENLIYWKYDWLISPSAFTPIPKNMTDKMVH---C---NG---KNRGEQWYYEKKPSMA 2699 \n\n 2700 NHYEKVYCCHNKFVPSWQRSKWSMHWRWWI-GIWE-WESTYNVEPPHTMGQF-NVGRAQSMTGPRGWEPDMRRIQLEHMVFYLPCKNEFPYSGGLKSFAM 2799 \n NHYEKVYCCH VPSW W W W E E VE P TMGQF VG AQSMTGPRGWEPDMRRIQL M Y K \n 2700 NHYEKVYCCH-E-VPSW--NYWESKWIWETESTYEQYEIC-QVEFPQTMGQFWDVGFAQSMTGPRGWEPDMRRIQL--M--QV---HQ--YTS--K-YTD 2799 \n\n 2800 MRCLCEGSISNACFIDRKNHS----VEWMTNDADQIWEPELWPRYWLMP--PDKYQRSRKDFFERRPLCGYCIMTMALWWKFCQNNCVIEGHEPRQLREH 2899 \n GS SNACFIDRKN S VEWMTNDADQ W YW MP PDK R R GYCIMT N CVIEGHEPRQLREH\n 2800 DE-F--GSCSNACFIDRKNPSAGCHVEWMTNDADQTG----WEQYWTMPFFPDK-DIER-GC-NQRDF-GYCIMT---------NPCVIEGHEPRQLREH 2899 \n\n 2900 HKMQCHMYPLAQEELWDGHFKTDMWAKWKEIASWLVYDLISGCTMHQHWCDFGRPPIPSDWFPDSQC---RIPTDNVCDNGI--R---IWRAYLEDMNMM 2999 \n H MQCHMYPLAQ EL DGHFKTDMWAKW EI G Q WCDF IP DW C RIPTDNVCDN IWRAYLE \n 2900 HAMQCHMYPLAQFELADGHFKTDMWAKWNEIET-M-HQ---G----Q-WCDF----IPQDWWLEYPCRSIRIPTDNVCDNNMPKKIIIIWRAYLE----- 2999 \n\n 3000 KPVLLQMPHTDKPHHWTHMCHKADMNWHAEGYVDFCFRTTLNVGALLTF------KLI-I-WGNFRHAGNGDPINRWHKEVADMPKIVITVRSAIWKDGA 3099 \n LLQMPHTDKPHHWTH CHKADMNWHA G D T L VGALL F K GNFRH GNGDPINR DMP I TVRSAIWKDGA\n 3000 ---LLQMPHTDKPHHWTHPCHKADMNWHA-GD-D---GTVLRVGALLAFKHDKYYKFASLNFGNFRHNGNGDPINR--P---DMP-I--TVRSAIWKDGA 3099 \n\n 3100 IYSMTRARTYI-------MGDTKCHAIHIFWFPTICQMGYKCIWNFSQAIGNQWHQDFGQL--S-TYRHNASHPIKNATT-RFHPHNCDRRDHG---IMV 3199 \n IY M RARTY MGDTKCHA HIFWFP Q IWNFSQAIGNQWH RHNASHPI DRR HG MV\n 3100 IYWMVRARTYMKWYCYCQMGDTKCHA-HIFWFPRKFQDCFHFIWNFSQAIGNQWHNHPKEAPDDFAHRHNASHPIAERNSFQATTRESDRRLHGQTHVMV 3199 \n\n 3200 YSKHSKV----Y---CWFCICDRLSATDQ-------CHTN------D-WHMEICFHRVRFLGWD-HLG-PEEVIDRYRVSDRW--IDNFGPMCNCLTISG 3299 \n YSKHSKV CWFCICDRLSAT CHTN W MEIC R R L D P VIDRY S W DNFGPMCN L \n 3200 YSKHSKVLLFWFEVHCWFCICDRLSATNKGLGWGAICHTNWHKKIIEYWSMEIC--RARHL-LDMRVRYPD-VIDRYP-S-WWKSCDNFGPMCNHLCVQH 3299 \n\n 3300 VNFNWFICFMPYDPMFWTNHHY----TGQVA-SDNKMHNARENLVLNFF-YDL-FSFWKCNLRWVMEHGGEW--GE---M--FDNYPCYI-----QWCRC 3399 \n W Y FW T Q NKMHNARENLVLNF F KCNLR VME GG W E Y QWCRC\n 3300 ATC-W-LNHH-YTG-FWIDYGWCASETNQIVHCQNKMHNARENLVLNFSGWVCQMPFSKCNLRDVMEDGGGWHLQERKRVRGWHAYMHFMNDNHTQWCRC 3399 \n\n 3400 DPEVDWKGCLHGR--H---E--RA--VIWAG---YKKYTHAMV---GF----W---VYSPQRYYREVDIIKWVNNNWFKT-----M-FKMTIAVWDKMYG 3499 \n DPEVDWK CLH A VIW G KKYTHA V F W VYSPQRYYREV IIKWVNNNWFKT FKMTIAVWDKMYG\n 3400 DPEVDWKTCLHEQFDNIMFNCAHAYDVIWNGPIHWKKYTHAVVLHPNFHEPKWLEKVYSPQRYYREVNIIKWVNNNWFKTNWSDGVWFKMTIAVWDKMYG 3499 \n\n 3500 
PLWYKRPFPEMEGEGY--YWTLEFATMYDCYPCYDTGITVLCWNNLHHRIGCARTERREGVQIVFWCLMDKPKFLDTCDGPWKIKFVSY-IWHVKY--YH 3599 \n PLWY FP ME E YWTLEFATMYD YPCYDTGITVLCWNNLHHRIGCARTER QIVFWCLMDKPKFLDTCD PW Y IWH H\n 3500 PLWYT--FPWMEEENWLKYWTLEFATMYDAYPCYDTGITVLCWNNLHHRIGCARTERIN--QIVFWCLMDKPKFLDTCDRPWW-STI-YTIWHSQHSSIH 3599 \n\n 3600 SEHSSIMWNIVHEK---M--WFFCIVKNYGTPTNCQMDG-WCSEDTHIQDMFWRQYAFHPPLQDPWFDGPPTHPKHISYKDNISEVMILLFYIPPAFNSV 3699 \n H NIVHEK WFFCIV NYGTPTNCQMD W D H Q WFDGPPTHPKHIS K NISEVMILLF \n 3600 GKHKQ-E-NIVHEKPVSICGWFFCIVMNYGTPTNCQMDQFWRQYDFHSYKPVPLQ-D-KKKAYN-WFDGPPTHPKHIS-KENISEVMILLFCEWKNMLYI 3699 \n\n 3700 P-GEAVDSP-QHFY-TQFTVDFTYM--DN----GHNLYEYHSCGSGMSCNIHTLDHNSCDMKNRYSCSDFGGGVRELEFDPTWTFVNQNADYQVRNCRHS 3799 \n P EAV SP Y FTVDF YM DN GHNLYEYHSCGSG N HNS Y SDFGG VRELEFD V NCRHS\n 3700 PVREAVRSPTMKTYDIKFTVDFYYMDTDNKIYYGHNLYEYHSCGSG-T-N--AI-HNSF-YP--YLISDFGG-VRELEFD--YQ-VSK-L-FRAANCRHS 3799 \n\n 3800 KD-L-PRTNDAPTANNF-RLMRLRKTLEYGVLV-L--PCIQRRFTQHGEIFAML-LQVLLHLFDFFEDQMPCNWVHHYTGWPHLTISLLGDFKMNWIKNI 3899 \n KD L PRTN APTANNF T LV PCIQRRFTQHG L D FEDQMPCNWV T ISLLGD KMNWIKN \n 3800 KDELKPRTNQAPTANNFLKTLEYGQT-DSKPLVDSENPCIQRRFTQHGMLYQVLVVRAGREPEDGFEDQMPCNWV---T------ISLLGDQKMNWIKNT 3899 \n\n 3900 AFFIDTWSLAYPQSKPCPGKNNPDDVLSGPEFKYHELNVINQHRIMATVLYSDTTHEVVVCLMWDEWTAEWSPFDRFLIKPAHWLE--YFH--PSHNRV- 3999 \n AFFID W L KPC VLSGPEFK INQHRIMATVLY W E TAEWSPFDRFLIKPAH LE Y S NRV \n 3900 AFFIDWW-L-----KPC--------VLSGPEFK------INQHRIMATVLY------------WHE-TAEWSPFDRFLIKPAHNLETSYIQSDDSYNRVY 3999 \n\n 4000 --VVFWQCLIEQIQNMWCNPLWKDSLDMDK-RLMLSSYDFQVLCCKETVRMQFFCLADRRCKHSNRHKEK-----W--G----FG--LKMYWEKQQNTVL 4099 \n FWQCLIE IQ KDSLDMDK L MQFFCLADRR KHSNRHKEK G FG LKMYWEKQQN VL\n 4000 KNFCFWQCLIEEIQ-L----V-KDSLDMDKYDCKMGDTGESMLP-SVSWNMQFFCLADRRSKHSNRHKEKSLVRHFMCGFKSCFGHALKMYWEKQQNMVL 4099 \n\n 4100 TGMICANPAPQFKENDIPPTLWTYW-I-WWPPEHYWQFCSVANTETFMLVTAWRFYADFSWTFGYMCRAAKYYHCFSPAWW-C----K-VTWYVHVKCTM 4199 \n TGMI ANP PQFKEND W I WWPPEHYWQFCSVA E LVT WR HCFSPAWW C K VTWYVHVKC \n 4100 TGMILANPWPQFKENDHKIFMYSAWIICWWPPEHYWQFCSVAS-E---LVTWWRMDVMMP-NIADF-KIT-FGHCFSPAWWCCKALRKNVTWYVHVKC-- 4199 \n\n 4200 HTWQWN-WAHDQVQTMAIIGARGVGDARFKLFH-IDAIVIHNTSEPDRMTMYCISFTMEVEYSPWVDLWGTGW--GG-P--WNSKKIHACEAMDMCKW-- 4299 \n H W W MAIIGARGVG ARF FH D VIHNTSEPDRMT CISF EVEYSPWV LWGTGW G WNSKKIHA E CKW \n 4200 H-WTWQLYLVIPADQMAIIGARGVGRARFNVFHQYD-VVIHNTSEPDRMT--CISFRGEVEYSPWVCLWGTGWPYAGDKNNWNSKKIHAKE--H-CKWVG 4299 \n\n 4300 H----KDMGCTHSVDGENICERCQSDECWRATGSAPGIIWSSVKEINNKAIENHDWGQSRVDINNP-MMHEWFLDFRRDQVKSHFERAKWVIMSYWLHGN 4399 \n H KDMG THSVDGENICERC S ECWRATG A G IWSSVK I I P EWFLDFRRDQVKSHFE VIMSYWLHG \n 4300 HAFESKDMGNTHSVDGENICERCWSPECWRATGKANGRIWSSVKLIDDW-IQSRI-NNPVMQMILPGWVMEWFLDFRRDQVKSHFE----VIMSYWLHGW 4399 \n\n 4400 VN-CQACPCK----SQGQMMNIWGCGTNHAPMWDHIGMWYAIQKRICHGFIQHKCSGATRFYAN-TGRPMFMCQSCRKQNVDPAFMMDSYPIPQSTVWDD 4499 \n VN QA P K QG MMNIWGCGTNHAPMWDHIGMWYAI KRICHGFIQHKCSGATRFY TGRPMFMCQS AFMMDSYPIPQS WDD\n 4400 VNGHQAHPNKRYRDGQGHMMNIWGCGTNHAPMWDHIGMWYAIEKRICHGFIQHKCSGATRFYRQYTGRPMFMCQSY--WS-E-AFMMDSYPIPQS--WDD 4499 \n\n 4500 TQWSNFCHP-C------THCWVQMRCL---WLWRYDN-NFPEIVM-RMRWRCMKKSMFVFTPGCRYRFWLDIDFEFIDASCRDWT-FTLNNVNTVFNDAD 4599 \n Q SNFCHP C HCWVQMRC W WRYDN FPEIVM W F F FWL S WT F N AD\n 4500 NQYSNFCHPCCRYDNHVNHCWVQMRCVKTQWEWRYDNCLFPEIVMKKLFW--LDID-FEFRD---WTFWLNMAL-W---SWYVWTVFNYRRLN--WHTAD 4599 \n\n 4600 NQSKRR-----M-Q-GVHCSSLVWDWPESMGLHENTCEWVCA-CNCLD-RNRKTI-YGFRGSYRLDSSNGRFGHCAAFLTCTWEMDYFLMQILEISQCCT 4699 \n NQS RR VHCS LVW PESMGLHENT EW C C C Y RGSYRLDS RFGH AAFLTCTWEMDYFLMQILEISQCCT\n 4600 
NQSHRRAYNFDIHNCNVHCSLLVWVPPESMGLHENTEEW-CSFCMCATFQMAALVMYYQRGSYRLDS---RFGHTAAFLTCTWEMDYFLMQILEISQCCT 4699 \n\n 4700 YALWHSFRWWFSSAGQKAEIRHG-D---DLGNCTEHVDHDGHSFYLMIAACQDLA--RAPTN-HTDSSTGFHMEKMNMSQMV-K--IGEPLPYLIYYAET 4799 \n H RWWFSSAGQKAEIRHG DLGNCTEHVDHDGHSFYLMIA A HTDSSTG KMNMS MV K IGEPLPYL YYAET\n 4700 ICS-H--RWWFSSAGQKAEIRHGWQVYPDLGNCTEHVDHDGHSFYLMIAQHLMMAWSQCQYGCHTDSSTG----KMNMSMMVDKLTIGEPLPYLSYYAET 4799 \n\n 4800 WCWIQVTEGMVTNPARMLYWEAGKMLKRQTYFKGFLWGFKYCMRARSGDGIPGDKLYPIE-Y-QAQRHWS---I-----FPNMFVERHKEMDQI-HYADH 4899 \n WCWIQVTEGMVT PARMLYW AGK GFLWGF YCMRA G IPGDKL E QAQRHWS FPNMFVER KEMDQI YAD \n 4800 WCWIQVTEGMVTLPARMLYWHAGK---------GFLWGFEYCMRA--G--IPGDKLIEREFFSQAQRHWSPDWAGQHPDFPNMFVERSKEMDQICIYAD- 4899 \n\n 4900 EAHNFGCMKPEVIVIHAFYTDYGYADWHIVCCAKSQQWVWRDFTAFNTVDG 4950 \n GCM FYTDYGYADWHIVCCA SQ WVWRDFTAFNTV G \n 4900 -----GCMN--T---R-FYTDYGYADWHIVCCAFSQVWVWRDFTAFNTVGG 4950 \n\n"
],
[
"def printmat(mat, X, Y):\n ylen, xlen = mat.shape\n print(\" \"+\" \".join(Y))\n for x in range(xlen):\n print(\" | \"+\" | \".join(f\"{mat[y, x][1]:3}\" for y in range(ylen))+\" |\")\n if x+1 < xlen: print(f\"{X[x]:1} -\",\"-\"*(ylen*6),sep=\"\")",
"_____no_output_____"
],
[
"def trial_5():\n X, Y = \"ACGC\", \"GATTGA\"\n similarity = local_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"BASIC\",A=4, B=1, C=2))\n printmat(mat, X, Y)\n print(\"Value is\",maxval,\"at\",index)\n printmat(mat[0:index[0]+1,0:index[1]+1], X, Y)\n print()\n print_formatted([local_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n\ntrial_5()",
" G A T T G A\n | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\nA -------------------------------------------\n | 0 | 0 | 4 | 2 | 0 | 0 | 4 |\nC -------------------------------------------\n | 0 | 0 | 2 | 3 | 1 | 0 | 2 |\nG -------------------------------------------\n | 0 | 4 | 2 | 1 | 2 | 5 | 3 |\nC -------------------------------------------\n | 0 | 2 | 3 | 1 | 0 | 3 | 4 |\nValue is 5 at (5, 3)\n G A T T G A\n | 0 | 0 | 0 | 0 | 0 | 0 |\nA -------------------------------------\n | 0 | 0 | 4 | 2 | 0 | 0 |\nC -------------------------------------\n | 0 | 0 | 2 | 3 | 1 | 0 |\nG -------------------------------------\n | 0 | 4 | 2 | 1 | 2 | 5 |\n\n\n 0 A-CG 3 \n A G \n 0 ATTG 3 \n\n"
],
[
"def trial_6():\n X, Y = \"MEANLY\", \"PENALTY\"\n similarity = local_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"PAM250\", C=5))\n printmat(mat, X, Y)\n print(\"Value is\",maxval,\"at\",index)\n printmat(mat[0:index[0]+1,0:index[1]+1], X, Y)\n print()\n print_formatted([local_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n\ntrial_6()",
" P E N A L T Y\n | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\nM -------------------------------------------------\n | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 0 |\nE -------------------------------------------------\n | 0 | 0 | 4 | 1 | 0 | 0 | 4 | 0 |\nA -------------------------------------------------\n | 0 | 1 | 0 | 4 | 3 | 0 | 1 | 1 |\nN -------------------------------------------------\n | 0 | 0 | 2 | 2 | 4 | 0 | 0 | 0 |\nL -------------------------------------------------\n | 0 | 0 | 0 | 0 | 0 | 10 | 5 | 0 |\nY -------------------------------------------------\n | 0 | 0 | 0 | 0 | 0 | 5 | 7 | 15 |\nValue is 15 at (7, 6)\n P E N A L T Y\n | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\nM -------------------------------------------------\n | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 0 |\nE -------------------------------------------------\n | 0 | 0 | 4 | 1 | 0 | 0 | 4 | 0 |\nA -------------------------------------------------\n | 0 | 1 | 0 | 4 | 3 | 0 | 1 | 1 |\nN -------------------------------------------------\n | 0 | 0 | 2 | 2 | 4 | 0 | 0 | 0 |\nL -------------------------------------------------\n | 0 | 0 | 0 | 0 | 0 | 10 | 5 | 0 |\nY -------------------------------------------------\n | 0 | 0 | 0 | 0 | 0 | 5 | 7 | 15 |\n\n\n 0 EANL-Y 5 \n E L Y \n 0 ENALTY 5 \n\n"
],
[
"def trial_7():\n [X, Y, *_] = open(\"local_alignment.txt\").read().split(\"\\n\")\n similarity = local_sequence_similarity()\n mat,maxval,index = similarity(X, Y, s=score_function(\"PAM250\", C=5))\n print(\"Value is\",maxval,\"at\",index)\n print_formatted([local_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n\ntrial_7()",
"Value is 1062 at (862, 881)\n\n 0 YQAGIIRQPPRGD-RGVSDRNYSQCGKQ-NQ-AQLDNNPTWTKYEIEWRVQI-LPPGAGVFEGDNGQNQCLCPNW--A-W-EQPCQW----GALHS-NEQ 99 \n Y R D C A L T W P G GV C W A E P W L N Q\n 0 Y-P-MSRKTAKSQFIEWCDW-F--CFNHWTNWAPLSIVRTSVAFAV-W-GHCWYPCG-GVCKTNRCKDD-FCGRWRKALFAEGPRDWKCCKNDLQNWNPQ 99 \n\n 100 YPNRIHLWAPMSKLHIKIEKSSYN-RNAQ-FPNRCMYECE-FPSY-REQVDSCHYENVQIAF-TIFSGAEQKRKFCSCHFWSNFIDQAVFSTGLI-PWCY 199 \n Y F C Y N AF I K Q ST P \n 100 YSQGTR--NTK-RMVATTNQTMIEWKQSHIFETW-LF-CHVIIEYNWSAF-W-MWMNRNEAFNSIIKSGYPKLLL-T-QY-P-L-SQG--STPIVKPL-I 199 \n\n 200 RRDDHSAFFMPNWNKQ--YKHPQLQFRVAGEGTQCRPFYTREMFTKVSAWRIAGRFAGPYERHHDAHLELWY-QHHKVRT-GQQLGIIWNNRDKTRNPCP 299 \n RRD F W P A C R GP E E WY RT GQQLGIIWNNR KTRNPCP\n 200 RRD-QGKFW-A-WAQMWWFREPT-NIPTA-D-Y-CHSW--WQ--SR-ADLQ-NDRDMGP-EADASFYVEFWYWVRCAARTYGQQLGIIWNNRLKTRNPCP 299 \n\n 300 FSA--YYNK--LP-WWK-I-NQ-N-AFYNCLQNIAHSTHDETHEFNPVKCIDWLQGTMV--P-----TECKKGFVHEKCECYRNPGPPLHDMYHQMEDIF 399 \n SA NK WWK AFY CLQN AH THD T EFNPVKCIDWLQG MV TECKK FVHEKCECYR EDIF\n 300 YSADGIQNKENYVFWWKNMCTKSHIAFYYCLQNVAHYTHDVTAEFNPVKCIDWLQGHMVLSSWFKYNTECKKLFVHEKCECYRM----FCGV---VEDIF 399 \n\n 400 GVRFDCLTGWKHLS------D---YNPC-QERRNINDFYIFAYEIAPAVKNLVLSPQPLADATKKCAFNYTPLDQSPVVIA-CK--WYIHQPI-CMLL-- 499 \n GVRF TGWKHLS YNP QERRNINDFYIF YEIAPAVKNLVLS QPL D TKKCAFNYTP W H I C \n 400 GVRFH--TGWKHLSTAKPVPHVCVYNPSVQERRNINDFYIF-YEIAPAVKNLVLSAQPLHDYTKKCAFNYTPITITRIISTRNQIIW-AHVVIACQFYSP 499 \n\n 500 --IVLIC-AMDKYNAHMIVIRTTEGQQPMHACRMTEGPGMCMKEPLVTFTLPAQWQWPNHEFKYVYMYVLNYHLSQYTYTDEGHAGGQHYSFNVAVDVGM 599 \n LI AMDKY A M V R TEG QPMHACR T GPGM KEPLVTFTL A WQWPNHEF YVYMY Q G V \n 500 HQMLLIELAMDKYCADMNVRRSTEGHQPMHACRSTFGPGMAAKEPLVTFTLVAFWQWPNHEFQYVYMYTED-KIIQIG-PHLSN-GCEMVEYCVDC-YAK 599 \n\n 600 AWGHNRCYCQPACYSQQETQTRTIDYEKWQYMKHQAFKWGLWFCEQ-ERHA--WFKGQNRCEMFTAKMTRMGADSNLDQYKLMLAQNYEEQWEQPIMECG 699 \n R Y A Y T Y A G C W C F YE M \n 600 -RPCYRAYSAEAQYWRMITEAEDYSYKTRNAIAATATVRGQ-YCHPFRWLGIVWM-AHHDC-FFANECGTICI-PQMAEMRPPETTPYEI--DIIFMMF- 699 \n\n 700 MSEIIEIDPPYRSELIFTFWPFCTYSPWQNLIKCRCNNVIEEMDQCVP-LTF-IGFGVKQAGGIQAWAFYKE--EWTSTYYLMCQCMKSDKAQYPYEIIL 799 \n E P T S W C C L F F Q G W E E M K AQ \n 700 WKE--HMSTTIL-DVVGMYRP-ATFSHWHDAHH-QCEPYLTPL-MCQSKLVFDAAFT--QVGVKGVW-YHTEKLELMAGFNHM-K-FKKEEAQ---QSCF 799 \n\n 800 FWMQ--P-MDTGE--QEPPQQNMWIFLPHSWFFDWCCNAPWSEICSSRHD--H---GQ-CQDAFYPCELFTVF 872 \n W Q P D W C I SRH H Q C Y ELF F \n 800 YWFQDCPDYDPPDAVRKTDEKHIRAHGEIWWLMRYYCMYHILHI-ASRHEWMHLRWDQACTNPGY--ELFE-F 872 \n\n"
]
],
[
[
"```\n1062\nYQAGIIRQPPRGD-RGVSDRNYSQCGKQ-NQ-AQLDNNPTWTKYEIEWRVQI-LPPGAGVFEGDNGQNQCLCPNW--A-W-EQPCQW----GALHS-NEQYPNRIHLWAPMSKLHIKIEKSSYN-RNAQ-FPNRCMYECE-FPSY-REQVDSCHYENVQIAF-TIFSGAEQKRKFCSCHFWSNFIDQAVFSTGLI-PWCYRRDDHSAFFMPNWNKQ--YKHPQLQFRVAGEGTQCRPFYTREMFTKVSAWRIAGRFAGPYERHHDAHLELWY-QHHKVRT-GQQLGIIWNNRDKTRNPCPFSAY-Y-NK--LP-WWK-I-NQ-N-AFYNCLQNIAHSTHDETHEFNPVKCIDWLQGTMV-P------TECKKGFVHEKCECYRNPGPPLHDMYHQMEDIFGVRFDCLTGWKHLS------D---YNPC-QERRNINDFYIFAYEIAPAVKNLVLSPQPLADATKKCAFNYTPLDQSPVVIACK---WYIHQPI-CMLL----IVLIC-AMDKYNAHMIVIRTTEGQQPMHACRMTEGPGMCMKEPLVTFTLPAQWQWPNHEFKYVYMYVLNYHLSQYTYTDEGHAGGQHYSFNVAVDVGMAWGHNRCYCQPACYSQQETQTRTIDYEKWQYMKHQAFKWGLWFCEQER-HA--WFKGQNRCEMFTAKMTRMGADSNLDQYKLMLAQNYEEQWEQPIMECGMSEIIEIDPPYRSELIFTFWPFCTYSPWQNLIKCRCNNVIEEMDQCVP-LTF-IGFGVKQAGGIQA-WAFYKE--EWTSTYYLMCQCMKSDKAQYPYEIILFWMQ--P-MDTGE--QEPPQQNMWIFLPHSWFFDWCCNAPWSEICSSRHD--H---GQ-CQDAFYPCELFTVF\nY-P-MSRKTAKSQFIEWCDW-F--CFNHWTNWAPLSIVRTSVAFAV-W-GHCWYPCG-GVCKTNRCKDD-FCGRWRKALFAEGPRDWKCCKNDLQNWNPQYSQGTR--NTK-RMVATTNQTMIEWKQSHIFETW-LF-CHVIIEYNWSAF-W-MWMNRNEAFNSIIKSGYPKLLL-T-QY-P-L-SQG--STPIVKPL-IRRD-QGKFW-A-WAQMWWFREPT-NIPTA-D-Y-CHSW--WQ--SR-ADLQ-NDRDMGP-EADASFYVEFWYWVRCAARTYGQQLGIIWNNRLKTRNPCPYSADGIQNKENYVFWWKNMCTKSHIAFYYCLQNVAHYTHDVTAEFNPVKCIDWLQGHMVLSSWFKYNTECKKLFVHEKCECYRM----FCGV---VEDIFGVRFH--TGWKHLSTAKPVPHVCVYNPSVQERRNINDFYIF-YEIAPAVKNLVLSAQPLHDYTKKCAFNYTPITITRIISTRNQIIW-AHVVIACQFYSPHQMLLIELAMDKYCADMNVRRSTEGHQPMHACRSTFGPGMAAKEPLVTFTLVAFWQWPNHEFQYVYMYTED-KIIQIG-PHLSN-GCEMVEYCVDC-YAK-RPCYRAYSAEAQYWRMITEAEDYSYKTRNAIAATATVRGQ-YCHPFRWLGIVWM-AHHDC-FFANECGTICI-PQMAEMRPPETTPYEI--DIIFMMF-WKE--HMSTTIL-DVVGMYRP-ATFSHWHDAHH-QCEPYLTPL-MCQSKLVFDAAFT--QVG-VKGVW-YHTEKLELMAGFNHM-K-FKKEEAQ---QSCFYWFQDCPDYDPPDAVRKTDEKHIRAHGEIWWLMRYYCMYHILHI-ASRHEWMHLRWDQACTNPGY--ELFE-F\n```",
"_____no_output_____"
]
],
[
[
"# Expected Number of High-Scoring Alignments\nfrom math import exp, log\n\nK = 0.050\nlamda = 0.25\nS = 35\nm = 250\nn = 1000000000\nE = K*m*n*exp(-lamda*S)\nprint(f\"E value associated with S>={S} =\",E)\n\n# The Number of high-scoring alignments is poisson distributed with expected value E.\n# probability of finding 0 alignments with score >= S is e^-E\n\np_no_alignments = exp(-E)\nprint(f\"probability of no alignments with score >= {S} is\",p_no_alignments)\nprint(\"probability of finding at least one alignment (p-value) is\",1-p_no_alignments)",
"E value associated with S>=35 = 1980766.5639468906\nprobability of no alignments with score >= 35 is 0.0\nprobability of finding at least one alignment (p-value) is 1.0\n"
],
[
"# Normalized Scores\n\ns_prime = (lamda*S - log(K)) / log(2)\nprint(\"Normalized score S' =\",s_prime)",
"Normalized score S' = 16.94550970266579\n"
],
[
"N = n*m\nE = N/2**s_prime\nprint(\"E-value\",E)",
"E-value 1980766.5639468913\n"
]
],
[
[
"# Question 3\nAssume the background frequencies for all four nucleotides are equal, and consider the DNA substitution matrix which give all matches a score +1 and all mismatches a score -1.\n\nWhat is the expected score for this matri?Is this a valid matrix for local alignment? Why?",
"_____no_output_____"
]
],
[
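[
"# A sketch answering Question 3, assuming uniform background frequencies p = 1/4 per nucleotide.\n# Expected score = sum_{i,j} p_i * p_j * s(i, j): 4 match pairs at +1 and 12 mismatch pairs at -1,\n# each weighted 1/16, giving (4 - 12)/16 = -0.5.\n# A usable local-alignment matrix needs a negative expected score plus at least one positive entry\n# (otherwise alignments grow without bound), so this +1/-1 matrix is valid for local alignment.\np = 0.25\nexpected_score = sum(p * p * (1 if i == j else -1) for i in range(4) for j in range(4))\nprint(expected_score)",
"_____no_output_____"
],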
[
"from genomics import *\n\nX=add_codon(10)\nY=add_codon(1000)\nX, Y = \"MEANLY\", \"PENALTY\"",
"_____no_output_____"
],
[
"similarity = local_sequence_similarity()\nmat,maxval,index = similarity(X, Y, s=score_function(\"PAM250\", C=5))\n# printmat(mat, X, Y)\nprint(\"Value is\",maxval,\"at\",index)\n# printmat(mat[0:index[0]+1,0:index[1]+1], X, Y)\nprint(X)\nprint(Y)\nprint_formatted([local_traceback(mat[0:index[0]+1, 0:index[1]+1], X, Y)])\n",
"Value is 15 at (7, 6)\nMEANLY\nPENALTY\n\n 0 EANL-Y 5 \n E L Y \n 0 ENALTY 5 \n\n"
],
[
"print(Y)",
"TGTTGTGTCTTAGCATGATTGACGCAATGTGTACAAATACCAACGTGTCGAGCGGTTGTGACTCTCCGCCGTAACACTCAGAGTCACTGTATTCATTAATGCGGGCCACTTTAACAACCGATTCGATCCGTCCCTTTCCAGTTTCTCGGTCCCGTACGTTGGACCACACCTTGTACTTAAGGCCATACGTTATCAGGTACAGGAATGCGGTTAATGCTGCGGACAAAAAGACTCTATAAAGATAGGAAGAGATATCCCCCAGGCCCACACGGCAGCTGAGGTATTTTCTGTTGAACGTTCCAGTCGACATACATGCGCGCTCTGCGAAACGCAGACAATCTCCAACAGCCCTGTAGTAGCGAGAGACAAGCAACTCCCGACGGACACCATTGATAAAGCACGCATCAACACCCCGGCTCCAGAGAGGTGGATTGTTGGACCTAAATACAGTCTATGACCGTTCTTGGTGTGCGGCCACTTAATAACCTATGCGACTTATCGCGGCAACCTGCTTATTATCAGATTACGGCCCGCCCAGGACGTATGCGGACGTGCCGCGCGGACGTGCTATCCCCACATCACGATGCACACCTGGAACAATCTACCTGTTACAGAACGACAATACGTATGTCCACGGAACTTACCCGAGCGGATGATGTCAACCACGCGGATAATTAGAGTCGTGCAAGACTTGGGAATGGTCACTCGGAGTACCTGGTCTTCTCGTTTACTTGCGTCTAAAATTCCGTGGTGAAAGAACCACATGAGGTGAACCCGCGATTACTAGTGCTGCAGTACAAACGCTGTATATTGTACTTATGTCTCACCGAATTGAATGATAGTAAACACCGATGCAGACGGTATATCGTATAGTAGGCTGTTCTAACAGCGAATTCGTCGGACTATGAACACTTCGATGCGTGTTTCACACACAGCCCCGTGTGGCTCTCGAGTCTTGACAAACAGCGGGGCGTCATCGGAACCACCCCTGCGCAGGGCT\n"
],
[
"print(mat)",
"[[('h', 0) ('', 0) ('', 0) ... ('', 0) ('', 0) ('', 0)]\n [('', 0) ('d', 0) ('d', 0) ... ('d', 0) ('d', 4) ('d', 0)]\n [('', 0) ('d', 6) ('h', 1) ... ('d', 6) ('h', 1) ('d', 2)]\n ...\n [('', 0) ('', 0) ('d', 5) ... ('d', 13) ('d', 10) ('d', 12)]\n [('', 0) ('d', 6) ('h', 1) ... ('d', 16) ('d', 13) ('hd', 8)]\n [('', 0) ('v', 1) ('d', 5) ... ('v', 11) ('d', 16) ('d', 12)]]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
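"code",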
"code",
"code",
"code",
"code"
]
] |
4a11f252824ecfb3555ac204b88b5e35f2596282
| 7,837 |
ipynb
|
Jupyter Notebook
|
hw/hw3-3.ipynb
|
Bertha-ding/20MA573-yuning-ding
|
21b8fb9596b4d72eff972643602bb8e55f26453c
|
[
"MIT"
] | null | null | null |
hw/hw3-3.ipynb
|
Bertha-ding/20MA573-yuning-ding
|
21b8fb9596b4d72eff972643602bb8e55f26453c
|
[
"MIT"
] | null | null | null |
hw/hw3-3.ipynb
|
Bertha-ding/20MA573-yuning-ding
|
21b8fb9596b4d72eff972643602bb8e55f26453c
|
[
"MIT"
] | null | null | null | 30.375969 | 238 | 0.432053 |
[
[
[
"<a href=\"https://colab.research.google.com/github/Bertha-ding/20MA573-yuning-ding/blob/master/hw/hw3-3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"- Prove the following facts: Supose $f$ is a function satisfying\n - $f(0) = f_{min},$ and $\\lim_{x\\to \\infty}f(x) = f_{max}$\n - $f$ is continuous\n - $f$ is strictly increasing\n \n then, for any $p\\in (f_{min}, f_{max})$, \n - there exists unique $\\hat \\sigma$, such that $f(\\hat \\sigma) = p$ and \n $$\\hat \\sigma = \\arg\\min_{\\sigma\\in (0,\\infty)} | f(\\sigma) - p|.$$",
"_____no_output_____"
],
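[
"*A possible proof sketch:* for $p\\in (f_{min}, f_{max})$, continuity of $f$ together with $f(0)=f_{min}<p$ and $\\lim_{x\\to \\infty}f(x)=f_{max}>p$ gives some $b$ with $f(b)>p$, so the intermediate value theorem yields $\\hat \\sigma \\in (0, b)$ with $f(\\hat \\sigma)=p$; strict monotonicity makes $\\hat \\sigma$ unique. Since $|f(\\sigma)-p|\\ge 0$ with equality exactly when $f(\\sigma)=p$, the unique root $\\hat \\sigma$ is also $\\arg\\min_{\\sigma\\in (0,\\infty)}|f(\\sigma)-p|$.",
"_____no_output_____"
],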
[
"- Now we denote by $f(\\sigma)$ the BSM put price with the following parameters:\n - vol_ratio = $\\sigma$; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.\n \n Answer the following questions:\n - What is $f_{min}$ and $f_{max}$?\n - Is $f$ strictly increasing on $(0,\\infty)$? Justify your answer.\n - If the market put price is $10$, then what's the implied volatility?",
"_____no_output_____"
],
[
"- Find its implied volatility with the following parameters:\n - BSM call price is 10.; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.\n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport scipy.stats as ss\n\n\nclass VanillaOption:\n def __init__(\n self,\n otype = 1, # 1: 'call'\n # -1: 'put'\n strike = 110.,\n maturity = 1.,\n market_price = 10.):\n self.otype = otype\n self.strike = strike\n self.maturity = maturity\n self.market_price = market_price #this will be used for calibration\n \n \n def payoff(self, s): #s: excercise price\n otype = self.otype\n k = self.strike\n maturity = self.maturity\n return max([0, (s - k)*otype])\n\n\n\n\nclass Gbm:\n def __init__(self,\n init_state = 100.,\n drift_ratio = .0475,\n vol_ratio = .2\n ):\n self.init_state = init_state\n self.drift_ratio = drift_ratio\n self.vol_ratio = vol_ratio\n\n\n\n\n\ndef bsm_price(self, vanilla_option):\n s0 = self.init_state\n sigma = self.vol_ratio\n r = self.drift_ratio\n \n otype = vanilla_option.otype\n k = vanilla_option.strike\n maturity = vanilla_option.maturity\n \n d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2) \n * maturity) / (sigma * np.sqrt(maturity))\n d2 = d1 - sigma * np.sqrt(maturity)\n \n return (otype * s0 * ss.norm.cdf(otype * d1) #line break needs parenthesis\n - otype * np.exp(-r * maturity) * k * ss.norm.cdf(otype * d2))\n\nGbm.bsm_price = bsm_price",
"_____no_output_____"
],
[
"gbm1 = Gbm(\n init_state = 100., \n drift_ratio = .0475,\n vol_ratio = .2)\noption1 = VanillaOption(\n otype = -1,\n strike = 110., \n maturity = 1.\n) ",
"_____no_output_____"
],
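[
"# A sketch of the limiting put values (assumption: standard BSM limits as vol -> 0 and vol -> infinity).\n# As sigma -> 0 the put price tends to max(K*exp(-r*T) - S0, 0); as sigma -> infinity it tends to K*exp(-r*T).\n# A market price of 10 lies strictly between these bounds, so an implied volatility exists.\ns0, k, r, T = 100., 110., .0475, 1.\nf_min = max(k*np.exp(-r*T) - s0, 0)  # about 4.90\nf_max = k*np.exp(-r*T)               # about 104.90\nprint(f_min, f_max)",
"_____no_output_____"
],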
[
"def error_function(vol, gbm, option):\n gbm.vol_ratio = vol\n return abs(option.market_price - gbm.bsm_price(option))\n\nimport scipy.optimize as so\ndef implied_volatility(gbm, option):\n init_vol = .1 #initial guess\n return so.fmin(error_function, init_vol, \n args = (gbm, option), disp = 0)[0]\n\n\noption1.market_price = 10\n\nprint('>>>>>>>>implied volatility is ' + \n str(implied_volatility(gbm1, option1)))\n",
">>>>>>>>implied volatility is 0.17867187500000026\n"
]
],
[
[
"Find its implied volatility of call option with same parameters.",
"_____no_output_____"
]
],
[
[
"gbm1 = Gbm(\n init_state = 100., \n drift_ratio = .0475,\n vol_ratio = .2)\noption1 = VanillaOption(\n otype = 1,\n strike = 110., \n maturity = 1.\n) \n\nprint('>>>>>>>>implied volatility is ' + \n str(implied_volatility(gbm1, option1)))",
">>>>>>>>implied volatility is 0.3020312500000007\n"
]
],
[
[
"The first proof and answers of questions are attached in hw3-4.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a120206c5d762bcae3b61cf8551a05090b8d7a0
| 7,469 |
ipynb
|
Jupyter Notebook
|
class-2020-04-19/MapReduce.ipynb
|
spu-bigdataanalytics-201/class-materials
|
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
|
[
"MIT"
] | null | null | null |
class-2020-04-19/MapReduce.ipynb
|
spu-bigdataanalytics-201/class-materials
|
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
|
[
"MIT"
] | 3 |
2021-06-08T21:12:36.000Z
|
2022-03-12T00:22:27.000Z
|
class-2020-04-19/MapReduce.ipynb
|
spu-bigdataanalytics-201/class-materials
|
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
|
[
"MIT"
] | 1 |
2020-04-24T23:48:50.000Z
|
2020-04-24T23:48:50.000Z
| 21.903226 | 183 | 0.44437 |
[
[
[
"## Command Line Commands from Jupyter",
"_____no_output_____"
]
],
[
[
"!ls",
"\u001b[31mREADME.md\u001b[m\u001b[m \u001b[31mpg2701.txt.map\u001b[m\u001b[m \u001b[31msimple.py\u001b[m\u001b[m\r\nUntitled.ipynb \u001b[31mpg2701.txt.map.sorted\u001b[m\u001b[m \u001b[31msingle_machine.py\u001b[m\u001b[m\r\n\u001b[31mmapping.py\u001b[m\u001b[m \u001b[31mreducing.py\u001b[m\u001b[m\r\n\u001b[31mpg2701.txt\u001b[m\u001b[m \u001b[31mshuffling.py\u001b[m\u001b[m\r\n"
],
[
"!pwd",
"/Users/owl/Projects/spu-bigdataanalytics-201/class-materials/class-2020-04-19\r\n"
],
[
"!python3 --version",
"Python 3.7.6\r\n"
],
[
"!python3 simple.py",
"<map object at 0x10800cd10>\r\n[('Bear', 1), ('Bear', 1), ('Car', 1), ('Car', 1), ('Car', 1), ('Deer', 1), ('Deer', 1), ('River', 1), ('River', 1)]\r\n[('Bear', 2), ('Car', 3), ('Deer', 2), ('River', 2)]\r\n"
]
],
[
[
"## Map Reduce",
"_____no_output_____"
]
],
[
[
"from functools import reduce\nfrom itertools import groupby",
"_____no_output_____"
],
[
"# ========= Mapping ==========\n\nwords = ['Deer', 'Bear', 'River', 'Car',\n 'Car', 'River', 'Deer', 'Car', 'Bear']\n\nmapping = list(map(lambda x: (x, 1), words))\nprint(mapping)",
"[('Deer', 1), ('Bear', 1), ('River', 1), ('Car', 1), ('Car', 1), ('River', 1), ('Deer', 1), ('Car', 1), ('Bear', 1)]\n"
],
[
"# ========= Shuffling ==========\n\nsorted_mapping = sorted(mapping)\nprint(sorted_mapping)",
"[('Bear', 1), ('Bear', 1), ('Car', 1), ('Car', 1), ('Car', 1), ('Deer', 1), ('Deer', 1), ('River', 1), ('River', 1)]\n"
],
[
"# ========= Reducing ==========\n\ngrouper = groupby(sorted_mapping, lambda p: p[0])\n\nfinal = map(\n lambda l: (l[0], reduce(lambda x, y: x + y, map(lambda p: p[1], l[1]))), \n grouper # <- [('Bear', 1), ('Bear', 1)] ----> l[0] = 'Bear' and l[1] = 1\n)\nprint(list(final))",
"[('Bear', 2), ('Car', 3), ('Deer', 2), ('River', 2)]\n"
],
[
"# ------ breakdown of 'Reducing' Phase ---------\ngrouper = groupby(sorted_mapping, lambda p: p[0])\n\nresults = []\n\n# loop through each group\nfor group in grouper:\n # key that is in the group\n key = group[0] \n print(key)\n \n # list of groups for this key\n key_value_pairs = list(group[1]) \n print(key_value_pairs)\n \n # only values for current key\n values_for_this_key = list(map(lambda p: p[1], key_value_pairs)) \n print(values_for_this_key)\n \n # final part of map 'reduce', for this key\n count_of_current_key = reduce(lambda x, y: x + y, values_for_this_key) \n print(count_of_current_key)\n \n # put it to results\n results.append((key, count_of_current_key))\n #break",
"Bear\n[('Bear', 1), ('Bear', 1)]\n[1, 1]\n2\nCar\n[('Car', 1), ('Car', 1), ('Car', 1)]\n[1, 1, 1]\n3\nDeer\n[('Deer', 1), ('Deer', 1)]\n[1, 1]\n2\nRiver\n[('River', 1), ('River', 1)]\n[1, 1]\n2\n"
],
[
"results",
"_____no_output_____"
],
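[
"# For comparison, a brief sketch: collections.Counter fuses the map/shuffle/reduce steps\n# into a single call on one machine, reproducing the word counts above.\nfrom collections import Counter\nsorted(Counter(words).items())",
"_____no_output_____"
],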
[
"# reduce(lambda x, y: x + y, [1, 2, 3, 4, 8])",
"_____no_output_____"
]
],
[
[
"## Single Machine",
"_____no_output_____"
]
],
[
[
"%%time\n\n! python single_machine.py",
"max: the = 14620\n[[\"\", 3235], [\"funereal\", 1], [\"unscientific\", 1], [\"divinely\", 2], [\"cussed\", 1], [\"foul\", 11], [\"four\", 74], [\"gag\", 2], [\"prefix\", 1], [\"woods\", 10]]\nCPU times: user 13.8 ms, sys: 10.3 ms, total: 24.1 ms\nWall time: 718 ms\n"
],
[
"! python reducing.py",
"max: the = 14620\r\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a1215f32ebf4f9c51c2ed71d37ba29d8a664c84
| 329,144 |
ipynb
|
Jupyter Notebook
|
CH1/Ch1.2-LM.ipynb
|
nevermind78/Proba_stat_4_LM
|
49a54c9e8b77eb2dfe688591c38014c0c8945489
|
[
"Unlicense"
] | 7 |
2021-10-04T09:12:21.000Z
|
2022-01-01T14:45:09.000Z
|
CH1/Ch1.2-LM.ipynb
|
SidiahmedHABIB/Proba_stat_4_LM
|
712ec4a3078136405fb7c32c61f3d0eebd6ca899
|
[
"Unlicense"
] | null | null | null |
CH1/Ch1.2-LM.ipynb
|
SidiahmedHABIB/Proba_stat_4_LM
|
712ec4a3078136405fb7c32c61f3d0eebd6ca899
|
[
"Unlicense"
] | 12 |
2020-10-30T23:35:53.000Z
|
2021-12-08T23:35:41.000Z
| 33.500662 | 525 | 0.59635 |
[
[
[
"# Chapter 2: Conditional probability\n\n\n----",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"## Simulating the frequentist interpretation \n\nRecall that the frequentist interpretation of conditional probability based on a large number `n` of repetitions of an experiment is $P(A|B) ≈ n_{AB}/n_{B}$, where $n_{AB}$ is the number of times that $A \\cap B$ occurs and $n_{B}$ is the number of times that $B$ occurs. Let's try this out by simulation, and verify the results of Example 2.2.5. So let's use [`numpy.random.choice`](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.choice.html) to simulate `n` families, each with two children.",
"_____no_output_____"
]
],
[
[
"np.random.seed(34)\n\nn = 10**5\nchild1 = np.random.choice([1,2], n, replace=True) \nchild2 = np.random.choice([1,2], n, replace=True) \n\nprint('child1:\\n{}\\n'.format(child1))\n\nprint('child2:\\n{}\\n'.format(child2))",
"child1:\n[2 1 1 ... 1 2 1]\n\nchild2:\n[2 2 2 ... 2 2 1]\n\n"
]
],
[
[
"Here `child1` is a NumPy `array` of length `n`, where each element is a 1 or a 2. Letting 1 stand for \"girl\" and 2 stand for \"boy\", this `array` represents the gender of the elder child in each of the `n` families. Similarly, `child2` represents the gender of the younger child in each family. \n\nAlternatively, we could have used",
"_____no_output_____"
]
],
[
[
"np.random.choice([\"girl\", \"boy\"], n, replace=True)",
"_____no_output_____"
]
],
[
[
"but it is more convenient working with numerical values.\n\nLet $A$ be the event that both children are girls and $B$ the event that the elder is a girl. Following the frequentist interpretation, we count the number of repetitions where $B$ occurred and name it `n_b`, and we also count the number of repetitions where $A \\cap B$ occurred and name it `n_ab`. Finally, we divide `n_ab` by ` n_b` to approximate $P(A|B)$.",
"_____no_output_____"
]
],
[
[
"n_b = np.sum(child1==1)\nn_ab = np.sum((child1==1) & (child2==1))\n\nprint('P(both girls | elder is girl) = {:0.2F}'.format(n_ab / n_b))",
"P(both girls | elder is girl) = 0.50\n"
]
],
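[
[
"This ratio comes up often enough that it can be worth a small helper; a minimal sketch (the name `cond_prob` is just for illustration):",
"_____no_output_____"
],
[
"def cond_prob(a, b):\n    \"\"\" Estimate P(A|B) as n_AB / n_B from boolean arrays of simulated outcomes. \"\"\"\n    return np.sum(a & b) / np.sum(b)\n\nprint('P(both girls | elder is girl) = {:0.2F}'.format(cond_prob(child2==1, child1==1)))",
"_____no_output_____"
]
],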
[
[
"The ampersand `&` is an elementwise $AND$, so `n_ab` is the number of families where both the first child and the second child are girls. When we ran this code, we got 0.50, confirming our answer $P(\\text{both girls | elder is a girl}) = 1/2$. \n\nNow let $A$ be the event that both children are girls and $B$ the event that at least one of the children is a girl. Then $A \\cap B$ is the same, but `n_b` needs to count the number of families where at least one child is a girl. This is accomplished with the elementwise $OR$ operator `|` (this is not a conditioning bar; it is an inclusive $OR$, returning `True` if at least one element is `True`).",
"_____no_output_____"
]
],
[
[
"n_b = np.sum((child1==1) | (child2==2))\nn_ab = np.sum((child1==1) & (child2==1))\n\nprint('P(both girls | at least one girl) = {:0.2F}'.format(n_ab / n_b))",
"P(both girls | at least one girl) = 0.33\n"
]
],
[
[
"For us, the result was 0.33, confirming that $P(\\text{both girls | at least one girl}) = 1/3$.",
"_____no_output_____"
],
[
"## Monty Hall simulation\n\nMany long, bitter debates about the Monty Hall problem could have been averted by trying it out with a simulation. To study how well the never-switch strategy performs, let's generate 10<sup>5</sup> runs of the Monty Hall game. To simplify notation, assume the contestant always chooses door 1. Then we can generate a vector specifying which door has the car for each repetition:\n",
"_____no_output_____"
]
],
[
[
"np.random.seed(55)\n\nn = 10**5\ncardoor = np.random.choice([1,2,3] , n, replace=True)\n\nprint('The never-switch strategy has success rate {:.3F}'.format(np.sum(cardoor==1) / n))",
"The never-switch strategy has success rate 0.331\n"
]
],
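[
[
"For comparison, the always-switch strategy can be scored from the same `cardoor` vector, since switching wins exactly when the car is *not* behind the contestant's door 1:",
"_____no_output_____"
],
[
"# Switching wins if and only if door 1 does NOT hide the car,\n# so Monty's door openings never need to be simulated for this strategy either.\nprint('The always-switch strategy has success rate {:.3F}'.format(np.sum(cardoor!=1) / n))",
"_____no_output_____"
]
],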
[
[
"At this point we could generate the vector specifying which doors Monty opens, but that's unnecessary since the never-switch strategy succeeds if and only if door 1 has the car! So the fraction of times when the never-switch strategy succeeds is `numpy.sum(cardoor==1)/n`, which was 0.331in our simulation. This is very close to 1/3.\n\nWhat if we want to play the Monty Hall game interactively? We can do this by programming a Python class that would let us play interactively or let us run a simulation across many trials.",
"_____no_output_____"
]
],
[
[
"class Monty():\n \n def __init__(self):\n \"\"\" Object creation function. \"\"\"\n self.state = 0\n self.doors = np.array([1, 2, 3])\n self.prepare_game()\n\n def get_success_rate(self):\n \"\"\" Return the rate of success in this series of plays: num. wins / num. plays. \"\"\"\n if self.num_plays > 0:\n return 1.0*self.num_wins / self.num_plays\n else:\n return 0.0\n\n def prepare_game(self):\n \"\"\" Prepare initial values for game play, and randonly choose the door with the car. \"\"\"\n self.num_plays = 0\n self.num_wins = 0\n self.cardoor = np.random.choice(self.doors)\n self.players_choice = None\n self.montys_choice = None\n \n def choose_door(self, door):\n \"\"\" Player chooses a door at state 0. Monty will choose a remaining door to reveal a goat. \"\"\"\n self.state = 1\n self.players_choice = door\n self.montys_choice = np.random.choice(self.doors[(self.doors!=self.players_choice) & (self.doors!=self.cardoor)])\n \n def switch_door(self, do_switch):\n \"\"\" Player has the option to switch from the door she has chosen to the remaining unopened door. \n \n If the door the player has selected is the same as the cardoor, then num. of wins is incremented.\n \n Finally, number of plays will be incremented.\n \"\"\"\n self.state = 2\n if do_switch:\n self.players_choice = self.doors[(self.doors!=self.players_choice) & (self.doors!=self.montys_choice)][0]\n if self.players_choice == self.cardoor:\n self.num_wins += 1\n self.num_plays += 1\n \n def continue_play(self):\n \"\"\" Player opts to continue playing in this series. \n \n The game is returned to state 0, but the counters for num. wins and num. plays\n will be kept intact and running.\n \n A new cardoor is randomly chosen.\n \"\"\"\n self.state = 0\n self.cardoor = np.random.choice(self.doors)\n self.players_choice = None\n self.montys_choice = None\n \n def reset(self):\n \"\"\" The entire game state is returned to its initial state. \n \n All counters and variable holdling state are re-initialized.\n \"\"\"\n self.state = 0\n self.prepare_game()",
"_____no_output_____"
]
],
[
[
"In brief:\n* The `Monty` class represents a simple state model for the game.\n* When an instance of the `Monty` game is created, game state-holding variables are initialized and a `cardoor` randomly chosen.\n* After the player initially picks a door, `Monty` will choose a remaining door that does not have car behind it.\n* The player can then choose to switch to the other, remaining unopened door, or stick with her initial choice.\n* `Monty` will then see if the player wins or not, and updates the state-holding variables for num. wins and num. plays.\n* The player can continue playing, or stop and reset the game to its original state.\n\n### As a short simulation program\n\nHere is an example showing how to use the `Monty` class above to run a simulation to see how often the switching strategy succeeds.",
"_____no_output_____"
]
],
[
[
"np.random.seed(89)\n\ntrials = 10**5\n\ngame = Monty()\nfor _ in range(trials):\n game.choose_door(np.random.choice([1,2,3]))\n game.switch_door(True)\n game.continue_play()\n\nprint('In {} trials, the switching strategy won {} times.'.format(game.num_plays, game.num_wins))\nprint('Success rate is {:.3f}'.format(game.get_success_rate()))",
"In 100000 trials, the switching strategy won 66730 times.\nSuccess rate is 0.667\n"
]
],
[
[
"### As an interactive widget in this Jupyter notebook\n\nOptionally, the `Monty` Python class above can also be used as an engine to power an interactive widget that lets you play the three-door game _in the browser_ using [`ipywidgets` ](https://ipywidgets.readthedocs.io/en/stable/user_guide.html).\n\nTo run the interactive widget, make sure you have the `ipywidgets` package installed (v7.4.2 or greater).\n\nTo install with the `conda` package manager, execute the following command:\n\n conda install ipywidgets\n\nTo install with the `pip` package manager, execute the following command:\n\n pip install ipywidgets",
"_____no_output_____"
]
],
[
[
"from ipywidgets import Box, Button, ButtonStyle, FloatText, GridBox, IntText, Label, Layout, HBox\nfrom IPython.display import display",
"_____no_output_____"
]
],
[
[
"The doors in the game are represented by [`ipywidgets.Button`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Button).",
"_____no_output_____"
]
],
[
[
"door1 = Button(description='Door 1', layout=Layout(flex='1 1 auto', width='auto'))\ndoor2 = Button(description='Door 2', layout=door1.layout)\ndoor3 = Button(description='Door 3', layout=door1.layout)\ndoors_arr = [door1, door2, door3]\ndoors = Box(doors_arr, layout=Layout(width='auto', grid_area='doors'))",
"_____no_output_____"
]
],
[
[
"State-holding variables in the `Monty` object are displayed using [`ipywidgets.IntText`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#IntText) (for the `num_wins` and `num_plays`); and [`ipywidgets.FloatText`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#FloatText) (for the success rate).",
"_____no_output_____"
]
],
[
[
"label1 = Label(value='number of plays', layout=Layout(width='auto', grid_area='label1'))\ntext1 = IntText(disabled=True, layout=Layout(width='auto', grid_area='text1'))\n\nlabel2 = Label(value='number of wins', layout=Layout(width='auto', grid_area='label2'))\ntext2 = IntText(disabled=True, layout=Layout(width='auto', grid_area='text2'))\n\nlabel3 = Label(value='success rate', layout=Layout(width='auto', grid_area='label3'))\ntext3 = FloatText(disabled=True, layout=Layout(width='auto', grid_area='text3'))",
"_____no_output_____"
]
],
[
[
"[`ipywidgets.Label`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html#Label) is used to display the title and descriptive text in the game widget.",
"_____no_output_____"
]
],
[
[
"banner = Box([Label(value='Interactive widget: Monty Hall problem', \n layout=Layout(width='50%'))], \n layout=Layout(width='auto', justify_content='center', grid_area='banner'))\n\nstatus = Label(value='Pick a door...', layout=Layout(width='auto', grid_area='status'))",
"_____no_output_____"
]
],
[
[
"Buttons allowing for further user actions are located at the bottom of the widget.\n\n* The `reveal` button is used to show what's behind all of the doors after the player makes her final choice.\n* After the player completes a round of play, she can click the `continue` button to keep counting game state (num. wins and num. plays)\n* The `reset` button lets the player return the game to its original state after completing a round of play.",
"_____no_output_____"
]
],
[
[
"button_layout = Layout(flex='1 1 auto', width='auto')\nreveal = Button(description='reveal', tooltip='open selected door', layout=button_layout, disabled=True)\ncontin = Button(description='continue', tooltip='continue play', layout=button_layout, disabled=True)\nreset = Button(description='reset', tooltip='reset game', layout=button_layout, disabled=True)\nactions = Box([reveal, contin, reset], layout=Layout(width='auto', grid_area='actions'))",
"_____no_output_____"
]
],
[
[
"[`ipywidgets.GridBox`](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Styling.html#The-Grid-layout) helps us lay out the user interface elements for the `Monty` game widget.",
"_____no_output_____"
]
],
[
[
"ui = GridBox(children=[banner, doors, label1, text1, label2, text2, label3, text3, status, actions],\n layout=Layout(\n width='50%',\n grid_template_rows='auto auto auto auto auto auto auto',\n grid_template_columns='25% 25% 25% 25%',\n grid_template_areas='''\n \"banner banner banner banner\"\n \"doors doors doors doors\"\n \"label1 label1 text1 text1\"\n \"label2 label2 text2 text2\"\n \"label3 label3 text3 text3\"\n \"status status status status\"\n \". . actions actions\"\n '''\n )\n)",
"_____no_output_____"
]
],
[
[
"We lastly create some functions to connect the widget to the `Monty` game object. These functions adapt player action [events](https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Events.html#Example) to state changes in the `Monty` object, and then update the widget user interface accordingly.",
"_____no_output_____"
]
],
[
[
"uigame = Monty()\n\ndef reset_ui(disable_reset=True):\n \"\"\" Return widget elements to their initial state.\n Do not disable the reset button in the case of continue.\n \"\"\"\n for i,d in enumerate(doors_arr):\n d.description = 'Door {}'.format(i+1)\n d.disabled = False\n d.icon = ''\n d.button_style = ''\n \n reveal.disabled = True\n contin.disabled = True\n reset.disabled = disable_reset\n \ndef update_status(new_status):\n \"\"\" Update the widget text fields for displaying present game status. \"\"\"\n text1.value = uigame.num_plays\n text2.value = uigame.num_wins\n text3.value = uigame.get_success_rate()\n status.value = new_status\n \ndef update_ui_reveal():\n \"\"\" Helper function to update the widget after the player clicks the reveal button. \"\"\"\n if uigame.players_choice == uigame.cardoor:\n new_status = 'You win! Continue playing?'\n else:\n new_status = 'Sorry, you lose. Continue playing?'\n\n for i,d in enumerate(doors_arr):\n d.disabled = True\n if uigame.cardoor == i+1:\n d.description = 'car'\n else:\n d.description = 'goat'\n if uigame.players_choice == i+1:\n if uigame.players_choice == uigame.cardoor:\n d.button_style = 'success' \n d.icon = 'check'\n else:\n d.button_style = 'danger' \n d.icon = 'times'\n update_status(new_status)\n reveal.disabled = True\n contin.disabled = False\n reset.disabled = False \n \ndef on_button_clicked(b): \n \"\"\" Event-handling function that maps button click events in the widget \n to corresponding functions in Monty, and updates the user interface\n according to the present game state.\n \"\"\"\n if uigame.state == 0:\n if b.description in ['Door 1', 'Door 2', 'Door 3']:\n c = int(b.description.split()[1])\n uigame.choose_door(c)\n\n b.disabled = True\n b.button_style = 'info'\n\n m = doors_arr[uigame.montys_choice-1]\n m.disabled = True\n m.description = 'goat'\n\n unopened = uigame.doors[(uigame.doors != uigame.players_choice) &\n (uigame.doors != uigame.montys_choice)][0]\n status.value = 'Monty reveals a goat behind Door {}. Click Door {} to switch, or \\'reveal\\' Door {}.' \\\n .format(uigame.montys_choice, unopened, uigame.players_choice)\n\n reveal.disabled = False\n reset.disabled = False\n elif b.description == 'reset':\n uigame.reset()\n reset_ui()\n update_status('Pick a door...') \n elif uigame.state == 1:\n if b.description in ['Door 1', 'Door 2', 'Door 3']:\n prev_choice = uigame.players_choice\n uigame.switch_door(True)\n \n pb = doors_arr[prev_choice-1]\n pb.icon = ''\n pb.button_style = ''\n \n b.disabled = True\n b.button_style = 'info'\n \n status.value = 'Now click \\'reveal\\' to see what\\'s behind Door {}.'.format(uigame.players_choice)\n \n elif b.description == 'reset':\n uigame.reset()\n reset_ui()\n update_status('Pick a door...')\n elif b.description == 'reveal':\n uigame.switch_door(False)\n update_ui_reveal()\n elif uigame.state == 2:\n if b.description == 'reveal':\n update_ui_reveal() \n else: \n if b.description == 'continue':\n uigame.continue_play()\n reset_ui(False)\n update_status('Pick a door once more...')\n elif b.description == 'reset':\n uigame.reset()\n reset_ui()\n update_status('Pick a door...')\n\n# hook up all buttons to our event-handling function\ndoor1.on_click(on_button_clicked)\ndoor2.on_click(on_button_clicked)\ndoor3.on_click(on_button_clicked)\nreveal.on_click(on_button_clicked)\ncontin.on_click(on_button_clicked)\nreset.on_click(on_button_clicked)\n\ndisplay(ui)",
"_____no_output_____"
]
],
[
[
"How to play:\n* Click a door to select.\n* Monty will select a remaining door and open to reveal a goat.\n* Click the `reveal` button to open your selected door.\n* Or click the remaining unopened Door button to switch your door choice, and then click `reveal`.\n* Click the `continue` button to keep playing.\n* You may click the `reset` button at any time to return the game back to its initial state.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a121a4586b65555b4dd86d3b8c9c410a3b77bdc
| 57,199 |
ipynb
|
Jupyter Notebook
|
Neural Networks and Deep Learning/Building your Deep Neural Network - Step by Step.ipynb
|
venf2k/deep-learning-coursera
|
1beef8d17e804448c302b21a0b19833a7572f379
|
[
"MIT"
] | 11 |
2017-10-23T06:23:30.000Z
|
2022-02-28T16:45:17.000Z
|
Neural Networks and Deep Learning/Building your Deep Neural Network - Step by Step.ipynb
|
venf2k/deep-learning-coursera
|
1beef8d17e804448c302b21a0b19833a7572f379
|
[
"MIT"
] | 10 |
2020-11-18T21:17:43.000Z
|
2022-03-11T23:23:30.000Z
|
Neural Networks and Deep Learning/Building your Deep Neural Network - Step by Step.ipynb
|
venf2k/deep-learning-coursera
|
1beef8d17e804448c302b21a0b19833a7572f379
|
[
"MIT"
] | 16 |
2018-03-17T01:10:50.000Z
|
2022-01-24T04:53:11.000Z
| 37.094034 | 562 | 0.507561 |
[
[
[
"# Building your Deep Neural Network: Step by Step\n\nWelcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want!\n\n- In this notebook, you will implement all the functions required to build a deep neural network.\n- In the next assignment, you will use these functions to build a deep neural network for image classification.\n\n**After this assignment you will be able to:**\n- Use non-linear units like ReLU to improve your model\n- Build a deeper neural network (with more than 1 hidden layer)\n- Implement an easy-to-use neural network class\n\n**Notation**:\n- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. \n - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.\n- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example.\n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).\n\nLet's get started!",
"_____no_output_____"
],
[
"## 1 - Packages\n\nLet's first import all the packages that you will need during this assignment. \n- [numpy](www.numpy.org) is the main package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- dnn_utils provides some necessary functions for this notebook.\n- testCases provides some test cases to assess the correctness of your functions\n- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom testCases import *\nfrom dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)",
"_____no_output_____"
]
],
[
[
"## 2 - Outline of the Assignment\n\nTo build your neural network, you will be implementing several \"helper functions\". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will:\n\n- Initialize the parameters for a two-layer network and for an $L$-layer neural network.\n- Implement the forward propagation module (shown in purple in the figure below).\n - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).\n - We give you the ACTIVATION function (relu/sigmoid).\n - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.\n - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.\n- Compute the loss.\n- Implement the backward propagation module (denoted in red in the figure below).\n - Complete the LINEAR part of a layer's backward propagation step.\n - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) \n - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function.\n - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function\n- Finally update the parameters.\n\n<img src=\"images/final outline.png\" style=\"width:800px;height:500px;\">\n<caption><center> **Figure 1**</center></caption><br>\n\n\n**Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. ",
"_____no_output_____"
],
[
"## 3 - Initialization\n\nYou will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers.\n\n### 3.1 - 2-layer Neural Network\n\n**Exercise**: Create and initialize the parameters of the 2-layer neural network.\n\n**Instructions**:\n- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. \n- Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape.\n- Use zero initialization for the biases. Use `np.zeros(shape)`.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n parameters -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(1)\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros(shape=(n_h, 1))\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros(shape=(n_y, 1))\n ### END CODE HERE ###\n \n assert(W1.shape == (n_h, n_x))\n assert(b1.shape == (n_h, 1))\n assert(W2.shape == (n_y, n_h))\n assert(b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters ",
"_____no_output_____"
],
[
"parameters = initialize_parameters(2,2,1)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"W1 = [[ 0.01624345 -0.00611756]\n [-0.00528172 -0.01072969]]\nb1 = [[ 0.]\n [ 0.]]\nW2 = [[ 0.00865408 -0.02301539]]\nb2 = [[ 0.]]\n"
]
],
[
[
"**Expected output**:\n \n<table style=\"width:80%\">\n <tr>\n <td> **W1** </td>\n <td> [[ 0.01624345 -0.00611756]\n [-0.00528172 -0.01072969]] </td> \n </tr>\n\n <tr>\n <td> **b1**</td>\n <td>[[ 0.]\n [ 0.]]</td> \n </tr>\n \n <tr>\n <td>**W2**</td>\n <td> [[ 0.00865408 -0.02301539]]</td>\n </tr>\n \n <tr>\n <td> **b2** </td>\n <td> [[ 0.]] </td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"### 3.2 - L-layer Neural Network\n\nThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then:\n\n<table style=\"width:100%\">\n\n\n <tr>\n <td> </td> \n <td> **Shape of W** </td> \n <td> **Shape of b** </td> \n <td> **Activation** </td>\n <td> **Shape of Activation** </td> \n <tr>\n \n <tr>\n <td> **Layer 1** </td> \n <td> $(n^{[1]},12288)$ </td> \n <td> $(n^{[1]},1)$ </td> \n <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> \n \n <td> $(n^{[1]},209)$ </td> \n <tr>\n \n <tr>\n <td> **Layer 2** </td> \n <td> $(n^{[2]}, n^{[1]})$ </td> \n <td> $(n^{[2]},1)$ </td> \n <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> \n <td> $(n^{[2]}, 209)$ </td> \n <tr>\n \n <tr>\n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$</td> \n <td> $\\vdots$ </td> \n <tr>\n \n <tr>\n <td> **Layer L-1** </td> \n <td> $(n^{[L-1]}, n^{[L-2]})$ </td> \n <td> $(n^{[L-1]}, 1)$ </td> \n <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> \n <td> $(n^{[L-1]}, 209)$ </td> \n <tr>\n \n \n <tr>\n <td> **Layer L** </td> \n <td> $(n^{[L]}, n^{[L-1]})$ </td> \n <td> $(n^{[L]}, 1)$ </td>\n <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>\n <td> $(n^{[L]}, 209)$ </td> \n <tr>\n\n</table>\n\nRemember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: \n\n$$ W = \\begin{bmatrix}\n j & k & l\\\\\n m & n & o \\\\\n p & q & r \n\\end{bmatrix}\\;\\;\\; X = \\begin{bmatrix}\n a & b & c\\\\\n d & e & f \\\\\n g & h & i \n\\end{bmatrix} \\;\\;\\; b =\\begin{bmatrix}\n s \\\\\n t \\\\\n u\n\\end{bmatrix}\\tag{2}$$\n\nThen $WX + b$ will be:\n\n$$ WX + b = \\begin{bmatrix}\n (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\\\\n (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\\\\n (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u\n\\end{bmatrix}\\tag{3} $$",
"_____no_output_____"
],
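[
"As a quick numerical check of this broadcasting behavior (shapes here are small and arbitrary):",
"_____no_output_____"
],
[
"# Quick numerical check of the broadcasting rule for W X + b described above.\nW_demo = np.arange(9).reshape(3, 3)\nX_demo = np.ones((3, 2))\nb_demo = np.array([[10.], [20.], [30.]])    # shape (3, 1): added to every column of W X\nprint(np.dot(W_demo, X_demo) + b_demo)",
"_____no_output_____"
],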
[
"**Exercise**: Implement initialization for an L-layer Neural Network. \n\n**Instructions**:\n- The model's structure is *[LINEAR -> RELU] $ \\times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.\n- Use random initialization for the weight matrices. Use `np.random.rand(shape) * 0.01`.\n- Use zeros initialization for the biases. Use `np.zeros(shape)`.\n- We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the \"Planar Data classification model\" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! \n- Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network).\n```python\n if L == 1:\n parameters[\"W\" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01\n parameters[\"b\" + str(L)] = np.zeros((layer_dims[1], 1))\n```",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: initialize_parameters_deep\n\ndef initialize_parameters_deep(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n ### END CODE HERE ###\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n \n return parameters",
"_____no_output_____"
],
[
"parameters = initialize_parameters_deep([5,4,3])\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]\nb1 = [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\nW2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]\nb2 = [[ 0.]\n [ 0.]\n [ 0.]]\n"
]
],
[
[
"**Expected output**:\n \n<table style=\"width:80%\">\n <tr>\n <td> **W1** </td>\n <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> \n </tr>\n \n <tr>\n <td>**b1** </td>\n <td>[[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]</td> \n </tr>\n \n <tr>\n <td>**W2** </td>\n <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> \n </tr>\n \n <tr>\n <td>**b2** </td>\n <td>[[ 0.]\n [ 0.]\n [ 0.]]</td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"## 4 - Forward propagation module\n\n### 4.1 - Linear Forward \nNow that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order:\n\n- LINEAR\n- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. \n- [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID (whole model)\n\nThe linear forward module (vectorized over all the examples) computes the following equations:\n\n$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\\tag{4}$$\n\nwhere $A^{[0]} = X$. \n\n**Exercise**: Build the linear part of forward propagation.\n\n**Reminder**:\nThe mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_forward\n\ndef linear_forward(A, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n Returns:\n Z -- the input of the activation function, also called pre-activation parameter \n cache -- a python dictionary containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n Z = np.dot(W, A) + b\n ### END CODE HERE ###\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache",
"_____no_output_____"
],
[
"A, W, b = linear_forward_test_case()\n\nZ, linear_cache = linear_forward(A, W, b)\nprint(\"Z = \" + str(Z))",
"Z = [[ 3.1980455 7.85763489]]\n"
]
],
[
[
"**Expected output**:\n\n<table style=\"width:35%\">\n \n <tr>\n <td> **Z** </td>\n <td> [[ 3.1980455 7.85763489]] </td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"### 4.2 - Linear-Activation Forward\n\nIn this notebook, you will use two activation functions:\n\n- **Sigmoid**: $\\sigma(Z) = \\sigma(W A + b) = \\frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value \"`a`\" and a \"`cache`\" that contains \"`Z`\" (it's what we will feed in to the corresponding backward function). To use it you could just call: \n``` python\nA, activation_cache = sigmoid(Z)\n```\n\n- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value \"`A`\" and a \"`cache`\" that contains \"`Z`\" (it's what we will feed in to the corresponding backward function). To use it you could just call:\n``` python\nA, activation_cache = relu(Z)\n```",
"_____no_output_____"
],
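[
"The `sigmoid` and `relu` helpers live in `dnn_utils` and are not reproduced in this notebook. Below is a minimal sketch of implementations consistent with the interface described above and with the `relu_backward`/`sigmoid_backward` interface used later in Section 6.2; the actual library code may differ:",
"_____no_output_____"
],
[
"# Sketch of activation helpers consistent with the dnn_utils interface described\n# above; the real dnn_utils implementations may differ in details.\ndef sigmoid_sketch(Z):\n    A = 1 / (1 + np.exp(-Z))\n    return A, Z                       # the cache is just Z\n\ndef relu_sketch(Z):\n    A = np.maximum(0, Z)\n    return A, Z\n\ndef relu_backward_sketch(dA, cache):\n    Z = cache\n    dZ = np.array(dA, copy=True)      # g'(Z) is 1 where Z > 0, else 0\n    dZ[Z <= 0] = 0\n    return dZ\n\ndef sigmoid_backward_sketch(dA, cache):\n    s = 1 / (1 + np.exp(-cache))\n    return dA * s * (1 - s)           # g'(Z) = sigma(Z) * (1 - sigma(Z))",
"_____no_output_____"
],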
[
"For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step.\n\n**Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation \"g\" can be sigmoid() or relu(). Use linear_forward() and the correct activation function.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_activation_forward\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n Arguments:\n A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n A -- the output of the activation function, also called the post-activation value \n cache -- a python dictionary containing \"linear_cache\" and \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n \n if activation == \"sigmoid\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n ### END CODE HERE ###\n \n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n ### END CODE HERE ###\n \n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache",
"_____no_output_____"
],
[
"A_prev, W, b = linear_activation_forward_test_case()\n\nA, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"sigmoid\")\nprint(\"With sigmoid: A = \" + str(A))\n\nA, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = \"relu\")\nprint(\"With ReLU: A = \" + str(A))",
"With sigmoid: A = [[ 0.96076066 0.99961336]]\nWith ReLU: A = [[ 3.1980455 7.85763489]]\n"
]
],
[
[
"**Expected output**:\n \n<table style=\"width:35%\">\n <tr>\n <td> **With sigmoid: A ** </td>\n <td > [[ 0.96076066 0.99961336]]</td> \n </tr>\n <tr>\n <td> **With ReLU: A ** </td>\n <td > [[ 3.1980455 7.85763489]]</td> \n </tr>\n</table>\n",
"_____no_output_____"
],
[
"**Note**: In deep learning, the \"[LINEAR->ACTIVATION]\" computation is counted as a single layer in the neural network, not two layers. ",
"_____no_output_____"
],
[
"### d) L-Layer Model \n\nFor even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.\n\n<img src=\"images/model_architecture_kiank.png\" style=\"width:600px;height:300px;\">\n<caption><center> **Figure 2** : *[LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>\n\n**Exercise**: Implement the forward propagation of the above model.\n\n**Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \\sigma(Z^{[L]}) = \\sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\\hat{Y}$.) \n\n**Tips**:\n- Use the functions you had previously written \n- Use a for loop to replicate [LINEAR->RELU] (L-1) times\n- Don't forget to keep track of the caches in the \"caches\" list. To add a new value `c` to a `list`, you can use `list.append(c)`.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: L_model_forward\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n \n Arguments:\n X -- data, numpy array of shape (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n \n Returns:\n AL -- last post-activation value\n caches -- list of caches containing:\n every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)\n the cache of linear_sigmoid_forward() (there is one, indexed L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n \n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A \n ### START CODE HERE ### (≈ 2 lines of code)\n A, cache = linear_activation_forward(A_prev, \n parameters['W' + str(l)], \n parameters['b' + str(l)], \n activation='relu')\n caches.append(cache)\n \n ### END CODE HERE ###\n \n # Implement LINEAR -> SIGMOID. Add \"cache\" to the \"caches\" list.\n ### START CODE HERE ### (≈ 2 lines of code)\n AL, cache = linear_activation_forward(A, \n parameters['W' + str(L)], \n parameters['b' + str(L)], \n activation='sigmoid')\n caches.append(cache)\n \n ### END CODE HERE ###\n \n assert(AL.shape == (1, X.shape[1]))\n \n return AL, caches",
"_____no_output_____"
],
[
"X, parameters = L_model_forward_test_case()\nAL, caches = L_model_forward(X, parameters)\nprint(\"AL = \" + str(AL))\nprint(\"Length of caches list = \" + str(len(caches)))",
"AL = [[ 0.0844367 0.92356858]]\nLength of caches list = 2\n"
]
],
[
[
"<table style=\"width:40%\">\n <tr>\n <td> **AL** </td>\n <td > [[ 0.0844367 0.92356858]]</td> \n </tr>\n <tr>\n <td> **Length of caches list ** </td>\n <td > 2</td> \n </tr>\n</table>",
"_____no_output_____"
],
[
"Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in \"caches\". Using $A^{[L]}$, you can compute the cost of your predictions.",
"_____no_output_____"
],
[
"## 5 - Cost function\n\nNow you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning.\n\n**Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} (y^{(i)}\\log\\left(a^{[L] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right)) \\tag{7}$$\n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: compute_cost\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation (7).\n\n Arguments:\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n Returns:\n cost -- cross-entropy cost\n \"\"\"\n \n m = Y.shape[1]\n\n # Compute loss from aL and y.\n ### START CODE HERE ### (≈ 1 lines of code)\n cost = (-1 / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))\n ### END CODE HERE ###\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n assert(cost.shape == ())\n \n return cost",
"_____no_output_____"
],
[
"Y, AL = compute_cost_test_case()\n\nprint(\"cost = \" + str(compute_cost(AL, Y)))",
"cost = 0.414931599615\n"
]
],
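[
[
"As a quick hand check of formula (7): a single example with $y = 1$ and $a^{[L]} = 0.9$ should give a cost of $-\\log(0.9) \\approx 0.105$.",
"_____no_output_____"
],
[
"print(compute_cost(np.array([[0.9]]), np.array([[1]])))\nprint(-np.log(0.9))",
"_____no_output_____"
]
],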
[
[
"**Expected Output**:\n\n<table>\n\n <tr>\n <td>**cost** </td>\n <td> 0.41493159961539694</td> \n </tr>\n</table>",
"_____no_output_____"
],
[
"## 6 - Backward propagation module\n\nJust like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. \n\n**Reminder**: \n<img src=\"images/backprop_kiank.png\" style=\"width:650px;height:250px;\">\n<caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption>\n\n<!-- \nFor those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:\n\n$$\\frac{d \\mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \\frac{d\\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\\frac{{da^{[2]}}}{{dz^{[2]}}}\\frac{{dz^{[2]}}}{{da^{[1]}}}\\frac{{da^{[1]}}}{{dz^{[1]}}} \\tag{8} $$\n\nIn order to calculate the gradient $dW^{[1]} = \\frac{\\partial L}{\\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.\n\nEquivalently, in order to calculate the gradient $db^{[1]} = \\frac{\\partial L}{\\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial b^{[1]}}$.\n\nThis is why we talk about **backpropagation**.\n!-->\n\nNow, similar to forward propagation, you are going to build the backward propagation in three steps:\n- LINEAR backward\n- LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation\n- [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)",
"_____no_output_____"
],
[
"### 6.1 - Linear backward\n\nFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).\n\nSuppose you have already calculated the derivative $dZ^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$.\n\n<img src=\"images/linearback_kiank.png\" style=\"width:250px;height:300px;\">\n<caption><center> **Figure 4** </center></caption>\n\nThe three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need:\n$$ dW^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial W^{[l]}} = \\frac{1}{m} dZ^{[l]} A^{[l-1] T} \\tag{8}$$\n$$ db^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial b^{[l]}} = \\frac{1}{m} \\sum_{i = 1}^{m} dZ^{[l](i)}\\tag{9}$$\n$$ dA^{[l-1]} = \\frac{\\partial \\mathcal{L} }{\\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \\tag{10}$$\n",
"_____no_output_____"
],
[
"**Exercise**: Use the 3 formulas above to implement linear_backward().",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_backward\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implement the linear portion of backward propagation for a single layer (layer l)\n\n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW = np.dot(dZ, cache[0].T) / m\n db = np.squeeze(np.sum(dZ, axis=1, keepdims=True)) / m\n dA_prev = np.dot(cache[1].T, dZ)\n ### END CODE HERE ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (isinstance(db, float))\n \n return dA_prev, dW, db",
"_____no_output_____"
],
[
"# Set up some test inputs\ndZ, linear_cache = linear_backward_test_case()\n\ndA_prev, dW, db = linear_backward(dZ, linear_cache)\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))",
"dA_prev = [[ 2.38272385 5.85438014]\n [ 6.31969219 15.52755701]\n [ -3.97876302 -9.77586689]]\ndW = [[ 2.77870358 -0.05500058 -5.13144969]]\ndb = 5.527840195\n"
]
],
[
[
"**Expected Output**: \n\n<table style=\"width:90%\">\n <tr>\n <td> **dA_prev** </td>\n <td > [[ 2.38272385 5.85438014]\n [ 6.31969219 15.52755701]\n [ -3.97876302 -9.77586689]] </td> \n </tr> \n \n <tr>\n <td> **dW** </td>\n <td > [[ 2.77870358 -0.05500058 -5.13144969]] </td> \n </tr> \n \n <tr>\n <td> **db** </td>\n <td> 5.527840195 </td> \n </tr> \n \n</table>\n\n",
"_____no_output_____"
],
[
"### 6.2 - Linear-Activation backward\n\nNext, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. \n\nTo help you implement `linear_activation_backward`, we provided two backward functions:\n- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:\n\n```python\ndZ = sigmoid_backward(dA, activation_cache)\n```\n\n- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:\n\n```python\ndZ = relu_backward(dA, activation_cache)\n```\n\nIf $g(.)$ is the activation function, \n`sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \\tag{11}$$. \n\n**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: linear_activation_backward\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer.\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = relu_backward(dA, activation_cache)\n ### END CODE HERE ###\n \n elif activation == \"sigmoid\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = sigmoid_backward(dA, activation_cache)\n ### END CODE HERE ###\n \n # Shorten the code\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n return dA_prev, dW, db",
"_____no_output_____"
],
[
"AL, linear_activation_cache = linear_activation_backward_test_case()\n\ndA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"sigmoid\")\nprint (\"sigmoid:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db) + \"\\n\")\n\ndA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = \"relu\")\nprint (\"relu:\")\nprint (\"dA_prev = \"+ str(dA_prev))\nprint (\"dW = \" + str(dW))\nprint (\"db = \" + str(db))",
"sigmoid:\ndA_prev = [[ 0.08982777 0.00226265]\n [ 0.23824996 0.00600122]\n [-0.14999783 -0.00377826]]\ndW = [[-0.06001514 -0.09687383 -0.10598695]]\ndb = 0.061800984273\n\nrelu:\ndA_prev = [[ 2.38272385 5.85438014]\n [ 6.31969219 15.52755701]\n [ -3.97876302 -9.77586689]]\ndW = [[ 2.77870358 -0.05500058 -5.13144969]]\ndb = 5.527840195\n"
]
],
[
[
"**Expected output with sigmoid:**\n\n<table style=\"width:100%\">\n <tr>\n <td > dA_prev </td> \n <td >[[ 0.08982777 0.00226265]\n [ 0.23824996 0.00600122]\n [-0.14999783 -0.00377826]] </td> \n\n </tr> \n \n <tr>\n <td > dW </td> \n <td > [[-0.06001514 -0.09687383 -0.10598695]] </td> \n </tr> \n \n <tr>\n <td > db </td> \n <td > 0.061800984273 </td> \n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"**Expected output with relu**\n\n<table style=\"width:100%\">\n <tr>\n <td > dA_prev </td> \n <td > [[ 2.38272385 5.85438014]\n [ 6.31969219 15.52755701]\n [ -3.97876302 -9.77586689]] </td> \n\n </tr> \n \n <tr>\n <td > dW </td> \n <td > [[ 2.77870358 -0.05500058 -5.13144969]] </td> \n </tr> \n \n <tr>\n <td > db </td> \n <td > 5.527840195 </td> \n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"### 6.3 - L-Model Backward \n\nNow you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. \n\n\n<img src=\"images/mn_backward.png\" style=\"width:450px;height:300px;\">\n<caption><center> **Figure 5** : Backward pass </center></caption>\n\n** Initializing backpropagation**:\nTo backpropagate through this network, we know that the output is, \n$A^{[L]} = \\sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \\frac{\\partial \\mathcal{L}}{\\partial A^{[L]}}$.\nTo do so, use this formula (derived using calculus which you don't need in-depth knowledge of):\n```python\ndAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL\n```\n\nYou can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : \n\n$$grads[\"dW\" + str(l)] = dW^{[l]}\\tag{15} $$\n\nFor example, for $l=3$ this would store $dW^{[l]}$ in `grads[\"dW3\"]`.\n\n**Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID* model.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: L_model_backward\n\ndef L_model_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... \n \"\"\"\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation\n ### START CODE HERE ### (1 line of code)\n dAL = dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n ### END CODE HERE ###\n \n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"AL, Y, caches\". Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n ### START CODE HERE ### (approx. 2 lines)\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL, \n current_cache[1]), \n current_cache[0])\n ### END CODE HERE ###\n \n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n ### START CODE HERE ### (approx. 5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(sigmoid_backward(dAL, caches[1]), caches[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads",
"_____no_output_____"
],
[
"X_assess, Y_assess, AL, caches = L_model_backward_test_case()\ngrads = L_model_backward(AL, Y_assess, caches)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"db1 = \"+ str(grads[\"db1\"]))\nprint (\"dA1 = \"+ str(grads[\"dA1\"]))",
"dW1 = [[-0.09686122 -0.04840482 -0.11864308]]\ndb1 = -0.262594998379\ndA1 = [[-0.71011462 -0.22925516]\n [-0.17330152 -0.05594909]\n [-0.03831107 -0.01236844]]\n"
]
],
[
[
"**Expected Output**\n\n<table style=\"width:60%\">\n \n <tr>\n <td > dW1 </td> \n <td > [[-0.09686122 -0.04840482 -0.11864308]] </td> \n </tr> \n \n <tr>\n <td > db1 </td> \n <td > -0.262594998379 </td> \n </tr> \n \n <tr>\n <td > dA1 </td> \n <td > [[-0.71011462 -0.22925516]\n [-0.17330152 -0.05594909]\n [-0.03831107 -0.01236844]] </td> \n\n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"### 6.4 - Update Parameters\n\nIn this section you will update the parameters of the model, using gradient descent: \n\n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{16}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{17}$$\n\nwhere $\\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. ",
"_____no_output_____"
],
[
"**Exercise**: Implement `update_parameters()` to update your parameters using gradient descent.\n\n**Instructions**:\nUpdate parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. \n",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. Use a for loop.\n ### START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n ### END CODE HERE ###\n \n return parameters",
"_____no_output_____"
],
[
"parameters, grads = update_parameters_test_case()\nparameters = update_parameters(parameters, grads, 0.1)\n\nprint (\"W1 = \" + str(parameters[\"W1\"]))\nprint (\"b1 = \" + str(parameters[\"b1\"]))\nprint (\"W2 = \" + str(parameters[\"W2\"]))\nprint (\"b2 = \" + str(parameters[\"b2\"]))\nprint (\"W3 = \" + str(parameters[\"W3\"]))\nprint (\"b3 = \" + str(parameters[\"b3\"]))",
"W1 = [[ 1.72555789 0.3700272 0.07818896]\n [-1.8634927 -0.2773882 -0.35475898]\n [-0.08274148 -0.62700068 -0.04381817]\n [-0.47721803 -1.31386475 0.88462238]]\nb1 = [[-0.07593768]\n [-0.07593768]\n [-0.07593768]\n [-0.07593768]]\nW2 = [[ 0.71838378 1.70957306 0.05003364 -0.40467741]\n [-0.54535995 -1.54647732 0.98236743 -1.10106763]\n [-1.18504653 -0.2056499 1.48614836 0.23671627]]\nb2 = [[-0.08616376]\n [-0.08616376]\n [-0.08616376]]\nW3 = [[-0.88352436 -0.7129932 0.62524497]\n [-0.02025258 -0.76883635 -0.23003072]]\nb3 = [[ 0.08416196]\n [ 0.08416196]]\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:100%\"> \n <tr>\n <td > W1 </td> \n <td > [[ 1.72555789 0.3700272 0.07818896]\n [-1.8634927 -0.2773882 -0.35475898]\n [-0.08274148 -0.62700068 -0.04381817]\n [-0.47721803 -1.31386475 0.88462238]] </td> \n </tr> \n \n <tr>\n <td > b1 </td> \n <td > [[-0.07593768]\n [-0.07593768]\n [-0.07593768]\n [-0.07593768]] </td> \n </tr> \n <tr>\n <td > W2 </td> \n <td > [[ 0.71838378 1.70957306 0.05003364 -0.40467741]\n [-0.54535995 -1.54647732 0.98236743 -1.10106763]\n [-1.18504653 -0.2056499 1.48614836 0.23671627]] </td> \n </tr> \n \n <tr>\n <td > b2 </td> \n <td > [[-0.08616376]\n [-0.08616376]\n [-0.08616376]] </td> \n </tr> \n <tr>\n <td > W3 </td> \n <td > [[-0.88352436 -0.7129932 0.62524497]\n [-0.02025258 -0.76883635 -0.23003072]] </td> \n </tr> \n \n <tr>\n <td > b3 </td> \n <td > [[ 0.08416196]\n [ 0.08416196]] </td> \n </tr> \n \n</table>\n",
"_____no_output_____"
],
[
"\n## 7 - Conclusion\n\nCongrats on implementing all the functions required for building a deep neural network! \n\nWe know it was a long assignment but going forward it will only get better. The next part of the assignment is easier. \n\nIn the next assignment you will put all these together to build two models:\n- A two-layer neural network\n- An L-layer neural network\n\nYou will in fact use these models to classify cat vs non-cat images!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a121c40fadf31c4865d69303c2b756c6cb3bc5b
| 118,360 |
ipynb
|
Jupyter Notebook
|
build-cpu.ipynb
|
daniel-fudge/sagemaker-tennis
|
7b95b6e2ade4ecf2e4e6424a4d2779c9dec0188c
|
[
"MIT"
] | null | null | null |
build-cpu.ipynb
|
daniel-fudge/sagemaker-tennis
|
7b95b6e2ade4ecf2e4e6424a4d2779c9dec0188c
|
[
"MIT"
] | 2 |
2020-06-26T16:24:23.000Z
|
2020-06-26T19:08:47.000Z
|
build-cpu.ipynb
|
daniel-fudge/sagemaker-tennis
|
7b95b6e2ade4ecf2e4e6424a4d2779c9dec0188c
|
[
"MIT"
] | null | null | null | 143.81531 | 61,664 | 0.825684 |
[
[
[
"# Part 1: Packaging and Uploading your Algorithm for use with Amazon SageMaker\n\n### An overview of Docker\n\nIf you're familiar with Docker already, you can skip ahead to the next section.\n\nFor many data scientists, Docker containers are a new technology. But they are not difficult and can significantly simplify the deployment of your software packages. \n\nDocker provides a simple way to package arbitrary code into an _image_ that is totally self-contained. Once you have an image, you can use Docker to run a _container_ based on that image. Running a container is just like running a program on the machine except that the container creates a fully self-contained environment for the program to run. Containers are isolated from each other and from the host environment, so the way your program is set up is the way it runs, no matter where you run it.\n\nDocker is more powerful than environment managers like conda or virtualenv because (a) it is completely language independent and (b) it comprises your whole operating environment, including startup commands, and environment variable.\n\nA Docker container is like a virtual machine, but it is much lighter weight. For example, a program running in a container can start in less than a second and many containers can run simultaneously on the same physical or virtual machine instance.\n\nDocker uses a simple file called a `Dockerfile` to specify how the image is assembled. An example is provided below. You can build your Docker images based on Docker images built by yourself or by others, which can simplify things quite a bit.\n\nDocker has become very popular in programming and devops communities due to its flexibility and its well-defined specification of how code can be run in its containers. It is the underpinning of many services built in the past few years, such as [Amazon ECS].\n\nAmazon SageMaker uses Docker to allow users to train and deploy arbitrary algorithms.\n\nIn Amazon SageMaker, Docker containers are invoked in a one way for training and another, slightly different, way for hosting. The following sections outline how to build containers for the SageMaker environment.\n\nSome helpful links:\n\n* [Docker home page](http://www.docker.com)\n* [Getting started with Docker](https://docs.docker.com/get-started/)\n* [Dockerfile reference](https://docs.docker.com/engine/reference/builder/)\n* [`docker run` reference](https://docs.docker.com/engine/reference/run/)\n\n[Amazon ECS]: https://aws.amazon.com/ecs/\n\n### How Amazon SageMaker runs your Docker container\n\nBecause you can run the same image in training or hosting, Amazon SageMaker runs your container with the argument `train` or `serve`. How your container processes this argument depends on the container. All SageMaker deep learning framework containers already cover this requirement and will trigger your defined training algorithm and inference code.\n\n* If you specify a program as an `ENTRYPOINT` in the Dockerfile, that program will be run at startup and its first argument will be `train` or `serve`. The program can then look at that argument and decide what to do. 
The original `ENTRYPOINT` specified within the SageMaker PyTorch is [here](https://github.com/aws/sagemaker-pytorch-container/blob/master/docker/0.4.0/final/Dockerfile.cpu#L18).\n\n#### Running your container during training\n\nCurrently, our SageMaker PyTorch container utilizes [console_scripts](http://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point) to make use of the `train` command issued at training time. The line that gets invoked during `train` is defined within the setup.py file inside [SageMaker Containers](https://github.com/aws/sagemaker-containers/blob/master/setup.py#L48), our common SageMaker deep learning container framework. When this command is run, it will invoke the [trainer class](https://github.com/aws/sagemaker-containers/blob/master/src/sagemaker_containers/cli/train.py) to run, which will finally invoke our [PyTorch container code](https://github.com/aws/sagemaker-pytorch-container/blob/master/src/sagemaker_pytorch_container/training.py) to run your Python file.\n\nA number of files are laid out for your use, under the `/opt/ml` directory:\n\n /opt/ml\n |-- input\n | |-- config\n | | |-- hyperparameters.json\n | | `-- resourceConfig.json\n | `-- data\n | `-- <channel_name>\n | `-- <input data>\n |-- model\n | `-- <model files>\n `-- output\n `-- failure\n\n##### The input\n\n* `/opt/ml/input/config` contains information to control how your program runs. `hyperparameters.json` is a JSON-formatted dictionary of hyperparameter names to values. These values are always strings, so you may need to convert them. `resourceConfig.json` is a JSON-formatted file that describes the network layout used for distributed training.\n* `/opt/ml/input/data/<channel_name>/` (for File mode) contains the input data for that channel. The channels are created based on the call to CreateTrainingJob but it's generally important that channels match algorithm expectations. The files for each channel are copied from S3 to this directory, preserving the tree structure indicated by the S3 key structure. \n* `/opt/ml/input/data/<channel_name>_<epoch_number>` (for Pipe mode) is the pipe for a given epoch. Epochs start at zero and go up by one each time you read them. There is no limit to the number of epochs that you can run, but you must close each pipe before reading the next epoch.\n\n##### The output\n\n* `/opt/ml/model/` is the directory where you write the model that your algorithm generates. Your model can be in any format that you want. It can be a single file or a whole directory tree. SageMaker packages any files in this directory into a compressed tar archive file. This file is made available at the S3 location returned in the `DescribeTrainingJob` result.\n* `/opt/ml/output` is a directory where the algorithm can write a file `failure` that describes why the job failed. The contents of this file are returned in the `FailureReason` field of the `DescribeTrainingJob` result. For jobs that succeed, there is no reason to write this file as it is ignored.",
"_____no_output_____"
],
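[
"SageMaker passes everything into, and collects everything out of, the container through this `/opt/ml` tree. As a minimal sketch of what a training script's plumbing can look like (the `epochs` key and the `model.pth` filename are illustrative assumptions, not necessarily what this repo's `train.py` uses):\n\n```python\nimport json\nimport os\n\nPREFIX = '/opt/ml'\n\n# Hyperparameter values arrive as strings, so cast them explicitly.\nwith open(os.path.join(PREFIX, 'input', 'config', 'hyperparameters.json')) as f:\n    hyperparameters = json.load(f)\nepochs = int(hyperparameters.get('epochs', 200))\n\n# ... run training here ...\n\n# Anything written under /opt/ml/model is tarred and uploaded to S3.\nmodel_path = os.path.join(PREFIX, 'model', 'model.pth')\n\n# On an unrecoverable error, describe why so it surfaces in DescribeTrainingJob:\n# with open(os.path.join(PREFIX, 'output', 'failure'), 'w') as f:\n#     f.write('description of the failure')\n```",
"_____no_output_____"
],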
[
"### The parts of the sample container\n\nThe `container` directory has all the components you need to extend the SageMaker PyTorch CPU or GPU container to use as an sample algorithm.\n\n .\n |-- build_and_push.sh\n |-- Dockerfile-cpu\n |-- Dockerfile-gpu\n |-- requirements.txt\n `-- src\n `-- train.py\n -- [Python Modules]\n\nLet's discuss each of these in turn:\n\n* __`build_and_push.sh`__ is a script that uses the Dockerfile to build your container images and then pushes it to ECR. We invoke the commands directly later in this notebook, but you can just copy and run the script for your own algorithms.\n* __`src`__ is the directory which contains our user code to be invoked.\n* __`train.py`__ is the interface to SageMaker.\n* __`Dockerfile-cpu`__ describes how to build your Docker container image. More details are provided below.\n* __`Dockerfile-gpu`__ builds the GPU image.\n",
"_____no_output_____"
],
[
"### The Dockerfile\n\nThe Dockerfile describes the image that we want to build. You can think of it as describing the complete operating system installation of the system that you want to run. A Docker container running is quite a bit lighter than a full operating system, however, because it takes advantage of Linux on the host machine for the basic operations. \n\nWe start from the SageMaker PyTorch image as the base. The base image is an ECR image, so it will have the following pattern.\n* {account}.dkr.ecr.{region}.amazonaws.com/sagemaker-{framework}:{framework_version}-{processor_type}-{python_version}\n\nHere is an explanation of each field.\n1. account - AWS account ID the ECR image belongs to. Our public deep learning framework images are all under the 520713654638 account.\n2. region - The region the ECR image belongs to. [Available regions](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/).\n3. framework - The deep learning framework.\n4. framework_version - The version of the deep learning framework.\n5. processor_type - CPU or GPU.\n6. python_version - The supported version of Python.\n\nSo the SageMaker PyTorch ECR image would be:\n520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-pytorch:0.4.0-cpu-py3\n\nInformation on supported frameworks and versions can be found in this [README](https://github.com/aws/sagemaker-python-sdk).\n\nNext, we add the code that implements our specific algorithm to the container and set up the right environment for it to run under.\n\nFinally, we need to specify two environment variables.\n1. SAGEMAKER_SUBMIT_DIRECTORY - the directory within the container containing our Python script for training and inference.\n2. SAGEMAKER_PROGRAM - the Python script that should be invoked for training and inference.\n\nLet's look at the Dockerfile for this example.",
"_____no_output_____"
],
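[
"The Dockerfile itself isn't embedded in this notebook, but the CPU build log below records every build step, so `Dockerfile-cpu` can be reconstructed as the following sketch (step order and values are taken from that log; the comments are ours, so treat this as an approximation of the file rather than a verbatim copy):\n\n```dockerfile\nARG REGION=us-east-1\n\n# Start from the public SageMaker PyTorch CPU base image.\nFROM 520713654638.dkr.ecr.$REGION.amazonaws.com/sagemaker-pytorch:1.1.0-cpu-py3\n\nRUN pip install --upgrade pip\n\n# Install the algorithm's additional dependencies.\nCOPY requirements.txt requirements.txt\nRUN pip install -r requirements.txt\n\n# Copy the user code into the container.\nENV PATH=\"/opt/ml/code:${PATH}\"\nCOPY /src /opt/ml/code\nRUN chmod -R 755 /opt/ml/code\n\n# Tell the SageMaker framework where the code lives and which script to run.\nENV SAGEMAKER_SUBMIT_DIRECTORY /opt/ml/code\nENV SAGEMAKER_PROGRAM train.py\n```",
"_____no_output_____"
],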
[
"### Building and registering the container\n\nThe `build-and-push.sh` builds the container image using `docker build` and push the container image to ECR using `docker push`. \n\nIf the `gpu` argument is passed to `build-and-push.sh` the GPU Docker file is used to create the GPU instance. Otherwise the CPU instance is created.\n\nThis code looks for an ECR repository in the account you're using and the current default region (if you're using a SageMaker notebook instance, this is the region where the notebook instance was created). If the repository doesn't exist, the script will create it. In addition, since we are using the SageMaker PyTorch image as the base, we will need to retrieve ECR credentials to pull this public image.",
"_____no_output_____"
]
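,
[
"A simplified sketch of the script (the repository name and the double `Login Succeeded` match the output that follows; the exact argument handling and error checks are assumptions about the script's contents):\n\n```bash\n#!/usr/bin/env bash\n# Select the Dockerfile from the optional 'gpu' argument; default to CPU.\narch=${1:-cpu}\nimage=sagemaker-tennis-${arch}\n\naccount=$(aws sts get-caller-identity --query Account --output text)\nregion=$(aws configure get region)\nfullname=${account}.dkr.ecr.${region}.amazonaws.com/${image}:latest\n\n# Create the ECR repository if it doesn't exist yet.\naws ecr describe-repositories --repository-names \"${image}\" >/dev/null 2>&1 ||\n    aws ecr create-repository --repository-name \"${image}\" >/dev/null\n\n# Log in to our own registry, then to the public registry hosting the base image.\n$(aws ecr get-login --no-include-email --region ${region})\n$(aws ecr get-login --no-include-email --region ${region} --registry-ids 520713654638)\n\ndocker build -t ${image} -f Dockerfile-${arch} --build-arg REGION=${region} .\ndocker tag ${image} ${fullname}\ndocker push ${fullname}\n```",
"_____no_output_____"
]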
],
[
[
"!./container/build_and_push.sh",
"Requesting CPU image\nLogin Succeeded\nLogin Succeeded\nSending build context to Docker daemon 178.8MB\nStep 1/10 : ARG REGION=us-east-1\nStep 2/10 : FROM 520713654638.dkr.ecr.$REGION.amazonaws.com/sagemaker-pytorch:1.1.0-cpu-py3\n1.1.0-cpu-py3: Pulling from sagemaker-pytorch\n\n\u001b[1B7927d38a: Pulling fs layer \n\u001b[1Bac894db4: Pulling fs layer \n\u001b[1B2af6d627: Pulling fs layer \n\u001b[1B86211d23: Pulling fs layer \n\u001b[1Baf39bebe: Pulling fs layer \n\u001b[1B03f425cd: Pulling fs layer \n\u001b[1B1ec18efe: Pulling fs layer \n\u001b[1B8ad8ba55: Pulling fs layer \n\u001b[1B6c282ffb: Pulling fs layer \n\u001b[1B77dfb459: Pulling fs layer \n\u001b[1Bbbd8c730: Pulling fs layer \n\u001b[1BDigest: sha256:bd973d810e8cf494a37dc9cc477b619d13da901d5f2804a953064b5bafc1e484[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[7A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[5A\u001b[1K\u001b[K\u001b[4A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[3A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[12A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[11A\u001b[1K\u001b[K\u001b[11A\u001b[1K\u001b[K\u001b[10A\u001b[1K\u001b[K\u001b[10A\u001b[1K\u001b[K\u001b[9A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001
b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[8A\u001b[1K\u001b[K\u001b[7A\u001b[1K\u001b[K\u001b[7A\u001b[1K\u001b[K\u001b[7A\u001b[1K\u001b[K\u001b[7A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[6A\u001b[1K\u001b[K\u001b[5A\u001b[1K\u001b[K\u001b[5A\u001b[1K\u001b[K\u001b[4A\u001b[1K\u001b[K\u001b[4A\u001b[1K\u001b[K\u001b[3A\u001b[1K\u001b[K\u001b[2A\u001b[1K\u001b[K\u001b[2A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b
[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\u001b[1A\u001b[1K\u001b[K\nStatus: Downloaded newer image for 520713654638.dkr.ecr.us-east-1.amazonaws.com/sagemaker-pytorch:1.1.0-cpu-py3\n ---> d374fb352c72\nStep 3/10 : RUN pip install --upgrade pip\n ---> Running in ac5ba6f64b73\nCollecting pip\n Downloading https://files.pythonhosted.org/packages/43/84/23ed6a1796480a6f1a2d38f2802901d078266bda38388954d01d3f2e821d/pip-20.1.1-py2.py3-none-any.whl (1.5MB)\nInstalling collected packages: pip\n Found existing installation: pip 18.1\n Uninstalling pip-18.1:\n Successfully uninstalled pip-18.1\nSuccessfully installed pip-20.1.1\nRemoving intermediate container ac5ba6f64b73\n ---> d65915466bcd\nStep 4/10 : COPY requirements.txt requirements.txt\n ---> 076291856580\nStep 5/10 : RUN pip install -r requirements.txt\n ---> Running in e29ef68688d1\nCollecting grpcio-tools\n Downloading grpcio_tools-1.30.0-cp36-cp36m-manylinux2010_x86_64.whl (2.5 MB)\nCollecting gym\n Downloading gym-0.17.2.tar.gz (1.6 MB)\nCollecting box2d-py~=2.3.5\n Downloading box2d_py-2.3.8-cp36-cp36m-manylinux1_x86_64.whl (448 kB)\nCollecting protobuf>=3.5.0.post1\n Downloading protobuf-3.12.2-cp36-cp36m-manylinux1_x86_64.whl (1.3 MB)\nCollecting grpcio>=1.30.0\n Downloading grpcio-1.30.0-cp36-cp36m-manylinux2010_x86_64.whl (3.0 MB)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from gym->-r requirements.txt (line 2)) (1.3.1)\nRequirement already satisfied: numpy>=1.10.4 in /usr/local/lib/python3.6/dist-packages (from gym->-r requirements.txt (line 2)) (1.16.4)\nCollecting pyglet<=1.5.0,>=1.4.0\n Downloading pyglet-1.5.0-py2.py3-none-any.whl (1.0 MB)\nCollecting cloudpickle<1.4.0,>=1.2.0\n Downloading cloudpickle-1.3.0-py2.py3-none-any.whl (26 kB)\nRequirement already satisfied: six>=1.9 in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.5.0.post1->grpcio-tools->-r requirements.txt (line 1)) (1.12.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.5.0.post1->grpcio-tools->-r requirements.txt (line 1)) (41.1.0)\nCollecting future\n Downloading future-0.18.2.tar.gz (829 kB)\nBuilding wheels for collected packages: gym, future\n Building wheel for gym (setup.py): started\n Building wheel for gym (setup.py): finished with status 'done'\n Created wheel for gym: filename=gym-0.17.2-py3-none-any.whl size=1650891 sha256=1ea88fbbd3f9682e37a33c5ff2c9e96a8f2a4638e6912985e0b953e3ad42ee32\n Stored in directory: /root/.cache/pip/wheels/be/a1/84/6b4caa6c1cea703acbfea8a24cc3c1729bd359cd4a65755d8b\n Building wheel for future (setup.py): started\n Building wheel for future (setup.py): finished with status 'done'\n Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491058 sha256=32f9d168e6ce425960b0bf60b1d2dfbc29ab05d36133b64d81af438f99434b64\n Stored in directory: /root/.cache/pip/wheels/6e/9c/ed/4499c9865ac1002697793e0ae05ba6be33553d098f3347fb94\nSuccessfully built gym future\nInstalling collected packages: protobuf, grpcio, grpcio-tools, future, pyglet, cloudpickle, gym, box2d-py\nSuccessfully installed box2d-py-2.3.8 cloudpickle-1.3.0 future-0.18.2 grpcio-1.30.0 grpcio-tools-1.30.0 gym-0.17.2 protobuf-3.12.2 pyglet-1.5.0\nRemoving intermediate container e29ef68688d1\n ---> b6b7d1876658\nStep 6/10 : ENV PATH=\"/opt/ml/code:${PATH}\"\n ---> Running in c00a2448e01c\nRemoving intermediate container c00a2448e01c\n ---> d47d578bdf87\nStep 7/10 : COPY /src /opt/ml/code\n ---> 15072931a941\nStep 8/10 : 
RUN chmod -R 755 /opt/ml/code\n ---> Running in b18d8dbecd40\nRemoving intermediate container b18d8dbecd40\n ---> f04ad4f32e30\nStep 9/10 : ENV SAGEMAKER_SUBMIT_DIRECTORY /opt/ml/code\n ---> Running in 5014dabfd09b\nRemoving intermediate container 5014dabfd09b\n ---> ae9c6b473ebc\nStep 10/10 : ENV SAGEMAKER_PROGRAM train.py\n ---> Running in 4a8b7f2ca4fd\nRemoving intermediate container 4a8b7f2ca4fd\n ---> 9d413381f8ae\nSuccessfully built 9d413381f8ae\nSuccessfully tagged sagemaker-tennis-cpu:latest\nBuilding CPU image\nThe push refers to repository [031118886020.dkr.ecr.us-east-1.amazonaws.com/sagemaker-tennis-cpu]\n\n\u001b[1Bb62ef907: Preparing \n\u001b[1B0fc5303c: Preparing \n\u001b[1B937dd526: Preparing \n\u001b[1B650268ac: Preparing \n\u001b[1B7d608e97: Preparing \n\u001b[1Becc4e4d4: Preparing \n\u001b[1B808cebd3: Preparing \n\u001b[1Bf05eda79: Preparing \n\u001b[1B71db9add: Preparing \n\u001b[1B53464ab3: Preparing \n\u001b[1Bc4bd5031: Preparing \n\u001b[1Bd01ff144: Preparing \n\u001b[1B7f77d9db: Preparing \n\u001b[1B42719515: Preparing \n\u001b[1B103e78c9: Preparing \n\u001b[1Be637fbff: Preparing \n"
]
],
[
[
"## Testing your algorithm on your local machine\n\nWhen you're packaging your first algorithm to use with Amazon SageMaker, you probably want to test it yourself to make sure it's working correctly. We use the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) to test both locally and on SageMaker. For more examples with the SageMaker Python SDK, see [Amazon SageMaker Examples](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk). In order to test our algorithm, we need our dataset.",
"_____no_output_____"
],
[
"## SageMaker Python SDK Local Training\nTo represent our training, we use the Estimator class, which needs to be configured in five steps. \n1. IAM role - our AWS execution role\n2. train_instance_count - number of instances to use for training.\n3. train_instance_type - type of instance to use for training. For training locally, we specify `local`.\n4. image_name - our custom PyTorch Docker image we created.\n5. hyperparameters - hyperparameters we want to pass.\n\nLet's start with setting up our IAM role. We make use of a helper function within the Python SDK. This function throw an exception if run outside of a SageMaker notebook instance, as it gets metadata from the notebook instance.",
"_____no_output_____"
],
[
"### Setup Notebook for local execution",
"_____no_output_____"
]
],
[
[
"!/bin/bash ./utils/setup.sh",
"The user has root access.\nSageMaker instance route table setup is ok. We are good to go.\nSageMaker instance routing for Docker is ok. We are good to go!\n"
]
],
[
[
"### Training the Reinforcement Learning Model Locally\nNote we are only training for 200 iterations, which is too few to see any increase in the average score. We are a purely checking for mechanical errors.",
"_____no_output_____"
]
],
[
[
"from sagemaker.estimator import Estimator\nfrom sagemaker import get_execution_role\n\nrole = get_execution_role()\nestimator = Estimator(role=role,\n train_instance_count=1,\n train_instance_type='local',\n image_name='sagemaker-tennis-cpu:latest',\n hyperparameters={'epochs': 200})\n\nestimator.fit()",
"Creating tmpl4u04c4__algo-1-dwbrk_1 ... \n\u001b[1BAttaching to tmpl4u04c4__algo-1-dwbrk_12mdone\u001b[0m\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,066 sagemaker-containers INFO Imported framework sagemaker_pytorch_container.training\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,070 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,084 sagemaker_pytorch_container.training INFO Block until all host DNS lookups succeed.\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,088 sagemaker_pytorch_container.training INFO Invoking user training script.\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,089 sagemaker-containers INFO Module train does not provide a setup.py. \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Generating setup.py\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,090 sagemaker-containers INFO Generating setup.cfg\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,090 sagemaker-containers INFO Generating MANIFEST.in\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:24,090 sagemaker-containers INFO Installing module with the following command:\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m /usr/bin/python -m pip install . \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Processing /opt/ml/code\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Building wheels for collected packages: train\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Building wheel for train (setup.py) ... \u001b[?25ldone\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \u001b[?25h Created wheel for train: filename=train-1.0.0-py2.py3-none-any.whl size=45199378 sha256=bbfe7c8fcd55829e755556f37466ad480efb9e35c89171e44b0f18db5fbc9041\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Stored in directory: /tmp/pip-ephem-wheel-cache-rrvc2ie6/wheels/95/c1/85/65aaf48b35aba88c6e896d2fd04a4b69f1cee0d81ea32993ca\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Successfully built train\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Installing collected packages: train\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Successfully installed train-1.0.0\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:37,849 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:34:37,863 sagemaker-containers INFO Invoking user script\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Training Env:\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m {\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"additional_framework_parameters\": {},\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"channel_input_dirs\": {},\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"current_host\": \"algo-1-dwbrk\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"framework_module\": \"sagemaker_pytorch_container.training:main\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"hosts\": [\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"algo-1-dwbrk\"\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m ],\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"hyperparameters\": {\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"epochs\": 200\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m },\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"input_config_dir\": \"/opt/ml/input/config\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"input_data_config\": {},\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"input_dir\": \"/opt/ml/input\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"is_master\": true,\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"job_name\": \"sagemaker-tennis-cpu-2020-07-04-15-34-21-570\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"log_level\": 
20,\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"master_hostname\": \"algo-1-dwbrk\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"model_dir\": \"/opt/ml/model\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"module_dir\": \"/opt/ml/code\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"module_name\": \"train\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"network_interface_name\": \"eth0\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"num_cpus\": 2,\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"num_gpus\": 0,\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"output_data_dir\": \"/opt/ml/output/data\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"output_dir\": \"/opt/ml/output\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"resource_config\": {\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"current_host\": \"algo-1-dwbrk\",\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"hosts\": [\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"algo-1-dwbrk\"\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m ]\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m },\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \"user_entry_point\": \"train.py\"\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m }\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Environment variables:\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_HOSTS=[\"algo-1-dwbrk\"]\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_NETWORK_INTERFACE_NAME=eth0\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_HPS={\"epochs\":200}\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_USER_ENTRY_POINT=train.py\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_FRAMEWORK_PARAMS={}\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_RESOURCE_CONFIG={\"current_host\":\"algo-1-dwbrk\",\"hosts\":[\"algo-1-dwbrk\"]}\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_INPUT_DATA_CONFIG={}\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_OUTPUT_DATA_DIR=/opt/ml/output/data\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_CHANNELS=[]\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_CURRENT_HOST=algo-1-dwbrk\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_MODULE_NAME=train\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_LOG_LEVEL=20\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_FRAMEWORK_MODULE=sagemaker_pytorch_container.training:main\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_INPUT_DIR=/opt/ml/input\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_INPUT_CONFIG_DIR=/opt/ml/input/config\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_OUTPUT_DIR=/opt/ml/output\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_NUM_CPUS=2\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_NUM_GPUS=0\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_MODEL_DIR=/opt/ml/model\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_MODULE_DIR=/opt/ml/code\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 
SM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{},\"current_host\":\"algo-1-dwbrk\",\"framework_module\":\"sagemaker_pytorch_container.training:main\",\"hosts\":[\"algo-1-dwbrk\"],\"hyperparameters\":{\"epochs\":200},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"sagemaker-tennis-cpu-2020-07-04-15-34-21-570\",\"log_level\":20,\"master_hostname\":\"algo-1-dwbrk\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"/opt/ml/code\",\"module_name\":\"train\",\"network_interface_name\":\"eth0\",\"num_cpus\":2,\"num_gpus\":0,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1-dwbrk\",\"hosts\":[\"algo-1-dwbrk\"]},\"user_entry_point\":\"train.py\"}\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_USER_ARGS=[\"--epochs\",\"200\"]\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m SM_HP_EPOCHS=200\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m PYTHONPATH=/usr/local/bin:/usr/lib/python36.zip:/usr/lib/python3.6:/usr/lib/python3.6/lib-dynload:/usr/local/lib/python3.6/dist-packages:/usr/lib/python3/dist-packages\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Invoking script with the following command:\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m /usr/bin/python -m train --epochs 200\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Found path: /opt/ml/code/Tennis_Linux_NoVis/Tennis.x86_64\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Mono path[0] = '/opt/ml/code/Tennis_Linux_NoVis/Tennis_Data/Managed'\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Mono config path = '/opt/ml/code/Tennis_Linux_NoVis/Tennis_Data/MonoBleedingEdge/etc'\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Preloaded 'libgrpc_csharp_ext.x64.so'\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Unable to preload the following plugins:\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m \tlibgrpc_csharp_ext.x86.so\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies/Unity Environment\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Logging to /root/.config/unity3d/Unity Technologies/Unity Environment/Player.log\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Episode 100 Average Score: 0.00\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m Episode 200 Average Score: 0.00\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 200 training episodes completed.\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 0.00 average score.\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 1.23 minutes of training.\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 212.31 training objective.\n\u001b[36malgo-1-dwbrk_1 |\u001b[0m 2020-07-04 15:35:53,945 sagemaker-containers INFO Reporting training SUCCESS\n\u001b[36mtmpl4u04c4__algo-1-dwbrk_1 exited with code 0\n\u001b[0mAborting on container exit...\n"
]
],
[
[
"## Training on SageMaker\nTraining a model on SageMaker with the Python SDK is done in a way that is similar to the way we trained it locally. This is done by changing our train_instance_type from `local` to one of the [supported EC2 instance types](https://aws.amazon.com/sagemaker/pricing/instance-types/).",
"_____no_output_____"
],
[
"### Locate the ECR image just built and pushed",
"_____no_output_____"
]
],
[
[
"import boto3\n\nclient = boto3.client('sts')\naccount = client.get_caller_identity()['Account']\nregion = boto3.Session().region_name\necr_image = '{}.dkr.ecr.{}.amazonaws.com/sagemaker-tennis-cpu:latest'.format(account, region)\n\nprint(ecr_image)",
"031118886020.dkr.ecr.us-east-1.amazonaws.com/sagemaker-tennis-cpu:latest\n"
]
],
[
[
"### Submit the training job",
"_____no_output_____"
]
],
[
[
"from sagemaker.estimator import Estimator\nestimator = Estimator(role=role,\n train_instance_count=1,\n train_instance_type='ml.m4.xlarge',\n image_name=ecr_image,\n hyperparameters={'epochs': 200})\nestimator.fit()",
"2020-07-04 15:35:55 Starting - Starting the training job...\n2020-07-04 15:35:57 Starting - Launching requested ML instances......\n2020-07-04 15:37:11 Starting - Preparing the instances for training......\n2020-07-04 15:38:19 Downloading - Downloading input data\n2020-07-04 15:38:19 Training - Downloading the training image......\n2020-07-04 15:39:26 Training - Training image download completed. Training in progress..\u001b[34mbash: cannot set terminal process group (-1): Inappropriate ioctl for device\u001b[0m\n\u001b[34mbash: no job control in this shell\u001b[0m\n\u001b[34m2020-07-04 15:39:27,526 sagemaker-containers INFO Imported framework sagemaker_pytorch_container.training\u001b[0m\n\u001b[34m2020-07-04 15:39:27,529 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m2020-07-04 15:39:27,542 sagemaker_pytorch_container.training INFO Block until all host DNS lookups succeed.\u001b[0m\n\u001b[34m2020-07-04 15:39:27,543 sagemaker_pytorch_container.training INFO Invoking user training script.\u001b[0m\n\u001b[34m2020-07-04 15:39:27,544 sagemaker-containers INFO Module train does not provide a setup.py. \u001b[0m\n\u001b[34mGenerating setup.py\u001b[0m\n\u001b[34m2020-07-04 15:39:27,544 sagemaker-containers INFO Generating setup.cfg\u001b[0m\n\u001b[34m2020-07-04 15:39:27,544 sagemaker-containers INFO Generating MANIFEST.in\u001b[0m\n\u001b[34m2020-07-04 15:39:27,544 sagemaker-containers INFO Installing module with the following command:\u001b[0m\n\u001b[34m/usr/bin/python -m pip install . \u001b[0m\n\u001b[34mProcessing /opt/ml/code\u001b[0m\n\u001b[34mBuilding wheels for collected packages: train\n Building wheel for train (setup.py): started\u001b[0m\n\u001b[34m Building wheel for train (setup.py): finished with status 'done'\n Created wheel for train: filename=train-1.0.0-py2.py3-none-any.whl size=45199378 sha256=316b095c6b28ee694397f0ff130ad0e21f4b5fee647bae96d77682cb01176305\n Stored in directory: /tmp/pip-ephem-wheel-cache-58hoe3yc/wheels/95/c1/85/65aaf48b35aba88c6e896d2fd04a4b69f1cee0d81ea32993ca\u001b[0m\n\u001b[34mSuccessfully built train\u001b[0m\n\u001b[34mInstalling collected packages: train\u001b[0m\n\u001b[34mSuccessfully installed train-1.0.0\u001b[0m\n\u001b[34m2020-07-04 15:39:38,772 sagemaker-containers INFO No GPUs detected (normal if no gpus installed)\u001b[0m\n\u001b[34m2020-07-04 15:39:38,785 sagemaker-containers INFO Invoking user script\n\u001b[0m\n\u001b[34mTraining Env:\n\u001b[0m\n\u001b[34m{\n \"additional_framework_parameters\": {},\n \"channel_input_dirs\": {},\n \"current_host\": \"algo-1\",\n \"framework_module\": \"sagemaker_pytorch_container.training:main\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"hyperparameters\": {\n \"epochs\": 200\n },\n \"input_config_dir\": \"/opt/ml/input/config\",\n \"input_data_config\": {},\n \"input_dir\": \"/opt/ml/input\",\n \"is_master\": true,\n \"job_name\": \"sagemaker-tennis-cpu-2020-07-04-15-35-54-764\",\n \"log_level\": 20,\n \"master_hostname\": \"algo-1\",\n \"model_dir\": \"/opt/ml/model\",\n \"module_dir\": \"/opt/ml/code\",\n \"module_name\": \"train\",\n \"network_interface_name\": \"eth0\",\n \"num_cpus\": 4,\n \"num_gpus\": 0,\n \"output_data_dir\": \"/opt/ml/output/data\",\n \"output_dir\": \"/opt/ml/output\",\n \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n \"resource_config\": {\n \"current_host\": \"algo-1\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"network_interface_name\": \"eth0\"\n },\n \"user_entry_point\": 
\"train.py\"\u001b[0m\n\u001b[34m}\n\u001b[0m\n\u001b[34mEnvironment variables:\n\u001b[0m\n\u001b[34mSM_HOSTS=[\"algo-1\"]\u001b[0m\n\u001b[34mSM_NETWORK_INTERFACE_NAME=eth0\u001b[0m\n\u001b[34mSM_HPS={\"epochs\":200}\u001b[0m\n\u001b[34mSM_USER_ENTRY_POINT=train.py\u001b[0m\n\u001b[34mSM_FRAMEWORK_PARAMS={}\u001b[0m\n\u001b[34mSM_RESOURCE_CONFIG={\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"}\u001b[0m\n\u001b[34mSM_INPUT_DATA_CONFIG={}\u001b[0m\n\u001b[34mSM_OUTPUT_DATA_DIR=/opt/ml/output/data\u001b[0m\n\u001b[34mSM_CHANNELS=[]\u001b[0m\n\u001b[34mSM_CURRENT_HOST=algo-1\u001b[0m\n\u001b[34mSM_MODULE_NAME=train\u001b[0m\n\u001b[34mSM_LOG_LEVEL=20\u001b[0m\n\u001b[34mSM_FRAMEWORK_MODULE=sagemaker_pytorch_container.training:main\u001b[0m\n\u001b[34mSM_INPUT_DIR=/opt/ml/input\u001b[0m\n\u001b[34mSM_INPUT_CONFIG_DIR=/opt/ml/input/config\u001b[0m\n\u001b[34mSM_OUTPUT_DIR=/opt/ml/output\u001b[0m\n\u001b[34mSM_NUM_CPUS=4\u001b[0m\n\u001b[34mSM_NUM_GPUS=0\u001b[0m\n\u001b[34mSM_MODEL_DIR=/opt/ml/model\u001b[0m\n\u001b[34mSM_MODULE_DIR=/opt/ml/code\u001b[0m\n\u001b[34mSM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{},\"current_host\":\"algo-1\",\"framework_module\":\"sagemaker_pytorch_container.training:main\",\"hosts\":[\"algo-1\"],\"hyperparameters\":{\"epochs\":200},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"sagemaker-tennis-cpu-2020-07-04-15-35-54-764\",\"log_level\":20,\"master_hostname\":\"algo-1\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"/opt/ml/code\",\"module_name\":\"train\",\"network_interface_name\":\"eth0\",\"num_cpus\":4,\"num_gpus\":0,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"},\"user_entry_point\":\"train.py\"}\u001b[0m\n\u001b[34mSM_USER_ARGS=[\"--epochs\",\"200\"]\u001b[0m\n\u001b[34mSM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\u001b[0m\n\u001b[34mSM_HP_EPOCHS=200\u001b[0m\n\u001b[34mPYTHONPATH=/usr/local/bin:/usr/lib/python36.zip:/usr/lib/python3.6:/usr/lib/python3.6/lib-dynload:/usr/local/lib/python3.6/dist-packages:/usr/lib/python3/dist-packages\n\u001b[0m\n\u001b[34mInvoking script with the following command:\n\u001b[0m\n\u001b[34m/usr/bin/python -m train --epochs 200\n\n\u001b[0m\n\u001b[34mFound path: /opt/ml/code/Tennis_Linux_NoVis/Tennis.x86_64\u001b[0m\n\u001b[34mMono path[0] = '/opt/ml/code/Tennis_Linux_NoVis/Tennis_Data/Managed'\u001b[0m\n\u001b[34mMono config path = '/opt/ml/code/Tennis_Linux_NoVis/Tennis_Data/MonoBleedingEdge/etc'\u001b[0m\n\u001b[34mPreloaded 'libgrpc_csharp_ext.x64.so'\u001b[0m\n\u001b[34mUnable to preload the following plugins:\u001b[0m\n\u001b[34m#011libgrpc_csharp_ext.x86.so\u001b[0m\n\u001b[34mPlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies\u001b[0m\n\u001b[34mPlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies/Unity Environment\u001b[0m\n\u001b[34mLogging to /root/.config/unity3d/Unity Technologies/Unity Environment/Player.log\u001b[0m\n\u001b[34mEpisode 100 Average Score: 0.00\u001b[0m\n\n2020-07-04 15:40:53 Uploading - Uploading generated training model\n2020-07-04 15:40:53 Completed - Training job completed\n\u001b[34mEpisode 200 Average Score: 0.00\u001b[0m\n\u001b[34m200 training episodes 
completed.\u001b[0m\n\u001b[34m0.00 average score.\u001b[0m\n\u001b[34m1.07 minutes of training.\u001b[0m\n\u001b[34m210.69 training objective.\u001b[0m\n\u001b[34m2020-07-04 15:40:46,264 sagemaker-containers INFO Reporting training SUCCESS\u001b[0m\nTraining seconds: 162\nBillable seconds: 162\n"
]
],
[
[
"### Get the results",
"_____no_output_____"
],
[
"#### Get the bucket name",
"_____no_output_____"
]
],
[
[
"from sagemaker.session import Session\n\nsagemaker_session = Session()\nbucket = sagemaker_session.default_bucket()\njob_name = estimator._current_job_name\nprint(bucket)\nprint(job_name)",
"sagemaker-us-east-1-031118886020\nsagemaker-tennis-cpu-2020-07-04-15-35-54-764\n"
]
],
[
[
"#### Copy and unpack the result archive",
"_____no_output_____"
]
],
[
[
"import shutil\n\ns3 = boto3.resource('s3')\nkey = '{}/output/output.tar.gz'.format(estimator._current_job_name)\nprint(key)\ns3.Bucket(bucket).download_file(key, 'output.tar.gz')\nshutil.unpack_archive('output.tar.gz')",
"sagemaker-tennis-cpu-2020-07-04-15-35-54-764/output/output.tar.gz\n"
],
[
"from IPython.display import Image\nImage(filename='scores.png') ",
"_____no_output_____"
]
],
[
[
"As expected the above image isn't very interesting since we only ran for 200 iterations. Here is the same result when we ran for 2000.\n",
"_____no_output_____"
],
[
"# Reference\n- [SageMaker Example: Extending PyTorch Container](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/pytorch_extending_our_containers)\n- [How Amazon SageMaker interacts with your Docker container for training](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html)\n- [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk)\n- [Dockerfile](https://docs.docker.com/engine/reference/builder/)\n- [scikit-bring-your-own](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb)\n- [SageMaker PyTorch container](https://github.com/aws/sagemaker-pytorch-container)\n- [SageMaker Instance types](https://aws.amazon.com/sagemaker/pricing/instance-types/)\n- [SageMaker Instance prices](https://aws.amazon.com/sagemaker/pricing/)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a121fb4d4d6a80d03a70e84057202f2bc3b393d
| 3,874 |
ipynb
|
Jupyter Notebook
|
python/quiz game.ipynb
|
Nnadiukwu-Miracle/miracleCSC101
|
9a20f57f5164ce75a0ba7f84f3f3e766d093f16b
|
[
"MIT"
] | null | null | null |
python/quiz game.ipynb
|
Nnadiukwu-Miracle/miracleCSC101
|
9a20f57f5164ce75a0ba7f84f3f3e766d093f16b
|
[
"MIT"
] | null | null | null |
python/quiz game.ipynb
|
Nnadiukwu-Miracle/miracleCSC101
|
9a20f57f5164ce75a0ba7f84f3f3e766d093f16b
|
[
"MIT"
] | null | null | null | 26.717241 | 112 | 0.443986 |
[
[
[
"### list of questions\nquestions = {\"Find the square root of 36\":6, \n \"What is 983 + 274\" :1257, \n \"What is the product of 60 and 59\": 3540, \n \"What is 6728 / 8\": 841, \n \"Solve -6h + 18 = -h -7\": 5, \n \"Solve 2x + 6 = 7x - 4\": 2, \n \"Find the next term: 48, 42, 36, 30, ?\": 24, \n \"Find the cube root of 1\": 1, \n \"How many sides does an enneadecagon have?\": 19,\n \"How many sides does a tetracontagon have?\": 40\n }\nprint(\"There are 10 questions, you have 3 chancs to answer each question, each question has 1 point\")\nprint(\"Do you want to play. yes / no\")\nif str(input(\"\")) == \"yes\":\n score = 0\n question = 0\n for x in questions:\n question += 1\n chances = 3\n print(\"Question\", question)\n while chances > 0:\n print(x)\n answer = int(input(\"Enter your answer: \"))\n if answer == questions[x]:\n print(\"correct\")\n score += 1\n break\n else:\n print(\"wrong\")\n chances -= 1\n print(\"You have\", chances, \"chance(s) left\")\n if chances == 0:\n print(\"The correct answer is\", questions[x])\n if score < 4:\n print(\"Your score is\", score)\n print(\"Better luck next time\")\n elif 4 <= score <= 6:\n print(\"Your score is\", score)\n print(\"Good try, you can do better\")\n elif 7 <= score <= 9:\n print(\"Your score is\", score)\n print(\"Great, aim for a 10 next time\")\n else:\n print(\"10 out of 10, You're a genius\")\nelse:\n print(\"You quit the game\")\n\n\n",
"There are 10 questions, you have 3 chancs to answer each question, each question has 1 point\nDo you want to play. yes / no\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a122f78381651f93d9feabb84bab64590257e45
| 21,581 |
ipynb
|
Jupyter Notebook
|
bert/test-topics/bert-multilanguage-topics.ipynb
|
leowmjw/Malaya
|
33f39835eca08c238d2dd68aeca3b09c5d0a45ab
|
[
"MIT"
] | null | null | null |
bert/test-topics/bert-multilanguage-topics.ipynb
|
leowmjw/Malaya
|
33f39835eca08c238d2dd68aeca3b09c5d0a45ab
|
[
"MIT"
] | null | null | null |
bert/test-topics/bert-multilanguage-topics.ipynb
|
leowmjw/Malaya
|
33f39835eca08c238d2dd68aeca3b09c5d0a45ab
|
[
"MIT"
] | null | null | null | 33.562986 | 244 | 0.518558 |
[
[
[
"# !pip3 install bert-tensorflow --user\n# !wget https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip\n# !unzip multi_cased_L-12_H-768_A-12.zip",
"_____no_output_____"
],
[
"import bert\nfrom bert import run_classifier\nfrom bert import optimization\nfrom bert import tokenization\nfrom bert import modeling\nimport numpy as np\nimport tensorflow as tf",
"_____no_output_____"
],
[
"from tqdm import tqdm\nimport json",
"_____no_output_____"
],
[
"with open('selected-topics.json') as fopen:\n x = json.load(fopen)\ntexts = x['X']\nlabels = x['Y']",
"_____no_output_____"
],
[
"MAX_SEQ_LENGTH = 100",
"_____no_output_____"
],
[
"BERT_VOCAB = 'multi_cased_L-12_H-768_A-12/vocab.txt'\nBERT_INIT_CHKPNT = 'multi_cased_L-12_H-768_A-12/bert_model.ckpt'\nBERT_CONFIG = 'multi_cased_L-12_H-768_A-12/bert_config.json'\n\ntokenization.validate_case_matches_checkpoint(False, '')\ntokenizer = tokenization.FullTokenizer(\n vocab_file=BERT_VOCAB, do_lower_case=False)",
"_____no_output_____"
],
[
"tokenizer.tokenize(texts[1])",
"_____no_output_____"
],
[
"input_ids, input_masks, segment_ids = [], [], []\n\nfor text in tqdm(texts):\n tokens_a = tokenizer.tokenize(text)\n if len(tokens_a) > MAX_SEQ_LENGTH - 2:\n tokens_a = tokens_a[:(MAX_SEQ_LENGTH - 2)]\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_id = [0] * len(tokens)\n input_id = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_id)\n padding = [0] * (MAX_SEQ_LENGTH - len(input_id))\n input_id += padding\n input_mask += padding\n segment_id += padding\n \n input_ids.append(input_id)\n input_masks.append(input_mask)\n segment_ids.append(segment_id)",
"100%|██████████| 65000/65000 [00:21<00:00, 3028.02it/s]\n"
],
[
"bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)",
"_____no_output_____"
],
[
"epoch = 10\nbatch_size = 60\nwarmup_proportion = 0.1\nnum_train_steps = int(len(texts) / batch_size * epoch)\nnum_warmup_steps = int(num_train_steps * warmup_proportion)",
"_____no_output_____"
],
[
"class Model:\n def __init__(\n self,\n dimension_output,\n learning_rate = 2e-5,\n ):\n self.X = tf.placeholder(tf.int32, [None, None])\n self.segment_ids = tf.placeholder(tf.int32, [None, None])\n self.input_masks = tf.placeholder(tf.int32, [None, None])\n self.Y = tf.placeholder(tf.int32, [None])\n \n model = modeling.BertModel(\n config=bert_config,\n is_training=True,\n input_ids=self.X,\n input_mask=self.input_masks,\n token_type_ids=self.segment_ids,\n use_one_hot_embeddings=False)\n \n output_layer = model.get_pooled_output()\n self.logits = tf.layers.dense(output_layer, dimension_output)\n self.logits = tf.identity(self.logits, name = 'logits')\n \n self.cost = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits = self.logits, labels = self.Y\n )\n )\n \n self.optimizer = optimization.create_optimizer(self.cost, learning_rate, \n num_train_steps, num_warmup_steps, False)\n correct_pred = tf.equal(\n tf.argmax(self.logits, 1, output_type = tf.int32), self.Y\n )\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))",
"_____no_output_____"
],
[
"unique_labels = np.unique(labels)",
"_____no_output_____"
],
[
"dimension_output = len(unique_labels)\nlearning_rate = 1e-5\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Model(\n dimension_output,\n learning_rate\n)\n\nsess.run(tf.global_variables_initializer())\nvar_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'bert')\nsaver = tf.train.Saver(var_list = var_lists)\nsaver.restore(sess, BERT_INIT_CHKPNT)",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n\nWARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\nFor more information, please see:\n * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n * https://github.com/tensorflow/addons\nIf you depend on functionality not listed there, please file an issue.\n\nWARNING:tensorflow:From /home/jupyter/.local/lib/python3.6/site-packages/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\nWARNING:tensorflow:From /home/jupyter/.local/lib/python3.6/site-packages/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.dense instead.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/learning_rate_decay_v2.py:321: div (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nDeprecated in favor of operator or tf.math.divide.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse standard file APIs to check for files with this prefix.\nINFO:tensorflow:Restoring parameters from multi_cased_L-12_H-768_A-12/bert_model.ckpt\n"
],
[
"from sklearn.model_selection import train_test_split\n\ntrain_input_ids, test_input_ids, train_input_masks, test_input_masks, train_segment_ids, test_segment_ids, train_Y, test_Y = train_test_split(\n input_ids, input_masks, segment_ids, labels, test_size = 0.2\n)",
"_____no_output_____"
],
[
"from tqdm import tqdm\nimport time\n\nEARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 3, 0, 0, 0\n\nwhile True:\n lasttime = time.time()\n if CURRENT_CHECKPOINT == EARLY_STOPPING:\n print('break epoch:%d\\n' % (EPOCH))\n break\n\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n pbar = tqdm(\n range(0, len(train_input_ids), batch_size), desc = 'train minibatch loop'\n )\n for i in pbar:\n index = min(i + batch_size, len(train_input_ids))\n batch_x = train_input_ids[i: index]\n batch_masks = train_input_masks[i: index]\n batch_segment = train_segment_ids[i: index]\n batch_y = train_Y[i: index]\n acc, cost, _ = sess.run(\n [model.accuracy, model.cost, model.optimizer],\n feed_dict = {\n model.Y: batch_y,\n model.X: batch_x,\n model.segment_ids: batch_segment,\n model.input_masks: batch_masks\n },\n )\n assert not np.isnan(cost)\n train_loss += cost\n train_acc += acc\n pbar.set_postfix(cost = cost, accuracy = acc)\n \n pbar = tqdm(range(0, len(test_input_ids), batch_size), desc = 'test minibatch loop')\n for i in pbar:\n index = min(i + batch_size, len(test_input_ids))\n batch_x = test_input_ids[i: index]\n batch_masks = test_input_masks[i: index]\n batch_segment = test_segment_ids[i: index]\n batch_y = test_Y[i: index]\n acc, cost = sess.run(\n [model.accuracy, model.cost],\n feed_dict = {\n model.Y: batch_y,\n model.X: batch_x,\n model.segment_ids: batch_segment,\n model.input_masks: batch_masks\n },\n )\n test_loss += cost\n test_acc += acc\n pbar.set_postfix(cost = cost, accuracy = acc)\n\n train_loss /= len(train_input_ids) / batch_size\n train_acc /= len(train_input_ids) / batch_size\n test_loss /= len(test_input_ids) / batch_size\n test_acc /= len(test_input_ids) / batch_size\n\n if test_acc > CURRENT_ACC:\n print(\n 'epoch: %d, pass acc: %f, current acc: %f'\n % (EPOCH, CURRENT_ACC, test_acc)\n )\n CURRENT_ACC = test_acc\n CURRENT_CHECKPOINT = 0\n else:\n CURRENT_CHECKPOINT += 1\n \n print('time taken:', time.time() - lasttime)\n print(\n 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (EPOCH, train_loss, train_acc, test_loss, test_acc)\n )\n EPOCH += 1",
"train minibatch loop: 100%|██████████| 867/867 [05:59<00:00, 2.62it/s, accuracy=0.975, cost=0.133] \ntest minibatch loop: 100%|██████████| 217/217 [00:30<00:00, 7.16it/s, accuracy=0.95, cost=0.24] \ntrain minibatch loop: 0%| | 0/867 [00:00<?, ?it/s]"
],
[
"real_Y, predict_Y = [], []\n\npbar = tqdm(\n range(0, len(test_input_ids), batch_size), desc = 'validation minibatch loop'\n)\nfor i in pbar:\n index = min(i + batch_size, len(test_input_ids))\n batch_x = test_input_ids[i: index]\n batch_masks = test_input_masks[i: index]\n batch_segment = test_segment_ids[i: index]\n batch_y = test_Y[i: index]\n predict_Y += np.argmax(sess.run(model.logits,\n feed_dict = {\n model.Y: batch_y,\n model.X: batch_x,\n model.segment_ids: batch_segment,\n model.input_masks: batch_masks\n },\n ), 1, ).tolist()\n real_Y += batch_y",
"validation minibatch loop: 100%|██████████| 217/217 [00:30<00:00, 7.19it/s]\n"
],
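[
"# A minimal added sketch (assumes real_Y and predict_Y were filled by the loop above):\n# overall validation accuracy, complementing the per-class report below.\nimport numpy as np\n\nprint('validation accuracy: %.5f' % np.mean(np.array(real_Y) == np.array(predict_Y)))",
"_____no_output_____"
],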
[
"labels = ['kesihatan',\n 'kes lemas',\n 'kes pecah rumah',\n 'kes tangkap basah',\n 'kewangan dan perniagaan',\n 'kos sara hidup',\n 'suruhanjaya pilihan raya malaysia',\n 'tentera malaysia',\n 'nilai ringgit jatuh',\n 'kes buang bayi',\n 'isu kemiskinan',\n 'infrastruktur',\n 'harga minyak']",
"_____no_output_____"
],
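[
"# Optional added sketch (not part of the original notebook): a confusion matrix\n# over the same predictions, with rows/columns in the label order defined above.\nfrom sklearn.metrics import confusion_matrix\n\nprint(confusion_matrix(real_Y, predict_Y))",
"_____no_output_____"
],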
[
"from sklearn import metrics\n\nprint(\n metrics.classification_report(\n real_Y, predict_Y, target_names = labels, digits=5\n )\n)",
" precision recall f1-score support\n\n kesihatan 0.97542 0.98805 0.98169 1004\n kes lemas 0.98827 0.97068 0.97940 955\n kes pecah rumah 0.94521 0.95549 0.95032 1011\n kes tangkap basah 0.95064 0.97568 0.96300 987\n kewangan dan perniagaan 0.94614 0.96178 0.95389 968\n kos sara hidup 0.95679 0.94801 0.95238 981\nsuruhanjaya pilihan raya malaysia 0.93681 0.93214 0.93447 1002\n tentera malaysia 0.97431 0.96422 0.96924 1062\n nilai ringgit jatuh 0.91910 0.97505 0.94625 1002\n kes buang bayi 0.95591 0.95400 0.95495 1000\n isu kemiskinan 0.94271 0.90541 0.92368 1036\n infrastruktur 0.98000 0.93531 0.95714 943\n harga minyak 0.94952 0.95043 0.94998 1049\n\n avg / total 0.95527 0.95500 0.95497 13000\n\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a12356e1341d4c4f5860938b1721b324767c6f6
| 22,271 |
ipynb
|
Jupyter Notebook
|
JuliaWorkshop/Part1-BasicJulia/13. Optional - Factorizations and other fun.ipynb
|
IBM/precision-aquaculture
|
deab48f1532455dbb63995cd2d9e185ab0b6b32e
|
[
"Apache-2.0"
] | 1 |
2021-07-17T17:55:32.000Z
|
2021-07-17T17:55:32.000Z
|
JuliaWorkshop/Part1-BasicJulia/13. Optional - Factorizations and other fun.ipynb
|
IBM/precision-aquaculture
|
deab48f1532455dbb63995cd2d9e185ab0b6b32e
|
[
"Apache-2.0"
] | 6 |
2021-06-22T15:21:10.000Z
|
2021-07-29T12:46:31.000Z
|
JuliaWorkshop/Part1-BasicJulia/13. Optional - Factorizations and other fun.ipynb
|
IBM/precision-aquaculture
|
deab48f1532455dbb63995cd2d9e185ab0b6b32e
|
[
"Apache-2.0"
] | null | null | null | 21.476374 | 286 | 0.490818 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a124f6d03897b1364eb9dedffae3c4b224d73ca
| 253,824 |
ipynb
|
Jupyter Notebook
|
scikit/Chapter 7/Text classification.ipynb
|
KarthikKothareddy/Data-Science-Practice
|
2c12128e29f5e3455882db6c83d1e1ffafa7126f
|
[
"Apache-2.0"
] | 2 |
2017-01-06T23:51:52.000Z
|
2018-12-26T08:42:19.000Z
|
scikit/Chapter 7/Text classification.ipynb
|
KarthikKothareddy/Data-Science-Practice
|
2c12128e29f5e3455882db6c83d1e1ffafa7126f
|
[
"Apache-2.0"
] | null | null | null |
scikit/Chapter 7/Text classification.ipynb
|
KarthikKothareddy/Data-Science-Practice
|
2c12128e29f5e3455882db6c83d1e1ffafa7126f
|
[
"Apache-2.0"
] | 5 |
2015-12-20T02:47:03.000Z
|
2018-12-26T08:42:22.000Z
| 298.968198 | 49,942 | 0.920441 |
[
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"# Text Classification of Movie Reviews",
"_____no_output_____"
]
],
[
[
"from helpers import Timer",
"_____no_output_____"
],
[
"from sklearn.datasets import load_files\n\nreviews_train = load_files(\"aclImdb/train/\")\ntext_train, y_train = reviews_train.data, reviews_train.target",
"_____no_output_____"
],
[
"print(\"Number of documents in training data: %d\" % len(text_train))\nprint(np.bincount(y_train))",
"Number of documents in training data: 25000\n[12500 12500]\n"
],
[
"reviews_test = load_files(\"aclImdb/test/\")\ntext_test, y_test = reviews_test.data, reviews_test.target\nprint(\"Number of documents in test data: %d\" % len(text_test))\nprint(np.bincount(y_test))",
"Number of documents in test data: 25000\n[12500 12500]\n"
],
[
"print(text_train[1])",
"Words can't describe how bad this movie is. I can't explain it by writing only. You have too see it for yourself to get at grip of how horrible a movie really can be. Not that I recommend you to do that. There are so many clichés, mistakes (and all other negative things you can imagine) here that will just make you cry. To start with the technical first, there are a LOT of mistakes regarding the airplane. I won't list them here, but just mention the coloring of the plane. They didn't even manage to show an airliner in the colors of a fictional airline, but instead used a 747 painted in the original Boeing livery. Very bad. The plot is stupid and has been done many times before, only much, much better. There are so many ridiculous moments here that i lost count of it really early. Also, I was on the bad guys' side all the time in the movie, because the good guys were so stupid. \"Executive Decision\" should without a doubt be you're choice over this one, even the \"Turbulence\"-movies are better. In fact, every other movie in the world is better than this one.\n"
],
[
"print(y_train[1])",
"0\n"
]
],
[
[
"### Bag of words reminder:",
"_____no_output_____"
],
[
"<img src=\"bag_of_words.svg\" width=80%>",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer()\ncv.fit(text_train)\n\nlen(cv.vocabulary_)",
"_____no_output_____"
],
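[
"# Tiny added illustration (a sketch, not part of the original analysis):\n# CountVectorizer maps each document to token counts over the fitted vocabulary.\ntoy = CountVectorizer()\nbag = toy.fit_transform([\"the movie was good\", \"the movie was bad\"])\nprint(toy.get_feature_names())\nprint(bag.toarray())",
"_____no_output_____"
],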
[
"print(cv.get_feature_names()[:50])\nprint(cv.get_feature_names()[50000:50050])",
"[u'00', u'000', u'0000000000001', u'00001', u'00015', u'000s', u'001', u'003830', u'006', u'007', u'0079', u'0080', u'0083', u'0093638', u'00am', u'00pm', u'00s', u'01', u'01pm', u'02', u'020410', u'029', u'03', u'04', u'041', u'05', u'050', u'06', u'06th', u'07', u'08', u'087', u'089', u'08th', u'09', u'0f', u'0ne', u'0r', u'0s', u'10', u'100', u'1000', u'1000000', u'10000000000000', u'1000lb', u'1000s', u'1001', u'100b', u'100k', u'100m']\n[u'pincher', u'pinchers', u'pinches', u'pinching', u'pinchot', u'pinciotti', u'pine', u'pineal', u'pineapple', u'pineapples', u'pines', u'pinet', u'pinetrees', u'pineyro', u'pinfall', u'pinfold', u'ping', u'pingo', u'pinhead', u'pinheads', u'pinho', u'pining', u'pinjar', u'pink', u'pinkerton', u'pinkett', u'pinkie', u'pinkins', u'pinkish', u'pinko', u'pinks', u'pinku', u'pinkus', u'pinky', u'pinnacle', u'pinnacles', u'pinned', u'pinning', u'pinnings', u'pinnochio', u'pinnocioesque', u'pino', u'pinocchio', u'pinochet', u'pinochets', u'pinoy', u'pinpoint', u'pinpoints', u'pins', u'pinsent']\n"
],
[
"X_train = cv.transform(text_train)\nX_train",
"_____no_output_____"
],
[
"print(text_train[19726])",
"This movie is terrible but it has some good effects.\n"
],
[
"X_train[19726].nonzero()[1]",
"_____no_output_____"
],
[
"X_test = cv.transform(text_test)",
"_____no_output_____"
],
[
"from sklearn.svm import LinearSVC\n\nsvm = LinearSVC()\n\nwith Timer():\n svm.fit(X_train, y_train)",
"Elapsed: 7s\n"
],
[
"svm.score(X_train, y_train)",
"_____no_output_____"
],
[
"svm.score(X_test, y_test)",
"_____no_output_____"
],
[
"def visualize_coefficients(classifier, feature_names, n_top_features=25):\n # get coefficients with large absolute values \n coef = classifier.coef_.ravel()\n positive_coefficients = np.argsort(coef)[-n_top_features:]\n negative_coefficients = np.argsort(coef)[:n_top_features]\n interesting_coefficients = np.hstack([negative_coefficients, positive_coefficients])\n # plot them\n plt.figure(figsize=(15, 5))\n colors = [\"red\" if c < 0 else \"blue\" for c in coef[interesting_coefficients]]\n plt.bar(np.arange(2 * n_top_features), coef[interesting_coefficients], color=colors)\n feature_names = np.array(feature_names)\n plt.xticks(np.arange(1, 1 + 2 * n_top_features), feature_names[interesting_coefficients], rotation=60, ha=\"right\");\n",
"_____no_output_____"
],
[
"visualize_coefficients(svm, cv.get_feature_names())",
"_____no_output_____"
],
[
"from sklearn.pipeline import make_pipeline\n\ntext_pipe = make_pipeline(CountVectorizer(), LinearSVC())\nwith Timer():\n text_pipe.fit(text_train, y_train)\ntext_pipe.score(text_test, y_test)",
"Elapsed: 12s\n"
],
[
"from sklearn.grid_search import GridSearchCV\n\nparam_grid = {'linearsvc__C': np.logspace(-5, 0, 6)}\ngrid = GridSearchCV(text_pipe, param_grid, cv=5)\nwith Timer():\n grid.fit(text_train, y_train);",
"Elapsed: 4m 21s\n"
],
[
"from figures import plot_grid_1d\nplot_grid_1d(grid)\n\ngrid.best_params_",
"_____no_output_____"
],
[
"visualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],\n grid.best_estimator_.named_steps['countvectorizer'].get_feature_names())",
"_____no_output_____"
],
[
"grid.best_score_",
"_____no_output_____"
],
[
"grid.score(text_test, y_test)",
"_____no_output_____"
]
],
[
[
"# Text Classification continuation.",
"_____no_output_____"
],
[
"## TfidfVectorizer",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import TfidfVectorizer\ntfidf_pipe = make_pipeline(TfidfVectorizer(), LinearSVC())\n\nparam_grid = {'linearsvc__C': np.logspace(-3, 2, 6)}\ngrid = GridSearchCV(tfidf_pipe, param_grid, cv=5)\nwith Timer():\n grid.fit(text_train, y_train)\nplot_grid_1d(grid)",
"Elapsed: 3m 52s\n"
],
[
"visualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],\n grid.best_estimator_.named_steps['tfidfvectorizer'].get_feature_names())",
"_____no_output_____"
],
[
"grid.best_score_",
"_____no_output_____"
],
[
"grid.score(text_test, y_test)",
"_____no_output_____"
]
],
[
[
"# N-Grams",
"_____no_output_____"
]
],
[
[
"text_pipe = make_pipeline(CountVectorizer(), LinearSVC())\n\nparam_grid = {'linearsvc__C': np.logspace(-3, 2, 6),\n \"countvectorizer__ngram_range\": [(1, 1), (1, 2), (1, 3)]}\n\ngrid = GridSearchCV(text_pipe, param_grid, cv=5)\n\nwith Timer():\n grid.fit(text_train, y_train)",
"Elapsed: 81m 36s\n"
],
[
"scores = np.array([score.mean_validation_score for score in grid.grid_scores_]).reshape(3, -1)\nplt.matshow(scores)\nplt.ylabel(\"n-gram range\")\nplt.yticks(range(3), param_grid[\"countvectorizer__ngram_range\"])\nplt.xlabel(\"C\")\nplt.xticks(range(6), param_grid[\"linearsvc__C\"]);\nplt.colorbar()",
"_____no_output_____"
],
[
"grid.best_params_",
"_____no_output_____"
],
[
"visualize_coefficients(grid.best_estimator_.named_steps['linearsvc'],\n grid.best_estimator_.named_steps['countvectorizer'].get_feature_names())",
"_____no_output_____"
],
[
"grid.score(text_test, y_test)",
"_____no_output_____"
]
],
[
[
"## Look at the Natural Laguage Tool Kit (NLTK)",
"_____no_output_____"
]
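,
[
"# A minimal NLTK teaser (an added sketch; assumes nltk is installed and the\n# 'punkt' tokenizer data has been fetched via nltk.download('punkt')).\nimport nltk\n\nprint(nltk.word_tokenize(\"This movie is terrible but it has some good effects.\"))",
"_____no_output_____"
]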
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a1255103a2d3eaf35e9368c2285dcdca85d9d79
| 4,586 |
ipynb
|
Jupyter Notebook
|
ipynb/Burma.ipynb
|
skirienko/oscovida.github.io
|
eda5412d02365a8a000239be5480512c53bee8c2
|
[
"CC-BY-4.0"
] | null | null | null |
ipynb/Burma.ipynb
|
skirienko/oscovida.github.io
|
eda5412d02365a8a000239be5480512c53bee8c2
|
[
"CC-BY-4.0"
] | null | null | null |
ipynb/Burma.ipynb
|
skirienko/oscovida.github.io
|
eda5412d02365a8a000239be5480512c53bee8c2
|
[
"CC-BY-4.0"
] | null | null | null | 28.134969 | 159 | 0.505451 |
[
[
[
"# Burma\n\n* Homepage of project: https://oscovida.github.io\n* Plots are explained at http://oscovida.github.io/plots.html\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Burma.ipynb)",
"_____no_output_____"
]
],
[
[
"import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")",
"_____no_output_____"
],
[
"%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *",
"_____no_output_____"
],
[
"overview(\"Burma\", weeks=5);",
"_____no_output_____"
],
[
"overview(\"Burma\");",
"_____no_output_____"
],
[
"compare_plot(\"Burma\", normalise=True);\n",
"_____no_output_____"
],
[
"# load the data\ncases, deaths = get_country_data(\"Burma\")\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 500 rows\npd.set_option(\"max_rows\", 500)\n\n# display the table\ntable",
"_____no_output_____"
]
],
[
[
"# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Burma.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook",
"_____no_output_____"
],
[
"# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------",
"_____no_output_____"
]
],
[
[
"print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")",
"_____no_output_____"
],
[
"# to force a fresh download of data, run \"clear_cache()\"",
"_____no_output_____"
],
[
"print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a1258767f41dc8ef755cd6cd6fb4aecb34c0890
| 67,177 |
ipynb
|
Jupyter Notebook
|
Week 5/matplotlib Exercise Notebook.ipynb
|
innat-2k14/Data-Science-In-Python
|
3d1731d2cd61c5dfd33776d830997c5c7d8841d1
|
[
"Apache-2.0"
] | 11 |
2018-04-28T20:49:10.000Z
|
2019-01-03T07:45:16.000Z
|
Week 5/matplotlib Exercise Notebook.ipynb
|
innat-2k14/Data-Science-In-Python
|
3d1731d2cd61c5dfd33776d830997c5c7d8841d1
|
[
"Apache-2.0"
] | 2 |
2021-03-19T08:14:38.000Z
|
2021-06-08T20:34:36.000Z
|
Week 5/matplotlib Exercise Notebook.ipynb
|
innat-2k14/Data-Science-In-Python
|
3d1731d2cd61c5dfd33776d830997c5c7d8841d1
|
[
"Apache-2.0"
] | 7 |
2019-10-26T13:53:00.000Z
|
2020-12-02T06:03:39.000Z
| 90.41319 | 16,936 | 0.783661 |
[
[
[
"<p style=\"font-family: Arial; font-size:3.75vw;color:purple; font-style:bold\"><br>\nmatplotlib Exercise Notebook\n</p><br>",
"_____no_output_____"
],
[
"# Exercise Notebook Instructions\n\n### 1. Important: Only modify the cells which instruct you to modify them - leave \"do not modify\" cells alone. \n\nThe code which tests your responses assumes you have run the startup/read-only code exactly.\n\n### 2. Work through the notebook in order.\n\nSome of the steps depend on previous, so you'll want to move through the notebook in order.\n\n### 3. It is okay to use numpy libraries.\n\nYou may find some of these questions are fairly straightforward to answer using built-in numpy functions. That's totally okay - part of the point of these exercises is to familiarize you with the commonly used numpy functions.\n\n### 4. Seek help if stuck\n\nIf you get stuck, don't worry! You can either review the videos/notebooks from this week, ask in the course forums, or look to the solutions for the correct answer. BUT, be careful about looking to the solutions too quickly. Struggling to get the right answer is an important part of the learning process.",
"_____no_output_____"
]
],
[
[
"# DO NOT MODIFY\n\n# import appropriate libraries\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sqlite3\nimport pandas as pd \n%matplotlib inline",
"_____no_output_____"
],
[
"# DO NOT MODIFY\n\n# we will use this dataset for some portions of this exercise.\n# source: https://www.kaggle.com/hugomathien/soccer\n\ndef get_data():\n cnx = sqlite3.connect('database.sqlite')\n df = pd.read_sql_query(\"SELECT * FROM Player_Attributes\", cnx)\n return df\n\ndf = get_data()",
"_____no_output_____"
],
[
"#DO NOT MODIFY\n\n# Let's see what is in our dataset\ndf.describe()",
"_____no_output_____"
]
],
[
[
"<p style=\"font-family: Arial; font-size:2.75vw;color:purple; font-style:bold\"><br>\n\nExercise 1: Line Plot<br><br></p>\n\n\nIn the cell below, modify the function to plot x vs y, where x and y \nare column names of dataframe (df) which is also entered as input to the function. The function should\n\n- First sort the dataframe by the column 'x'\n- Take the first 50 rows for plotting (discard the remaining)\n- Provide a title\n- Label x and y axes",
"_____no_output_____"
]
],
[
[
"# modify this cell\n\ndef line_plot(df, x, y):\n ### BEGIN SOLUTION\n\n pass\n \n ### END SOLUTION",
"_____no_output_____"
],
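[
"# One possible solution (an added sketch; the graded stub above is left blank on purpose).\ndef line_plot_example(df, x, y):\n    data = df.sort_values(by=x).head(50)  # sort by column x, keep the first 50 rows\n    plt.plot(data[x], data[y])\n    plt.title('%s vs %s' % (x, y))  # provide a title\n    plt.xlabel(x)  # label the x axis\n    plt.ylabel(y)  # label the y axis",
"_____no_output_____"
],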
[
"# DO NOT MODIFY\n\n# your function should give a plot similar to the following:\nline_plot(df, 'potential', 'overall_rating')",
"_____no_output_____"
]
],
[
[
"Your solution to Exercise 1 should look like this:\n",
"_____no_output_____"
],
[
"<p style=\"font-family: Arial; font-size:2.75vw;color:purple; font-style:bold\"><br>\n\nExercise 2: Histogram <br><br></p>\n\n\nIn the cell below, modify the function to plot a histogram. The function should take an input parameter X which is a column name of the dataframe df, also passed to the function. Be sure to drop NULL values before you plot the histogram.",
"_____no_output_____"
]
],
[
[
"# modify this cell\n\ndef plot_histogram(df, X):\n ### BEGIN SOLUTION\n\n \n ### END SOLUTION",
"_____no_output_____"
],
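[
"# One possible solution (an added sketch; the graded stub above is left blank on purpose).\ndef plot_histogram_example(df, X):\n    plt.hist(df[X].dropna())  # drop NULL values before plotting\n    plt.title('Histogram of %s' % X)\n    plt.xlabel(X)\n    plt.ylabel('count')",
"_____no_output_____"
],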
[
"# DO NOT MODIFY\n\n# your plot should look similar to the following:\nplot_histogram(df, 'overall_rating')",
"_____no_output_____"
]
],
[
[
"Your solution for Exercise 2 should look like this:\n",
"_____no_output_____"
],
[
"<p style=\"font-family: Arial; font-size:2.75vw;color:purple; font-style:bold\"><br>\n\nExercise 3: Scatter Plot<br><br></p>\n\nIn the cell below, modify the function to plot...",
"_____no_output_____"
]
],
[
[
"# modify this cell\n\ndef plot_scatter(df, x, y):\n ### BEGIN SOLUTION\n\n \n \n \n \n ### END SOLUTION",
"_____no_output_____"
],
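[
"# One possible solution (an added sketch; the graded stub above is left blank on purpose).\ndef plot_scatter_example(df, x, y):\n    plt.scatter(df[x], df[y])\n    plt.title('%s vs %s' % (x, y))\n    plt.xlabel(x)\n    plt.ylabel(y)",
"_____no_output_____"
],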
[
"# DO NOT MODIFY\n\n# your plot should look similar to the following:\nplot_scatter(df, 'gk_diving', 'gk_handling')",
"_____no_output_____"
]
],
[
[
"Your solution to Excercise 3 should look like this:\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a125f915dcc44cd8d0c1f94393786d67d02e162
| 3,457 |
ipynb
|
Jupyter Notebook
|
chapter-1/section4.ipynb
|
Enidsky/latex-cookbook
|
69b591e4712fcac0955431587dce23663f2ab836
|
[
"MIT"
] | 1 |
2022-03-31T03:16:55.000Z
|
2022-03-31T03:16:55.000Z
|
chapter-1/section4.ipynb
|
liudmmmmm/latex-cookbook
|
69b591e4712fcac0955431587dce23663f2ab836
|
[
"MIT"
] | null | null | null |
chapter-1/section4.ipynb
|
liudmmmmm/latex-cookbook
|
69b591e4712fcac0955431587dce23663f2ab836
|
[
"MIT"
] | null | null | null | 36.776596 | 287 | 0.653457 |
[
[
[
"## 1.4 LaTeX问答社区\n\n在LaTeX刚被推出的那个年代,使用手册、教程、帮助文档等远没有今天这么丰富,获取资源的渠道也没有今天这么便捷和多元化。在互联网技术高度发达的今天,我们能通过一个浏览器访问到各种相关学习素材,遇到代码报错,也能在一些专业问答社区找到最佳解决方案。毫无疑问,对于今天的我们来说,如何利用好互联网问答社区是熟练掌握一门计算机程序语言的重要手段。\n\n### 1.4.1 问答社区的介绍\n\n对于从事计算机程序语言相关的技术人员来说,专业的技术问答社区往往是不可多得的资源,它能帮助技术人员提升个人编程能力、学习与掌握新技术,并解决一些在实际工作中遇到的代码报错等问题。Stack Exchange是一个著名的计算机程序语言技术问答社区,涵盖大量计算机程序语言相关的技术帖子以及优质回答。\n\nStack Exchange技术问答社区按计算机程序语言类型进行划分,我们所关心的LaTeX相关的技术问题通常被分配在TeX Stack Exchange社区(网址为[https://tex.stackexchange.com/](https://tex.stackexchange.com/))。截至目前,TeX Stack Exchange涉及到的问题与帖子包括TeX、LaTeX以及其他排版系统,其中多数与LaTeX相关,该问答社区支持内容搜索,根据需要,可在首页显示当前热门问题、当月高频访问问题等。\n\n> 网址为[https://stackexchange.com/](https://stackexchange.com/),它与Stack Overflow问答社区一样在全球范围内拥有广泛的用户群体。\n\n<p align=\"center\">\n<img align=\"middle\" src=\"graphics/tex_stackexchange_webpage.png\" width=\"900\" />\n</p>\n\n<center><b>图1-4-1</b> https://tex.stackexchange.com/首页,该问答社区支持内容搜索,我们也可以根据需要在首页显示当前热门问题、当月高频访问问题等。</center>\n\n除Stack Exchange这种涵盖了多种计算机程序语言的技术问答社区外,LaTeX forum社区([https://latex.org/forum/](https://latex.org/forum/))是一个专门面向LaTeX的技术交流平台,它拥有活跃的用户群体与丰富的问答资源,该平台上有超过10万篇分门别类的技术帖子,我们可根据浏览量从该平台上一览高频访问问题,如图1-6所示,涉及“图表”(Graphics, Figures & Tables)的帖子已超过15000篇,涉及文本排版(text formatting)的帖子已超过10000篇。\n\n<p align=\"center\">\n<img align=\"middle\" src=\"graphics/latex_forum_webpage.png\" width=\"900\" />\n</p>\n\n<center><b>图1-4-2</b> https://latex.org/forum/首页,该问答社区已经将帖子按照话题进行分门别类,我们可以看到:涉及“图表”(Graphics, Figures & Tables)的帖子已超过15000篇,涉及文本排版(text formatting)的帖子已超过10000篇。</center>\n\n实际上,不管是LaTeX初学者还是高级用户,在遇到LaTeX使用问题时,去问答社区寻找解决方案都是一种非常有效的方式。TeX Stack Exchange社区的用户非常活跃,每天都会有大量关于LaTeX的问题和回答,且每个问题下面的回答都会根据用户的认可度进行排序。",
"_____no_output_____"
],
[
"### 1.4.2 高频访问问题\n\n顾名思义,高频访问问题是指访问量较高的问题。LaTeX forum社区已将问答帖子进行分类,针对某一特定话题,展开内容即可看到各类问题的访问情况。\n\n<p align=\"center\">\n<img align=\"middle\" src=\"graphics/latex_forum_math_science.png\" width=\"750\" />\n</p>\n\n<center><b>图1-4-3</b> LaTeX forum社区中涉及“数学和科学”(math & science)话题的帖子,这里已经按照访问量对问答帖子进行了排序,图片来自于https://latex.org/forum/。</center>\n",
"_____no_output_____"
],
[
"【回放】[**1.3 应运而生的在线系统**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-1/section3.ipynb)\n\n【继续】[**1.5 关于LaTeX的开源项目**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-1/section5.ipynb)",
"_____no_output_____"
],
[
"### License\n\n<div class=\"alert alert-block alert-danger\">\n<b>This work is released under the MIT license.</b>\n</div>",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a1262175785ebe22b25d739562fd54f45a0fe90
| 182,022 |
ipynb
|
Jupyter Notebook
|
Analysis_and_Modeling/Feature Selection with Suppression Flags.ipynb
|
georgetown-analytics/Medicare-Fraud
|
d158667701c9525ace71818399c2f6b49e419f3d
|
[
"MIT"
] | null | null | null |
Analysis_and_Modeling/Feature Selection with Suppression Flags.ipynb
|
georgetown-analytics/Medicare-Fraud
|
d158667701c9525ace71818399c2f6b49e419f3d
|
[
"MIT"
] | null | null | null |
Analysis_and_Modeling/Feature Selection with Suppression Flags.ipynb
|
georgetown-analytics/Medicare-Fraud
|
d158667701c9525ace71818399c2f6b49e419f3d
|
[
"MIT"
] | 2 |
2020-02-18T04:31:11.000Z
|
2020-04-14T20:02:04.000Z
| 275.373676 | 155,276 | 0.900699 |
[
[
[
"%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import Ridge, Lasso, ElasticNet\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA",
"_____no_output_____"
],
[
"#Reading in data with the suppression flags\ndf_suppress = pd.read_csv('Data/Final_Suppress_Features.txt', sep='\\t')\ndf_suppress.head()",
"_____no_output_____"
],
[
"df_suppress.columns",
"_____no_output_____"
],
[
"#Dropping all columns that are not possible features or the outcome or which are categorical\ndf_suppress_features = df_suppress.drop(columns=['npi', 'EXCLYear', 'REINYear', 'excl_type', 'specialty_description', 'state', 'nppes_credentials'])\ndf_suppress_features.shape",
"_____no_output_____"
]
],
[
[
"## Feature Selection\n\nThere is the possibility of 40 features. Most likely all of these features are not strong predictors of the outcome (exclusion from Medicare) so will most likely want to filter these features down before running our final model. Will start looking at all together by using Radviz.",
"_____no_output_____"
]
],
[
[
"#Selecting target column separate from features\nfeatures_2 = df_suppress_features.drop(columns = ['exclusion_flag']).columns\nX = df_suppress_features[features_2].values\ny = df_suppress_features['exclusion_flag'].values",
"_____no_output_____"
],
[
"from yellowbrick.features import RadViz\n# Specify the target classes\nclasses = ['0', '1']\n\n# Instantiate the visualizer\nvisualizer = RadViz(classes=classes, features=features_2, size=(1080, 720))\n\nvisualizer.fit(X, y) # Fit the data to the visualizer\nvisualizer.transform(X) # Transform the data\nvisualizer.show() # Draw the data",
"_____no_output_____"
]
],
[
[
"## Regularization\n\nLooks like too much noise is included so will look at Regularization techniques to look at the overall importance of these features in a couple different ways",
"_____no_output_____"
]
],
[
[
"features_3 = df_suppress_features[features_2]\nlabels = df_suppress_features['exclusion_flag']",
"_____no_output_____"
],
[
"#Using Lasso Regularization\nmodel = Lasso(tol = 0.001)\nmodel.fit(features_3, labels)\nprint(list(zip(features_3, model.coef_.tolist())))",
"[('nppes_provider_gender', -0.0), ('medicare_prvdr_enroll_status', -0.0), ('total_claim_count', 1.9571660691424238e-05), ('total_30_day_fill_count', -4.28092932305434e-05), ('total_drug_cost', 1.0973079461181432e-08), ('total_day_supply', 1.0791228107031819e-06), ('bene_count', -7.124602526342238e-05), ('ge65_suppress_flag', -0.0), ('bene_count_ge65_suppress_flag', -0.0), ('brand_suppress_flag', 0.0), ('generic_suppress_flag', -0.0), ('other_suppress_flag', 0.0), ('mapd_suppress_flag', 0.0), ('pdp_suppress_flag', 0.0), ('lis_suppress_flag', 0.0), ('nonlis_suppress_flag', -0.0), ('opioid_claim_count', -0.0), ('opioid_drug_cost', -9.96603020742136e-07), ('opioid_day_supply', 6.376551454140049e-06), ('opioid_bene_count', -0.0), ('opioid_prescriber_rate', -0.0), ('la_opioid_claim_count', -0.0), ('la_opioid_drug_cost', 2.7465610062733103e-06), ('la_opioid_day_supply', -1.607067808143407e-05), ('la_opioid_bene_count', 0.0), ('la_opioid_prescriber_rate', 0.0), ('antibiotic_claim_count', -0.0), ('antibiotic_drug_cost', -1.1175296715457811e-07), ('antibiotic_bene_count', -0.0), ('antipsych_ge65_suppress_flag', 0.0), ('antipsych_bene_ge65_suppress_flg', 0.0), ('average_age_of_beneficiaries', -0.0), ('beneficiary_average_risk_score', -0.0), ('total_30_day_per_claim', 0.0), ('drug_cost_per_claim', -0.0), ('day_supply_per_claim', 0.0), ('female_count', -0.0), ('male_count', -0.0), ('nondual_count', -0.0), ('dual_count', -0.0)]\n"
],
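[
"# The Lasso coefficients above are scale-sensitive. As an added sketch (not part of\n# the original analysis), standardizing the features first makes them comparable;\n# alpha=0.01 is an illustrative choice, not a tuned value.\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\n\nscaled_lasso = make_pipeline(StandardScaler(), Lasso(alpha=0.01, tol=0.001))\nscaled_lasso.fit(features_3, labels)\nprint(list(zip(features_3, scaled_lasso.named_steps['lasso'].coef_.tolist())))",
"_____no_output_____"
],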
[
"#Applying Ridge Regression\nmodel = Ridge()\nmodel.fit(features_3, labels)\nprint(list(zip(features_3, model.coef_.tolist())))",
"[('nppes_provider_gender', -0.09504375553125544), ('medicare_prvdr_enroll_status', -0.2206119247911582), ('total_claim_count', 5.392630433836534e-06), ('total_30_day_fill_count', -1.006660053498885e-05), ('total_drug_cost', 1.2342518978116272e-08), ('total_day_supply', 1.8404504430710488e-07), ('bene_count', -8.216559412999424e-05), ('ge65_suppress_flag', 0.0025640495995891523), ('bene_count_ge65_suppress_flag', -0.0010608695274298933), ('brand_suppress_flag', 0.019206971394619885), ('generic_suppress_flag', -0.005641708398922497), ('other_suppress_flag', -0.025689246196357114), ('mapd_suppress_flag', 0.004857157733346224), ('pdp_suppress_flag', 0.012956309962707534), ('lis_suppress_flag', 0.0038467332916022846), ('nonlis_suppress_flag', -0.0060942780088399295), ('opioid_claim_count', 0.00029073448234533166), ('opioid_drug_cost', -6.067253316469032e-07), ('opioid_day_supply', -2.4839176242396434e-06), ('opioid_bene_count', -0.0005210558211937608), ('opioid_prescriber_rate', 0.00048108267658748246), ('la_opioid_claim_count', -0.0016342736356690962), ('la_opioid_drug_cost', 2.715015855211453e-06), ('la_opioid_day_supply', 9.816352484341335e-06), ('la_opioid_bene_count', 0.003838890878720341), ('la_opioid_prescriber_rate', 0.00043596669317629363), ('antibiotic_claim_count', -3.949783815058285e-05), ('antibiotic_drug_cost', -1.1685185317956072e-07), ('antibiotic_bene_count', 0.00027683547334384314), ('antipsych_ge65_suppress_flag', -0.019634000104942134), ('antipsych_bene_ge65_suppress_flg', 0.04021839116431636), ('average_age_of_beneficiaries', -0.004372728956895663), ('beneficiary_average_risk_score', -0.01134827350913087), ('total_30_day_per_claim', -0.2943231600431238), ('drug_cost_per_claim', -2.4038310797778358e-05), ('day_supply_per_claim', 0.008050495785303506), ('female_count', 0.00796469523918947), ('male_count', 0.007964695239189627), ('nondual_count', 0.013282043274802202), ('dual_count', 0.013282043274802688)]\n"
],
[
"#Applying ElasticNet\nmodel = ElasticNet(tol = 0.1)\nmodel.fit(features_3, labels)\nprint(list(zip(features_3, model.coef_.tolist())))",
"[('nppes_provider_gender', -0.0), ('medicare_prvdr_enroll_status', -0.0), ('total_claim_count', 2.1782212767673396e-05), ('total_30_day_fill_count', -4.863631113322388e-05), ('total_drug_cost', 1.629768200877684e-08), ('total_day_supply', 1.2613661542635663e-06), ('bene_count', -0.00010540398970016658), ('ge65_suppress_flag', -0.0), ('bene_count_ge65_suppress_flag', -0.0), ('brand_suppress_flag', 0.0), ('generic_suppress_flag', -0.0), ('other_suppress_flag', 0.0), ('mapd_suppress_flag', 0.0), ('pdp_suppress_flag', 0.0), ('lis_suppress_flag', 0.0), ('nonlis_suppress_flag', -0.0), ('opioid_claim_count', -0.0), ('opioid_drug_cost', -1.0253737345989753e-06), ('opioid_day_supply', 6.588617333231134e-06), ('opioid_bene_count', 0.0), ('opioid_prescriber_rate', -0.0), ('la_opioid_claim_count', -0.0), ('la_opioid_drug_cost', 2.810029314172946e-06), ('la_opioid_day_supply', -1.689281241103598e-05), ('la_opioid_bene_count', 0.0), ('la_opioid_prescriber_rate', 0.0), ('antibiotic_claim_count', -0.0), ('antibiotic_drug_cost', -9.492082639013911e-08), ('antibiotic_bene_count', -0.0), ('antipsych_ge65_suppress_flag', 0.0), ('antipsych_bene_ge65_suppress_flg', 0.0), ('average_age_of_beneficiaries', -0.0), ('beneficiary_average_risk_score', -0.0), ('total_30_day_per_claim', 0.0), ('drug_cost_per_claim', -6.992268509520107e-06), ('day_supply_per_claim', 0.0), ('female_count', -0.0), ('male_count', -0.0), ('nondual_count', -0.0), ('dual_count', -0.0)]\n"
]
],
[
[
"## Transformer Methods",
"_____no_output_____"
]
],
[
[
"model = Lasso(tol=.001)\nsfm = SelectFromModel(model)\nsfm.fit(features_3, labels)\nprint(list(features_3.iloc[:, sfm.get_support(indices=True)]))",
"['total_claim_count', 'total_30_day_fill_count', 'bene_count', 'la_opioid_day_supply']\n"
],
[
"model = Ridge()\nsfm = SelectFromModel(model)\nsfm.fit(features_3, labels)\nprint(list(features_3.iloc[:, sfm.get_support(indices=True)]))",
"['nppes_provider_gender', 'medicare_prvdr_enroll_status', 'other_suppress_flag', 'antipsych_bene_ge65_suppress_flg', 'total_30_day_per_claim']\n"
],
[
"model = ElasticNet(tol=.1)\nsfm = SelectFromModel(model)\nsfm.fit(features_3, labels)\nprint(list(features_3.iloc[:, sfm.get_support(indices=True)]))",
"['total_claim_count', 'total_30_day_fill_count', 'bene_count', 'opioid_day_supply', 'la_opioid_day_supply', 'drug_cost_per_claim']\n"
]
],
[
[
"## Correlation Matrix\n\nTo ensure we don't miss any strongly correlated features with our outcome, will also include any features with a correlations with our outcome that is greater than 0.1",
"_____no_output_____"
]
],
[
[
"#Looking at correlations with outcome variable using spearman\ncorrelations_spearman = df_suppress_features.corr(method='spearman')\ncorr_spear = correlations_spearman['exclusion_flag']\ncorr_spear.sort_values()",
"_____no_output_____"
],
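[
"# Optional visual check (an added sketch): heatmap of each feature's Spearman\n# correlation with the outcome, using the corr_spear series computed above.\nplt.figure(figsize=(4, 10))\nsns.heatmap(corr_spear.sort_values().to_frame(), cmap='coolwarm', annot=True)\nplt.show()",
"_____no_output_____"
],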
[
"#Selecting the features from all three of the transformers, the categorical features (as seen in previous visualizations)\n#there seem to be some correlations within them), any additional features with a spearman correlation > absolute value \n#of 0.1 and features flagged in earlier analysis where they may be differences when looking at their distributions\nkeep = ['specialty_description', 'state', 'nppes_credentials', 'nppes_provider_gender', \n 'medicare_prvdr_enroll_status', 'other_suppress_flag', 'antipsych_bene_ge65_suppress_flg', \n 'total_30_day_per_claim', 'total_claim_count', 'total_30_day_fill_count', 'bene_count', 'opioid_day_supply', \n 'la_opioid_day_supply', 'drug_cost_per_claim', 'average_age_of_beneficiaries', 'day_supply_per_claim', 'exclusion_flag']",
"_____no_output_____"
],
[
"#Saving as new dataset\nModel_suppress = df_suppress[keep]\nModel_suppress.shape",
"_____no_output_____"
],
[
"Model_suppress.to_csv('Data/Model_suppress.txt', sep='\\t', index=False)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |