hexsha (stringlengths 40–40) | size (int64 6–14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6–260) | max_stars_repo_name (stringlengths 6–119) | max_stars_repo_head_hexsha (stringlengths 40–41) | max_stars_repo_licenses (list) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 6–260) | max_issues_repo_name (stringlengths 6–119) | max_issues_repo_head_hexsha (stringlengths 40–41) | max_issues_repo_licenses (list) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 6–260) | max_forks_repo_name (stringlengths 6–119) | max_forks_repo_head_hexsha (stringlengths 40–41) | max_forks_repo_licenses (list) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | avg_line_length (float64 2–1.04M) | max_line_length (int64 2–11.2M) | alphanum_fraction (float64 0–1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a821b21fa167b74685e574ce4b6e85cac49c95f
| 416,397 |
ipynb
|
Jupyter Notebook
|
didi_dataset/didi_dataset.ipynb
|
ojInc/google-research
|
9929c88b664800a25b8716c22068dd77d80bd5ee
|
[
"Apache-2.0"
] | 23,901 |
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
didi_dataset/didi_dataset.ipynb
|
MitchellTesla/google-research
|
393e60a28e676992af1e7cb4f93e5c2d4e0cf517
|
[
"Apache-2.0"
] | 891 |
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
didi_dataset/didi_dataset.ipynb
|
MitchellTesla/google-research
|
393e60a28e676992af1e7cb4f93e5c2d4e0cf517
|
[
"Apache-2.0"
] | 6,047 |
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
| 564.989145 | 29,382 | 0.925477 |
[
[
[
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"This colab notebook demonstrates how to read and visualize the data in the Didi dataset: Digital Ink Diagram data.\n\nMore information about this data is available at\n* https://github.com/google-research/google-research/tree/master/didi_dataset\n* [The Didi dataset: Digital Ink Diagram data](https://arxiv.org/abs/2002.09303). P. Gervais, T. Deselaers, E. Aksan, O. Hilliges, 2020.\n\nThe colab demonstrates how to:\n\n1. display the data along with the prompt images.\n1. convert the data to a sharded `TFRecord` file of `TFExample`s.",
"_____no_output_____"
]
],
[
[
"from __future__ import division\n\nimport collections\nimport contextlib\nimport io\nimport json\nimport os\nimport random\nimport statistics\n\nfrom googleapiclient.discovery import build\nfrom google.colab import auth\nfrom google.colab import files\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom apiclient import errors\n\n%tensorflow_version 2.x\nimport tensorflow as tf\n\nimport numpy as np\nfrom matplotlib import pylab\nfrom IPython.display import Image, display",
"_____no_output_____"
],
[
"# Setup and settings.\n\n# Settings\nJSON_FILES=[\"diagrams_wo_text_20200131.ndjson\", \"diagrams_20200131.ndjson\"]\nPROJECT_ID = \"digital-ink-diagram-data\"\nBUCKET_NAME = \"digital_ink_diagram_data\"\nLOCAL_DATA_DIR = \"/tmp\"\nNUM_TFRECORD_SHARDS = 1\n\nauth.authenticate_user()\n\n# Creating the service client.\ngcs_service = build(\"storage\", \"v1\")",
"_____no_output_____"
],
[
"# Download the data\ndef download_file_from_gcs(filename):\n directory_name = os.path.join(LOCAL_DATA_DIR, os.path.dirname(filename))\n if not os.path.exists(directory_name):\n os.mkdir(directory_name)\n with open(os.path.join(LOCAL_DATA_DIR, filename), \"wb\") as f:\n request = gcs_service.objects().get_media(bucket=BUCKET_NAME, object=filename)\n media = MediaIoBaseDownload(f, request)\n\n done = False\n while not done:\n status, done = media.next_chunk()\n if not done:\n print(\"Downloading '%s': %-3.0f%%\" % (filename, status.progress() * 100))\n\ndef get_label_file(type, labelid):\n file_id = os.path.join(type, \"%s.%s\" % (labelid, type))\n fname = os.path.join(LOCAL_DATA_DIR, file_id)\n if os.path.exists(fname):\n return fname\n download_file_from_gcs(file_id)\n return fname\n\nfor json_file in JSON_FILES:\n download_file_from_gcs(json_file)",
"Downloading 'diagrams_wo_text_20200131.ndjson': 10 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 20 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 30 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 40 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 50 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 60 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 71 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 81 %\nDownloading 'diagrams_wo_text_20200131.ndjson': 91 %\nDownloading 'diagrams_20200131.ndjson': 9 %\nDownloading 'diagrams_20200131.ndjson': 17 %\nDownloading 'diagrams_20200131.ndjson': 26 %\nDownloading 'diagrams_20200131.ndjson': 34 %\nDownloading 'diagrams_20200131.ndjson': 43 %\nDownloading 'diagrams_20200131.ndjson': 51 %\nDownloading 'diagrams_20200131.ndjson': 60 %\nDownloading 'diagrams_20200131.ndjson': 69 %\nDownloading 'diagrams_20200131.ndjson': 77 %\nDownloading 'diagrams_20200131.ndjson': 86 %\nDownloading 'diagrams_20200131.ndjson': 94 %\n"
],
[
"# Displays prompt images with drawing overlaid.\ndef PrepareDrawing():\n pylab.clf()\n pylab.axes().set_aspect(\"equal\")\n pylab.gca().yaxis.set_visible(False)\n pylab.gca().xaxis.set_visible(False)\n\ndef display_image(ink):\n im = pylab.imread(os.path.join(LOCAL_DATA_DIR, \"png\", ink[\"label_id\"] + \".png\"))\n # Compute scaling of the image.\n guide_width = ink[\"writing_guide\"][\"width\"]\n guide_height = ink[\"writing_guide\"][\"height\"]\n im_height, im_width, _ = im.shape\n scale=min(guide_width / im_width, guide_height / im_height)\n offset_x = (guide_width - scale * im_width) / 2\n offset_y = (guide_height - scale * im_height) / 2\n pylab.imshow(im, origin=\"upper\",\n extent=(offset_x, offset_x + scale * im_width,\n offset_y + scale * im_height, offset_y),\n aspect=\"equal\")\n\ndef display_strokes(ink):\n for s in ink[\"drawing\"]:\n pylab.plot(s[0], [y for y in s[1]], color=\"red\")\n\ndef display_ink(ink):\n # Fetch the corresponding PNG image.\n get_label_file(\"png\", ink[\"label_id\"])\n # Draw image, overlay strokes.\n PrepareDrawing()\n display_image(ink)\n display_strokes(ink)\n pylab.show()\n\nfor json_file in JSON_FILES:\n count = 0\n with open(os.path.join(LOCAL_DATA_DIR, json_file)) as f:\n for line in f:\n ink = json.loads(line)\n display_ink(ink)\n\n count += 1\n if count == 10:\n break",
"_____no_output_____"
],
[
"# This cell converts the file to tf.Record of tf.Example.\n# This cell takes long time to run.\n\ndef get_label_file_contents(type, labelid):\n get_label_file(type, labelid)\n with open(os.path.join(LOCAL_DATA_DIR, type, \"%s.%s\" %(labelid, type))) as f:\n return f.read()\n\ndef ink_to_tfexample(ink, dot=None):\n \"\"\"Takes a LabeledInk and outputs a TF.Example with stroke information.\n\n Args:\n ink: A JSON array containing the drawing information.\n dot: (Optional) textual content of the GrahViz dotfile that was used to\n generate the prompt image.\n\n Returns:\n a Tensorflow Example proto with the drawing data.\n \"\"\"\n features = {}\n features[\"key\"] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[ink[\"key\"].encode(\"utf-8\")]))\n features[\"label_id\"] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[ink[\"label_id\"].encode(\"utf-8\")]))\n if dot:\n features[\"label_dot\"] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[dot.encode(\"utf-8\")]))\n\n max_len = np.array([len(stroke[0]) for stroke in ink[\"drawing\"]]).max()\n\n strokes = []\n stroke_lengths = []\n for stroke in ink[\"drawing\"]:\n stroke_len = len(stroke[0])\n padded_stroke_with_pen = np.zeros([1, max_len, 4], dtype=np.float32)\n padded_stroke_with_pen[0, 0:stroke_len, 0] = stroke[0]\n padded_stroke_with_pen[0, 0:stroke_len, 1] = stroke[1]\n padded_stroke_with_pen[0, 0:stroke_len, 2] = stroke[2]\n padded_stroke_with_pen[0, stroke_len - 1, 3] = 1\n strokes.append(padded_stroke_with_pen)\n stroke_lengths.append(stroke_len)\n\n all_strokes = np.concatenate(strokes, axis=0).astype(float) # (num_strokes, max_len, 4)\n all_stroke_lengths = np.array(stroke_lengths).astype(int)\n\n features[\"ink\"] = tf.train.Feature(\n float_list=tf.train.FloatList(value=all_strokes.flatten()))\n features[\"stroke_length\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=all_stroke_lengths))\n features[\"shape\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=all_strokes.shape))\n features[\"num_strokes\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[len(ink[\"drawing\"])]))\n example = tf.train.Example(features=tf.train.Features(feature=features))\n return example\n\[email protected]\ndef create_tfrecord_writers(output_file, num_output_shards):\n writers = collections.defaultdict(list)\n for split in [\"train\", \"valid\", \"test\"]:\n for i in range(num_output_shards):\n writers[split].append(\n tf.io.TFRecordWriter(\"%s-%s-%05i-of-%05i\" %\n (output_file, split, i, num_output_shards)))\n try:\n yield writers\n finally:\n for split in [\"train\", \"valid\", \"test\"]:\n for w in writers[split]:\n w.close()\n\ndef pick_output_shard(num_shards):\n return random.randint(0, num_shards - 1)\n\ndef size_normalization(drawing):\n def get_bounding_box(drawing):\n minx = 99999\n miny = 99999\n maxx = 0\n maxy = 0\n\n for s in drawing:\n minx = min(minx, min(s[0]))\n maxx = max(maxx, max(s[0]))\n miny = min(miny, min(s[1]))\n maxy = max(maxy, max(s[1]))\n return (minx, miny, maxx, maxy)\n\n bb = get_bounding_box(drawing)\n width, height = bb[2] - bb[0], bb[3] - bb[1]\n offset_x, offset_y = bb[0], bb[1]\n if height < 1e-6:\n height = 1\n\n size_normalized_drawing = [[[(x - offset_x) / height for x in stroke[0]],\n [(y - offset_y) / height for y in stroke[1]],\n [t for t in stroke[2]]]\n for stroke in drawing]\n\n return size_normalized_drawing\n\ndef resample_ink(drawing, timestep):\n def resample_stroke(stroke, timestep):\n def interpolate(t, t_prev, t_next, v0, v1):\n d0 = 
abs(t-t_prev)\n d1 = abs(t-t_next)\n dist_sum = d0 + d1\n d0 /= dist_sum\n d1 /= dist_sum\n return d1 * v0 + d0 * v1\n\n x,y,t = stroke\n if len(t) < 3:\n return stroke\n r_x, r_y, r_t = [x[0]], [y[0]], [t[0]]\n final_time = t[-1]\n stroke_time = final_time - t[0]\n necessary_steps = int(stroke_time / timestep)\n\n i = 1\n current_time = t[i]\n while current_time < final_time:\n current_time += timestep\n while i < len(t) - 1 and current_time > t[i]:\n i += 1\n r_x.append(interpolate(current_time, t[i-1], t[i], x[i-1], x[i]))\n r_y.append(interpolate(current_time, t[i-1], t[i], y[i-1], y[i]))\n r_t.append(interpolate(current_time, t[i-1], t[i], t[i-1], t[i]))\n return [r_x, r_y, r_t]\n\n resampled = [resample_stroke(s, timestep) for s in drawing]\n return resampled\n\nfor json_file in JSON_FILES:\n counts = collections.defaultdict(int)\n with create_tfrecord_writers(os.path.join(LOCAL_DATA_DIR, json_file + \".tfrecord\"), NUM_TFRECORD_SHARDS) as writers:\n with open(os.path.join(LOCAL_DATA_DIR, json_file)) as f:\n for line in f:\n ink = json.loads(line)\n dot = get_label_file_contents(\"dot\", ink[\"label_id\"])\n ink[\"drawing\"] = size_normalization(ink[\"drawing\"])\n ink[\"drawing\"] = resample_ink(ink[\"drawing\"], 20)\n\n example = ink_to_tfexample(ink, dot)\n counts[ink[\"split\"]] += 1\n writers[ink[\"split\"]][pick_output_shard(NUM_TFRECORD_SHARDS)].write(example.SerializeToString())\n\n print (\"Finished writing: %s train: %i valid: %i test: %i\" %(json_file, counts[\"train\"], counts[\"valid\"], counts[\"test\"]))",
"Finished writing: diagrams_wo_text_20200131.ndjson train: 27278 valid: 4545 test: 4545\nFinished writing: diagrams_20200131.ndjson train: 16717 valid: 2785 test: 2785\n"
],
[
"# Download the TFRecord files to local machine (or use the filemanager on the left).\nfor json_file in JSON_FILES:\n for split in [\"train\", \"valid\", \"test\"]:\n for i in range(NUM_TFRECORD_SHARDS):\n filename = os.path.join(LOCAL_DATA_DIR, json_file + \".tfrecord-%s-%05i-of-%05i\" % (split, i, NUM_TFRECORD_SHARDS))\n print(filename)\n files.download(filename)",
"_____no_output_____"
],
[
"stats = {}\n\n# Compute some dataset statistics\ndef count_points_strokes(ink):\n return sum([len(stroke[0]) for stroke in ink]), len(ink)\n\n# Collect data to compute statistics\nfor json_file in JSON_FILES:\n stats[json_file] = collections.defaultdict(list)\n with open(os.path.join(LOCAL_DATA_DIR, json_file)) as f:\n for line in f:\n ink = json.loads(line)\n points, strokes = count_points_strokes(ink[\"drawing\"])\n stats[json_file][\"points\"].append(points)\n stats[json_file][\"strokes\"].append(strokes)\n stats[json_file][\"labels\"].append(ink[\"label_id\"])\n\n print (json_file)\n for i in [\"points\", \"strokes\"]:\n print (i, min(stats[json_file][i]), max(stats[json_file][i]), statistics.median(stats[json_file][i]))\n\n for i in [\"labels\"]:\n labels, counts = np.unique(stats[json_file][i], return_counts=True)\n print (i, len(labels), min(counts), max(counts), statistics.median(counts))\n print()",
"diagrams_wo_text_20200131.ndjson\npoints 2 17980 1068.0\nstrokes 1 71 9.0\nlabels 940 1 169 36.0\n\ndiagrams_20200131.ndjson\npoints 2 7268 2295\nstrokes 1 161 31\nlabels 5629 1 27 3\n\n"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a821d577e041a658e4f8aea0f703d93905f9aed
| 181,209 |
ipynb
|
Jupyter Notebook
|
rebound/ipython_examples/PoincareMap.ipynb
|
rodluger/ttv-devil
|
e534e4f3cd75db951cba54441f7a5458c87e0cf9
|
[
"MIT"
] | null | null | null |
rebound/ipython_examples/PoincareMap.ipynb
|
rodluger/ttv-devil
|
e534e4f3cd75db951cba54441f7a5458c87e0cf9
|
[
"MIT"
] | null | null | null |
rebound/ipython_examples/PoincareMap.ipynb
|
rodluger/ttv-devil
|
e534e4f3cd75db951cba54441f7a5458c87e0cf9
|
[
"MIT"
] | null | null | null | 534.539823 | 171,250 | 0.9346 |
[
[
[
"# Poincare Map\nThis example shows how to calculate a simple Poincare Map with REBOUND. A Poincare Map (or sometimes calles Poincare Section) can be helpful to understand dynamical systems.",
"_____no_output_____"
]
],
[
[
"import rebound\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"We first create the initial conditions for our map. The most interesting Poincare maps exist near resonance, so we have to find a system near a resonance. The easiest way to get planets into resonance is migration. So that's what we'll do. Initially we setup a simulation in which the planets are placed just outside the 2:1 mean motion resonance.",
"_____no_output_____"
]
],
[
[
"sim = rebound.Simulation()\nsim.add(m=1.)\nsim.add(m=1e-3,a=1,e=0.001)\nsim.add(m=0.,a=1.65)\nsim.move_to_com()",
"_____no_output_____"
]
],
[
[
"We then define a simple migration force that will act on the outer planet. We implement it in python. This is relatively slow, but we only need to migrate the planet for a short time.",
"_____no_output_____"
]
],
[
[
"def migrationForce(reb_sim):\n tau = 40000.\n ps[2].ax -= ps[2].vx/tau\n ps[2].ay -= ps[2].vy/tau\n ps[2].az -= ps[2].vz/tau",
"_____no_output_____"
]
],
[
[
"Next, we link the additional migration forces to our REBOUND simulation and get the pointer to the particle array.",
"_____no_output_____"
]
],
[
[
"sim.additional_forces = migrationForce\nps = sim.particles",
"_____no_output_____"
]
],
[
[
"Then, we just integrate the system for 3000 time units, about 500 years in units where $G=1$.",
"_____no_output_____"
]
],
[
[
"sim.integrate(3000.)",
"_____no_output_____"
]
],
[
[
"Then we save the simulation to a binary file. We'll be reusing it a lot later to create the initial conditions and it is faster to load it from file than to migrate the planets into resonance each time. ",
"_____no_output_____"
]
],
[
[
"sim.save(\"resonant_system.bin\") ",
"_____no_output_____"
]
],
[
[
"To create the poincare map, we first define which hyper surface we want to look at. Here, we choose the pericenter of the outer planet.",
"_____no_output_____"
]
],
[
[
"def hyper(sim):\n dp = sim.particles[2]-sim.particles[0]\n return dp.x*dp.vx + dp.y*dp.vy",
"_____no_output_____"
]
],
[
[
"We will also need a helper function that ensures our resonant angle is in the range $[-\\pi:\\pi]$.",
"_____no_output_____"
]
],
[
[
"def mod2pi(x):\n if x>np.pi:\n return mod2pi(x-2.*np.pi)\n if x<-np.pi:\n return mod2pi(x+2.*np.pi)\n return x",
"_____no_output_____"
]
],
[
[
"The following function generate the Poincare Map for one set of initial conditions. \nWe first load the resonant system from the binary file we created earlier. \nWe then randomly perturb the velocity of one of the particles. If we perturb the velocity enough, the planets will not be in resonant anymore.\nWe also initialize shadow particles to calculate the MEGNO, a fast chaos indicator.",
"_____no_output_____"
]
],
[
[
"def runone(args):\n i = args # integer numbering the run\n N_points_max = 2000 # maximum number of point in our Poincare Section\n N_points = 0\n poincare_map = np.zeros((N_points_max,2))\n \n # setting up simulation from binary file\n sim = rebound.Simulation.from_file(\"resonant_system.bin\")\n vx = 0.97+0.06*(float(i)/float(Nsim))\n sim.particles[2].vx *= vx\n sim.t = 0. # reset time to 0\n \n # Integrate simulation in small intervals\n # After each interval check if we crossed the \n # hypersurface. If so, bisect until we hit the \n # hypersurface exactly up to a precision\n # of dt_epsilon\n dt = 0.13\n dt_epsilon = 0.001\n sign = hyper(sim)\n while sim.t<15000. and N_points < N_points_max:\n oldt = sim.t\n olddt = sim.dt\n sim.integrate(oldt+dt)\n nsign = hyper(sim)\n if sign*nsign < 0.:\n # Hyper surface crossed.\n leftt = oldt\n rightt = sim.t\n sim.dt = -olddt\n while (rightt-leftt > dt_epsilon):\n # Bisection.\n midt = (leftt+rightt)/2.\n sim.integrate(midt)\n msign = hyper(sim)\n if msign*sign > 0.:\n leftt = midt\n sim.dt = 0.3*olddt\n else:\n rightt = midt\n sim.dt = -0.3*olddt\n # Hyper surface found up to precision of dt_epsilon.\n # Calculate orbital elements\n o = sim.calculate_orbits()\n # Check if we cross hypersurface in one direction or the other.\n if o[1].d<o[1].a:\n # Calculate resonant angle phi and its time derivative \n tp = np.pi*2.\n phi = mod2pi(o[0].l-2.*o[1].l+o[1].omega+o[1].Omega)\n phid = (tp/o[0].P-2.*tp/o[1].P)/(tp/o[0].P)\n # Store value for map\n poincare_map[N_points] = [phi,phid]\n N_points += 1\n sim.dt = olddt\n sim.integrate(oldt+dt)\n sign = nsign\n # Rerun to calculate Megno\n sim = rebound.Simulation.from_file(\"resonant_system.bin\")\n vx = 0.97+0.06*(float(i)/float(Nsim))\n sim.particles[2].vx *= vx\n sim.t = 0. # reset time to 0\n sim.init_megno() # adds variational particles and initialized MEGNO\n sim.integrate(15000.)\n return (poincare_map, sim.calculate_megno(),vx)",
"_____no_output_____"
]
],
[
[
"For this example we'll run 10 initial conditions. Some of them will be in resonance, some other won't be. We run them in parallel using the InterruptiblePool that comes with REBOUND.",
"_____no_output_____"
]
],
[
[
"Nsim = 10\npool = rebound.InterruptiblePool()\nres = pool.map(runone,range(Nsim))",
"_____no_output_____"
]
],
[
[
"Now we can finally plot the Poincare Map. We color the points by the MEGNO value of the particular simulation. A value close to 2 corresponds to quasi-periodic orbits, a large value indicate chaotic motion.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline \nimport matplotlib.pyplot as plt\nfig = plt.figure(figsize=(14,8))\nax = plt.subplot(111)\nax.set_xlabel(\"$\\phi$\"); ax.set_ylabel(\"$\\dot{\\phi}$\")\nax.set_xlim([-np.pi,np.pi]); ax.set_ylim([-0.06,0.1])\ncm = plt.cm.get_cmap('brg')\nfor m, megno, vx in res:\n c = np.empty(len(m[:,0])); c.fill(megno)\n p = ax.scatter(m[:,0],m[:,1],marker=\".\",c=c, vmin=1.4, vmax=3, s=25,edgecolor='none', cmap=cm)\ncb = plt.colorbar(p, ax=ax)\ncb.set_label(\"MEGNO $<Y>$\")",
"_____no_output_____"
]
],
[
[
"The red orbits are periodic or quasi periodic, the green orbits are chaotic. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a8230717d917a8cd951e6307e64e76650680724
| 24,741 |
ipynb
|
Jupyter Notebook
|
2.1_first_module.ipynb
|
JC-S/chisel-bootcamp
|
a69baccbb1b7e578e705af73d169310b30042cfb
|
[
"BSD-3-Clause"
] | null | null | null |
2.1_first_module.ipynb
|
JC-S/chisel-bootcamp
|
a69baccbb1b7e578e705af73d169310b30042cfb
|
[
"BSD-3-Clause"
] | null | null | null |
2.1_first_module.ipynb
|
JC-S/chisel-bootcamp
|
a69baccbb1b7e578e705af73d169310b30042cfb
|
[
"BSD-3-Clause"
] | null | null | null | 36.871833 | 640 | 0.593185 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a8234844a6e3635fd90a0e29d549fdcc2a242f4
| 598,573 |
ipynb
|
Jupyter Notebook
|
NDVI_Stats_And_Mask.ipynb
|
Sebasmedd26/MasterOekoneu
|
dd9a5e371e7708a05df5a3b990f6b1ae24f41e22
|
[
"MIT"
] | null | null | null |
NDVI_Stats_And_Mask.ipynb
|
Sebasmedd26/MasterOekoneu
|
dd9a5e371e7708a05df5a3b990f6b1ae24f41e22
|
[
"MIT"
] | null | null | null |
NDVI_Stats_And_Mask.ipynb
|
Sebasmedd26/MasterOekoneu
|
dd9a5e371e7708a05df5a3b990f6b1ae24f41e22
|
[
"MIT"
] | null | null | null | 994.307309 | 231,188 | 0.944328 |
[
[
[
"#import libraries\n\nimport rasterio as rs\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport math\nfrom osgeo import gdal\nfrom rasterio.plot import show\nimport os\n\nprint('*********** Libraries were imported successfuly **********')\nprint('working directory: '+ str(os.getcwd()))",
"*********** Libraries were imported successfuly **********\nworking directory: C:\\Users\\s79631\\Documents\\Python_ökoneu\n"
],
[
"#load classification image\n\nprint('**************** Loading classification file *************')\ngdal.UseExceptions()\nimg_clas = rs.open ('20200928_sent_ökoneu_mask_etrs89.img')\nprint('**************** Image imported successfuly **************')\n\n## Print image data\nprint('**********************************************************')\nprint('*********************** Image data ***********************')\nprint('Number of bands: ' + str(img_clas.count))\nprint('Coordinate Reference System: ' + str(img_clas.crs))\nprint('Image width:`' + str(img_clas.width))\nprint('Image height:`' + str(img_clas.height))\nprint('Number of Pixels:`' + str(int(img_clas.height)*int(img_clas.width)))\nprint('**********************************************************')\n\n## create groups using mask values from ERDAS classification mask\n#grassland = [13,15,18,19,21,22,25,27,28,32,33] for 2015\ngrassland = [12,13,15,16,17,20,23,25,28,29,31,33]\n#tree_canopy = [2,4,7,9,11,12,16]\ntree_canopy = [3,5,6,7,8,9]\n\ntree_list = list()\ngrass_list = list()\n\n## get bands\nprint('************** extracting classification data ************')\nclas_values = img_clas.read(1)\n\nseeker_column = 0\nwhile seeker_column < img.width:\n seeker_row = 0\n while seeker_row < img.height:\n arr = clas_values[seeker_row]\n pos = (seeker_row,seeker_column)\n if arr[seeker_column] in grassland:\n grass_list.append(pos)\n if arr[seeker_column] in tree_canopy:\n tree_list.append(pos)\n seeker_row = seeker_row+1\n seeker_column = seeker_column+1\n\nprint('************ classification successfully loaded **********') \nprint('Grassland/agriculture values...................'+str(len(grass_list))) \nprint('Tree Canopy values.............................'+str(len(tree_list))) \nprint('***********************************************************') \n \nprint(grass_list[1])\n#print((clas_values[200]))\n#print(type(clas_values[1]))\n#x = clas_values[200] #x = classvalues [row]\n#print (x[1]) #value x[column]\n\n\n\n#for elementa in (clas_values [200]):\n #if elementa in grassland:\n #print(\"V\")\n #else:\n #print(\"F\")\n\nshow(img_clas)",
"**************** Loading classification file *************\n**************** Image imported successfuly **************\n**********************************************************\n*********************** Image data ***********************\nNumber of bands: 1\nCoordinate Reference System: PROJCS[\"unnamed\",GEOGCS[\"ETRS 1989\",DATUM[\"ETRS 1989\",SPHEROID[\"GRS 1980\",6378137,298.257222096042],TOWGS84[0,0,0,0,0,0,0]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",15],PARAMETER[\"scale_factor\",0.9996],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northing\",0],UNIT[\"meters\",1],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]\nImage width:`901\nImage height:`501\nNumber of Pixels:`451401\n**********************************************************\n************** extracting classification data ************\n************ classification successfully loaded **********\nGrassland/agriculture values...................156575\nTree Canopy values.............................71804\n***********************************************************\n(1, 0)\n"
],
[
"##Change directory to file folder\n##os.chdir('D:\\TEMP\\20200324_Sentinel2A')\n\n## open image\n\ngdal.UseExceptions()\nimg = rs.open ('20200102_Ökoneu_etrs89_ndvi.img')\n\nprint('**************** Image imported successfuly **************')\n\n## Print image data\nprint('**********************************************************')\nprint('*********************** Image data ***********************')\nprint('Number of bands: ' + str(img.count))\nprint('Coordinate Reference System: ' + str(img.crs))\nprint('Image width:`' + str(img.width))\nprint('Image height:`' + str(img.height))\nprint('Number of Pixels:`' + str(int(img.height)*int(img.width)))\nprint('**********************************************************')\nshow(img)\n\n## get bands\nIndex_Values = img.read(1)\nprint(len(Index_Values))\n\n## stats\nfrom scipy import stats\n#stats.describe (Index_Values) #activate just if needed",
"**************** Image imported successfuly **************\n**********************************************************\n*********************** Image data ***********************\nNumber of bands: 1\nCoordinate Reference System: PROJCS[\"unnamed\",GEOGCS[\"ETRS 1989\",DATUM[\"ETRS 1989\",SPHEROID[\"GRS 1980\",6378137,298.257222096042],TOWGS84[0,0,0,0,0,0,0]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]]],PROJECTION[\"Transverse_Mercator\"],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",15],PARAMETER[\"scale_factor\",0.9996],PARAMETER[\"false_easting\",500000],PARAMETER[\"false_northing\",0],UNIT[\"meters\",1],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]\nImage width:`901\nImage height:`501\nNumber of Pixels:`451401\n**********************************************************\n"
],
[
"print('**********************************************************')\nprint('****************** Analysing values... *******************')\nprint('**********************************************************')\n\n## create classification conters and indexing lists \n\nvery_healthy = 0 # values between [0.7-1]\nvery_healthy_dic = list()\n\nhealthy = 0 # values between [0.55-0.7]\nhealthy_dic = list()\n\nlightstress = 0 # values between [0.45-0.55]\nlight_dic = list()\n\nmoderatestress = 0 # values between [0.35-0.45]\nmoderate_dic = list()\n\nheavystress = 0 # values between [0.25-0.35]\nheavy_dic = list()\n\nno_veg = 0 # values between [<0.25]\nno_veg_dic = list\n\n# create numpy-array for masking for report\n\noutput_format = \".png\"\nt=(img.height, img.width,3)\nmask=np.zeros(t,dtype=np.uint8)\n\n#Define Masking Colours\n\ncolors= [(0,0,0),(255,0,0),(255,128,0),(255,255,0),(127,255,0),(50,205,50)]\n\n# Classify Pixels\nNDVI_tree=list()\nNDVI_veg=list()\nNDVI_grass=list()\nNDVI_neg=list()\nNDVI_accum = list()\ncounter_total= 0\ncounter_neg= 0\n\nseeker_column = 0\nwhile seeker_column < img.width:\n seeker_row = 0\n while seeker_row < img.height:\n if Index_Values[seeker_row, seeker_column] <= 0.25:\n mask[seeker_row, seeker_column] = colors[0]\n no_veg = no_veg+1\n else:\n if Index_Values[seeker_row, seeker_column] <= 0.35:\n mask[seeker_row, seeker_column] = colors[1]\n heavystress = heavystress+1\n else:\n if Index_Values[seeker_row, seeker_column] <= 0.45:\n mask[seeker_row, seeker_column] = colors[2]\n moderatestress = moderatestress + 1\n else:\n if Index_Values[seeker_row, seeker_column] <= 0.55:\n mask[seeker_row, seeker_column] = colors[3]\n lightstress = lightstress + 1\n else:\n if Index_Values[seeker_row, seeker_column] <= 0.7:\n mask[seeker_row, seeker_column] = colors[4]\n healthy = healthy + 1\n else:\n mask[seeker_row, seeker_column] = colors[5]\n very_healthy = very_healthy + 1\n if Index_Values[seeker_row, seeker_column] >= 0:\n NDVI_accum.append(Index_Values[seeker_row, seeker_column])\n \n NDVI_neg.append(Index_Values[seeker_row, seeker_column])\n seeker_row = seeker_row+1\n seeker_column = seeker_column+1\n\nfor elements in tree_list:\n x_pos = elements[0] \n y_pos = elements[1]\n value = float(Index_Values[x_pos, y_pos])\n if value >= 0:\n NDVI_tree.append(value)\n NDVI_veg.append(value)\n \nfor elemento in grass_list:\n x_pos = elemento[0] \n y_pos = elemento[1]\n value = float(Index_Values[x_pos, y_pos])\n if value >= 0:\n NDVI_grass.append(value)\n NDVI_veg.append(value)\n\n\n#Calculation of vegeation area and non vegetation area\n \nveg_area = 10*10/10000*(int(very_healthy)+int(healthy)+int(lightstress)+int(moderatestress)+int(heavystress))\nno_veg_area = int(no_veg)*10*10/10000\n\nNDVI_treemean = np.nanmean(NDVI_tree)\nNDVI_grassmean = np.nanmean(NDVI_grass)\nNDVI_mean = np.nanmean(NDVI_accum)\nNDVI_scene = np.nanmean(NDVI_neg)\nNDVI_vegmean = np.nanmean(NDVI_veg)\n\nprint('******************** Analysis completed *******************')\nprint('**********************************************************')\nprint('****************Scene analysis results *******************')\nprint('Scene NDVI [0.7, 1]...................... ' + str(very_healthy) + \" pixels\")\nprint('Scene NDVI [0.55, 0.7] .................. ' + str(healthy) + \" pixels\")\nprint('Scene NDVI [0.45-0.55]................... ' + str(lightstress) + \" pixels\")\nprint('Scene NDVI [0.35-0.45]................... ' + str(moderatestress) + \" pixels\")\nprint('Scene NDVI [0.25-0.35]................... 
' + str(heavystress) + \" pixels\")\nprint('Scene NDVI [<0.25]....................... ' + str(no_veg) + \" pixels\")\nprint('**********************************************************')\nprint('Mean NDVI (ignore negative values)....... ' + str(NDVI_mean))\nprint('Scene NDVI (incl. negative values)....... ' + str(NDVI_scene))\nprint('**********************************************************')\nprint('Total area ............................. ' + str(float(no_veg_area)+float(veg_area)) + \" hectareas\")\nprint('**********************************************************')\nprint(' ')\n\n# vegetation analysis\n\nprint('**********************************************************')\nprint('********** Starting Vegetation Analysis ******************')\nprint('**********************************************************')\n\n\ngrass_area = int(len(grass_list))*10*10/10000\ntree_area = int(len(tree_list))*10*10/10000\nveg_area2 = grass_area + tree_area\n\n# Values for NDVI tree canopy\n\ncounter_1= 0\ncounter_2= 0\ncounter_3= 0\ncounter_4= 0\ncounter_5= 0\ncounter_6= 0\n\nfor elements in NDVI_tree:\n if elements <= 0.25:\n counter_1 = counter_1+1\n else:\n if elements <= 0.35:\n counter_2 = counter_2+1\n else:\n if elements <= 0.45:\n counter_3 = counter_3 + 1\n else:\n if elements <= 0.55:\n counter_4 = counter_4 + 1\n else:\n if elements <= 0.7:\n counter_5 = counter_5 + 1\n else:\n counter_6 = counter_6 + 1\n \nprint('********** Tree canopy NDVI Results ****************')\nprint('Tree canopy NDVI [0.7, 1]...................... ' + str(counter_6) + \" pixels\")\nprint('Tree canopy NDVI [0.55, 0.7] .................. ' + str(counter_5) + \" pixels\")\nprint('Tree canopy NDVI [0.45-0.55]................... ' + str(counter_4) + \" pixels\")\nprint('Tree canopy NDVI [0.35-0.45]................... ' + str(counter_3) + \" pixels\")\nprint('Tree canopy NDVI [0.25-0.35]................... ' + str(counter_2) + \" pixels\")\nprint('Tree canopy NDVI [<0.25]....................... ' + str(counter_1) + \" pixels\")\nprint('**********************************************************')\nprint('Tree canopy area .............................. ' + str(tree_area) + \" hectareas\")\nprint('**********************************************************')\nprint(' ')\n\n# Values for NDVI grassland\n\ncounter_1= 0\ncounter_2= 0\ncounter_3= 0\ncounter_4= 0\ncounter_5= 0\ncounter_6= 0 \n\nfor elements in NDVI_grass:\n if elements <= 0.25:\n counter_1 = counter_1+1\n else:\n if elements <= 0.35:\n counter_2 = counter_2+1\n else:\n if elements <= 0.45:\n counter_3 = counter_3 + 1\n else:\n if elements <= 0.55:\n counter_4 = counter_4 + 1\n else:\n if elements <= 0.7:\n counter_5 = counter_5 + 1\n else:\n counter_6 = counter_6 + 1\n\nprint('************** Grassland NDVI results ***************')\nprint('**********************************************************')\nprint('Grassland NDVI [0.7, 1]...................... ' + str(counter_6) + \" pixels\")\nprint('Grassland NDVI [0.55, 0.7] .................. ' + str(counter_5) + \" pixels\")\nprint('Grassland NDVI [0.45-0.55]................... ' + str(counter_4) + \" pixels\")\nprint('Grassland NDVI [0.35-0.45]................... ' + str(counter_3) + \" pixels\")\nprint('Grassland NDVI [0.25-0.35]................... ' + str(counter_2) + \" pixels\")\nprint('Grassland NDVI [<0.25]....................... ' + str(counter_1) + \" pixels\")\nprint('**********************************************************')\nprint('Grassland area .............................. 
' + str(grass_area) + \" hectareas\")\nprint('**********************************************************')\nprint(' ')\n\n\nprint('********** Vegetation Analysis Results *******************')\nprint('**********************************************************')\nprint('Mean Grassland NDVI............................' + str(NDVI_grassmean))\nprint('Mean Tree Canopy NDVI .........................' + str(NDVI_treemean))\nprint('Mean Vegetation NDVI......................' + str(NDVI_vegmean))\nprint('**********************************************************')\nprint('Total Analysed vegetation area ........... ' + str(veg_area2) + \" hectareas\")\n\n# Plot mask\n\nmask_plot = Image.fromarray(mask, 'RGB')\n#mask_plot.save('20201219_Ökoneu_NDVI_mask.png')\nplt.imshow(mask_plot)",
"**********************************************************\n****************** Analysing values... *******************\n**********************************************************\n******************** Analysis completed *******************\n**********************************************************\n****************Scene analysis results *******************\nScene NDVI [0.7, 1]...................... 88 pixels\nScene NDVI [0.55, 0.7] .................. 18918 pixels\nScene NDVI [0.45-0.55]................... 35173 pixels\nScene NDVI [0.35-0.45]................... 59437 pixels\nScene NDVI [0.25-0.35]................... 98778 pixels\nScene NDVI [<0.25]....................... 239007 pixels\n**********************************************************\nMean NDVI (ignore negative values)....... 0.2623434\nScene NDVI (incl. negative values)....... 0.24802133\n**********************************************************\nTotal area ............................. 4514.01 hectareas\n**********************************************************\n \n**********************************************************\n********** Starting Vegetation Analysis ******************\n**********************************************************\n********** Tree canopy NDVI Results ****************\nTree canopy NDVI [0.7, 1]...................... 0 pixels\nTree canopy NDVI [0.55, 0.7] .................. 286 pixels\nTree canopy NDVI [0.45-0.55]................... 2876 pixels\nTree canopy NDVI [0.35-0.45]................... 14763 pixels\nTree canopy NDVI [0.25-0.35]................... 30517 pixels\nTree canopy NDVI [<0.25]....................... 23047 pixels\n**********************************************************\nTree canopy area .............................. 718.04 hectareas\n**********************************************************\n \n************** Grassland NDVI results ***************\n**********************************************************\nGrassland NDVI [0.7, 1]...................... 79 pixels\nGrassland NDVI [0.55, 0.7] .................. 18357 pixels\nGrassland NDVI [0.45-0.55]................... 30224 pixels\nGrassland NDVI [0.35-0.45]................... 34411 pixels\nGrassland NDVI [0.25-0.35]................... 35778 pixels\nGrassland NDVI [<0.25]....................... 36471 pixels\n**********************************************************\nGrassland area .............................. 1565.75 hectareas\n**********************************************************\n \n********** Vegetation Analysis Results *******************\n**********************************************************\nMean Grassland NDVI............................0.36693471044230647\nMean Tree Canopy NDVI .........................0.2897494834061534\nMean Vegetation NDVI......................0.3426063341627605\n**********************************************************\nTotal Analysed vegetation area ........... 2283.79 hectareas\n"
],
[
"print(len(NDVI_grass))\nprint(len(NDVI_tree))",
"149125\n76176\n"
],
[
"print(len(grass_list))\nprint(len(tree_list))",
"157250\n86513\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a8236258465c2f0bbdbb277a746b6c264ec8490
| 4,593 |
ipynb
|
Jupyter Notebook
|
image_analysis_plantcv/parallel_configuration.ipynb
|
danforthcenter/nsf-reu-2021
|
0ad632bb01c1a4dcf747ea9de671cd84b5f5d4ce
|
[
"MIT"
] | 2 |
2021-02-15T17:17:11.000Z
|
2021-02-16T02:58:12.000Z
|
image_analysis_plantcv/parallel_configuration.ipynb
|
danforthcenter/nsf-reu-2021
|
0ad632bb01c1a4dcf747ea9de671cd84b5f5d4ce
|
[
"MIT"
] | null | null | null |
image_analysis_plantcv/parallel_configuration.ipynb
|
danforthcenter/nsf-reu-2021
|
0ad632bb01c1a4dcf747ea9de671cd84b5f5d4ce
|
[
"MIT"
] | 1 |
2021-02-16T15:18:16.000Z
|
2021-02-16T15:18:16.000Z
| 28.886792 | 72 | 0.469192 |
[
[
[
"from plantcv.parallel import WorkflowConfig",
"_____no_output_____"
],
[
"# Create a new configuration\nconfig = WorkflowConfig()",
"_____no_output_____"
],
[
"# Save as a template\nconfig.save_config(config_file=\"workflow.config\")",
"_____no_output_____"
],
[
"# Test viewing a saved config\nconfig.import_config(config_file=\"multi-plant-analysis.config\")",
"_____no_output_____"
],
[
"# Print the configuration options as a dictionary\nconfig.__dict__",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a823cfd020dae23bb1dd4dcb26ec3dc7091a786
| 360,572 |
ipynb
|
Jupyter Notebook
|
getting_started.ipynb
|
lllukehuang/MedMNIST
|
037ebd18930c50838fc78497ea258f35322f54b9
|
[
"Apache-2.0"
] | null | null | null |
getting_started.ipynb
|
lllukehuang/MedMNIST
|
037ebd18930c50838fc78497ea258f35322f54b9
|
[
"Apache-2.0"
] | null | null | null |
getting_started.ipynb
|
lllukehuang/MedMNIST
|
037ebd18930c50838fc78497ea258f35322f54b9
|
[
"Apache-2.0"
] | null | null | null | 595.0033 | 156,788 | 0.943118 |
[
[
[
"import os\nimport sys\nfrom tqdm import trange\nfrom tqdm import tqdm\nfrom skimage.util import montage\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nimport medmnist\nfrom medmnist.dataset import PathMNIST, ChestMNIST, DermaMNIST, OCTMNIST, PneumoniaMNIST, RetinaMNIST, BreastMNIST, OrganMNISTAxial, OrganMNISTCoronal, OrganMNISTSagittal\nfrom medmnist.evaluator import getAUC, getACC\nfrom medmnist.info import INFO",
"_____no_output_____"
],
[
"print(\"Version:\", medmnist.__version__)",
"Version: 0.2.2\n"
],
[
"data_flag = 'pathmnist'\n# data_flag = 'breastmnist'\ndownload = True\n# input_root = 'tmp_data/'\ninput_root = 'input/'\n\nNUM_EPOCHS = 10\nBATCH_SIZE = 128\nlr = 0.001\n\nflag_to_class = {\n \"pathmnist\": PathMNIST,\n \"chestmnist\": ChestMNIST,\n \"dermamnist\": DermaMNIST,\n \"octmnist\": OCTMNIST,\n \"pneumoniamnist\": PneumoniaMNIST,\n \"retinamnist\": RetinaMNIST,\n \"breastmnist\": BreastMNIST,\n \"organmnist_axial\": OrganMNISTAxial,\n \"organmnist_coronal\": OrganMNISTCoronal,\n \"organmnist_sagittal\": OrganMNISTSagittal,\n}\n\nDataClass = flag_to_class[data_flag]\n\ninfo = INFO[data_flag]\ntask = info['task']\nn_channels = info['n_channels']\nn_classes = len(info['label'])",
"_____no_output_____"
]
],
[
[
"First, we read the MedMNIST data, preprocess them and encapsulate them into dataloader form.",
"_____no_output_____"
]
],
[
[
"# preprocessing\ndata_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomRotation(15),\n transforms.ColorJitter(brightness=1, contrast=1, hue=0.5, saturation=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[.5], std=[.5])\n])\n# data_transform = transforms.Compose([\n# transforms.ToTensor(),\n# transforms.Normalize(mean=[.5], std=[.5])\n# ])\n\n# load the data\ntrain_dataset = DataClass(root=input_root, split='train', transform=data_transform, download=download)\ntest_dataset = DataClass(root=input_root, split='test', transform=data_transform, download=download)\nnonorm_dataset = DataClass(root=input_root, split='train', transform=transforms.ToTensor(), download=download)\n\n# encapsulate data into dataloader form\ntrain_loader = data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)\ntest_loader = data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=True)",
"Using downloaded and verified file: input/pathmnist.npz\nUsing downloaded and verified file: input/pathmnist.npz\nUsing downloaded and verified file: input/pathmnist.npz\n"
],
[
"print(train_dataset)\nprint(\"===================\")\nprint(test_dataset)\nprint(\"===================\")\nprint(nonorm_dataset)",
"Dataset PathMNIST\n Number of datapoints: 89996\n Root location: input/\n Split: train\n Task: multi-class\n Number of channels: 3\n Meaning of labels: {'0': 'adipose', '1': 'background', '2': 'debris', '3': 'lymphocytes', '4': 'mucus', '5': 'smooth muscle', '6': 'normal colon mucosa', '7': 'cancer-associated stroma', '8': 'colorectal adenocarcinoma epithelium'}\n Number of samples: {'train': 89996, 'val': 10004, 'test': 7180}\n Description: PathMNIST: A dataset based on a prior study for predicting survival from colorectal cancer histology slides, which provides a dataset NCT-CRC-HE-100K of 100,000 non-overlapping image patches from hematoxylin & eosin stained histological images, and a test dataset CRC-VAL-HE-7K of 7,180 image patches from a different clinical center. 9 types of tissues are involved, resulting a multi-class classification task. We resize the source images of 3 x 224 x 224 into 3 x 28 x 28, and split NCT-CRC-HE-100K into training and valiation set with a ratio of 9:1.\n License: CC BY 4.0\n===================\nDataset PathMNIST\n Number of datapoints: 7180\n Root location: input/\n Split: test\n Task: multi-class\n Number of channels: 3\n Meaning of labels: {'0': 'adipose', '1': 'background', '2': 'debris', '3': 'lymphocytes', '4': 'mucus', '5': 'smooth muscle', '6': 'normal colon mucosa', '7': 'cancer-associated stroma', '8': 'colorectal adenocarcinoma epithelium'}\n Number of samples: {'train': 89996, 'val': 10004, 'test': 7180}\n Description: PathMNIST: A dataset based on a prior study for predicting survival from colorectal cancer histology slides, which provides a dataset NCT-CRC-HE-100K of 100,000 non-overlapping image patches from hematoxylin & eosin stained histological images, and a test dataset CRC-VAL-HE-7K of 7,180 image patches from a different clinical center. 9 types of tissues are involved, resulting a multi-class classification task. We resize the source images of 3 x 224 x 224 into 3 x 28 x 28, and split NCT-CRC-HE-100K into training and valiation set with a ratio of 9:1.\n License: CC BY 4.0\n===================\nDataset PathMNIST\n Number of datapoints: 89996\n Root location: input/\n Split: train\n Task: multi-class\n Number of channels: 3\n Meaning of labels: {'0': 'adipose', '1': 'background', '2': 'debris', '3': 'lymphocytes', '4': 'mucus', '5': 'smooth muscle', '6': 'normal colon mucosa', '7': 'cancer-associated stroma', '8': 'colorectal adenocarcinoma epithelium'}\n Number of samples: {'train': 89996, 'val': 10004, 'test': 7180}\n Description: PathMNIST: A dataset based on a prior study for predicting survival from colorectal cancer histology slides, which provides a dataset NCT-CRC-HE-100K of 100,000 non-overlapping image patches from hematoxylin & eosin stained histological images, and a test dataset CRC-VAL-HE-7K of 7,180 image patches from a different clinical center. 9 types of tissues are involved, resulting a multi-class classification task. We resize the source images of 3 x 224 x 224 into 3 x 28 x 28, and split NCT-CRC-HE-100K into training and valiation set with a ratio of 9:1.\n License: CC BY 4.0\n"
],
[
"# visualization\n\nimg, target = nonorm_dataset[8]\nif n_channels == 1:\n img = img.reshape(28, 28)\n plt.imshow(img, cmap='gray')\nelse:\n img = img.permute(1, 2, 0)\n plt.imshow(img)\nprint(target)",
"[5]\n"
],
[
"def show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n axes[i][j].imshow(imgs[i * num_cols + j],cmap='gray')\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n return axes\n\ndef apply(img,aug,num_rows=2,num_cols=4,scale=1.5):\n Y=[aug(img) for _ in range(num_rows*num_cols)]\n show_images(Y,num_rows,num_cols,scale)",
"_____no_output_____"
],
[
"print(img.shape)\nimg = img.permute(2, 0, 1)\nimage = img.cpu().clone()\nimage = image.squeeze(0) # 压缩一维\nprint(image.shape)\nunloader = transforms.ToPILImage()\n# image = transforms.ToPILImage(image) # 自动转换为0-255\n# image = unloader(image)\nimage = transforms.ToPILImage()(image)\nprint(image)",
"torch.Size([28, 28, 3])\ntorch.Size([3, 28, 28])\n<PIL.Image.Image image mode=RGB size=28x28 at 0x18F2C28CDA0>\n"
],
[
"plt.imshow(image)",
"_____no_output_____"
],
[
"apply(image,transforms.ColorJitter(brightness=1, contrast=1, hue=0.5, saturation=0.5))",
"_____no_output_____"
],
[
"augs = transforms.Compose(\n [transforms.RandomHorizontalFlip(p=0.5),\n # transforms.RandomCrop(28),\n# transforms.ColorJitter(brightness=0.5, contrast=1, hue=0.5, saturation=0.5),\n transforms.RandomRotation(15)])\napply(image,augs)",
"_____no_output_____"
],
[
"# montage\n\ndef process(n_channels, length=20):\n scale = length * length\n\n image = np.zeros((scale, 28, 28, 3)) if n_channels == 3 else np.zeros((scale, 28, 28))\n index = [i for i in range(scale)]\n np.random.shuffle(index)\n \n for idx in range(scale):\n img, _ = nonorm_dataset[idx]\n if n_channels == 3:\n img = img.permute(1, 2, 0).numpy()\n else:\n img = img.reshape(28, 28).numpy()\n image[index[idx]] = img\n\n if n_channels == 1:\n image = image.reshape(scale, 28, 28)\n arr_out = montage(image)\n plt.imshow(arr_out, cmap='gray')\n else:\n image = image.reshape(scale, 28, 28, 3)\n arr_out = montage(image, multichannel=3)\n plt.imshow(arr_out)\n \nprocess(n_channels=n_channels, length=20)",
"_____no_output_____"
]
],
[
[
"Then, we define a simple model for illustration, object function and optimizer that we use to classify.",
"_____no_output_____"
]
],
[
[
"# define a simple CNN model\n\nclass Net(nn.Module):\n def __init__(self, in_channels, num_classes):\n super(Net, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channels, 16, kernel_size=3),\n nn.BatchNorm2d(16),\n nn.ReLU())\n\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 16, kernel_size=3),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n\n self.layer3 = nn.Sequential(\n nn.Conv2d(16, 64, kernel_size=3),\n nn.BatchNorm2d(64),\n nn.ReLU())\n \n self.layer4 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3),\n nn.BatchNorm2d(64),\n nn.ReLU())\n\n self.layer5 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2))\n\n self.fc = nn.Sequential(\n nn.Linear(64 * 4 * 4, 128),\n nn.ReLU(),\n nn.Linear(128, 128),\n nn.ReLU(),\n nn.Linear(128, num_classes))\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.layer5(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\nmodel = Net(in_channels=n_channels, num_classes=n_classes)\n \n# define loss function and optimizer\nif task == \"multi-label, binary-class\":\n criterion = nn.BCEWithLogitsLoss()\nelse:\n criterion = nn.CrossEntropyLoss()\n \noptimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)",
"_____no_output_____"
]
],
[
[
"Next, we can start to train and evaluate!",
"_____no_output_____"
]
],
[
[
"# train\n\nfor epoch in range(NUM_EPOCHS):\n train_correct = 0\n train_total = 0\n test_correct = 0\n test_total = 0\n \n model.train()\n for inputs, targets in tqdm(train_loader):\n # forward + backward + optimize\n optimizer.zero_grad()\n outputs = model(inputs)\n \n if task == 'multi-label, binary-class':\n targets = targets.to(torch.float32)\n loss = criterion(outputs, targets)\n else:\n targets = targets.squeeze().long()\n loss = criterion(outputs, targets)\n \n loss.backward()\n optimizer.step()",
"100%|██████████| 37/37 [00:11<00:00, 3.10it/s]\n100%|██████████| 37/37 [00:09<00:00, 3.84it/s]\n100%|██████████| 37/37 [00:09<00:00, 3.82it/s]\n100%|██████████| 37/37 [00:09<00:00, 3.76it/s]\n100%|██████████| 37/37 [00:08<00:00, 4.17it/s]\n100%|██████████| 37/37 [00:09<00:00, 4.01it/s]\n 51%|█████▏ | 19/37 [00:05<00:04, 3.72it/s]"
],
[
"# evaluation\n\ndef test(split):\n model.eval()\n y_true = torch.tensor([])\n y_score = torch.tensor([])\n \n data_loader = train_loader if split == 'train' else test_loader\n\n with torch.no_grad():\n for inputs, targets in data_loader:\n outputs = model(inputs)\n\n if task == 'multi-label, binary-class':\n targets = targets.to(torch.float32)\n m = nn.Sigmoid()\n outputs = m(outputs)\n else:\n targets = targets.squeeze().long()\n m = nn.Softmax(dim=1)\n outputs = m(outputs)\n targets = targets.float().resize_(len(targets), 1)\n\n y_true = torch.cat((y_true, targets), 0)\n y_score = torch.cat((y_score, outputs), 0)\n\n y_true = y_true.numpy()\n y_score = y_score.detach().numpy()\n auc = getAUC(y_true, y_score, task)\n acc = getACC(y_true, y_score, task)\n \n print('%s acc: %.3f auc:%.3f' % (split, acc, auc))\n\n \nprint('==> Evaluating ...')\ntest('train')\ntest('test')",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a824332f402d86bdb16dfe9882c2c71065c5f36
| 15,997 |
ipynb
|
Jupyter Notebook
|
LAB11/lab11_0.ipynb
|
JG-geek/CE046_Jay_Gorakhiya
|
a8ae7638727b957c468d37ab72ebc94bbce96e9a
|
[
"MIT"
] | null | null | null |
LAB11/lab11_0.ipynb
|
JG-geek/CE046_Jay_Gorakhiya
|
a8ae7638727b957c468d37ab72ebc94bbce96e9a
|
[
"MIT"
] | null | null | null |
LAB11/lab11_0.ipynb
|
JG-geek/CE046_Jay_Gorakhiya
|
a8ae7638727b957c468d37ab72ebc94bbce96e9a
|
[
"MIT"
] | null | null | null | 29.51476 | 129 | 0.433706 |
[
[
[
"Perform SVM with PCA operation on Breast Cancer Dataset and Iris Dataset.\n\nWith Breast Cancer Dataset",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nbreast_cancer = datasets.load_breast_cancer()\nbreast_data = breast_cancer.data\nbreast_labels = breast_cancer.target\n\nprint(breast_data.shape)\nprint(breast_labels.shape)",
"(569, 30)\n(569,)\n"
],
[
"import numpy as np\nlabels = np.reshape(breast_labels,(569,1))\nfinal_breast_data = np.concatenate([breast_data,labels],axis=1)\nfinal_breast_data.shape\n\nimport pandas as pd\nbreast_dataset = pd.DataFrame(final_breast_data)\nfeatures = breast_cancer.feature_names\nfeatures\n\nfinal_breast_data[0:5]",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(breast_data,\n breast_labels, random_state=46)\n\nprint(X_train.shape, X_test.shape)",
"(426, 30) (143, 30)\n"
]
],
[
[
"Preprocessing: Principal Component Analysis\n-------------------------------------------\n\nWe can use PCA to reduce these features to a manageable size, while maintaining most of the information\nin the dataset.\n\n",
"_____no_output_____"
]
],
[
[
"from sklearn import decomposition\npca = decomposition.PCA(n_components=20, whiten=True)\npca.fit(X_train)",
"_____no_output_____"
]
],
[
[
"The principal components measure deviations about this mean along\northogonal axes.",
"_____no_output_____"
]
],
[
[
"print(pca.components_.shape)",
"(20, 30)\n"
]
],
[
[
"With this projection computed, we can now project our original training\nand test data onto the PCA basis:\n",
"_____no_output_____"
]
],
[
[
"X_train_pca = pca.transform(X_train)\nX_test_pca = pca.transform(X_test)\nprint(X_train_pca.shape)\n\nprint(X_test_pca.shape)",
"(426, 20)\n(143, 20)\n"
]
],
[
[
"Doing the Learning: Support Vector Machines\n-------------------------------------------\n\nNow we'll perform support-vector-machine classification on this reduced\ndataset:",
"_____no_output_____"
]
],
[
[
"from sklearn import svm\nclf = svm.SVC(C=5., gamma=0.001)\nclf.fit(X_train_pca, y_train)\n\nfrom sklearn import metrics\ny_pred = clf.predict(X_test_pca)\nprint(metrics.classification_report(y_test, y_pred))",
" precision recall f1-score support\n\n 0 0.98 0.89 0.93 53\n 1 0.94 0.99 0.96 90\n\n accuracy 0.95 143\n macro avg 0.96 0.94 0.95 143\nweighted avg 0.95 0.95 0.95 143\n\n"
]
],
[
[
"Another interesting metric is the *confusion matrix*, which indicates\nhow often any two items are mixed-up. The confusion matrix of a perfect\nclassifier would only have nonzero entries on the diagonal, with zeros\non the off-diagonal:\n",
"_____no_output_____"
]
],
[
[
"print(metrics.confusion_matrix(y_test, y_pred))",
"[[47 6]\n [ 1 89]]\n"
]
],
[
[
"# With Iris Dataset",
"_____no_output_____"
]
],
[
[
"iris = datasets.load_iris()\niris_data = iris.data\niris_labels = iris.target\n\nprint(iris_data.shape)\nprint(iris_labels.shape)\n\nfeatures = iris.feature_names\nfeatures",
"(150, 4)\n(150,)\n"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(iris_data,\n iris_labels, random_state=46)\n\nprint(X_train.shape, X_test.shape)",
"(112, 4) (38, 4)\n"
]
],
[
[
"Preprocessing: Principal Component Analysis\n\nWe can use PCA to reduce these features to a manageable size, while maintaining most of the information in the dataset.",
"_____no_output_____"
]
],
[
[
"from sklearn import decomposition\npca = decomposition.PCA(n_components=2, whiten=True)\npca.fit(X_train)\n\nprint(pca.components_.shape)",
"(2, 4)\n"
],
[
"X_train_pca = pca.transform(X_train)\nX_test_pca = pca.transform(X_test)\nprint(X_train_pca.shape)\n\nprint(X_test_pca.shape)",
"(112, 2)\n(38, 2)\n"
],
[
"from sklearn import svm\nclf = svm.SVC(C=5., gamma=0.001)\nclf.fit(X_train_pca, y_train)\n\nfrom sklearn import metrics\ny_pred = clf.predict(X_test_pca)\nprint(metrics.classification_report(y_test, y_pred))\n\nprint(metrics.confusion_matrix(y_test, y_pred))",
" precision recall f1-score support\n\n 0 1.00 0.93 0.97 15\n 1 0.56 1.00 0.71 10\n 2 1.00 0.46 0.63 13\n\n accuracy 0.79 38\n macro avg 0.85 0.80 0.77 38\nweighted avg 0.88 0.79 0.79 38\n\n[[14 1 0]\n [ 0 10 0]\n [ 0 7 6]]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a8245a73a490f75f7191f6c5948b0c856cc93b3
| 184,173 |
ipynb
|
Jupyter Notebook
|
Seaborn/Pair plot.ipynb
|
afcarl/Useful-python
|
5d1947052fb25b2388704926e4692511cc162031
|
[
"MIT"
] | null | null | null |
Seaborn/Pair plot.ipynb
|
afcarl/Useful-python
|
5d1947052fb25b2388704926e4692511cc162031
|
[
"MIT"
] | null | null | null |
Seaborn/Pair plot.ipynb
|
afcarl/Useful-python
|
5d1947052fb25b2388704926e4692511cc162031
|
[
"MIT"
] | 1 |
2018-09-05T21:48:57.000Z
|
2018-09-05T21:48:57.000Z
| 1,116.2 | 180,286 | 0.942169 |
[
[
[
"https://seaborn.pydata.org/examples/scatterplot_matrix.html",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\n%matplotlib inline\nsns.set()\n\ndf = sns.load_dataset(\"iris\")\ndf.head()",
"_____no_output_____"
],
[
"sns.pairplot(df, hue=\"species\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4a824c73c9ef102dd1348fef119c6f29eb083cf7
| 4,091 |
ipynb
|
Jupyter Notebook
|
CO by county.ipynb
|
kirbs-/covid-19-dataset
|
3427880186a03339abf82688581b7aab9fe5cb72
|
[
"MIT"
] | null | null | null |
CO by county.ipynb
|
kirbs-/covid-19-dataset
|
3427880186a03339abf82688581b7aab9fe5cb72
|
[
"MIT"
] | null | null | null |
CO by county.ipynb
|
kirbs-/covid-19-dataset
|
3427880186a03339abf82688581b7aab9fe5cb72
|
[
"MIT"
] | null | null | null | 26.738562 | 136 | 0.546566 |
[
[
[
"Colorado updates their data daily at 4pm MDT for cases through 4pm prior day.",
"_____no_output_____"
]
],
[
[
"from selenium import webdriver\nimport time\nimport pandas as pd\nimport pendulum\nimport re\nimport yaml\nimport requests\n# from selenium.webdriver.chrome.options import Options\n# chrome_options = Options()\n#chrome_options.add_argument(\"--disable-extensions\")\n#chrome_options.add_argument(\"--disable-gpu\")\n#chrome_options.add_argument(\"--no-sandbox) # linux only\n# chrome_options.add_argument(\"--start-maximized\")\n# chrome_options.add_argument(\"--headless\")\n# chrome_options.add_argument(\"user-agent=[Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:73.0) Gecko/20100101 Firefox/73.0]\")",
"_____no_output_____"
],
[
"with open('config.yaml', 'r') as f:\n config = yaml.safe_load(f.read())",
"_____no_output_____"
],
[
"state = 'CO'",
"_____no_output_____"
],
[
"scrape_timestamp = pendulum.now().strftime('%Y%m%d%H%M%S')",
"_____no_output_____"
],
[
"# Colorado exposes their data as ArcGIS geojson via an API\nurl = 'https://opendata.arcgis.com/datasets/fbae539746324ca69ff34f086286845b_0.geojson'",
"_____no_output_____"
],
[
"def fetch():\n# driver = webdriver.Chrome('../20190611 - Parts recommendation/chromedriver', options=chrome_options)\n\n# driver.get(url)\n# time.sleep(5)\n\n# # class topBoxH1Text is used in the summary box.\n# data_elements = driver.find_elements_by_class_name('topBoxH1Text')\n\n# datatbl = data_elements[1].text.split('\\n')\n\n# data = [re.search('^(.*) - (\\d*)', row) for row in datatbl]\n\n# data = [row.groups() for row in data if row]\n# page_source = driver.page_source\n# driver.close()\n \n # Colorado exposes their data as ArcGIS geojson via an API\n res = requests.get(url).json()\n data = [ele.pop('properties', None) for ele in res['features']]\n df = pd.DataFrame.from_dict(data)\n \n # adding standard column names\n df['county'] = df.COUNTY\n df['positive_cases'] = df.County_Pos_Cases\n \n return df, str(res)",
"_____no_output_____"
],
[
"def save(df, source):\n df.to_csv(f\"{config['data_folder']}/{state}_county_{scrape_timestamp}.txt\", sep='|', index=False)\n\n with open(f\"{config['data_source_backup_folder']}/{state}_county_{scrape_timestamp}.json\", 'w') as f:\n f.write(source)",
"_____no_output_____"
],
[
"def run():\n df, source = fetch()\n save(df, source)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a825605b972cc07193b293e56443f1c86810925
| 3,714 |
ipynb
|
Jupyter Notebook
|
ranking_algos_experiment.ipynb
|
wissamjur/local-impact
|
afb843d1908ed831e3169bb41c27deafc4e020a5
|
[
"MIT"
] | null | null | null |
ranking_algos_experiment.ipynb
|
wissamjur/local-impact
|
afb843d1908ed831e3169bb41c27deafc4e020a5
|
[
"MIT"
] | null | null | null |
ranking_algos_experiment.ipynb
|
wissamjur/local-impact
|
afb843d1908ed831e3169bb41c27deafc4e020a5
|
[
"MIT"
] | null | null | null | 31.74359 | 151 | 0.614701 |
[
[
[
"import numpy as np\nimport pandas as pd\nfrom surprise import SVDpp, KNNWithMeans\nfrom surprise import Dataset, Reader, accuracy\nfrom util.helpers import load_train_test_surpriselib, load_dataset_explicit\nfrom util.knn import get_knn\nfrom neighborhood_eval.neighborhood_rankings import precision_recall_at_k, get_critical_nbhds\n\n# path to the datasets folder\ndataset_name = 'ml-latest-small'\ndataset_path = '../data/' + dataset_name\n\n# load the data\n# old value was 16000\nratings = load_dataset_explicit(dataset_name, dataset_path, total_users=16000)\nprint('Dataset size:', len(ratings))\nprint('Total no of Users:', len(set(ratings.user_id.to_list())))",
"_____no_output_____"
],
[
"# split data into train/test\ndataset = ratings.copy()\n# test set portion (15%)\nmsk = np.random.rand(len(dataset)) < 0.85\nx = dataset[msk]\ny = dataset[~msk]\n\nprint(\"trainset size:\", len(x))\nprint(\"testset size:\", len(y))\nprint(\"full dataset size\", len(dataset))\n\n# load train, test compatible with surpriselib\ntrainset, testset = load_train_test_surpriselib(x, y)\n\n# nbhds on full dataset\nfull_data = Dataset.load_from_df(dataset[['user_id', 'item_id', 'rating']], Reader(rating_scale=(dataset.rating.min(), dataset.rating.max())))\ntrainset_nbhds = full_data.build_full_trainset()",
"_____no_output_____"
],
[
"# main train algo + algo for nbhd clustering\nalgo = SVDpp().fit(trainset)\nclustering_algo = KNNWithMeans(sim_options = {'name': 'pearson', 'user_based': True}).fit(trainset_nbhds)\n\npredictions = algo.test(testset)\npredictions_df = pd.DataFrame(predictions)\n\n# compute systen metrics\naccuracy.mae(predictions)\naccuracy.rmse(predictions)\naccuracy.mse(predictions)",
"_____no_output_____"
],
[
"nbhd_size = 10\np_thresh = 0.5\nneighborhoods = get_knn(x, clustering_algo, nbhd_size)\n\nprecisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)\nprecisions_df = pd.DataFrame(precisions, index=[0]).T.reset_index().rename({'index':'user_id', 0:'precision'}, axis=1)\nrecalls_df = pd.DataFrame(recalls, index=[0]).T.reset_index().rename({'index':'user_id', 0:'recall'}, axis=1)\n\ncritical_nbhds = get_critical_nbhds(neighborhoods, predictions_df, precisions_df, recalls_df)\ncritical_nbhds.to_csv('output/ranking_algo_exp/' + dataset_name + '.csv', index=False)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a8269a65996d401dcd7e1a4c472501bf8c1ef46
| 281,515 |
ipynb
|
Jupyter Notebook
|
notebooks/range-padding-ratio.ipynb
|
equinor/gaussianfft
|
3865dcd02fdba566be7be662da77f653950b51ac
|
[
"BSD-2-Clause"
] | null | null | null |
notebooks/range-padding-ratio.ipynb
|
equinor/gaussianfft
|
3865dcd02fdba566be7be662da77f653950b51ac
|
[
"BSD-2-Clause"
] | 1 |
2021-09-24T14:03:33.000Z
|
2021-09-24T14:03:33.000Z
|
notebooks/range-padding-ratio.ipynb
|
equinor/gaussianfft
|
3865dcd02fdba566be7be662da77f653950b51ac
|
[
"BSD-2-Clause"
] | null | null | null | 626.982183 | 53,614 | 0.94228 |
[
[
[
"import gaussianfft\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom gaussianfft.util import EmpiricalVariogram\n%matplotlib inline\nplt.rcParams['figure.figsize'] = [15,7]",
"_____no_output_____"
],
[
"def filter_deltas(m, d):\n # Filter nans\n deltas_nan = np.array(d)\n nan_cols = np.any(np.isnan(deltas_nan), axis=0)\n deltas_nan = deltas_nan[:, np.invert(nan_cols)]\n mid_nan = m[np.invert(nan_cols)]\n return mid_nan, deltas_nan, nan_cols\n\ndef plot_deltas(fig, ax, m, d):\n mid_nan, deltas_nan, _ = filter_deltas(m, d)\n \n mid_nan /= np.max(mid_nan)\n # Plot\n cf = ax.contourf(mid_nan, range_length_ratio, deltas_nan, 30, vmin=-0.15, vmax=0.15, cmap='bwr')\n return cf",
"_____no_output_____"
],
[
"# Setup\nnx, ny, nz = 100, 1, 1\ndx, dy, dz = 20, 20, 20\npx, py, pz = nx, ny, nz\ndr = 0.5 * dx\nnmax = 10000\nstrategy = 'origo'\nrange_length_ratio = np.linspace(0.1, 2, 10)\n\n# Derived constants\nLx, Ly, Lz = nx * dx, ny * dy, nz * dz\ndef simulate(vtype):\n # Simulation\n \n deltas = [[], []]\n true_variogram = []\n estimated_variograms = [[], []]\n for r in range_length_ratio:\n v = gaussianfft.variogram(vtype, Lx * r, Ly * r, Lz * r)\n ev = EmpiricalVariogram(v, nx, dx, ny, dy, nz, dz, px, py, pz)\n true_variogram.append(ev.true_variogram(dr))\n refs = ev.pick_reference_points(strategy)\n for dd, ee in zip(deltas, estimated_variograms):\n mid, estimated_variogram, _, _, convrg = ev.estimate_variogram(nmax, dr, refs, analyze_convergence=10)\n ee.append(estimated_variogram)\n dd.append(convrg.deltas[-1])\n # TODO: analyze convergence\n return mid, deltas, true_variogram, estimated_variograms",
"_____no_output_____"
]
],
[
[
"# Gaussian",
"_____no_output_____"
]
],
[
[
"variogram_type = 'gaussian'\nmid, deltas, tcorr, ecorr = simulate(variogram_type)",
"_____no_output_____"
],
[
"# Plot comparison\nfig, axes = plt.subplots(nrows=1, ncols=2)\nc = plot_deltas(fig, axes[0], mid, deltas[0])\nc = plot_deltas(fig, axes[1], mid, deltas[1])\naxes[0].set_ylabel('range/length ratio')\naxes[0].set_xlabel('correlation range')\naxes[1].set_xlabel('correlation range')\nfig.colorbar(c, ax=axes.ravel().tolist())",
"_____no_output_____"
],
[
"# Inspect variogram estimation\nidelta = 0\nratio = 0.75\n\nfmid, fdelta, nancols = filter_deltas(mid, deltas[idelta])\nir = np.argmin(np.abs(range_length_ratio - ratio))\nevario = np.array(ecorr[idelta][ir])[np.invert(nancols)]\ntvario = np.array(tcorr[ir])\n\n\nplt.plot(fmid, evario)\nplt.plot(tvario[0], tvario[1])\n# plt.plot(fmid, fdelta[ir, :])",
"_____no_output_____"
],
[
"# Show a realization\nv = gaussianfft.variogram(variogram_type, ratio * Lx)\nf = gaussianfft.advanced.simulate(v, nx, dx, padx=px)\nplt.plot(f)",
"_____no_output_____"
]
],
[
[
"# Spherical",
"_____no_output_____"
]
],
[
[
"variogram_type = 'spherical'\nmid, deltas, tcorr, ecorr = simulate(variogram_type)",
"_____no_output_____"
],
[
"# Plot comparison\nfig, axes = plt.subplots(nrows=1, ncols=2)\nc = plot_deltas(fig, axes[0], mid, deltas[0])\nc = plot_deltas(fig, axes[1], mid, deltas[1])\naxes[0].set_ylabel('range/length ratio')\naxes[0].set_xlabel('correlation range')\naxes[1].set_xlabel('correlation range')\nfig.colorbar(c, ax=axes.ravel().tolist())",
"_____no_output_____"
]
],
[
[
"# Exponential",
"_____no_output_____"
]
],
[
[
"variogram_type = 'exponential'\nmid, deltas, tcorr, ecorr = simulate(variogram_type)",
"_____no_output_____"
],
[
"# Plot comparison\nfig, axes = plt.subplots(nrows=1, ncols=2)\nc = plot_deltas(fig, axes[0], mid, deltas[0])\nc = plot_deltas(fig, axes[1], mid, deltas[1])\naxes[0].set_ylabel('range/length ratio')\naxes[0].set_xlabel('correlation range')\naxes[1].set_xlabel('correlation range')\nfig.colorbar(c, ax=axes.ravel().tolist())",
"_____no_output_____"
]
],
[
[
"# Matern52",
"_____no_output_____"
]
],
[
[
"variogram_type = 'matern52'\nmid, deltas, tcorr, ecorr = simulate(variogram_type)",
"_____no_output_____"
],
[
"# Plot comparison\nfig, axes = plt.subplots(nrows=1, ncols=2)\nc = plot_deltas(fig, axes[0], mid, deltas[0])\nc = plot_deltas(fig, axes[1], mid, deltas[1])\naxes[0].set_ylabel('range/length ratio')\naxes[0].set_xlabel('correlation range')\naxes[1].set_xlabel('correlation range')\nfig.colorbar(c, ax=axes.ravel().tolist())",
"_____no_output_____"
]
],
[
[
"# General Exponential (1.5)",
"_____no_output_____"
]
],
[
[
"variogram_type = 'general_exponential'\nmid, deltas, tcorr, ecorr = simulate(variogram_type)",
"_____no_output_____"
],
[
"# Plot comparison\nfig, axes = plt.subplots(nrows=1, ncols=2)\nc = plot_deltas(fig, axes[0], mid, deltas[0])\nc = plot_deltas(fig, axes[1], mid, deltas[1])\naxes[0].set_ylabel('range/length ratio')\naxes[0].set_xlabel('correlation range')\naxes[1].set_xlabel('correlation range')\nfig.colorbar(c, ax=axes.ravel().tolist())",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a827375483b1a809b3695545048f2f82c3b6d3b
| 11,354 |
ipynb
|
Jupyter Notebook
|
Bommidi_Python_OOPS.ipynb
|
Shreyansh-Gupta/Open-contributions
|
e72a9ce2b0aa6a48081921bf8138b91ad259c422
|
[
"MIT"
] | 61 |
2020-09-10T05:16:19.000Z
|
2021-11-07T00:22:46.000Z
|
Bommidi_Python_OOPS.ipynb
|
Shreyansh-Gupta/Open-contributions
|
e72a9ce2b0aa6a48081921bf8138b91ad259c422
|
[
"MIT"
] | 72 |
2020-09-12T09:34:19.000Z
|
2021-08-01T17:48:46.000Z
|
Bommidi_Python_OOPS.ipynb
|
Shreyansh-Gupta/Open-contributions
|
e72a9ce2b0aa6a48081921bf8138b91ad259c422
|
[
"MIT"
] | 571 |
2020-09-10T01:52:56.000Z
|
2022-03-26T17:26:23.000Z
| 5,677 | 11,353 | 0.62031 |
[
[
[
"# Creating a class",
"_____no_output_____"
]
],
[
[
"class Student: # created a class \"Student\" \n name = \"Tom\"\n grade = \"A\"\n age = 15\n \n def display(self):\n print(self.name,self.grade,self.age) \n \n# There will be no output here, because we are not invoking (calling) the \"display\" function to print",
"_____no_output_____"
]
],
[
[
"## Creating an object",
"_____no_output_____"
]
],
[
[
"class Student: \n name = \"Tom\"\n grade = \"A\"\n age = 15\n \n def display(self):\n print(self.name,self.grade,self.age) \n \ns1 = Student() # created an object \"s1\" of class \"Student\"\n\ns1.display() # displaying the details through the \"display\" finction",
"Tom A 15\n"
]
],
[
[
"## Creating a constructor",
"_____no_output_____"
],
[
"> If we give parameters inside the constructor (inside __init__) then that type of representation is called \"Parameterized constructor\"\n\n> If we don't give parameters inside the constructor (inside __init__) then that type of representation is called \"Non-Parameterized constructor\"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"# This is a parameterized constructor\n\nclass Student:\n def __init__(self,name,study,occupation): # intializing all the parameters we need i.e, name, study, occupation in the constructor\n self.name = name\n self.study = study\n self.occupation = occupation\n \n def output(self):\n print(self.name + \" completed \" + self.study + \" and working as a \" + self.occupation)\n \n\ns1 = Student('Tom', 'Btech' ,'software engineer') # creating two objects and giving the \ns2 = Student('Jerry', \"MBBS\", 'doctor') # input as the order mentioned in the \" __init__ \" function\n\ns1.output()\ns2.output()",
"Tom completed Btech and working as a software engineer\nJerry completed MBBS and working as a doctor\n"
],
[
"# This is a non-parameterized constructor\n\nclass Student:\n def __init__(self):\n print(\" This is a Non parameterized constructor\")\n \ns1 = Student()",
" This is a Non parameterized constructor\n"
]
],
[
[
"## Python in-built class functions",
"_____no_output_____"
]
],
[
[
"class Student:\n def __init__(self,name,grade,age):\n self.name = name\n self.grade = grade\n self.age = age\n \ns1 = Student(\"Tom\",\"A\",15)\n\nprint(getattr(s1,'name')) # we get the value of the particular attribute \nprint(getattr(s1,\"age\")) # Here,we are asking for attributes \"name\",\"age\" and the value of those attributes are \"Tom\",15 respectively\n \n \nsetattr(s1,\"age\",20) # setting the attribute (changing)\nprint(\"Age of the tom is changed using 'setattr' \")\nprint(getattr(s1,\"age\"))\n\n\nprint(\"Checking whether the particular attribute is there or not\")\nprint(hasattr(s1,\"name\")) # Returns \"True\" if the attribute is intialized on our class \nprint(hasattr(s1,\"school\")) # or else gives \"False\"",
"Tom\n15\nAge of the tom is changed using 'setattr' \n20\nChecking whether the particular attribute is there or not\nTrue\nFalse\n"
]
],
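[
[
"For completeness, `delattr` is the counterpart of `setattr`: it removes an attribute from an object. The small sketch below reuses the `Student` class defined above.",
"_____no_output_____"
]
],
[
[
"s2 = Student(\"Jerry\",\"B\",14) # a fresh object of the \"Student\" class\n\ndelattr(s2,\"grade\") # removes the \"grade\" attribute from this object\nprint(hasattr(s2,\"grade\")) # False, because the attribute was deleted",
"_____no_output_____"
]
],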
[
[
"## Built-in class attributes",
"_____no_output_____"
]
],
[
[
"class Student:\n '''This is doc string where we mention,what's the idea of this progam '''\n def __init__(self,name,grade,age):\n self.name = name\n self.grade = grade\n self.age = age\n \ns1 = Student(\"Tom\",\"A\",15)\nprint(Student.__doc__) # printing the doc string\nprint(s1.__dict__) # printing the attributes in a dictionary data type way ",
"This is doc string where we mention,what's the idea of this progam \n{'name': 'Tom', 'grade': 'A', 'age': 15}\n"
]
],
[
[
"# Inheritance",
"_____no_output_____"
]
],
[
[
"class Parent: \n print(\"This is the parent class\") \n def dog(self):\n print(\"Dog barks\")\n \n \nclass Child(Parent): # Inheriting the \"parent\" class using \"child\" class\n def lion(self):\n print(\"Lion roars\")\n \nc1 = Child() # \"c1\" is the object of \"Child\" class\nc1.lion()\nc1.dog() # because of inheritance, the print statement inside the \"dog\" function , which is inside the \"Parent\" class is also printed.\n ",
"This is the parent class\nLion roars\nDog barks\n"
]
],
[
[
"## Multi-level inheritance",
"_____no_output_____"
]
],
[
[
"class Parent: \n print(\"This is the parent class\") \n def dog(self):\n print(\"Dog barks\")\n \n \nclass Child(Parent): # Inheriting the \"parent\" class using \"child\" class\n def lion(self):\n print(\"Lion roars\")\n \nclass Grandchild(Child): # Inheriting the \"Child\" class\n def pegion(self):\n print(\"pegion coos\")\n \nc1 = Grandchild() # \"c1\" is the object of \"Grandchild\" class\nc1.lion()\nc1.dog() # because of inheritance, the print statement inside the \"dog\" function , which is inside the \"Parent\" class is also printed.\nc1.pegion() # because of inheritance, the print statement inside the \"lion\" function , which is inside the \"Child\" class is also printed.\n ",
"This is the parent class\nLion roars\nDog barks\npegion coos\n"
]
],
[
[
"# Multiple inheritance",
"_____no_output_____"
]
],
[
[
"class Calculator1:\n def sum(self,a,b):\n return a + b\n\nclass Calculator2:\n def mul(self,a,b):\n return a * b\n\nclass Derived(Calculator1,Calculator2): # Multiple inheritance, since it is having multiple (in this case 2) class arguments.\n def div(self,a,b):\n return a / b\n \n\nd = Derived()\nprint(d.sum(20,30))\nprint(d.mul(20,30))\nprint(d.div(20,30))\n\n",
"50\n600\n0.6666666666666666\n"
]
],
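[
[
"When several base classes are involved, Python follows the Method Resolution Order (MRO) to decide which class is searched first for an attribute. The small sketch below only inspects the `Derived` class defined above.",
"_____no_output_____"
]
],
[
[
"# The MRO lists the classes searched from left to right when resolving a method:\n# Derived -> Calculator1 -> Calculator2 -> object\nprint(Derived.__mro__)",
"_____no_output_____"
]
],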
[
[
"# Polymorphism",
"_____no_output_____"
]
],
[
[
"class Teacher:\n def intro(self):\n print(\"I am a teacher\")\n \n def experience(self):\n print(\"3 to 4 years\")\n \n \nclass Lecturer:\n def intro(self):\n print(\"I am a lecturer\")\n \n def experience(self):\n print(\"5 to 6 years\")\n \n \n\nclass Professor:\n def intro(self):\n print(\"I am a professor\")\n \n def experience(self):\n print(\"8 to 10 years\")\n \n \n \n\n# Common Interface for all persons\ndef category(person):\n person.intro() # only intros are printed \n\n # type \"person.experience\" instead of \"person.intro\", we get only experience. If we type both \"person.intro\" and \"person.experience\" , then both statements are printed.\n\n# instantiate objects \nt = Teacher()\nl = Lecturer()\np = Professor()\n\n\n# passing the object\ncategory(t)\ncategory(l)\ncategory(p)",
"I am a teacher\nI am a lecturer\nI am a professor\n"
]
],
[
[
"# Encapsulation",
"_____no_output_____"
]
],
[
[
"class Computer:\n\n def __init__(self):\n self.__maxprice = 900 # maxprice is a private data bcz, it is starting with \" __ \" underscores\n \n def sell(self):\n print(\"Selling Price: {}\".format(self.__maxprice))\n \n \n def setMaxPrice(self, price): # This method is used to set the private data\n self.__maxprice = price\n\n\nc = Computer() # c is an object of \"Computer\" class\nc.sell()\n\n# change the price\nc.__maxprice = 1000 # Here, we are modifying our data directly \"__maxprice\" to 1000. But the data is not modified because it is a private data\nc.sell()\n\n# using setter function \nc.setMaxPrice(1000) # In order to change the private data, we have to take help of the method \"setMaxPrice\" and then now the data is modified\nc.sell() # Invoking (calling) the \"sell\" method (function)\n",
"Selling Price: 900\nSelling Price: 900\nSelling Price: 1000\n"
]
],
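[
[
"Why did the direct assignment `c.__maxprice = 1000` not change the price? Python applies name mangling to names with two leading underscores, so that assignment created a brand-new attribute instead of touching the private one. The small sketch below inspects a fresh `Computer` object to show this.",
"_____no_output_____"
]
],
[
[
"c2 = Computer()\nc2.__maxprice = 1000 # creates a NEW attribute; the private one is untouched\n\n# The instance now holds both the mangled private name and the new attribute.\nprint(c2.__dict__) # {'_Computer__maxprice': 900, '__maxprice': 1000}\nprint(c2._Computer__maxprice) # the real (mangled) name of the \"private\" data",
"_____no_output_____"
]
],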
[
[
"## Data abstraction",
"_____no_output_____"
]
],
[
[
"from abc import ABC,abstractclassmethod\n\nclass Company(ABC): # this is the abstract class and \"ABC\" is called as \"Abstract Base Class\" which is imported from module \"abc\"\n \n # this is the abstact class method and that \"@\" is called as decorators. With the help of the decorator only we can make the method as abstract class method\n @abstractclassmethod\n def developer(self): \n pass\n \nclass Jr_developer(Company):\n def developer(self):\n print(\"I am a jr.developer and develops small applications\")\n \nclass Sr_developer(Company):\n def developer(self):\n print(\"I am a sr.developer and develops large applications\")\n \n\nj = Jr_developer()\ns = Sr_developer()\n\nj.developer()\ns.developer()\n\n",
"I am a jr.developer and develops small applications\nI am a sr.developer and develops large applications\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a827a729a9d2005c4f02b9ec2925347089931d6
| 38,996 |
ipynb
|
Jupyter Notebook
|
Dimensionality Reduction/Linear Discriminant Analysis/linear_discriminant_analysis.ipynb
|
d4rk-lucif3r/Machine-Learning-Models
|
403c7a2a37420f1ce99985422fb44e2c330742f5
|
[
"MIT"
] | 3 |
2021-01-04T08:42:00.000Z
|
2021-05-30T11:39:58.000Z
|
Dimensionality Reduction/Linear Discriminant Analysis/linear_discriminant_analysis.ipynb
|
d4rk-lucif3r/Machine-Learning-Models
|
403c7a2a37420f1ce99985422fb44e2c330742f5
|
[
"MIT"
] | 3 |
2021-05-29T17:09:41.000Z
|
2021-05-29T17:10:32.000Z
|
Dimensionality Reduction/Linear Discriminant Analysis/linear_discriminant_analysis.ipynb
|
d4rk-lucif3r/Machine-Learning-Models
|
403c7a2a37420f1ce99985422fb44e2c330742f5
|
[
"MIT"
] | 2 |
2021-05-26T03:54:13.000Z
|
2021-08-17T06:34:27.000Z
| 105.967391 | 15,886 | 0.827905 |
[
[
[
"# Linear Discriminant Analysis (LDA)",
"_____no_output_____"
],
[
"## Importing the libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"## Importing the dataset",
"_____no_output_____"
]
],
[
[
"dataset = pd.read_csv('Wine.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values",
"_____no_output_____"
]
],
[
[
"## Splitting the dataset into the Training set and Test set",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)",
"_____no_output_____"
]
],
[
[
"## Feature Scaling",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)",
"_____no_output_____"
]
],
[
[
"## Applying LDA",
"_____no_output_____"
]
],
[
[
"from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nlda = LDA(n_components = 2)\nX_train = lda.fit_transform(X_train, y_train)\nX_test = lda.transform(X_test)",
"_____no_output_____"
]
],
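[
[
"A detail worth noting: LDA can produce at most (number of classes - 1) discriminant axes, so `n_components = 2` is the maximum for the three wine classes. As a small sketch, the fitted `lda` object also reports how much between-class variance each axis explains.",
"_____no_output_____"
]
],
[
[
"# Share of the between-class variance explained by each discriminant axis.\nprint(lda.explained_variance_ratio_)",
"_____no_output_____"
]
],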
[
[
"## Training the Logistic Regression model on the Training set",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"## Making the Confusion Matrix",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix, accuracy_score\ny_pred = classifier.predict(X_test)\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\naccuracy_score(y_test, y_pred)",
"[[14 0 0]\n [ 0 16 0]\n [ 0 0 6]]\n"
]
],
[
[
"## Visualising the Training set results",
"_____no_output_____"
]
],
[
[
"from matplotlib.colors import ListedColormap\nX_set, y_set = X_train, y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green', 'blue'))(i), label = j)\nplt.title('Logistic Regression (Training set)')\nplt.xlabel('LD1')\nplt.ylabel('LD2')\nplt.legend()\nplt.show()",
"'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n"
]
],
[
[
"## Visualising the Test set results",
"_____no_output_____"
]
],
[
[
"from matplotlib.colors import ListedColormap\nX_set, y_set = X_test, y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green', 'blue'))(i), label = j)\nplt.title('Logistic Regression (Test set)')\nplt.xlabel('LD1')\nplt.ylabel('LD2')\nplt.legend()\nplt.show()",
"'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n'c' argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with 'x' & 'y'. Please use a 2-D array with a single row if you really want to specify the same RGB or RGBA value for all points.\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a827b7c0d0aefb3b8c1f09149cdce2ff1decf12
| 47,358 |
ipynb
|
Jupyter Notebook
|
onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_CPU.ipynb
|
DanielMemmel/onnxruntime
|
0a8bfb10fa42a5f5b871bf28b0ff6694d2da866e
|
[
"MIT"
] | 2 |
2021-07-24T01:13:36.000Z
|
2021-11-17T11:03:52.000Z
|
onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_CPU.ipynb
|
DanielMemmel/onnxruntime
|
0a8bfb10fa42a5f5b871bf28b0ff6694d2da866e
|
[
"MIT"
] | 4 |
2020-12-04T21:00:38.000Z
|
2022-01-22T12:49:30.000Z
|
onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_CPU.ipynb
|
DanielMemmel/onnxruntime
|
0a8bfb10fa42a5f5b871bf28b0ff6694d2da866e
|
[
"MIT"
] | 1 |
2020-06-08T19:08:12.000Z
|
2020-06-08T19:08:12.000Z
| 47.500502 | 512 | 0.576988 |
[
[
[
"Copyright (c) Microsoft Corporation. All rights reserved. \nLicensed under the MIT License.",
"_____no_output_____"
],
[
"# Inference PyTorch Bert Model with ONNX Runtime on CPU",
"_____no_output_____"
],
[
"In this tutorial, you'll be introduced to how to load a Bert model from PyTorch, convert it to ONNX, and inference it for high performance using ONNX Runtime. In the following sections, we are going to use the Bert model trained with Stanford Question Answering Dataset (SQuAD) dataset as an example. Bert SQuAD model is used in question answering scenarios, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\n\nThis notebook is for CPU inference. For GPU inferenece, please look at another notebook [Inference PyTorch Bert Model with ONNX Runtime on GPU](PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb).",
"_____no_output_____"
],
[
"## 0. Prerequisites ##\n\nIf you have Jupyter Notebook, you may directly run this notebook. We will use pip to install or upgrade [PyTorch](https://pytorch.org/), [OnnxRuntime](https://microsoft.github.io/onnxruntime/) and other required packages.\n\nOtherwise, you can setup a new environment. First, we install [AnaConda](https://www.anaconda.com/distribution/). Then open an AnaConda prompt window and run the following commands:\n\n```console\nconda create -n cpu_env python=3.6\nconda activate cpu_env\nconda install jupyter\njupyter notebook\n```\nThe last command will launch Jupyter Notebook and we can open this notebook in browser to continue.",
"_____no_output_____"
]
],
[
[
"# Install or upgrade PyTorch 1.5.0 and OnnxRuntime 1.3.0 for CPU-only.\nimport sys\n!{sys.executable} -m pip install --upgrade torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html\n!{sys.executable} -m pip install --upgrade onnxruntime==1.3.0\n!{sys.executable} -m pip install --upgrade onnxruntime-tools\n\n# Install other packages used in this notebook.\n!{sys.executable} -m pip install transformers==2.11.0\n!{sys.executable} -m pip install wget netron",
"Looking in links: https://download.pytorch.org/whl/torch_stable.html\nRequirement already up-to-date: torch==1.5.0+cpu in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (1.5.0+cpu)\nRequirement already up-to-date: torchvision==0.6.0+cpu in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (0.6.0+cpu)\nRequirement already satisfied, skipping upgrade: numpy in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from torch==1.5.0+cpu) (1.18.1)\nRequirement already satisfied, skipping upgrade: future in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from torch==1.5.0+cpu) (0.18.2)\nRequirement already satisfied, skipping upgrade: pillow>=4.1.1 in c:\\users\\tianl\\appdata\\roaming\\python\\python36\\site-packages (from torchvision==0.6.0+cpu) (7.0.0)\nRequirement already up-to-date: onnxruntime==1.3.0 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (1.3.0)\nRequirement already satisfied, skipping upgrade: onnx>=1.2.3 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime==1.3.0) (1.7.0)\nRequirement already satisfied, skipping upgrade: numpy>=1.16.6 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime==1.3.0) (1.18.1)\nRequirement already satisfied, skipping upgrade: protobuf in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime==1.3.0) (3.11.3)\nRequirement already satisfied, skipping upgrade: typing-extensions>=3.6.2.1 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnx>=1.2.3->onnxruntime==1.3.0) (3.7.4.1)\nRequirement already satisfied, skipping upgrade: six in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnx>=1.2.3->onnxruntime==1.3.0) (1.14.0)\nRequirement already satisfied, skipping upgrade: setuptools in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from protobuf->onnxruntime==1.3.0) (45.2.0.post20200210)\nRequirement already up-to-date: onnxruntime-tools in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (1.3.0.1007)\nRequirement already satisfied, skipping upgrade: numpy in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (1.18.1)\nRequirement already satisfied, skipping upgrade: py3nvml in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (0.2.5)\nRequirement already satisfied, skipping upgrade: packaging in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (20.1)\nRequirement already satisfied, skipping upgrade: onnx in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (1.7.0)\nRequirement already satisfied, skipping upgrade: psutil in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (5.7.0)\nRequirement already satisfied, skipping upgrade: py-cpuinfo in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (5.0.0)\nRequirement already satisfied, skipping upgrade: coloredlogs in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnxruntime-tools) (14.0)\nRequirement already satisfied, skipping upgrade: xmltodict in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from py3nvml->onnxruntime-tools) (0.12.0)\nRequirement already satisfied, skipping upgrade: pyparsing>=2.0.2 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from packaging->onnxruntime-tools) (2.4.6)\nRequirement already satisfied, skipping upgrade: six in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from packaging->onnxruntime-tools) (1.14.0)\nRequirement already satisfied, skipping upgrade: protobuf in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnx->onnxruntime-tools) 
(3.11.3)\nRequirement already satisfied, skipping upgrade: typing-extensions>=3.6.2.1 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from onnx->onnxruntime-tools) (3.7.4.1)\nRequirement already satisfied, skipping upgrade: humanfriendly>=7.1 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from coloredlogs->onnxruntime-tools) (8.1)\nRequirement already satisfied, skipping upgrade: setuptools in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from protobuf->onnx->onnxruntime-tools) (45.2.0.post20200210)\nRequirement already satisfied, skipping upgrade: pyreadline; sys_platform == \"win32\" in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from humanfriendly>=7.1->coloredlogs->onnxruntime-tools) (2.1)\nRequirement already satisfied: transformers==2.11.0 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (2.11.0)\nRequirement already satisfied: regex!=2019.12.17 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (2020.2.20)\nRequirement already satisfied: dataclasses; python_version < \"3.7\" in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (0.7)\nRequirement already satisfied: filelock in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (3.0.12)\nRequirement already satisfied: requests in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (2.23.0)\nRequirement already satisfied: sacremoses in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (0.0.38)\nRequirement already satisfied: packaging in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (20.1)\nRequirement already satisfied: sentencepiece in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (0.1.85)\nRequirement already satisfied: tqdm>=4.27 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (4.43.0)\nRequirement already satisfied: tokenizers==0.7.0 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (0.7.0)\nRequirement already satisfied: numpy in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from transformers==2.11.0) (1.18.1)\nRequirement already satisfied: chardet<4,>=3.0.2 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from requests->transformers==2.11.0) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from requests->transformers==2.11.0) (2.9)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from requests->transformers==2.11.0) (1.25.8)\nRequirement already satisfied: certifi>=2017.4.17 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from requests->transformers==2.11.0) (2020.4.5.1)\nRequirement already satisfied: six in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from sacremoses->transformers==2.11.0) (1.14.0)\nRequirement already satisfied: joblib in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from sacremoses->transformers==2.11.0) (0.14.1)\nRequirement already satisfied: click in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from sacremoses->transformers==2.11.0) (7.0)\nRequirement already satisfied: pyparsing>=2.0.2 in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (from packaging->transformers==2.11.0) (2.4.6)\nRequirement already satisfied: wget in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages (3.2)\nRequirement already satisfied: netron in d:\\anaconda3\\envs\\cpu_env\\lib\\site-packages 
(3.9.6)\n"
]
],
[
[
"## 1. Load Pretrained Bert model ##",
"_____no_output_____"
],
[
"We begin by downloading the SQuAD data file and store them in the specified location.",
"_____no_output_____"
]
],
[
[
"import os\n\ncache_dir = \"./squad\"\nif not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\npredict_file_url = \"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json\"\npredict_file = os.path.join(cache_dir, \"dev-v1.1.json\")\nif not os.path.exists(predict_file):\n import wget\n print(\"Start downloading predict file.\")\n wget.download(predict_file_url, predict_file)\n print(\"Predict file downloaded.\")",
"_____no_output_____"
]
],
[
[
"Specify some model configuration variables and constant.",
"_____no_output_____"
]
],
[
[
"# For fine tuned large model, the model name is \"bert-large-uncased-whole-word-masking-finetuned-squad\". Here we use bert-base for demo.\nmodel_name_or_path = \"bert-base-cased\"\nmax_seq_length = 128\ndoc_stride = 128\nmax_query_length = 64\n\n# Enable overwrite to export onnx model and download latest script each time when running this notebook.\nenable_overwrite = True\n\n# Total samples to inference. It shall be large enough to get stable latency measurement.\ntotal_samples = 100",
"_____no_output_____"
]
],
[
[
"Start to load model from pretrained. This step could take a few minutes. ",
"_____no_output_____"
]
],
[
[
"# The following code is adapted from HuggingFace transformers\n# https://github.com/huggingface/transformers/blob/master/examples/run_squad.py\n\nfrom transformers import (BertConfig, BertForQuestionAnswering, BertTokenizer)\n\n# Load pretrained model and tokenizer\nconfig_class, model_class, tokenizer_class = (BertConfig, BertForQuestionAnswering, BertTokenizer)\nconfig = config_class.from_pretrained(model_name_or_path, cache_dir=cache_dir)\ntokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case=True, cache_dir=cache_dir)\nmodel = model_class.from_pretrained(model_name_or_path,\n from_tf=False,\n config=config,\n cache_dir=cache_dir)\n# load some examples\nfrom transformers.data.processors.squad import SquadV1Processor\n\nprocessor = SquadV1Processor()\nexamples = processor.get_dev_examples(None, filename=predict_file)\n\nfrom transformers import squad_convert_examples_to_features\nfeatures, dataset = squad_convert_examples_to_features( \n examples=examples[:total_samples], # convert just enough examples for this notebook\n tokenizer=tokenizer,\n max_seq_length=max_seq_length,\n doc_stride=doc_stride,\n max_query_length=max_query_length,\n is_training=False,\n return_dataset='pt'\n )",
"100%|██████████████████████████████████████████████████████████████████████████████████| 48/48 [00:04<00:00, 10.47it/s]\nconvert squad examples to features: 100%|████████████████████████████████████████████| 100/100 [00:01<00:00, 91.16it/s]\nadd example index and unique id: 100%|███████████████████████████████████████████| 100/100 [00:00<00:00, 100007.25it/s]\n"
]
],
[
[
"## 2. Export the loaded model ##\nOnce the model is loaded, we can export the loaded PyTorch model to ONNX.",
"_____no_output_____"
]
],
[
[
"output_dir = \"./onnx\"\nif not os.path.exists(output_dir):\n os.makedirs(output_dir) \nexport_model_path = os.path.join(output_dir, 'bert-base-cased-squad.onnx')\n\nimport torch\ndevice = torch.device(\"cpu\")\n\n# Get the first example data to run the model and export it to ONNX\ndata = dataset[0]\ninputs = {\n 'input_ids': data[0].to(device).reshape(1, max_seq_length),\n 'attention_mask': data[1].to(device).reshape(1, max_seq_length),\n 'token_type_ids': data[2].to(device).reshape(1, max_seq_length)\n}\n\n# Set model to inference mode, which is required before exporting the model because some operators behave differently in \n# inference and training mode.\nmodel.eval()\nmodel.to(device)\n\nif enable_overwrite or not os.path.exists(export_model_path):\n with torch.no_grad():\n symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}\n torch.onnx.export(model, # model being run\n args=tuple(inputs.values()), # model input (or a tuple for multiple inputs)\n f=export_model_path, # where to save the model (can be a file or file-like object)\n opset_version=11, # the ONNX version to export the model to\n do_constant_folding=True, # whether to execute constant folding for optimization\n input_names=['input_ids', # the model's input names\n 'input_mask', \n 'segment_ids'],\n output_names=['start', 'end'], # the model's output names\n dynamic_axes={'input_ids': symbolic_names, # variable length axes\n 'input_mask' : symbolic_names,\n 'segment_ids' : symbolic_names,\n 'start' : symbolic_names,\n 'end' : symbolic_names})\n print(\"Model exported at \", export_model_path)",
"Model exported at ./onnx\\bert-base-cased-squad.onnx\n"
]
],
[
[
"## 3. PyTorch Inference ##\nUse PyTorch to evaluate an example input for comparison purpose.",
"_____no_output_____"
]
],
[
[
"import time\n\n# Measure the latency. It is not accurate using Jupyter Notebook, it is recommended to use standalone python script.\nlatency = []\nwith torch.no_grad():\n for i in range(total_samples):\n data = dataset[i]\n inputs = {\n 'input_ids': data[0].to(device).reshape(1, max_seq_length),\n 'attention_mask': data[1].to(device).reshape(1, max_seq_length),\n 'token_type_ids': data[2].to(device).reshape(1, max_seq_length)\n }\n start = time.time()\n outputs = model(**inputs)\n latency.append(time.time() - start)\nprint(\"PyTorch {} Inference time = {} ms\".format(device.type, format(sum(latency) * 1000 / len(latency), '.2f')))",
"PyTorch cpu Inference time = 198.99 ms\n"
]
],
[
[
"## 4. Inference ONNX Model with ONNX Runtime ##\n\n### OpenMP Environment Variable\n\nOpenMP environment variables are very important for CPU inference of Bert model. It has large performance impact on Bert model so you might need set it carefully according to [Performance Test Tool](#Performance-Test-Tool) result in later part of this notebook.\n\nSetting environment variables shall be done before importing onnxruntime. Otherwise, they might not take effect.",
"_____no_output_____"
]
],
[
[
"import psutil\n\n# You may change the settings in this cell according to Performance Test Tool result.\nos.environ[\"OMP_NUM_THREADS\"] = str(psutil.cpu_count(logical=True))\nos.environ[\"OMP_WAIT_POLICY\"] = 'ACTIVE'",
"_____no_output_____"
]
],
[
[
"Now we are ready to inference the model with ONNX Runtime. Here we can see that OnnxRuntime has better performance than PyTorch. \n\nIt is better to use standalone python script like [Performance Test tool](#Performance-Test-tool) to get accurate performance results.",
"_____no_output_____"
]
],
[
[
"import onnxruntime\nimport numpy\n\n# Print warning if user uses onnxruntime-gpu instead of onnxruntime package.\nif 'CUDAExecutionProvider' in onnxruntime.get_available_providers():\n print(\"warning: onnxruntime-gpu is not built with OpenMP. You might try onnxruntime package to test CPU inference.\")\n\nsess_options = onnxruntime.SessionOptions()\n\n# Optional: store the optimized graph and view it using Netron to verify that model is fully optimized.\n# Note that this will increase session creation time, so it is for debugging only.\nsess_options.optimized_model_filepath = os.path.join(output_dir, \"optimized_model_cpu.onnx\")\n\n# intra_op_num_threads is needed for OnnxRuntime 1.2.0.\n# For OnnxRuntime 1.3.0 or later, this does not have effect unless you are using onnxruntime-gpu package.\nsess_options.intra_op_num_threads=1\n\n\n# Specify providers when you use onnxruntime-gpu for CPU inference.\nsession = onnxruntime.InferenceSession(export_model_path, sess_options, providers=['CPUExecutionProvider'])\n\nlatency = []\nfor i in range(total_samples):\n data = dataset[i]\n # TODO: use IO Binding (see https://github.com/microsoft/onnxruntime/pull/4206) to improve performance.\n ort_inputs = {\n 'input_ids': data[0].cpu().reshape(1, max_seq_length).numpy(),\n 'input_mask': data[1].cpu().reshape(1, max_seq_length).numpy(),\n 'segment_ids': data[2].cpu().reshape(1, max_seq_length).numpy()\n }\n start = time.time()\n ort_outputs = session.run(None, ort_inputs)\n latency.append(time.time() - start)\nprint(\"OnnxRuntime cpu Inference time = {} ms\".format(format(sum(latency) * 1000 / len(latency), '.2f')))",
"OnnxRuntime cpu Inference time = 176.96 ms\n"
],
[
"print(\"***** Verifying correctness *****\")\nfor i in range(2):\n print('PyTorch and ONNX Runtime output {} are close:'.format(i), numpy.allclose(ort_outputs[i], outputs[i].cpu(), rtol=1e-05, atol=1e-04))",
"***** Verifying correctness *****\nPyTorch and ONNX Runtime output 0 are close: True\nPyTorch and ONNX Runtime output 1 are close: True\n"
]
],
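[
[
"The two outputs are start and end logits over the token positions. As an illustration only, the sketch below decodes an answer span for the last sample processed in the loop above using a naive argmax over `ort_outputs`; it reuses `data` and `tokenizer` from earlier cells and does not mask out question or padding tokens, so a real decoder would be more careful.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nstart_logits, end_logits = ort_outputs\nstart_index = int(np.argmax(start_logits[0]))\nend_index = int(np.argmax(end_logits[0]))\n\n# Map the predicted span back to text. This naive version ignores the question\n# and padding tokens, so it is for illustration only.\ntokens = tokenizer.convert_ids_to_tokens(data[0].tolist())\nprint(tokenizer.convert_tokens_to_string(tokens[start_index:end_index + 1]))",
"_____no_output_____"
]
],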
[
[
"## 5. Offline Optimization Script and Test Tools\n\nIt is recommended to try [OnnxRuntime Transformer Model Optimization Tool](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers) on the exported ONNX models. It could help verify whether the model can be fully optimized, and get performance test results.",
"_____no_output_____"
],
[
"#### Transformer Optimizer\n\nAlthough OnnxRuntime could optimize Bert model exported by PyTorch. Sometime, model cannot be fully optimized due to different reasons:\n* A new subgraph pattern is generated by new version of export tool, and the pattern is not covered by older version of OnnxRuntime. \n* The exported model uses dynamic axis and this makes it harder for shape inference of the graph. That blocks some optimization to be applied.\n* Some optimization is better to be done offline. Like change input tensor type from int64 to int32 to avoid extra Cast nodes, or convert model to float16 to achieve better performance in V100 or T4 GPU.\n\nWe have python script **optimizer.py**, which is more flexible in graph pattern matching and model conversion (like float32 to float16). You can also use it to verify whether a Bert model is fully optimized.\n\nIn this example, we can see that it introduces optimization that is not provided by onnxruntime: SkipLayerNormalization and bias fusion, which is not fused in OnnxRuntime due to shape inference as mentioned.\n\nIt will also tell whether the model is fully optimized or not. If not, that means you might need change the script to fuse some new pattern of subgraph.\n\nExample Usage:\n```\nfrom onnxruntime_tools import optimizer\noptimized_model = optimizer.optimize_model(export_model_path, model_type='bert', num_heads=12, hidden_size=768)\noptimized_model.save_model_to_file(optimized_model_path)\n```\n\nYou can also use optimizer_cli like the following:",
"_____no_output_____"
]
],
[
[
"optimized_model_path = './onnx/bert-base-cased-squad_opt_cpu.onnx'\n\n!{sys.executable} -m onnxruntime_tools.optimizer_cli --input $export_model_path --output $optimized_model_path --model_type bert --num_heads 12 --hidden_size 768",
"optimize_by_onnxruntime: Save optimized model by onnxruntime to ./onnx\\bert-base-cased-squad_o1_cpu.onnx\n apply: Fused LayerNormalization count: 25\n apply: Fused LayerNormalization count: 0\n apply: Fused Gelu count: 12\n apply: Fused FastGelu count: 0\n apply: Fused Reshape count: 0\n apply: Fused SkipLayerNormalization count: 25\n apply: Fused Attention count: 12\n prune_graph: Graph pruned: 0 inputs, 0 outputs and 5 nodes are removed\n apply: Fused EmbedLayerNormalization(with mask) count: 1\n prune_graph: Graph pruned: 0 inputs, 0 outputs and 12 nodes are removed\n prune_graph: Graph pruned: 0 inputs, 0 outputs and 0 nodes are removed\n apply: Fused FastGelu(add bias) count: 0\n apply: Fused BiasGelu count: 12\n apply: Fused SkipLayerNormalization(add bias) count: 24\n optimize: opset verion: 11\n save_model_to_file: Output model to ./onnx/bert-base-cased-squad_opt_cpu.onnx\nget_fused_operator_statistics: Optimized operators:{'EmbedLayerNormalization': 1, 'Attention': 12, 'Gelu': 0, 'FastGelu': 0, 'BiasGelu': 12, 'LayerNormalization': 0, 'SkipLayerNormalization': 24}\n is_fully_optimized: EmbedLayer=1, Attention=12, Gelu=12, LayerNormalization=24, Successful=True\n main: The output model is fully optimized.\n"
]
],
[
[
"#### Optimized Graph\nWhen you can open the optimized model using Netron to visualize, the graph is like the following:\n<img src='images/optimized_bert_gpu.png'>\n\nFor CPU, optimized graph is slightly different: FastGelu is replaced by BiasGelu.",
"_____no_output_____"
]
],
[
[
"import netron\n\n# Change it to False to skip viewing the optimized model in browser.\nenable_netron = True\nif enable_netron:\n # If you encounter error \"access a socket in a way forbidden by its access permissions\", install Netron as standalone application instead.\n netron.start(optimized_model_path)",
"Serving './onnx/bert-base-cased-squad_opt_cpu.onnx' at http://localhost:8080\n"
]
],
[
[
"#### Model Results Comparison Tool\n\nIf your BERT model has three inputs, a script compare_bert_results.py can be used to do a quick verification. The tool will generate some fake input data, and compare results from both the original and optimized models. If outputs are all close, it is safe to use the optimized model.\n\nExample of verifying models:",
"_____no_output_____"
]
],
[
[
"!{sys.executable} -m onnxruntime_tools.transformers.compare_bert_results --baseline_model $export_model_path --optimized_model $optimized_model_path --batch_size 1 --sequence_length 128 --samples 100",
"100% passed for 100 random inputs given thresholds (rtol=0.001, atol=0.0001).\nmaximum absolute difference=3.46451997756958e-06\nmaximum relative difference=0.03302651643753052\n"
]
],
[
[
"#### Performance Test Tool\n\nThis tool measures performance of BERT model inference using OnnxRuntime Python API.\n\nThe following command will create 100 samples of batch_size 1 and sequence length 128 to run inference, then calculate performance numbers like average latency and throughput etc. You can increase number of samples (recommended 1000) to get more stable result.",
"_____no_output_____"
]
],
[
[
"!{sys.executable} -m onnxruntime_tools.transformers.bert_perf_test --model $optimized_model_path --batch_size 1 --sequence_length 128 --samples 100 --test_times 1 --intra_op_num_threads 1 --inclusive --all",
"Running test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=,OMP_WAIT_POLICY=,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 102.17 ms, Throughput = 9.79 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=,OMP_WAIT_POLICY=PASSIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 91.39 ms, Throughput = 10.94 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=,OMP_WAIT_POLICY=ACTIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 105.09 ms, Throughput = 9.52 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=1,OMP_WAIT_POLICY=,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 291.53 ms, Throughput = 3.43 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=1,OMP_WAIT_POLICY=PASSIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 306.75 ms, Throughput = 3.26 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=1,OMP_WAIT_POLICY=ACTIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 289.82 ms, Throughput = 3.45 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=12,OMP_WAIT_POLICY=,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 101.56 ms, Throughput = 9.85 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=12,OMP_WAIT_POLICY=PASSIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 84.73 ms, Throughput = 11.80 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=12,OMP_WAIT_POLICY=ACTIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 101.53 ms, Throughput = 9.85 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=6,OMP_WAIT_POLICY=,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 100.54 ms, Throughput = 9.95 QPS\nRunning test: model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=6,OMP_WAIT_POLICY=PASSIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 111.82 ms, Throughput = 8.94 QPS\nRunning test: 
model=bert-base-cased-squad_opt_cpu.onnx,graph_optimization_level=ENABLE_ALL,intra_op_num_threads=1,OMP_NUM_THREADS=6,OMP_WAIT_POLICY=ACTIVE,batch_size=1,sequence_length=128,test_cases=100,test_times=1,contiguous=False,use_gpu=False,warmup=True\nAverage latency = 101.14 ms, Throughput = 9.89 QPS\ntest setting TestSetting(use_gpu=False, batch_size=1, sequence_length=128, test_cases=100, test_times=1, omp_num_threads=None, omp_wait_policy=None, intra_op_num_threads=1, seed=3, verbose=False, contiguous=False, inclusive=True, extra_latency=0, warmup=True)\nGenerating 100 samples for batch_size=1 sequence_length=128\nTest summary is saved to onnx\\perf_results_CPU_B1_S128_20200612-115010.txt\n"
]
],
[
[
"Let's load the summary file and take a look.",
"_____no_output_____"
]
],
[
[
"import os\nimport glob \nimport pandas\nlatest_result_file = max(glob.glob(\"./onnx/perf_results_*.txt\"), key=os.path.getmtime)\nresult_data = pandas.read_table(latest_result_file, converters={'OMP_NUM_THREADS': str, 'OMP_WAIT_POLICY':str})\nprint(latest_result_file)\n\n# Remove some columns that have same values for all rows.\ncolumns_to_remove = ['model', 'graph_optimization_level', 'batch_size', 'sequence_length', 'test_cases', 'test_times', 'use_gpu', 'warmup']\n# Hide some latency percentile columns to fit screen width.\ncolumns_to_remove.extend(['Latency_P50', 'Latency_P95'])\nresult_data.drop(columns_to_remove, axis=1, inplace=True)\nresult_data",
"./onnx\\perf_results_CPU_B1_S128_20200612-115010.txt\nThe best setting is: use openmp; NO contiguous array\n"
]
],
[
[
"## 6. Additional Info\n\nNote that running Jupyter Notebook has slight impact on performance result since Jupyter Notebook is using system resources like CPU and memory etc. It is recommended to close Jupyter Notebook and other applications, then run the performance test tool in a console to get more accurate performance numbers.\n\nWe have a [benchmark script](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/transformers/run_benchmark.sh). It is recommended to use it compare inference speed of OnnxRuntime with PyTorch.\n\n[OnnxRuntime C API](https://github.com/microsoft/onnxruntime/blob/master/docs/C_API.md) could get slightly better performance than python API. If you use C API in inference, you can use OnnxRuntime_Perf_Test.exe built from source to measure performance instead.\n\nHere is the machine configuration that generated the above results. The machine has GPU but not used in CPU inference.\nYou might get slower or faster result based on your hardware.",
"_____no_output_____"
]
],
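[
[
"As a rough cross-check of the latency numbers above, one could also time the optimized model directly with the `onnxruntime` Python API. The next cell is only a minimal sketch, not the measured configuration: it assumes `optimized_model_path` from earlier is still defined, feeds dummy all-ones int64 tensors of shape (1, 128) to every model input, and pins `intra_op_num_threads` to 1 to mirror the single-thread runs.",
"_____no_output_____"
]
],
[
[
"# Minimal sketch (not the bundled bert_perf_test tool): time the model with the onnxruntime Python API.\nimport time\nimport numpy as np\nimport onnxruntime\n\n# Pin the session to one intra-op thread (assumption: mirror the single-thread tests above).\nsess_options = onnxruntime.SessionOptions()\nsess_options.intra_op_num_threads = 1\nsession = onnxruntime.InferenceSession(optimized_model_path, sess_options)\n\n# Dummy batch_size=1, sequence_length=128 feed built from the model's declared inputs.\nfeed = {inp.name: np.ones((1, 128), dtype=np.int64) for inp in session.get_inputs()}\n\nlatencies = []\nfor _ in range(100):\n    start = time.time()\n    session.run(None, feed)\n    latencies.append(time.time() - start)\nprint(\"Average latency = {:.2f} ms\".format(1000 * sum(latencies) / len(latencies)))",
"_____no_output_____"
]
],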
[
[
"!{sys.executable} -m onnxruntime_tools.transformers.machine_info --silent",
"{\n \"gpu\": {\n \"driver_version\": \"442.23\",\n \"devices\": [\n {\n \"memory_total\": 8589934592,\n \"memory_available\": 8480882688,\n \"name\": \"GeForce GTX 1070\"\n }\n ]\n },\n \"cpu\": {\n \"brand\": \"Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\",\n \"cores\": 6,\n \"logical_cores\": 12,\n \"hz\": \"3.1920 GHz\",\n \"l2_cache\": \"1536 KB\",\n \"l3_cache\": \"12288 KB\",\n \"processor\": \"Intel64 Family 6 Model 158 Stepping 10, GenuineIntel\"\n },\n \"memory\": {\n \"total\": 16971259904,\n \"available\": 6604914688\n },\n \"python\": \"3.6.10.final.0 (64 bit)\",\n \"os\": \"Windows-10-10.0.18362-SP0\",\n \"onnxruntime\": {\n \"version\": \"1.3.0\",\n \"support_gpu\": false\n },\n \"pytorch\": {\n \"version\": \"1.5.0+cpu\",\n \"support_gpu\": false\n },\n \"tensorflow\": {\n \"version\": \"2.1.0\",\n \"git_version\": \"v2.1.0-rc2-17-ge5bf8de410\",\n \"support_gpu\": true\n }\n}\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
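[
"markdown"
],
[
"code"
],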
[
"code"
]
] |
4a827ebc0b951495648a94ebd83dcddb7381b0cf
| 708,475 |
ipynb
|
Jupyter Notebook
|
model_trainer_v4.ipynb
|
husmen/driveCNN
|
4c766ab101e78e951755f0119bfffab2e336e8f4
|
[
"MIT"
] | null | null | null |
model_trainer_v4.ipynb
|
husmen/driveCNN
|
4c766ab101e78e951755f0119bfffab2e336e8f4
|
[
"MIT"
] | null | null | null |
model_trainer_v4.ipynb
|
husmen/driveCNN
|
4c766ab101e78e951755f0119bfffab2e336e8f4
|
[
"MIT"
] | 1 |
2020-02-07T03:39:25.000Z
|
2020-02-07T03:39:25.000Z
| 357.815657 | 227,508 | 0.893228 |
[
[
[
"args = {\n 'model_name':'2019_04_11_DRIVE',\n 'FP':'float16',\n 'optimizer': 'Adam', #SGD, RMSprop, Adadelta, Adagrad, Adam, Adamax, Nadam\n 'dataset':['101/training_data.csv', '102/training_data.csv', '103/training_data.csv', '104/training_data.csv', '105/training_data.csv', '106/training_data.csv', '107/training_data.csv'],\n 'batch_size':1024, #512 with float32, 1024 with float16\n 'split_point': 0.8, #80% Training , 20% Validation\n 'rnd_seed': 1234,\n 'epochs_number':250,\n 'image_width': 320,\n 'image_height': 90,\n 'resume': False\n}",
"_____no_output_____"
],
[
"# csv uznatılı dosyayı okuyabilmek için\nimport pandas as pd\n# matris işlemleri için\nimport numpy as np\n# opencv \nimport cv2\n# grafik kütüphanesi \nimport matplotlib.pylab as plt\n# histogram colors\nfrom matplotlib import colors\n# matplotlib grafiklerini jupyter notebook üzerinde göstermek için\n%matplotlib inline\n# rasgele sayı üretimi için\nimport random",
"_____no_output_____"
],
[
"# Eğitim için kaydettiğimiz seyir.csv dosaysını okuyoruz\nfor i, ds in enumerate(args['dataset']):\n try:\n tmp = pd.read_csv(ds)\n except:\n pass\n else:\n tmp['FileName'] = tmp['FileName'].apply(lambda x: ds.split('/')[0] + '/' + x) \n if i == 0:\n df = tmp\n else:\n df = df.append(tmp, ignore_index=True)",
"_____no_output_____"
],
[
"# Seyir dosaysındaki sutun başlıkları\ndf.columns",
"_____no_output_____"
],
[
"# Açı sutunu hakkında istatistiki bilgiler min max mean vs ...\ndf.Angle.describe()",
"_____no_output_____"
],
[
"# Toplam kayıt sayımız\nlen(df)",
"_____no_output_____"
],
[
"# 20 den 40 kadar kayıtlar\ndf[0:10]",
"_____no_output_____"
],
[
"df[-10:]",
"_____no_output_____"
],
[
"# Kayıt ettğimiz resmin birine bakıyoruz\nimage = cv2.imread(df.FileName[30])\nplt.figure(figsize=(15,5))\nplt.imshow(image)",
"_____no_output_____"
],
[
"# Resmin her tarafını kullanmamıza gerek yok \n# çeşitli denemeler yaparak kırptığımız alanı görüyoruz\n# Uygun gördüğümüz halini ağda kullanacağız\n# Biz burda sadece sol görüntüyü üstten 144 den 300 kadarlık kısmını alıyoruz \ntmp = image[160:340,:,:]\nplt.figure(figsize=(15,5))\nplt.imshow(tmp)\nprint(type(image[0,0,0]))",
"<type 'numpy.uint8'>\n"
],
[
"# Ağımızı eğitmek için giriş verimiz resimler çıkışımız da Açılar olacak\n# bunları birer listeye aktarıyoruz\n# Kamera ilk çekmeye başladığında düzgün çekemediği için başlangıçdan 30 kayıdı almıyoruz.\nimages = list(df.FileName[1:-1])\nlabels = list(df.Angle[1:-1])",
"_____no_output_____"
],
[
"len(labels)",
"_____no_output_____"
],
[
"# Verimizdeki açıların dağılımı nasıl diye bir histogram yapıp bakıyoruz\n# Dağılımın eşit şekilde olmaması eğitimin de düzgün olmamasına sebep olur\n#plt.hist(labels, bins=14)\n#plt.show()\nN, bins, patches = plt.hist(labels,14, range=[-0.35,0.35], facecolor='blue', align='mid')\nfracs = N / N.max()\nnorm = colors.Normalize(fracs.min(), fracs.max())\n\n# Now, we'll loop through our objects and set the color of each accordingly\nfor thisfrac, thispatch in zip(fracs, patches):\n color = plt.cm.RdBu(norm(thisfrac))\n thispatch.set_facecolor(color)\n\n#plt.axis([-0.4, 0.4, 0, 750])\nplt.show()",
"_____no_output_____"
],
[
"tmp = []\nstart_value = -0.35\naralik = 0.05\n\nfor i in range(14):\n length=df.loc[(df['Angle'] > start_value) & (df['Angle'] <= start_value+aralik)]\n tmp.append(len(length))\n start_value = start_value + aralik\nprint(tmp[0:7]) # Eksi açı degerleri (Sag)\nprint(tmp[7:14]) # Pozitif açı degerleri (Sol)",
"[1738, 452, 1393, 2261, 2687, 1783, 802]\n[16694, 1158, 1879, 1074, 631, 267, 1110]\n"
],
[
"# Veri setindeki açı dağılımını bir paröa düzeltmek için\n# sayısı az olan açıdaki kayıtları listeye yeniden ekleyerek \n# daha düzgün hale getirmeye çalışıyoruz\ndef augment():\n nitem = len(images)\n for i in range(nitem):\n if labels[i] >= 0.05 and labels[i] <= 0.1:\n addValue=tmp[7]/tmp[8]\n for j in range(addValue-5):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] > 0.1 and labels[i] <= 0.15:\n addValue=tmp[7]/tmp[9]\n for j in range(addValue-3):\n images.append(images[i])\n labels.append(labels[i]) \n\n if labels[i] > 0.15 and labels[i] <= 0.2:\n addValue=tmp[7]/tmp[10]\n for j in range(addValue-5):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] > 0.2 and labels[i] <= 0.25:\n addValue=tmp[7]/tmp[11]\n for j in range(addValue-10):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] > 0.25 and labels[i] <= 0.3:\n addValue=tmp[7]/tmp[12]\n for j in range(addValue-20):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] > 0.3 and labels[i] <= 0.35:\n addValue=tmp[7]/tmp[13]\n for j in range(addValue-5):\n images.append(images[i])\n labels.append(labels[i])\n\n\n #Negatif degerler\n\n if labels[i] < 0.0 and labels[i] > -0.05:\n addValue=tmp[7]/tmp[6]\n for j in range(addValue-5):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] <= -0.05 and labels[i] > -0.1:\n addValue=tmp[7]/tmp[5]\n for j in range(addValue-3):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] <= -0.1 and labels[i] > -0.15:\n addValue=tmp[7]/tmp[4]\n for j in range(addValue-3):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] <= -0.15 and labels[i] > -0.2:\n addValue=tmp[7]/tmp[3]\n for j in range(addValue-3):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] <= -0.2 and labels[i] > -0.25:\n addValue=tmp[7]/tmp[2]\n for j in range(addValue-4):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] <= -0.25 and labels[i] > -0.3:\n addValue=tmp[7]/tmp[1]\n for j in range(addValue-10):\n images.append(images[i])\n labels.append(labels[i])\n\n if labels[i] <= -0.3 and labels[i] > -0.35:\n addValue=tmp[7]/tmp[0]\n for j in range(addValue-3):\n images.append(images[i])\n labels.append(labels[i])",
"_____no_output_____"
],
[
"augment()",
"_____no_output_____"
],
[
"# İlk histgorama göre daga dengeli sayılabilecek bir dağılıma ulaştık\n# En doğru çözüm değil ama pratik işe yarar bir alternatif\nN, bins, patches = plt.hist(labels,14, range=[-0.35,0.35], facecolor='blue', align='mid')\nfracs = N / N.max()\nnorm = colors.Normalize(fracs.min(), fracs.max())\n\n# Now, we'll loop through our objects and set the color of each accordingly\nfor thisfrac, thispatch in zip(fracs, patches):\n color = plt.cm.RdBu(norm(thisfrac))\n thispatch.set_facecolor(color)\n\n#plt.axis([-0.4, 0.4, 0, 750])\nplt.show()",
"_____no_output_____"
],
[
"len(labels)",
"_____no_output_____"
],
[
"# Veri setimiz ile ilgili ayarlamalar\n# Veri seti küme büyüklüğü batch size\n# Verisetinin ne kadarı eğitim ne kadarı test için kullanılacak\n# Eğitim %80 , Test %20 \nbsize = args['batch_size']\ndlen = len(labels)\nsplitpoint = int(args['split_point']*dlen)\nreindex = list(range(len(labels)))\n# Eğtim verisini karıştıryoruz\nrandom.seed(args['rnd_seed'])\nrandom.shuffle(reindex)",
"_____no_output_____"
],
[
"# Resim üzerinde Rastgele parlaklık değişimi uygulayan bir fonksiyon\n# Augmentation function (taken from github)\ndef augment_brightness(image):\n image1 = cv2.cvtColor(image,cv2.COLOR_BGR2HSV) \n image1 = np.array(image1, dtype = np.float64)\n random_bright = .5+np.random.uniform()\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1[:,:,2][image1[:,:,2]>255] = 255\n image1 = np.array(image1, dtype = np.uint8)\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1",
"_____no_output_____"
],
[
"#Resmi Kaydirma Sonradan Eklendi\ndef random_translate(image,range_x, range_y):\n \"\"\"\n Randomly shift the image virtially and horizontally (translation).\n \"\"\"\n trans_x = range_x * (np.random.rand() - 0.5)\n trans_y = range_y * (np.random.rand() - 0.5)\n trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])\n height, width = image.shape[:2]\n image = cv2.warpAffine(image, trans_m, (width, height))\n return image",
"_____no_output_____"
],
[
"def random_shadow(image,width,heigth):\n \"\"\"\n Generates and adds random shadow\n \"\"\"\n \n IMAGE_WIDTH=width\n IMAGE_HEIGHT=heigth\n # (x1, y1) and (x2, y2) forms a line\n # xm, ym gives all the locations of the image\n x1, y1 = IMAGE_WIDTH * np.random.rand(), 0\n x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT\n xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]\n\n print(image.size)\n # mathematically speaking, we want to set 1 below the line and zero otherwise\n # Our coordinate is up side down. So, the above the line: \n # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)\n # as x2 == x1 causes zero-division problem, we'll write it in the below form:\n # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0\n mask = np.zeros_like(image[:, :, 1])\n mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1\n\n # choose which side should have shadow and adjust saturation\n cond = mask == np.random.randint(2)\n s_ratio = np.random.uniform(low=0.5, high=0.5)\n\n # adjust Saturation in HLS(Hue, Light, Saturation)\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio\n #plt.imshow(random_shadow(image))\n return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)",
"_____no_output_____"
],
[
"# ismi verilen resmi okuyup \n# rastgele olarak %50 sine parlaklık değişimi uygulayan fonksiyonu uygulayıp\n# resim matrisini dönem bir fonksiyon\n\ndef get_matrix(fname):\n img = cv2.imread(fname)\n h, w = img.shape[:2]\n #img = img[160:340,:,:] # crop only\n if w != 640 or h != 360:\n img = cv2.resize(img, (640,360))\n img = cv2.resize(img[160:340,:,:], (args['image_width'],args['image_height'])) #crop then resize\n if random.randint(0,2) == 1:\n img = augment_brightness(img) \n if random.randint(0,2) == 1:\n img = random_translate(img,100,0) \n #if random.randint(0,2) == 1:\n # img = random_shadow(img,320,90)\n return img",
"_____no_output_____"
],
[
"# Bütün veriyi hafızaya almamız mümkün değil\n# Ek olarak bazen çeşitli değişimler - Augmentation - uygulamakda istiyebiliriz\n# python generator ile gerektiğinde veri okunur düzenlenir ve eğitim veya test için \n# sisteme verilir\n# alttaki fonksiyonlar bu işi yapar\n\n# Generate data for training\ndef generate_data():\n i = 0\n while True:\n x = []\n y = []\n for j in range(i,i+bsize): \n ix = reindex[j]\n img = get_matrix(images[ix])\n lbl = np.array([labels[ix]])\n flip = random.randint(0,1)\n if flip == 1:\n img = cv2.flip(img,1)\n lbl = lbl*-1.0\n x.append(img)\n y.append(lbl)\n x = np.array(x)\n y = np.array(y)\n #print(\"#------ Sending TRAINING batch ------#\")\n yield (x,y) \n i +=bsize\n if i+bsize > splitpoint:\n i = 0\n \n# Generate data for validation \ndef generate_data_val():\n i = splitpoint\n while True:\n x = []\n y = []\n for j in range(i,i+bsize): \n ix = reindex[j]\n x.append(get_matrix(images[ix]))\n y.append(np.array([labels[ix]]))\n x = np.array(x)\n y = np.array(y)\n #print(\"#------ Sending VALIDATION batch ------#\")\n yield (x,y) \n i +=bsize\n if i+bsize > dlen:\n i = splitpoint\n ",
"_____no_output_____"
],
[
"# Keras için gerekenler\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout, Flatten, Lambda\nfrom keras.layers import Conv2D, MaxPooling2D, Cropping2D, Reshape\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import SGD, RMSprop, Adadelta, Adagrad, Adam, Adamax, Nadam\nfrom keras.regularizers import l2\nfrom keras import backend as K",
"Using TensorFlow backend.\n"
],
[
"import tensorflow as tf",
"_____no_output_____"
],
[
"#Destroy the current TF graph and create a new one\nK.clear_session()\n#import keras\n#print(keras.__version__)\n# make sure soft-placement is off\n\n# allow_soft_placement: an op will be placed on CPU if not possible on GPU\n# allow_growth: attempts to allocate only as much GPU memory based on runtime allocations \n# per_process_gpu_memory_fraction: set the fraction of the overall memory that each GPU should be allocated\n\ntf_config = tf.ConfigProto(allow_soft_placement=False)\ntf_config.gpu_options.allow_growth = True\n#tf_config.gpu_options.per_process_gpu_memory_fraction = 0.5\ns = tf.Session(config=tf_config)\nK.set_session(s)\n\n# enable 16-bit training\nK.set_floatx(args['FP'])\nif args['FP'] == \"float16\":\n K.set_epsilon(1e-4)\nK.floatx()",
"_____no_output_____"
],
[
"# Model based on NVIDIA's End to End Learning for Self-Driving Cars model\n#input shape, original paper uses 60*200, openzeka uses 80*320\nshape=(args['image_height'],args['image_width'],3)\n# Sıralı bir keras modeli tanılıyoruz\nmodel = Sequential()\n# Cropping\n# gelen resmin istediğimiz ksımını kırpmak için bu katmanı yazıyoruz\n# Cropping2D(cropping((top_crop, bottom_crop), (left_crop, right_crop)))\n# aşağıdaki satırda \n# üstten 150 alttan 20 piksel \n# soldan 0 sağdan 640 piksel kırpılıyor\n#model.add(Cropping2D(cropping=((150,20),(0,640)), input_shape=shape))\n# Normalization\n# 0 - 255 arası değerler -1 ila 1 arasına çekiliyor\nmodel.add(Lambda(lambda x: (2*x / 255.0) - 1.0, input_shape=shape))\n# lambda + cast\n#model.add(Lambda(lambda x: tf.cast((2*x / 255.0) - 1.0 ,dtype=tf.float16)))\n# cast to float16\n#model.add(Lambda(lambda x: tf.cast(x,dtype=tf.float16)))\n# Doesnt work, requires numpy array as input\n#model.add(Lambda(lambda x: K.cast_to_floatx(x)))\n\n# Evrişim katmanı (5, 5) lik 24 tane 2 şer piksel kayarak\nmodel.add(Conv2D(24, (5, 5), activation=\"relu\", strides=(2, 2), kernel_regularizer=l2(0.001)))\nmodel.add(Conv2D(36, (5, 5), activation=\"relu\", strides=(2, 2), kernel_regularizer=l2(0.001)))\nmodel.add(Conv2D(48, (5, 5), activation=\"relu\", strides=(2, 2), kernel_regularizer=l2(0.001)))\nmodel.add(Conv2D(64, (3, 3), activation=\"relu\", kernel_regularizer=l2(0.001)))\nmodel.add(Conv2D(64, (3, 3), activation=\"relu\", kernel_regularizer=l2(0.001)))\n#Flattendan once kullanilmayan baglantilari drop out yaptik(sonradan ekledik)\nmodel.add(Dropout(0.5))\n# Ağın çıkışı burda vectöre çevriliyor\nmodel.add(Flatten())\n# Yapay Sinir ağı kısmı\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\n# Ağın çıkışı Açı \nmodel.add(Dense(1))\nmodel.compile(loss='mse', optimizer=args['optimizer'], metrics=['mse', 'msle', 'mae', 'mape', 'cosine'])",
"_____no_output_____"
],
[
"# Tanımladığımız ağın yapsı\nmodel.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlambda_1 (Lambda) (None, 90, 320, 3) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 43, 158, 24) 1824 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 20, 77, 36) 21636 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 8, 37, 48) 43248 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 6, 35, 64) 27712 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 4, 33, 64) 36928 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 4, 33, 64) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 8448) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 100) 844900 \n_________________________________________________________________\ndense_2 (Dense) (None, 50) 5050 \n_________________________________________________________________\ndense_3 (Dense) (None, 10) 510 \n_________________________________________________________________\ndense_4 (Dense) (None, 1) 11 \n=================================================================\nTotal params: 981,819\nTrainable params: 981,819\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"for layer in model.layers:\n print(layer.output)",
"Tensor(\"lambda_1/sub:0\", shape=(?, 90, 320, 3), dtype=float16)\nTensor(\"conv2d_1/Relu:0\", shape=(?, 43, 158, 24), dtype=float16)\nTensor(\"conv2d_2/Relu:0\", shape=(?, 20, 77, 36), dtype=float16)\nTensor(\"conv2d_3/Relu:0\", shape=(?, 8, 37, 48), dtype=float16)\nTensor(\"conv2d_4/Relu:0\", shape=(?, 6, 35, 64), dtype=float16)\nTensor(\"conv2d_5/Relu:0\", shape=(?, 4, 33, 64), dtype=float16)\nTensor(\"dropout_1/cond/Merge:0\", shape=(?, 4, 33, 64), dtype=float16)\nTensor(\"flatten_1/Reshape:0\", shape=(?, ?), dtype=float16)\nTensor(\"dense_1/BiasAdd:0\", shape=(?, 100), dtype=float16)\nTensor(\"dense_2/BiasAdd:0\", shape=(?, 50), dtype=float16)\nTensor(\"dense_3/BiasAdd:0\", shape=(?, 10), dtype=float16)\nTensor(\"dense_4/BiasAdd:0\", shape=(?, 1), dtype=float16)\n"
],
[
"# Açı değerlerinide -0.3 ila 0.3 aralığından -1 ila 1 aralığına çekebilmek için 3 ile çarpıyoruz\nprint(type(labels[1]))\nlabels = 3*np.array(labels).astype(args['FP'])\nprint(type(labels[1]))",
"<type 'float'>\n<type 'numpy.float16'>\n"
],
[
"model_name_full = args['model_name'] + '_' + args['FP'] + '_' + args['optimizer']",
"_____no_output_____"
],
[
"# Eğitim esnasında test hata değeri en düşük değeri kaydeden bir fonksiyon\nmodel_checkpoint = ModelCheckpoint('models/' + model_name_full + '_weights_{epoch:03d}_{val_loss:.2f}.h5', monitor='val_loss', save_best_only=True, period=10)",
"_____no_output_____"
],
[
"if args['resume']:\n model = load_model(\"\")",
"_____no_output_____"
],
[
"# Eğitim fonksiyonu \nhs = model.fit_generator(generate_data(),steps_per_epoch=int(splitpoint/ bsize),\n validation_data=generate_data_val(), \n validation_steps=(dlen-splitpoint)/bsize, epochs=args['epochs_number'], callbacks=[model_checkpoint])",
"Epoch 1/250\n131/131 [==============================] - 337s 3s/step - loss: 0.3016 - mean_squared_error: 0.1410 - mean_squared_logarithmic_error: 0.0319 - mean_absolute_error: 0.2855 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.2069 - val_mean_squared_error: 0.0928 - val_mean_squared_logarithmic_error: 0.0192 - val_mean_absolute_error: 0.2266 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 2/250\n131/131 [==============================] - 313s 2s/step - loss: 0.1753 - mean_squared_error: 0.0905 - mean_squared_logarithmic_error: 0.0204 - mean_absolute_error: 0.2269 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.1418 - val_mean_squared_error: 0.0776 - val_mean_squared_logarithmic_error: 0.0159 - val_mean_absolute_error: 0.2068 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 3/250\n131/131 [==============================] - 313s 2s/step - loss: 0.1346 - mean_squared_error: 0.0814 - mean_squared_logarithmic_error: 0.0184 - mean_absolute_error: 0.2128 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.1232 - val_mean_squared_error: 0.0785 - val_mean_squared_logarithmic_error: 0.0170 - val_mean_absolute_error: 0.2038 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 4/250\n131/131 [==============================] - 313s 2s/step - loss: 0.1197 - mean_squared_error: 0.0794 - mean_squared_logarithmic_error: 0.0181 - mean_absolute_error: 0.2098 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.1110 - val_mean_squared_error: 0.0747 - val_mean_squared_logarithmic_error: 0.0146 - val_mean_absolute_error: 0.2038 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 5/250\n131/131 [==============================] - 312s 2s/step - loss: 0.1102 - mean_squared_error: 0.0767 - mean_squared_logarithmic_error: 0.0173 - mean_absolute_error: 0.2049 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.1018 - val_mean_squared_error: 0.0709 - val_mean_squared_logarithmic_error: 0.0142 - val_mean_absolute_error: 0.1951 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 6/250\n131/131 [==============================] - 311s 2s/step - loss: 0.1045 - mean_squared_error: 0.0750 - mean_squared_logarithmic_error: 0.0170 - mean_absolute_error: 0.2024 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.1011 - val_mean_squared_error: 0.0730 - val_mean_squared_logarithmic_error: 0.0151 - val_mean_absolute_error: 0.1957 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 7/250\n131/131 [==============================] - 310s 2s/step - loss: 0.1011 - mean_squared_error: 0.0742 - mean_squared_logarithmic_error: 0.0168 - mean_absolute_error: 0.2009 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0927 - val_mean_squared_error: 0.0669 - val_mean_squared_logarithmic_error: 0.0137 - val_mean_absolute_error: 0.1861 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 8/250\n131/131 [==============================] - 310s 2s/step - loss: 0.0973 - mean_squared_error: 0.0722 - mean_squared_logarithmic_error: 0.0164 - mean_absolute_error: 0.1973 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0932 - val_mean_squared_error: 0.0689 - val_mean_squared_logarithmic_error: 0.0144 - val_mean_absolute_error: 0.1881 - val_mean_absolute_percentage_error: 
inf - val_cosine_proximity: nan\nEpoch 9/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0950 - mean_squared_error: 0.0714 - mean_squared_logarithmic_error: 0.0163 - mean_absolute_error: 0.1962 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0910 - val_mean_squared_error: 0.0677 - val_mean_squared_logarithmic_error: 0.0140 - val_mean_absolute_error: 0.1878 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 10/250\n131/131 [==============================] - 313s 2s/step - loss: 0.0931 - mean_squared_error: 0.0703 - mean_squared_logarithmic_error: 0.0161 - mean_absolute_error: 0.1944 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0897 - val_mean_squared_error: 0.0671 - val_mean_squared_logarithmic_error: 0.0135 - val_mean_absolute_error: 0.1867 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 11/250\n131/131 [==============================] - 315s 2s/step - loss: 0.0920 - mean_squared_error: 0.0698 - mean_squared_logarithmic_error: 0.0159 - mean_absolute_error: 0.1932 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0855 - val_mean_squared_error: 0.0637 - val_mean_squared_logarithmic_error: 0.0131 - val_mean_absolute_error: 0.1813 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 12/250\n131/131 [==============================] - 317s 2s/step - loss: 0.0908 - mean_squared_error: 0.0692 - mean_squared_logarithmic_error: 0.0158 - mean_absolute_error: 0.1921 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0872 - val_mean_squared_error: 0.0659 - val_mean_squared_logarithmic_error: 0.0133 - val_mean_absolute_error: 0.1851 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 13/250\n131/131 [==============================] - 313s 2s/step - loss: 0.0900 - mean_squared_error: 0.0688 - mean_squared_logarithmic_error: 0.0158 - mean_absolute_error: 0.1918 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0859 - val_mean_squared_error: 0.0648 - val_mean_squared_logarithmic_error: 0.0134 - val_mean_absolute_error: 0.1804 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 14/250\n131/131 [==============================] - 319s 2s/step - loss: 0.0890 - mean_squared_error: 0.0684 - mean_squared_logarithmic_error: 0.0156 - mean_absolute_error: 0.1907 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0839 - val_mean_squared_error: 0.0635 - val_mean_squared_logarithmic_error: 0.0131 - val_mean_absolute_error: 0.1794 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 15/250\n131/131 [==============================] - 319s 2s/step - loss: 0.0869 - mean_squared_error: 0.0666 - mean_squared_logarithmic_error: 0.0152 - mean_absolute_error: 0.1874 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0840 - val_mean_squared_error: 0.0639 - val_mean_squared_logarithmic_error: 0.0132 - val_mean_absolute_error: 0.1855 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 16/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0868 - mean_squared_error: 0.0667 - mean_squared_logarithmic_error: 0.0154 - mean_absolute_error: 0.1879 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0834 - val_mean_squared_error: 0.0636 - val_mean_squared_logarithmic_error: 0.0127 - val_mean_absolute_error: 0.1800 - 
val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 17/250\n131/131 [==============================] - 314s 2s/step - loss: 0.0868 - mean_squared_error: 0.0668 - mean_squared_logarithmic_error: 0.0152 - mean_absolute_error: 0.1881 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0829 - val_mean_squared_error: 0.0630 - val_mean_squared_logarithmic_error: 0.0130 - val_mean_absolute_error: 0.1803 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 18/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0861 - mean_squared_error: 0.0663 - mean_squared_logarithmic_error: 0.0152 - mean_absolute_error: 0.1874 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0827 - val_mean_squared_error: 0.0629 - val_mean_squared_logarithmic_error: 0.0128 - val_mean_absolute_error: 0.1832 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 19/250\n131/131 [==============================] - 316s 2s/step - loss: 0.0856 - mean_squared_error: 0.0660 - mean_squared_logarithmic_error: 0.0151 - mean_absolute_error: 0.1867 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0842 - val_mean_squared_error: 0.0647 - val_mean_squared_logarithmic_error: 0.0138 - val_mean_absolute_error: 0.1816 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 20/250\n131/131 [==============================] - 321s 2s/step - loss: 0.0855 - mean_squared_error: 0.0658 - mean_squared_logarithmic_error: 0.0151 - mean_absolute_error: 0.1869 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0808 - val_mean_squared_error: 0.0612 - val_mean_squared_logarithmic_error: 0.0126 - val_mean_absolute_error: 0.1768 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 21/250\n131/131 [==============================] - 316s 2s/step - loss: 0.0843 - mean_squared_error: 0.0649 - mean_squared_logarithmic_error: 0.0148 - mean_absolute_error: 0.1848 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0807 - val_mean_squared_error: 0.0612 - val_mean_squared_logarithmic_error: 0.0126 - val_mean_absolute_error: 0.1757 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 22/250\n131/131 [==============================] - 317s 2s/step - loss: 0.0834 - mean_squared_error: 0.0641 - mean_squared_logarithmic_error: 0.0146 - mean_absolute_error: 0.1836 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0788 - val_mean_squared_error: 0.0597 - val_mean_squared_logarithmic_error: 0.0122 - val_mean_absolute_error: 0.1750 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 23/250\n131/131 [==============================] - 313s 2s/step - loss: 0.0830 - mean_squared_error: 0.0638 - mean_squared_logarithmic_error: 0.0147 - mean_absolute_error: 0.1828 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0805 - val_mean_squared_error: 0.0610 - val_mean_squared_logarithmic_error: 0.0125 - val_mean_absolute_error: 0.1778 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 24/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0837 - mean_squared_error: 0.0645 - mean_squared_logarithmic_error: 0.0148 - mean_absolute_error: 0.1840 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0807 - val_mean_squared_error: 0.0614 - val_mean_squared_logarithmic_error: 
0.0128 - val_mean_absolute_error: 0.1788 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 25/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0828 - mean_squared_error: 0.0637 - mean_squared_logarithmic_error: 0.0146 - mean_absolute_error: 0.1826 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0786 - val_mean_squared_error: 0.0593 - val_mean_squared_logarithmic_error: 0.0120 - val_mean_absolute_error: 0.1736 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 26/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0825 - mean_squared_error: 0.0633 - mean_squared_logarithmic_error: 0.0145 - mean_absolute_error: 0.1822 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0796 - val_mean_squared_error: 0.0605 - val_mean_squared_logarithmic_error: 0.0122 - val_mean_absolute_error: 0.1770 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 27/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0820 - mean_squared_error: 0.0629 - mean_squared_logarithmic_error: 0.0144 - mean_absolute_error: 0.1810 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0773 - val_mean_squared_error: 0.0584 - val_mean_squared_logarithmic_error: 0.0121 - val_mean_absolute_error: 0.1718 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 28/250\n131/131 [==============================] - 311s 2s/step - loss: 0.0816 - mean_squared_error: 0.0625 - mean_squared_logarithmic_error: 0.0143 - mean_absolute_error: 0.1805 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0771 - val_mean_squared_error: 0.0580 - val_mean_squared_logarithmic_error: 0.0117 - val_mean_absolute_error: 0.1707 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 29/250\n131/131 [==============================] - 311s 2s/step - loss: 0.0815 - mean_squared_error: 0.0625 - mean_squared_logarithmic_error: 0.0142 - mean_absolute_error: 0.1807 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0790 - val_mean_squared_error: 0.0596 - val_mean_squared_logarithmic_error: 0.0123 - val_mean_absolute_error: 0.1748 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 30/250\n131/131 [==============================] - 311s 2s/step - loss: 0.0810 - mean_squared_error: 0.0619 - mean_squared_logarithmic_error: 0.0143 - mean_absolute_error: 0.1796 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0776 - val_mean_squared_error: 0.0588 - val_mean_squared_logarithmic_error: 0.0123 - val_mean_absolute_error: 0.1717 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 31/250\n131/131 [==============================] - 313s 2s/step - loss: 0.0814 - mean_squared_error: 0.0624 - mean_squared_logarithmic_error: 0.0143 - mean_absolute_error: 0.1803 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0779 - val_mean_squared_error: 0.0589 - val_mean_squared_logarithmic_error: 0.0124 - val_mean_absolute_error: 0.1732 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 32/250\n131/131 [==============================] - 313s 2s/step - loss: 0.0808 - mean_squared_error: 0.0618 - mean_squared_logarithmic_error: 0.0143 - mean_absolute_error: 0.1792 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0773 - val_mean_squared_error: 0.0583 
- val_mean_squared_logarithmic_error: 0.0122 - val_mean_absolute_error: 0.1725 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 33/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0808 - mean_squared_error: 0.0620 - mean_squared_logarithmic_error: 0.0142 - mean_absolute_error: 0.1797 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0826 - val_mean_squared_error: 0.0638 - val_mean_squared_logarithmic_error: 0.0123 - val_mean_absolute_error: 0.1847 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 34/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0796 - mean_squared_error: 0.0609 - mean_squared_logarithmic_error: 0.0140 - mean_absolute_error: 0.1777 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0767 - val_mean_squared_error: 0.0579 - val_mean_squared_logarithmic_error: 0.0119 - val_mean_absolute_error: 0.1719 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 35/250\n131/131 [==============================] - 311s 2s/step - loss: 0.0804 - mean_squared_error: 0.0618 - mean_squared_logarithmic_error: 0.0143 - mean_absolute_error: 0.1798 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0759 - val_mean_squared_error: 0.0572 - val_mean_squared_logarithmic_error: 0.0121 - val_mean_absolute_error: 0.1713 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 36/250\n131/131 [==============================] - 312s 2s/step - loss: 0.0801 - mean_squared_error: 0.0615 - mean_squared_logarithmic_error: 0.0140 - mean_absolute_error: 0.1790 - mean_absolute_percentage_error: inf - cosine_proximity: nan - val_loss: 0.0763 - val_mean_squared_error: 0.0575 - val_mean_squared_logarithmic_error: 0.0118 - val_mean_absolute_error: 0.1708 - val_mean_absolute_percentage_error: inf - val_cosine_proximity: nan\nEpoch 37/250\n"
],
[
"# Eğitim işleminin gidişatını grafik olarak görüyoruz\n# Train and validation loss charts\nprint(hs.history.keys())",
"['loss', 'mean_squared_logarithmic_error', 'val_mean_absolute_percentage_error', 'val_cosine_proximity', 'val_mean_squared_logarithmic_error', 'mean_absolute_percentage_error', 'val_mean_squared_error', 'val_mean_absolute_error', 'cosine_proximity', 'mean_squared_error', 'val_loss', 'mean_absolute_error']\n"
],
[
"#for val in df.columns[1:]:\n# plt.title(val)\n# plt.plot(df.epoch,df[val])\n# plt.show()\n\nfor val in hs.history.keys():\n if \"val_\" not in val:\n plt.title(val)\n plt.plot(hs.history[val])\n plt.plot(hs.history['val_' + val])\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.savefig('models/' + model_name_full + '_' + val + '.png')\n plt.show()",
"_____no_output_____"
],
[
"try:\n plt.plot(hs.history['lr'])\n plt.title('Learning rate')\n plt.xlabel('epoch')\n plt.show()\nexcept:\n pass",
"_____no_output_____"
],
[
"# Eğittiğimiz modeli kaydediyoruz\n# Ağ yapsını json olarak\n# Ağ parametre değerlerini h5 uzantılı olarak\nimport json \n# Save model weights and json.\nmodel.save_weights('models/' + model_name_full + '_model.h5')\nmodel_json = model.to_json()\nwith open('models/' + model_name_full + '_model.json', 'w') as outfile:\n json.dump(model_json, outfile)",
"_____no_output_____"
],
[
"# rastgele 10 resim seçip modelimiz hesapladığı sonuçla gerçeğine bakıyoruz \n# Eğer sonuçlar iyi ise kullanabiliriz\n# Sonuççlar kötüyse eğitim aşamasına dönmemiz lazım\n# Compare actual and predicted steering\nfor i in range(100):\n ix = random.randint(0,len(df)-1)\n out = model.predict(get_matrix(df.FileName[ix]).reshape(1,args['image_height'],args['image_width'],3))\n print(df.Angle[ix], ' - > ', out[0][0]/3)",
"(0.0, ' - > ', 0.3427734375)\n(-0.258572280407, ' - > ', -0.19303385416666666)\n(-0.120483756065, ' - > ', -0.1484375)\n(0.0, ' - > ', -0.032206217447916664)\n(-0.0921533256769, ' - > ', -0.12451171875)\n(0.15944202244300001, ' - > ', 0.16748046875)\n(-0.180676743388, ' - > ', -0.17333984375)\n(-0.155888929963, ' - > ', -0.16959635416666666)\n(0.0, ' - > ', 0.0023066202799479165)\n(-0.0992385521531, ' - > ', -0.07478841145833333)\n(-0.173591509461, ' - > ', -0.19401041666666666)\n(-0.0779933556914, ' - > ', -0.06363932291666667)\n(-0.15943153202500002, ' - > ', -0.15999348958333334)\n(-0.11694113910199999, ' - > ', -0.17626953125)\n(0.340000003576, ' - > ', 0.23828125)\n(0.0, ' - > ', -0.035990397135416664)\n(-0.340000003576, ' - > ', -0.24772135416666666)\n(-0.10278116911599999, ' - > ', -0.155029296875)\n(-0.0602907724679, ' - > ', -0.0811767578125)\n(0.0, ' - > ', -0.08528645833333333)\n(0.0567586384714, ' - > ', 0.085693359375)\n(0.021353468298900002, ' - > ', 0.14705403645833334)\n(-0.0284282118082, ' - > ', 0.021952311197916668)\n(-0.053216021508, ' - > ', -0.010045369466145834)\n(-0.322297394276, ' - > ', -0.2693684895833333)\n(0.0, ' - > ', 0.15909830729166666)\n(-0.127568975091, ' - > ', -0.19287109375)\n(0.0, ' - > ', -0.021809895833333332)\n(0.102791652083, ' - > ', 0.12923177083333334)\n(-0.166516765952, ' - > ', -0.13167317708333334)\n(-0.187751471996, ' - > ', -0.13264973958333334)\n(-0.152346313, ' - > ', -0.16471354166666666)\n(-0.15943153202500002, ' - > ', -0.16927083333333334)\n(-0.170048907399, ' - > ', -0.16072591145833334)\n(0.0, ' - > ', 0.0499267578125)\n(-0.120483756065, ' - > ', -0.12353515625)\n(0.0, ' - > ', -0.024088541666666668)\n(-0.212539300323, ' - > ', -0.19954427083333334)\n(0.0, ' - > ', -0.06742350260416667)\n(0.0, ' - > ', -0.035664876302083336)\n(-0.152346313, ' - > ', -0.11873372395833333)\n(0.0, ' - > ', -0.08699544270833333)\n(0.0, ' - > ', -0.162841796875)\n(0.0, ' - > ', 0.038248697916666664)\n(-0.340000003576, ' - > ', -0.19986979166666666)\n(0.0, ' - > ', 0.2841796875)\n(0.0, ' - > ', -0.07755533854166667)\n(0.0, ' - > ', -0.00348663330078125)\n(-0.308137446642, ' - > ', -0.24674479166666666)\n(-0.155888929963, ' - > ', -0.15681966145833334)\n(-0.0992385521531, ' - > ', -0.11344401041666667)\n(-0.340000003576, ' - > ', -0.3356119791666667)\n(0.0, ' - > ', 0.035481770833333336)\n(0.0, ' - > ', -0.0594482421875)\n(0.131111592054, ' - > ', 0.1103515625)\n(0.340000003576, ' - > ', 0.23990885416666666)\n(0.0, ' - > ', 0.058878580729166664)\n(-0.106323778629, ' - > ', -0.16715494791666666)\n(0.0, ' - > ', 0.028035481770833332)\n(0.0, ' - > ', -0.0445556640625)\n(-0.14881418645400002, ' - > ', -0.15462239583333334)\n(0.0, ' - > ', -0.014139811197916666)\n(-0.141728952527, ' - > ', -0.132568359375)\n(-0.297520071268, ' - > ', -0.2916666666666667)\n(0.340000003576, ' - > ', 0.314453125)\n(0.0, ' - > ', -0.28955078125)\n(-0.04967341199519999, ' - > ', -0.06522623697916667)\n(-0.269189655781, ' - > ', -0.2578125)\n(-0.198379307985, ' - > ', -0.16845703125)\n(0.0, ' - > ', 0.052042643229166664)\n(0.15944202244300001, ' - > ', 0.123046875)\n(0.0, ' - > ', -0.08199055989583333)\n(0.187761962414, ' - > ', 0.15706380208333334)\n(0.0, ' - > ', 0.0355224609375)\n(0.0, ' - > ', 0.2643229166666667)\n(0.131111592054, ' - > ', 0.11515299479166667)\n(0.0, ' - > ', -0.01080322265625)\n(0.0, ' - > ', 0.006988525390625)\n(0.0, ' - > ', 0.08748372395833333)\n(0.0, ' - > ', 0.044474283854166664)\n(-0.20192193984999998, ' - > ', -0.16552734375)\n(-0.0107256285846, ' - > 
', -0.005498250325520833)\n(-0.21962451934799998, ' - > ', -0.22526041666666666)\n(0.0, ' - > ', 0.0506591796875)\n(0.0, ' - > ', 0.028238932291666668)\n(0.340000003576, ' - > ', 0.20556640625)\n(0.0, ' - > ', 0.24007161458333334)\n(0.0, ' - > ', -0.041524251302083336)\n(0.0, ' - > ', -0.18489583333333334)\n(0.102791652083, ' - > ', 0.16861979166666666)\n(0.0, ' - > ', 0.24723307291666666)\n(0.0, ' - > ', 0.10595703125)\n(-0.194836705923, ' - > ', -0.21630859375)\n(0.0, ' - > ', -0.020853678385416668)\n(-0.07445074617860001, ' - > ', -0.14794921875)\n(-0.127568975091, ' - > ', -0.134033203125)\n(-0.109866395593, ' - > ', -0.14436848958333334)\n(0.0, ' - > ', 0.004987080891927083)\n(0.0, ' - > ', 0.09993489583333333)\n(-0.053216021508, ' - > ', -0.06803385416666667)\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a82809106c454d587e32264674ee8ed27ed17be
| 154,307 |
ipynb
|
Jupyter Notebook
|
notebooks/combine_data.ipynb
|
MichoelSnow/BGG
|
d100c0c15c3b57cb2e4d0ba89df6f3c4b4b5b10f
|
[
"MIT"
] | null | null | null |
notebooks/combine_data.ipynb
|
MichoelSnow/BGG
|
d100c0c15c3b57cb2e4d0ba89df6f3c4b4b5b10f
|
[
"MIT"
] | 4 |
2020-03-24T16:18:40.000Z
|
2021-12-13T19:50:55.000Z
|
notebooks/combine_data.ipynb
|
MichoelSnow/BGG
|
d100c0c15c3b57cb2e4d0ba89df6f3c4b4b5b10f
|
[
"MIT"
] | null | null | null | 50.050924 | 218 | 0.338507 |
[
[
[
"# Imports",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os",
"_____no_output_____"
]
],
[
[
"# Load the data",
"_____no_output_____"
]
],
[
[
"fl_path = '/home/msnow/git/bgg/data/kaggle/'\nfl_list = [x for x in os.listdir(fl_path) if x.endswith('.csv') and x.find('bgg_top')>0]\nfl_list.sort()\nfl_list",
"_____no_output_____"
]
],
[
[
"# Create DF of just weekly game rankings",
"_____no_output_____"
]
],
[
[
"fl_num = 0\ndf_rank = pd.read_csv(os.path.join(fl_path, fl_list[fl_num]), usecols=['name','BoardGameRank'])\ndf_rank = df_rank.rename(columns = {'name':fl_list[fl_num][:10]})\ndf_rank.head()",
"_____no_output_____"
],
[
"for wk in fl_list[1:]:\n df_tmp = pd.read_csv(os.path.join(fl_path, wk), usecols=['name','BoardGameRank'])\n df_tmp = df_tmp.rename(columns = {'name':wk[:10]})\n df_rank = df_rank.merge(right=df_tmp, how='left', on='BoardGameRank')",
"_____no_output_____"
],
[
"df_rank",
"_____no_output_____"
],
[
"df_rank.to_csv(os.path.join(fl_path,'curated','weekly_rankings.csv'), index=False)",
"_____no_output_____"
]
],
[
[
"# Create game lookup DF",
"_____no_output_____"
]
],
[
[
"static_cols = ['boardgameartist', 'boardgamecategory', 'boardgamedesigner', 'boardgamefamily', 'boardgamemechanic', 'boardgamepublisher', 'id', 'maxplayers', 'minage', 'minplayers', 'name', 'yearpublished']\nstatic_cols2 = ['boardgameartist', 'boardgamecategory', 'boardgamedesigner', 'boardgamefamily', 'boardgamemechanic', 'boardgamepublisher', 'game_id', 'maxplayers', 'minage', 'minplayers', 'name', 'yearpublished']",
"_____no_output_____"
],
[
"df_lookup = pd.read_csv(os.path.join(fl_path, fl_list[0]), usecols=static_cols)\nfor wk in fl_list[1:]:\n try:\n df_tmp = pd.read_csv(os.path.join(fl_path, wk), usecols=static_cols)\n except:\n df_tmp = pd.read_csv(os.path.join(fl_path, wk), usecols=static_cols2)\n df_tmp = df_tmp.rename(columns = {'game_id':'id'})\n pass\n df_lookup = pd.concat([df_lookup, df_tmp], ignore_index=True).drop_duplicates('id')\ndf_lookup.shape",
"_____no_output_____"
],
[
"df_lookup.to_csv(os.path.join(fl_path,'curated','game_lookup.csv'), index=False)",
"_____no_output_____"
],
[
"col_to_use = 'yearpublished'\ndf_pl_time = pd.read_csv(os.path.join(fl_path, fl_list[0]), usecols=['name',col_to_use])\ndf_pl_time = df_pl_time[['name', col_to_use]]\ndf_pl_time = df_pl_time.rename(columns = {col_to_use:fl_list[0][:10]})\nfor wk in fl_list[1:]:\n df_tmp = pd.read_csv(os.path.join(fl_path, wk), usecols=['name',col_to_use])\n df_tmp = df_tmp.loc[df_tmp.name.isin(df_pl_time.name)]\n df_tmp = df_tmp.rename(columns = {col_to_use:wk[:10]})\n df_pl_time = df_pl_time.merge(right=df_tmp, how='inner', on='name')\n df_pl_time = df_pl_time.drop_duplicates(subset='name')\n# df_pl_time.shape \ndf_pl_time.loc[df_pl_time.var(axis=1, numeric_only=True)>0,:].var(axis=1, numeric_only=True)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a828fdd6b6f18375030d190fb4f2c4bfb4d205e
| 59,110 |
ipynb
|
Jupyter Notebook
|
Notebooks/Performance-Evaluation/FT_pert-forest_512_20rnd_cosine.ipynb
|
danielecioffo/Bird-Images-Search-Engine
|
426e9928bacefb37b1cc71d618af252170896e99
|
[
"MIT"
] | null | null | null |
Notebooks/Performance-Evaluation/FT_pert-forest_512_20rnd_cosine.ipynb
|
danielecioffo/Bird-Images-Search-Engine
|
426e9928bacefb37b1cc71d618af252170896e99
|
[
"MIT"
] | null | null | null |
Notebooks/Performance-Evaluation/FT_pert-forest_512_20rnd_cosine.ipynb
|
danielecioffo/Bird-Images-Search-Engine
|
426e9928bacefb37b1cc71d618af252170896e99
|
[
"MIT"
] | 2 |
2022-02-28T20:16:28.000Z
|
2022-03-13T16:47:37.000Z
| 59,110 | 59,110 | 0.684284 |
[
[
[
"## Import libraries",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt \nimport pandas as pd\nimport numpy as np\nimport time\nimport os\nimport csv\nimport concurrent.futures",
"_____no_output_____"
]
],
[
[
"## Utility functions",
"_____no_output_____"
],
[
"### Create annot and load descriptors",
"_____no_output_____"
]
],
[
[
"def create_annot(path):\n image_list = list(Path(path).glob('*/*.jpg'))\n # the identity name is in the path (the name of the parent directory)\n names_list = [i.parent.name for i in image_list] # get the identity of each image\n # keep info in a pandas DataFrame\n annot = pd.DataFrame({'identity': names_list, 'image_path': image_list})\n return annot\n\ndef concatenate_annots(list_of_paths):\n concat_annot = pd.DataFrame()\n with concurrent.futures.ThreadPoolExecutor() as executor:\n annots = [executor.submit(create_annot, path) for path in list_of_paths]\n for annot in annots:\n new_annot = annot.result()\n concat_annot = concat_annot.append(new_annot, ignore_index = True)\n return concat_annot",
"_____no_output_____"
],
[
"def load_descriptors(path):\n with open(path, 'rb') as file:\n return np.load(file)\n\ndef concatenate_descriptors(list_of_paths):\n concat_descriptors = None\n with concurrent.futures.ThreadPoolExecutor() as executor:\n descriptors = [executor.submit(load_descriptors, path) for path in list_of_paths]\n for descriptor in descriptors:\n new_descriptor = descriptor.result()\n if concat_descriptors is None:\n concat_descriptors = new_descriptor\n else:\n concat_descriptors = np.concatenate([concat_descriptors, new_descriptor])\n return concat_descriptors",
"_____no_output_____"
]
],
[
[
"### Create pivots",
"_____no_output_____"
]
],
[
[
"def generate_pivots(descriptors, n, strategy=\"rnd\"):\n if strategy == \"kMED\":\n kmedoids = sklearn_extra.cluster.KMedoids(n_clusters=n).fit(descriptors)\n return kmedoids.cluster_centers_\n if strategy != \"rnd\":\n print(strategy, \"was not implemented. Random pivots were returned\")\n pivots_id = np.random.choice(np.arange(len(descriptors)), size=n)\n return descriptors[pivots_id]\n\ndef generate_list_of_pivots(descriptors, t, n, strategy=\"rnd\"):\n list_of_pivots = []\n with concurrent.futures.ThreadPoolExecutor() as executor:\n pivots = [executor.submit(generate_pivots, descriptors, n, strategy) for i in range(t)]\n for pivot in concurrent.futures.as_completed(pivots):\n new_pivot = pivot.result()\n list_of_pivots.append(new_pivot)\n return list_of_pivots",
"_____no_output_____"
]
],
[
[
"### Save test results",
"_____no_output_____"
]
],
[
[
"def save_results(dir, file_name, results):\n with open(os.path.join(dir, file_name +\".csv\"), 'w') as f:\n writer = csv.writer(f)\n # write the header\n writer.writerow([\"CLASS\", \"AP\", \"QUERY TIME\"])\n # write the data\n for r in results:\n writer.writerow(r) ",
"_____no_output_____"
]
],
[
[
"## Test Performance",
"_____no_output_____"
]
],
[
[
"drive.mount('/content/drive', force_remount=True)",
"Mounted at /content/drive\n"
]
],
[
[
"### Create annot and load descriptors for the database",
"_____no_output_____"
]
],
[
[
"db_annot = concatenate_annots(['/content/drive/MyDrive/CV_Birds/train', '/content/drive/MyDrive/CV_Birds/mirflickr25k'])\ndb_annot",
"_____no_output_____"
],
[
"db_descriptors = concatenate_descriptors(['/content/drive/MyDrive/CV_Birds/features/training/ResNet152v2/OneDense512_Dropout_fine_tuning.npy','/content/drive/MyDrive/CV_Birds/features/distractor/ResNet152v2/OneDense512_Dropout_fine_tuning.npy'])\ndb_descriptors.shape",
"_____no_output_____"
]
],
[
[
"### Create annot and load descriptors for the test set",
"_____no_output_____"
]
],
[
[
"query_annot = create_annot('/content/drive/MyDrive/CV_Birds/test')\nquery_annot",
"_____no_output_____"
],
[
"query_descriptors = load_descriptors('/content/drive/MyDrive/CV_Birds/features/test/ResNet152v2/OneDense512_Dropout_fine_tuning.npy')\nquery_descriptors.shape",
"_____no_output_____"
]
],
[
[
"To run our tests we select only the first image of each species within the test set. Please note that within the test set we have 5 images per species.",
"_____no_output_____"
]
],
[
[
"queries_indexes = [x for x in range(325*5) if x%5 == 0]",
"_____no_output_____"
]
],
[
[
"### Create PP-Index",
"_____no_output_____"
]
],
[
[
"!rm /content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine/*\n!rm -r /content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine/pert_forest_structure/*",
"rm: cannot remove '/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine/pert_forest_structure': Is a directory\nrm: cannot remove '/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine/pert_forest_structure/*': No such file or directory\n"
],
[
"def get_descriptor_from_id(id_object):\n return db_descriptors[id_object]",
"_____no_output_____"
],
[
"%cd \"/content/drive/MyDrive/CV_Birds/Notebooks/PP-Index\"\n%run PPIndex.ipynb",
"/content/drive/.shortcut-targets-by-id/1rI5YNBuaSlCB__w522WEkHjw-nFuvIo0/CV_Birds/Notebooks/PP-Index\n"
],
[
"pivots = generate_list_of_pivots(db_descriptors, t=3, n=20, strategy=\"rnd\")\nrnd_pp_forest = PrefixForest(pivots, length=3, distance_metric='cosine', base_directory=\"/content\", forest_file='pert_forest_structure')\nrnd_pp_forest.insert_objects_into_forest(range(len(db_descriptors)))\nrnd_pp_forest.save()",
"[WARNING]: Tree is still empty\n[WARNING]: Tree is still empty\n[WARNING]: Tree is still empty\n[WARNING]: Forest is still empty\nInserting objects into tree1 ...\nInserting objects into tree2 ...\nInserting objects into tree3 ...\n--- Insertion into tree tree3 completed: 275.044 seconds ---\n--- Insertion into tree tree1 completed: 275.586 seconds ---\n--- Insertion into tree tree2 completed: 275.720 seconds ---\n"
]
],
[
[
"### Compute mAP",
"_____no_output_____"
]
],
[
[
"birds_db = db_annot.loc[db_annot['identity'] != 'mirflickr']\ncounts = birds_db.groupby('identity').count()\nprint(\"Minimum number of images per species:\", int(counts.min()))\nprint(\"Maximum number of images per species:\", int(counts.max()))\nprint(\"Average number of images:\", float(counts.sum()/325))",
"Minimum number of images per species: 116\nMaximum number of images per species: 249\nAverage number of images: 145.63692307692307\n"
]
],
[
[
"Since at most we have 249 images per species, we use $n=250$.",
"_____no_output_____"
]
],
[
[
"n = 250",
"_____no_output_____"
]
],
[
[
"The formula for Average Precision is the following:\n\n> $AP@n=\\frac{1}{GTP}\\sum_{k=1}^{n}P@k×rel@k$\n\nwhere $GTP$ refers to the total number of ground truth positives, $n$ refers to the total number of images we are interested in, $P@k$ refers to the precision@k and $rel@k$ is a relevance function. \n\nThe relevance function is an indicator function which equals 1 if the document at rank $k$ is relevant and equals to 0 otherwise.",
"_____no_output_____"
]
],
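[
[
"As a quick sanity check of the formula above (a worked example, not taken from the dataset): suppose a query has $GTP=2$ relevant images and they show up at ranks 1 and 3 among the first $n=3$ results. Then $P@1=1$, $P@2=\\frac{1}{2}$, $P@3=\\frac{2}{3}$, and only ranks 1 and 3 are relevant, so $AP@3=\\frac{1}{2}\\left(1\\cdot 1+\\frac{1}{2}\\cdot 0+\\frac{2}{3}\\cdot 1\\right)=\\frac{5}{6}\\approx 0.83$.",
"_____no_output_____"
]
],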
[
[
"def compute_ap(query_index, retrieved_ids):\n query_identity = query_annot['identity'][query_index]\n print(query_index//5, query_identity)\n GTP = len(db_annot.loc[db_annot['identity'] == query_identity])\n relevant = 0\n precision_summation = 0\n for k, id in enumerate(retrieved_ids):\n if db_annot['identity'][id] == query_identity: # relevant result\n relevant = relevant + 1\n precision_at_k = relevant/(k+1)\n precision_summation = precision_summation + precision_at_k\n return (query_identity, precision_summation/GTP)",
"_____no_output_____"
]
],
[
[
"For each query, $Q$, we can calculate a corresponding $AP$. Then, the $mAP$ is simply the mean of all the queries that were made.\n> $mAP = \\frac{1}{N}\\sum_{i=1}^{N}AP_i$\n\nIn our case, $N=325$ (one query per species)",
"_____no_output_____"
]
],
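[
[
"As a quick illustration of the $mAP$ formula (assumed numbers only, not results from this notebook), the sketch below averages a handful of per-query $AP$ values, mirroring how `ap_at_n` is averaged further down.",
"_____no_output_____"
]
],
[
[
"# Hedged toy example: mAP as the mean of per-query AP values (assumed numbers, illustration only)\nimport numpy as np\n\ntoy_aps = [0.80, 0.55, 0.70, 0.65]  # assumed AP_i values for N = 4 toy queries\nprint(np.mean(toy_aps))  # mAP = (0.80 + 0.55 + 0.70 + 0.65) / 4",
"_____no_output_____"
]
],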
[
[
"def rnd_pivots_queries(query_index, n):\n start_time = time.time()\n ids, distances = rnd_pp_forest.find_nearest_neighbors(query_descriptors[query_index], n, perturbations=3)\n end_time = time.time()\n ids = ids.tolist()\n return compute_ap(query_index, ids) + (end_time - start_time,)",
"_____no_output_____"
],
[
"aps = []\nfor query_index in queries_indexes:\n aps.append(rnd_pivots_queries(query_index, n))",
"0 AFRICAN CROWNED CRANE\n1 AFRICAN FIREFINCH\n2 ALBATROSS\n3 ALEXANDRINE PARAKEET\n4 AMERICAN AVOCET\n5 AMERICAN BITTERN\n6 AMERICAN COOT\n7 AMERICAN GOLDFINCH\n8 AMERICAN KESTREL\n9 AMERICAN PIPIT\n10 AMERICAN REDSTART\n11 ANHINGA\n12 ANNAS HUMMINGBIRD\n13 ANTBIRD\n14 ARARIPE MANAKIN\n15 ASIAN CRESTED IBIS\n16 BALD EAGLE\n17 BALD IBIS\n18 BALI STARLING\n19 BALTIMORE ORIOLE\n20 BANANAQUIT\n21 BANDED BROADBILL\n22 BANDED PITA\n23 BAR-TAILED GODWIT\n24 BARN OWL\n25 BARN SWALLOW\n26 BARRED PUFFBIRD\n27 BAY-BREASTED WARBLER\n28 BEARDED BARBET\n29 BEARDED BELLBIRD\n30 BEARDED REEDLING\n31 BELTED KINGFISHER\n32 BIRD OF PARADISE\n33 BLACK & YELLOW bROADBILL\n34 BLACK BAZA\n35 BLACK FRANCOLIN\n36 BLACK SKIMMER\n37 BLACK SWAN\n38 BLACK TAIL CRAKE\n39 BLACK THROATED BUSHTIT\n40 BLACK THROATED WARBLER\n41 BLACK VULTURE\n42 BLACK-CAPPED CHICKADEE\n43 BLACK-NECKED GREBE\n44 BLACK-THROATED SPARROW\n45 BLACKBURNIAM WARBLER\n46 BLONDE CRESTED WOODPECKER\n47 BLUE COAU\n48 BLUE GROUSE\n49 BLUE HERON\n50 BLUE THROATED TOUCANET\n51 BOBOLINK\n52 BORNEAN BRISTLEHEAD\n53 BORNEAN LEAFBIRD\n54 BORNEAN PHEASANT\n55 BRANDT CORMARANT\n56 BROWN CREPPER\n57 BROWN NOODY\n58 BROWN THRASHER\n59 BULWERS PHEASANT\n60 CACTUS WREN\n61 CALIFORNIA CONDOR\n62 CALIFORNIA GULL\n63 CALIFORNIA QUAIL\n64 CANARY\n65 CAPE GLOSSY STARLING\n66 CAPE MAY WARBLER\n67 CAPPED HERON\n68 CAPUCHINBIRD\n69 CARMINE BEE-EATER\n70 CASPIAN TERN\n71 CASSOWARY\n72 CEDAR WAXWING\n73 CERULEAN WARBLER\n74 CHARA DE COLLAR\n75 CHESTNET BELLIED EUPHONIA\n76 CHIPPING SPARROW\n77 CHUKAR PARTRIDGE\n78 CINNAMON TEAL\n79 CLARKS NUTCRACKER\n80 COCK OF THE ROCK\n81 COCKATOO\n82 COLLARED ARACARI\n83 COMMON FIRECREST\n84 COMMON GRACKLE\n85 COMMON HOUSE MARTIN\n86 COMMON LOON\n87 COMMON POORWILL\n88 COMMON STARLING\n89 COUCHS KINGBIRD\n90 CRESTED AUKLET\n91 CRESTED CARACARA\n92 CRESTED NUTHATCH\n93 CRIMSON SUNBIRD\n94 CROW\n95 CROWNED PIGEON\n96 CUBAN TODY\n97 CUBAN TROGON\n98 CURL CRESTED ARACURI\n99 D-ARNAUDS BARBET\n100 DARK EYED JUNCO\n101 DOUBLE BARRED FINCH\n102 DOUBLE BRESTED CORMARANT\n103 DOWNY WOODPECKER\n104 EASTERN BLUEBIRD\n105 EASTERN MEADOWLARK\n106 EASTERN ROSELLA\n107 EASTERN TOWEE\n108 ELEGANT TROGON\n109 ELLIOTS PHEASANT\n110 EMPEROR PENGUIN\n111 EMU\n112 ENGGANO MYNA\n113 EURASIAN GOLDEN ORIOLE\n114 EURASIAN MAGPIE\n115 EVENING GROSBEAK\n116 FAIRY BLUEBIRD\n117 FIRE TAILLED MYZORNIS\n118 FLAME TANAGER\n119 FLAMINGO\n120 FRIGATE\n121 GAMBELS QUAIL\n122 GANG GANG COCKATOO\n123 GILA WOODPECKER\n124 GILDED FLICKER\n125 GLOSSY IBIS\n126 GO AWAY BIRD\n127 GOLD WING WARBLER\n128 GOLDEN CHEEKED WARBLER\n129 GOLDEN CHLOROPHONIA\n130 GOLDEN EAGLE\n131 GOLDEN PHEASANT\n132 GOLDEN PIPIT\n133 GOULDIAN FINCH\n134 GRAY CATBIRD\n135 GRAY KINGBIRD\n136 GRAY PARTRIDGE\n137 GREAT GRAY OWL\n138 GREAT KISKADEE\n139 GREAT POTOO\n140 GREATOR SAGE GROUSE\n141 GREEN BROADBILL\n142 GREEN JAY\n143 GREEN MAGPIE\n144 GREY PLOVER\n145 GROVED BILLED ANI\n146 GUINEA TURACO\n147 GUINEAFOWL\n148 GYRFALCON\n149 HARLEQUIN DUCK\n150 HARPY EAGLE\n151 HAWAIIAN GOOSE\n152 HELMET VANGA\n153 HIMALAYAN MONAL\n154 HOATZIN\n155 HOODED MERGANSER\n156 HOOPOES\n157 HORNBILL\n158 HORNED GUAN\n159 HORNED LARK\n160 HORNED SUNGEM\n161 HOUSE FINCH\n162 HOUSE SPARROW\n163 HYACINTH MACAW\n164 IMPERIAL SHAQ\n165 INCA TERN\n166 INDIAN BUSTARD\n167 INDIAN PITTA\n168 INDIAN ROLLER\n169 INDIGO BUNTING\n170 IWI\n171 JABIRU\n172 JAVA SPARROW\n173 KAGU\n174 KAKAPO\n175 KILLDEAR\n176 KING VULTURE\n177 KIWI\n178 KOOKABURRA\n179 LARK BUNTING\n180 LAZULI BUNTING\n181 LILAC ROLLER\n182 LONG-EARED OWL\n183 MAGPIE 
GOOSE\n184 MALABAR HORNBILL\n185 MALACHITE KINGFISHER\n186 MALAGASY WHITE EYE\n187 MALEO\n188 MALLARD DUCK\n189 MANDRIN DUCK\n190 MANGROVE CUCKOO\n191 MARABOU STORK\n192 MASKED BOOBY\n193 MASKED LAPWING\n194 MIKADO PHEASANT\n195 MOURNING DOVE\n196 MYNA\n197 NICOBAR PIGEON\n198 NOISY FRIARBIRD\n199 NORTHERN CARDINAL\n200 NORTHERN FLICKER\n201 NORTHERN FULMAR\n202 NORTHERN GANNET\n203 NORTHERN GOSHAWK\n204 NORTHERN JACANA\n205 NORTHERN MOCKINGBIRD\n206 NORTHERN PARULA\n207 NORTHERN RED BISHOP\n208 NORTHERN SHOVELER\n209 OCELLATED TURKEY\n210 OKINAWA RAIL\n211 ORANGE BRESTED BUNTING\n212 ORIENTAL BAY OWL\n213 OSPREY\n214 OSTRICH\n215 OVENBIRD\n216 OYSTER CATCHER\n217 PAINTED BUNTIG\n218 PALILA\n219 PARADISE TANAGER\n220 PARAKETT AKULET\n221 PARUS MAJOR\n222 PATAGONIAN SIERRA FINCH\n223 PEACOCK\n224 PELICAN\n225 PEREGRINE FALCON\n226 PHILIPPINE EAGLE\n227 PINK ROBIN\n228 POMARINE JAEGER\n229 PUFFIN\n230 PURPLE FINCH\n231 PURPLE GALLINULE\n232 PURPLE MARTIN\n233 PURPLE SWAMPHEN\n234 PYGMY KINGFISHER\n235 QUETZAL\n236 RAINBOW LORIKEET\n237 RAZORBILL\n238 RED BEARDED BEE EATER\n239 RED BELLIED PITTA\n240 RED BROWED FINCH\n241 RED FACED CORMORANT\n242 RED FACED WARBLER\n243 RED FODY\n244 RED HEADED DUCK\n245 RED HEADED WOODPECKER\n246 RED HONEY CREEPER\n247 RED NAPED TROGON\n248 RED TAILED HAWK\n249 RED TAILED THRUSH\n250 RED WINGED BLACKBIRD\n251 RED WISKERED BULBUL\n252 REGENT BOWERBIRD\n253 RING-NECKED PHEASANT\n254 ROADRUNNER\n255 ROBIN\n256 ROCK DOVE\n257 ROSY FACED LOVEBIRD\n258 ROUGH LEG BUZZARD\n259 ROYAL FLYCATCHER\n260 RUBY THROATED HUMMINGBIRD\n261 RUDY KINGFISHER\n262 RUFOUS KINGFISHER\n263 RUFUOS MOTMOT\n264 SAMATRAN THRUSH\n265 SAND MARTIN\n266 SANDHILL CRANE\n267 SATYR TRAGOPAN\n268 SCARLET CROWNED FRUIT DOVE\n269 SCARLET IBIS\n270 SCARLET MACAW\n271 SCARLET TANAGER\n272 SHOEBILL\n273 SHORT BILLED DOWITCHER\n274 SMITHS LONGSPUR\n275 SNOWY EGRET\n276 SNOWY OWL\n277 SORA\n278 SPANGLED COTINGA\n279 SPLENDID WREN\n280 SPOON BILED SANDPIPER\n281 SPOONBILL\n282 SPOTTED CATBIRD\n283 SRI LANKA BLUE MAGPIE\n284 STEAMER DUCK\n285 STORK BILLED KINGFISHER\n286 STRAWBERRY FINCH\n287 STRIPED OWL\n288 STRIPPED MANAKIN\n289 STRIPPED SWALLOW\n290 SUPERB STARLING\n291 SWINHOES PHEASANT\n292 TAIWAN MAGPIE\n293 TAKAHE\n294 TASMANIAN HEN\n295 TEAL DUCK\n296 TIT MOUSE\n297 TOUCHAN\n298 TOWNSENDS WARBLER\n299 TREE SWALLOW\n300 TROPICAL KINGBIRD\n301 TRUMPTER SWAN\n302 TURKEY VULTURE\n303 TURQUOISE MOTMOT\n304 UMBRELLA BIRD\n305 VARIED THRUSH\n306 VENEZUELIAN TROUPIAL\n307 VERMILION FLYCATHER\n308 VICTORIA CROWNED PIGEON\n309 VIOLET GREEN SWALLOW\n310 VULTURINE GUINEAFOWL\n311 WALL CREAPER\n312 WATTLED CURASSOW\n313 WHIMBREL\n314 WHITE BROWED CRAKE\n315 WHITE CHEEKED TURACO\n316 WHITE NECKED RAVEN\n317 WHITE TAILED TROPIC\n318 WHITE THROATED BEE EATER\n319 WILD TURKEY\n320 WILSONS BIRD OF PARADISE\n321 WOOD DUCK\n322 YELLOW BELLIED FLOWERPECKER\n323 YELLOW CACIQUE\n324 YELLOW HEADED BLACKBIRD\n"
],
[
"aps",
"_____no_output_____"
],
[
"ap_at_n = np.array([ap[1] for ap in aps])\nquery_time = np.array(([ap[2] for ap in aps]))",
"_____no_output_____"
],
[
"mAP_at_n = np.mean(ap_at_n, axis=0)\navg_query_time = np.mean(query_time, axis=0)\nprint(\"mAP:\", mAP_at_n)\nprint(\"avg. query time: \", avg_query_time)",
"mAP: 0.6071221508182182\navg. query time: 3.1683017686697155\n"
],
[
"save_results('/content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine', 'FT_pert-forest_512_20rnd_cosine_results', aps)",
"_____no_output_____"
],
[
"!mv /content/tree* /content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine/pert_forest_structure/",
"_____no_output_____"
],
[
"!mv /content/pert* /content/drive/MyDrive/CV_Birds/performance/fine_tuning/index/FT_pert-forest_512_20rnd_cosine/pert_forest_structure/",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
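[
"markdown"
],
[
"code"
],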
[
"code"
],
[
"markdown"
],
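[
"markdown"
],
[
"code"
],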
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a829269165d29c230d226fd0240e89264d076a2
| 13,280 |
ipynb
|
Jupyter Notebook
|
tutorials/streamlit_notebooks/healthcare/NER_TUMOR.ipynb
|
faisaladnanpeltops/spark-nlp-workshop
|
f8cb2bd09aeee19e9a10d773ed9e7330cc80fcd8
|
[
"Apache-2.0"
] | null | null | null |
tutorials/streamlit_notebooks/healthcare/NER_TUMOR.ipynb
|
faisaladnanpeltops/spark-nlp-workshop
|
f8cb2bd09aeee19e9a10d773ed9e7330cc80fcd8
|
[
"Apache-2.0"
] | null | null | null |
tutorials/streamlit_notebooks/healthcare/NER_TUMOR.ipynb
|
faisaladnanpeltops/spark-nlp-workshop
|
f8cb2bd09aeee19e9a10d773ed9e7330cc80fcd8
|
[
"Apache-2.0"
] | null | null | null | 13,280 | 13,280 | 0.663705 |
[
[
[
"\n\n\n\n[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/healthcare/NER_TUMOR.ipynb)\n\n\n",
"_____no_output_____"
],
[
"# **Detect tumor characteristics**",
"_____no_output_____"
],
[
"To run this yourself, you will need to upload your license keys to the notebook. Otherwise, you can look at the example outputs at the bottom of the notebook. To upload license keys, open the file explorer on the left side of the screen and upload `workshop_license_keys.json` to the folder that opens.",
"_____no_output_____"
],
[
"## 1. Colab Setup",
"_____no_output_____"
],
[
"Import license keys",
"_____no_output_____"
]
],
[
[
"import os\nimport json\n\nwith open('/content/workshop_license_keys.json', 'r') as f:\n license_keys = json.load(f)\n\nlicense_keys.keys()\n\nsecret = license_keys['secret']\nos.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']\nos.environ['JSL_OCR_LICENSE'] = license_keys['JSL_OCR_LICENSE']\nos.environ['AWS_ACCESS_KEY_ID'] = license_keys['AWS_ACCESS_KEY_ID']\nos.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']",
"_____no_output_____"
]
],
[
[
"Install dependencies",
"_____no_output_____"
]
],
[
[
"# Install Java\n! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null\n! java -version\n\n# Install pyspark and SparkNLP\n! pip install --ignore-installed -q pyspark==2.4.4\n! python -m pip install --upgrade spark-nlp-jsl==2.5.2 --extra-index-url https://pypi.johnsnowlabs.com/$secret\n! pip install --ignore-installed -q spark-nlp==2.5.2",
"_____no_output_____"
]
],
[
[
"Import dependencies into Python and start the Spark session",
"_____no_output_____"
]
],
[
[
"os.environ['JAVA_HOME'] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ['PATH'] = os.environ['JAVA_HOME'] + \"/bin:\" + os.environ['PATH']\nimport sparknlp\n\nimport pandas as pd\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql import SparkSession\n\nfrom sparknlp.annotator import *\nfrom sparknlp_jsl.annotator import *\nfrom sparknlp.base import *\nimport sparknlp_jsl\nimport pyspark.sql.functions as F\n\nbuilder = SparkSession.builder \\\n .appName('Spark NLP Licensed') \\\n .master('local[*]') \\\n .config('spark.driver.memory', '16G') \\\n .config('spark.serializer', 'org.apache.spark.serializer.KryoSerializer') \\\n .config('spark.kryoserializer.buffer.max', '2000M') \\\n .config('spark.jars.packages', 'com.johnsnowlabs.nlp:spark-nlp_2.11:2.5.2') \\\n .config('spark.jars', f'https://pypi.johnsnowlabs.com/{secret}/spark-nlp-jsl-2.5.2.jar')\n \nspark = builder.getOrCreate()",
"_____no_output_____"
]
],
[
[
"## 2. Select the NER model and construct the pipeline",
"_____no_output_____"
],
[
"Select the NER model - Tumor model: **ner_bionlp**\n\nFor more details: https://github.com/JohnSnowLabs/spark-nlp-models#pretrained-models---spark-nlp-for-healthcare",
"_____no_output_____"
]
],
[
[
"# You can change this to the model you want to use and re-run cells below.\n# Neoplasm models: ner_bionlp\n# All these models use the same clinical embeddings.\nMODEL_NAME = \"ner_bionlp\"",
"_____no_output_____"
]
],
[
[
"Create the pipeline",
"_____no_output_____"
]
],
[
[
"\n\ndocument_assembler = DocumentAssembler() \\\n .setInputCol('text')\\\n .setOutputCol('document')\n\nsentence_detector = SentenceDetector() \\\n .setInputCols(['document'])\\\n .setOutputCol('sentence')\n\ntokenizer = Tokenizer()\\\n .setInputCols(['sentence']) \\\n .setOutputCol('token')\n\nword_embeddings = WordEmbeddingsModel.pretrained('embeddings_clinical', 'en', 'clinical/models') \\\n .setInputCols(['sentence', 'token']) \\\n .setOutputCol('embeddings')\n\nclinical_ner = NerDLModel.pretrained(MODEL_NAME, 'en', 'clinical/models') \\\n .setInputCols(['sentence', 'token', 'embeddings']) \\\n .setOutputCol('ner')\n\nner_converter = NerConverter()\\\n .setInputCols(['sentence', 'token', 'ner']) \\\n .setOutputCol('ner_chunk')\n\nnlp_pipeline = Pipeline(stages=[\n document_assembler, \n sentence_detector,\n tokenizer,\n word_embeddings,\n clinical_ner,\n ner_converter])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\npipeline_model = nlp_pipeline.fit(empty_df)\nlight_pipeline = LightPipeline(pipeline_model)",
"_____no_output_____"
]
],
[
[
"## 3. Create example inputs",
"_____no_output_____"
]
],
[
[
"# Enter examples as strings in this array\ninput_list = [\n \"\"\"Under loupe magnification, the lesion was excised with 2 mm margins, oriented with sutures and submitted for frozen section pathology. The report was \"basal cell carcinoma with all margins free of tumor.\" Hemostasis was controlled with the Bovie. Excised lesion diameter was 1.2 cm. The defect was closed by elevating a left laterally based rotation flap utilizing the glabellar skin. The flap was elevated with a scalpel and Bovie, rotated into the defect without tension, ***** to the defect with scissors and inset in layer with interrupted 5-0 Vicryl for the dermis and running 5-0 Prolene for the skin. Donor site was closed in V-Y fashion with similar suture technique.\"\"\"\n]",
"_____no_output_____"
]
],
[
[
"## 4. Use the pipeline to create outputs",
"_____no_output_____"
]
],
[
[
"df = spark.createDataFrame(pd.DataFrame({\"text\": input_list}))\nresult = pipeline_model.transform(df)",
"_____no_output_____"
]
],
[
[
"## 5. Visualize results",
"_____no_output_____"
],
[
"Visualize outputs as data frame",
"_____no_output_____"
]
],
[
[
"exploded = F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata'))\nselect_expression_0 = F.expr(\"cols['0']\").alias(\"chunk\")\nselect_expression_1 = F.expr(\"cols['1']['entity']\").alias(\"ner_label\")\nresult.select(exploded.alias(\"cols\")) \\\n .select(select_expression_0, select_expression_1).show(truncate=False)\nresult = result.toPandas()",
"+--------------------+----------------------+\n|chunk |ner_label |\n+--------------------+----------------------+\n|lesion |Pathological_formation|\n|basal cell carcinoma|Cancer |\n|tumor |Cancer |\n|lesion |Pathological_formation|\n|glabellar skin |Organ |\n|flap |Tissue |\n|dermis |Tissue |\n|skin |Organ |\n+--------------------+----------------------+\n\n"
]
],
[
[
"Functions to display outputs as HTML",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML, display\nimport random\n\ndef get_color():\n r = lambda: random.randint(128,255)\n return \"#%02x%02x%02x\" % (r(), r(), r())\n\ndef annotation_to_html(full_annotation):\n ner_chunks = full_annotation[0]['ner_chunk']\n text = full_annotation[0]['document'][0].result\n label_color = {}\n for chunk in ner_chunks:\n label_color[chunk.metadata['entity']] = get_color()\n\n html_output = \"<div>\"\n pos = 0\n\n for n in ner_chunks:\n if pos < n.begin and pos < len(text):\n html_output += f\"<span class=\\\"others\\\">{text[pos:n.begin]}</span>\"\n pos = n.end + 1\n html_output += f\"<span class=\\\"entity-wrapper\\\" style=\\\"color: black; background-color: {label_color[n.metadata['entity']]}\\\"> <span class=\\\"entity-name\\\">{n.result}</span> <span class=\\\"entity-type\\\">[{n.metadata['entity']}]</span></span>\"\n\n if pos < len(text):\n html_output += f\"<span class=\\\"others\\\">{text[pos:]}</span>\"\n\n html_output += \"</div>\"\n display(HTML(html_output))",
"_____no_output_____"
]
],
[
[
"Display example outputs as HTML",
"_____no_output_____"
]
],
[
[
"for example in input_list:\n annotation_to_html(light_pipeline.fullAnnotate(example))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a829438bd5beb57407f4ec3f88559c95641247c
| 830,621 |
ipynb
|
Jupyter Notebook
|
experiment/Visualization/Visual_Price_MostFreqTop3Cat.ipynb
|
DSEI21000-S21/project-product-price-prediction
|
b98bff46e882de0ac1dade525a14beeaea224ffc
|
[
"MIT"
] | 1 |
2021-04-26T20:48:43.000Z
|
2021-04-26T20:48:43.000Z
|
experiment/Visualization/Visual_Price_MostFreqTop3Cat.ipynb
|
ZhiLi51/project-product-price-prediction
|
bbd1eb9577b40fcb538a6b33f5ba71096b5af72f
|
[
"MIT"
] | 20 |
2021-04-23T22:54:53.000Z
|
2021-05-10T01:36:52.000Z
|
experiment/Visualization/Visual_Price_MostFreqTop3Cat.ipynb
|
ZhiLi51/project-product-price-prediction
|
bbd1eb9577b40fcb538a6b33f5ba71096b5af72f
|
[
"MIT"
] | 2 |
2021-04-26T20:48:55.000Z
|
2021-05-21T00:47:11.000Z
| 1,179.859375 | 261,786 | 0.93177 |
[
[
[
"!pip install chart_studio",
"Collecting chart_studio\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ca/ce/330794a6b6ca4b9182c38fc69dd2a9cbff60fd49421cb8648ee5fee352dc/chart_studio-1.1.0-py3-none-any.whl (64kB)\n\r\u001b[K |█████ | 10kB 15.7MB/s eta 0:00:01\r\u001b[K |██████████▏ | 20kB 21.7MB/s eta 0:00:01\r\u001b[K |███████████████▎ | 30kB 13.9MB/s eta 0:00:01\r\u001b[K |████████████████████▍ | 40kB 9.4MB/s eta 0:00:01\r\u001b[K |█████████████████████████▍ | 51kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▌ | 61kB 8.3MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 71kB 4.4MB/s \n\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from chart_studio) (2.23.0)\nRequirement already satisfied: plotly in /usr/local/lib/python3.7/dist-packages (from chart_studio) (4.4.1)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from chart_studio) (1.15.0)\nRequirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.7/dist-packages (from chart_studio) (1.3.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->chart_studio) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->chart_studio) (2020.12.5)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->chart_studio) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->chart_studio) (1.24.3)\nInstalling collected packages: chart-studio\nSuccessfully installed chart-studio-1.1.0\n"
],
[
"import plotly.graph_objects as go\nimport plotly.offline as offline_py\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport plotly.figure_factory as ff\nimport numpy as np\n\n%matplotlib inline",
"_____no_output_____"
],
[
"import pandas as pd\ndf = pd.read_csv(\"https://raw.githubusercontent.com/DSEI21000-S21/project-product-price-prediction/main/data/random_samples/stratified_sampling_data_by_price_whigh_sz50000_1619218354.csv\")\n\n# size of dataset\nprint('The size of the dataset is: {} \\n'.format(df.shape))\n\n# different data types in the dataset\nprint('The types of the dataset: {}'.format(df.dtypes))\n\ndf.head()",
"The size of the dataset is: (50000, 11) \n\nThe types of the dataset: train_id int64\nname object\nitem_condition_id int64\nbrand_name object\nprice float64\nshipping int64\nitem_description object\nc1 object\nc2 object\nc3 object\nprice_bin object\ndtype: object\n"
],
[
"df.price.describe()",
"_____no_output_____"
],
[
"# most popular categories -- Women, electronics and men\nx = df['c1'].value_counts().index.values.astype('str')[:15]\ny = df['c1'].value_counts().values[:15]\npct = [(\"%.2f\"%(v*100))+\"%\" for v in (y/len(df))] [:15]",
"_____no_output_____"
],
[
"trace1 = go.Bar(x=x, y=y, text=pct)\nlayout = dict(title= 'Number of Items by Main Category',\n yaxis = dict(title='Count'),\n xaxis = dict(title='Brand'))\nfig=dict(data=[trace1], layout=layout)\noffline_py.iplot(fig)",
"_____no_output_____"
],
[
"x = df['brand_name'].value_counts().index.values.astype('str')[:15]\ny = df['brand_name'].value_counts().values[:15]\npct = [(\"%.2f\"%(v*100))+\"%\" for v in (y/len(df))] [:15]",
"_____no_output_____"
],
[
"colorscale = [[0, '#FAEE1C'], [0.33, '#F3558E'], [0.66, '#9C1DE7'], [1, '#581B98']]",
"_____no_output_____"
],
[
"# most popular brands -- Nike & PINK\ntrace1 = go.Bar(x=x, y=y, text=pct, marker=dict(color = y, colorscale=colorscale, showscale=True))\nlayout = dict(title= 'Number of Items by brand name',\n yaxis = dict(title='Count'),\n xaxis = dict(title='Brand'))\nfig=dict(data=[trace1], layout=layout)\noffline_py.iplot(fig)",
"_____no_output_____"
],
[
"dataframe = df[df.brand_name == 'Nike'][:100]\ndatawomen = dataframe.loc[:, ['price', 'shipping']]\ndatawomen[\"index\"] = np.arange(1,len(datawomen)+1)\n\nfig = ff.create_scatterplotmatrix(datawomen, diag='box', index='index',colormap='Portland',\n colormap_type='cat',\n height=700, width=700)\noffline_py.iplot(fig)",
"_____no_output_____"
],
[
"# visualize which words has the highest frequencies within the top1 category\ndescription = df.item_description[df.c1 == 'women']\n\nplt.subplots(figsize = (8,8))\n\nwordcloud = WordCloud (\n background_color = 'white',\n width = 512,\n height = 384\n ).generate(' '.join(description))\nplt.imshow(wordcloud) # image show\nplt.axis('off') # to off the axis of x and y\nplt.title('Top Words -- Women')\nplt.show()",
"_____no_output_____"
],
[
"description = df.item_description[df.c1 == 'electronics']\n\nplt.subplots(figsize = (8,8))\n\nwordcloud = WordCloud (\n background_color = 'white',\n width = 512,\n height = 384\n ).generate(' '.join(description))\nplt.imshow(wordcloud) # image show\nplt.axis('off') # to off the axis of x and y\nplt.title('Top Words -- Electronics')\nplt.show()",
"_____no_output_____"
],
[
"description = df.item_description[df.c1 == 'men']\n\nplt.subplots(figsize = (8,8))\n\nwordcloud = WordCloud (\n background_color = 'white',\n width = 512,\n height = 384\n ).generate(' '.join(description))\nplt.imshow(wordcloud) # image show\nplt.axis('off') # to off the axis of x and y\nplt.title('Top Words -- Men')\nplt.show()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a82a0e5a66db95fd981358379f619f314af6245
| 531,776 |
ipynb
|
Jupyter Notebook
|
3/z1-Copy1.ipynb
|
iCarrrot/Evol-algs
|
cb79440f0c5430b1d63a43acf2d64faed6db1b17
|
[
"MIT"
] | 1 |
2019-12-18T11:31:03.000Z
|
2019-12-18T11:31:03.000Z
|
3/z1-Copy1.ipynb
|
iCarrrot/Evol-algs
|
cb79440f0c5430b1d63a43acf2d64faed6db1b17
|
[
"MIT"
] | null | null | null |
3/z1-Copy1.ipynb
|
iCarrrot/Evol-algs
|
cb79440f0c5430b1d63a43acf2d64faed6db1b17
|
[
"MIT"
] | null | null | null | 755.363636 | 34,544 | 0.950722 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom collections import namedtuple\n%load_ext autoreload\n%autoreload 2\n%matplotlib inline",
"_____no_output_____"
],
[
"from utils import *\nfrom ES import ES\nfrom benchmarks import *",
"_____no_output_____"
],
[
"benchData = namedtuple('benchData', 'func, domain, min, argmin')\n\nbenchList = {\n 'Sphere':benchData(rast, (-5.12, 5.12), 0, \"0,0,..\"),\n 'Griewank':benchData(griewank, (-500, 500), 0, \"0,0,..\"),\n 'Rastrigin':benchData(rast, (-5.12, 5.12), 0, \"0,0,..\"),\n 'Schwefel':benchData(rast, (-500, 500), 0, \"1,1,..\"),\n 'Dixon & Price':benchData(rast, (-10, 10), 0, \"0,0,..\"),\n}\n\nevkwargs = {\n 'mi':500,\n 'Lambda':4000,\n 'd':50,\n 'number_of_iterations':400,\n 'K':0.75,\n}\n\npltkwargs = {\n 'tries':2,\n 'barplot':True,\n 'file':False,\n 'verbose' : True\n}\nfor name in benchList:\n for plus in [True, False]:\n evkwargs = {\n **evkwargs,\n 'population_evaluation': benchList[name].func,\n 'domain': benchList[name].domain,\n 'plus': plus\n }\n pltkwargs['title'] = f'Name: {name} {\"plus \" if plus else \"\"} (min: {benchList[name].min})'\n pltkwargs['filename'] = f'z1_{name}{\"_plus \" if plus else \"\"}'\n \n plot_scores(ES, **pltkwargs, **evkwargs)\n \n",
"Name: Sphere plus (min: 0): próba 0; wynik: 172.1272443323976; czas: 43.2378511428833\nName: Sphere plus (min: 0): próba 1; wynik: 224.85978813210096; czas: 36.90970993041992\n"
],
[
"benchData = namedtuple('benchData', 'func, domain, min, argmin')\n\nbenchList = {\n 'Sphere':benchData(rast, (-5.12, 5.12), 0, \"0,0,..\"),\n 'Griewank':benchData(griewank, (-500, 500), 0, \"0,0,..\"),\n 'Rastrigin':benchData(rast, (-5.12, 5.12), 0, \"0,0,..\"),\n 'Schwefel':benchData(rast, (-500, 500), 0, \"1,1,..\"),\n 'Dixon & Price':benchData(rast, (-10, 10), 0, \"0,0,..\"),\n}\n\nevkwargs = {\n 'mi':500,\n 'Lambda':4000,\n 'd':50,\n 'number_of_iterations':400,\n 'K':0.6,\n}\n\npltkwargs = {\n 'tries':2,\n 'barplot':True,\n 'file':False,\n 'verbose' : True\n}\nfor name in benchList:\n for plus in [True, False]:\n evkwargs = {\n **evkwargs,\n 'population_evaluation': benchList[name].func,\n 'domain': benchList[name].domain,\n 'plus': plus\n }\n pltkwargs['title'] = f'Name: {name} {\"plus \" if plus else \"\"} (min: {benchList[name].min})'\n pltkwargs['filename'] = f'z1_{name}{\"_plus \" if plus else \"\"}'\n \n plot_scores(ES, **pltkwargs, **evkwargs)\n \n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a82a47cbcec1eaaf170ed212126375a344e1764
| 29,303 |
ipynb
|
Jupyter Notebook
|
src/text_classification_ml.ipynb
|
yyHaker/TextClassification
|
dc3c5ffe0731609c8f0c7a18a4daa5f149f83e9f
|
[
"MIT"
] | 3 |
2019-06-08T14:11:56.000Z
|
2020-05-26T15:08:23.000Z
|
src/text_classification_ml.ipynb
|
yyHaker/TextClassification
|
dc3c5ffe0731609c8f0c7a18a4daa5f149f83e9f
|
[
"MIT"
] | null | null | null |
src/text_classification_ml.ipynb
|
yyHaker/TextClassification
|
dc3c5ffe0731609c8f0c7a18a4daa5f149f83e9f
|
[
"MIT"
] | null | null | null | 40.417931 | 1,694 | 0.459953 |
[
[
[
"# 加载文本分类数据集\nfrom sklearn.datasets import fetch_20newsgroups\nimport random\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\nprint(\"sample several datas: \")\nprint(\"X_train: \", X_train[0: 2])\nprint(\"Y_train:\", y_train[0: 2])",
"sample several datas: \nX_train: [\"From: [email protected] (where's my thing)\\nSubject: WHAT car is this!?\\nNntp-Posting-Host: rac3.wam.umd.edu\\nOrganization: University of Maryland, College Park\\nLines: 15\\n\\n I was wondering if anyone out there could enlighten me on this car I saw\\nthe other day. It was a 2-door sports car, looked to be from the late 60s/\\nearly 70s. It was called a Bricklin. The doors were really small. In addition,\\nthe front bumper was separate from the rest of the body. This is \\nall I know. If anyone can tellme a model name, engine specs, years\\nof production, where this car is made, history, or whatever info you\\nhave on this funky looking car, please e-mail.\\n\\nThanks,\\n- IL\\n ---- brought to you by your neighborhood Lerxst ----\\n\\n\\n\\n\\n\", \"From: [email protected] (Guy Kuo)\\nSubject: SI Clock Poll - Final Call\\nSummary: Final call for SI clock reports\\nKeywords: SI,acceleration,clock,upgrade\\nArticle-I.D.: shelley.1qvfo9INNc3s\\nOrganization: University of Washington\\nLines: 11\\nNNTP-Posting-Host: carson.u.washington.edu\\n\\nA fair number of brave souls who upgraded their SI clock oscillator have\\nshared their experiences for this poll. Please send a brief message detailing\\nyour experiences with the procedure. Top speed attained, CPU rated speed,\\nadd on cards and adapters, heat sinks, hour of usage per day, floppy disk\\nfunctionality with 800 and 1.4 m floppies are especially requested.\\n\\nI will be summarizing in the next two days, so please add to the network\\nknowledge base if you have done the clock upgrade and haven't answered this\\npoll. Thanks.\\n\\nGuy Kuo <[email protected]>\\n\"]\nY_train: [7 4]\n"
],
[
"# 提取文本TF-IDF数据特征\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\n\n\ndef TFIDF(X_train, X_test, MAX_NB_WORDS=75000):\n vectorizer_x = TfidfVectorizer(max_features=MAX_NB_WORDS)\n X_train = vectorizer_x.fit_transform(X_train).toarray()\n X_test = vectorizer_x.transform(X_test).toarray()\n print(\"tf-idf with\", str(np.array(X_train).shape[1]),\"features\")\n return X_train, X_test\n\nX_train, X_test = TFIDF(X_train, X_test)",
"tf-idf with 75000 features\n"
],
[
"# 使用PCA将文本特征降纬\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2000)\nX_train_new = pca.fit_transform(X_train)\nX_test_new = pca.transform(X_test)\n\nprint(\"train with old features: \", np.array(X_train).shape)\nprint(\"train with new features:\", np.array(X_train_new).shape)\n\nprint(\"test with old features: \", np.array(X_test).shape)\nprint(\"test with new features:\", np.array(X_test_new).shape)",
"_____no_output_____"
],
[
"# 使用LDA将数据降纬\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nLDA = LinearDiscriminantAnalysis(n_components=15)\nX_train_new = LDA.fit(X_train, y_train)\nX_train_new = LDA.transform(X_train)\nX_test_new = LDA.transform(X_test)\n\nprint(\"train with old features: \", np.array(X_train).shape)\nprint(\"train with new features:\", np.array(X_train_new).shape)\n\nprint(\"test with old features: \", np.array(X_test).shape)\nprint(\"test with new features:\", np.array(X_test_new).shape)",
"_____no_output_____"
],
[
"# 使用NMF将数据降纬\nfrom sklearn.decomposition import NMF\nNMF_ = NMF(n_components=2000)\nX_train_new = NMF_.fit(X_train)\nX_train_new = NMF_.transform(X_train)\nX_test_new = NMF_.transform(X_test)\n\nprint(\"train with old features: \", np.array(X_train).shape)\nprint(\"train with new features:\", np.array(X_train_new).shape)\n\nprint(\"test with old features: \", np.array(X_test).shape)\nprint(\"test with new features:\", np.array(X_test_new))",
"_____no_output_____"
],
[
"# 使用random projection将数据降纬\nfrom sklearn import random_projection\n\nRandomProjection = random_projection.GaussianRandomProjection(n_components=2000)\nX_train_new = RandomProjection.fit_transform(X_train)\nX_test_new = RandomProjection.transform(X_test)\n\nprint(\"train with old features: \", np.array(X_train).shape)\nprint(\"train with new features:\", np.array(X_train_new).shape)\n\nprint(\"test with old features: \", np.array(X_test).shape)\nprint(\"test with new features:\", np.array(X_test_new).shape)\n",
"_____no_output_____"
],
[
"# about T-SNE\nimport numpy as np\nfrom sklearn.manifold import TSNE\nX = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\nX_embedded = TSNE(n_components=2).fit_transform(X)\nprint(X_embedded.shape)",
"(4, 2)\n"
],
[
"# Rocchio classification\nfrom sklearn.neighbors.nearest_centroid import NearestCentroid\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', NearestCentroid()),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.75 0.49 0.60 319\n 1 0.44 0.76 0.56 389\n 2 0.75 0.68 0.71 394\n 3 0.71 0.59 0.65 392\n 4 0.81 0.71 0.76 385\n 5 0.83 0.66 0.74 395\n 6 0.49 0.88 0.63 390\n 7 0.86 0.76 0.80 396\n 8 0.91 0.86 0.89 398\n 9 0.85 0.79 0.82 397\n 10 0.95 0.80 0.87 399\n 11 0.94 0.66 0.78 396\n 12 0.40 0.70 0.51 393\n 13 0.84 0.49 0.62 396\n 14 0.89 0.72 0.80 394\n 15 0.55 0.73 0.63 398\n 16 0.68 0.76 0.71 364\n 17 0.97 0.70 0.81 376\n 18 0.54 0.53 0.53 310\n 19 0.58 0.39 0.47 251\n\navg / total 0.74 0.69 0.70 7532\n\n"
],
[
"# boosting classification\nimport numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', GradientBoostingClassifier(n_estimators=100)),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
"/home/yyhaker/anaconda3/lib/python3.6/site-packages/sklearn/ensemble/weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.\n from numpy.core.umath_tests import inner1d\n"
],
[
"# bagging classifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', BaggingClassifier(KNeighborsClassifier())),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.59 0.71 0.64 319\n 1 0.59 0.56 0.57 389\n 2 0.59 0.57 0.58 394\n 3 0.58 0.57 0.58 392\n 4 0.60 0.55 0.57 385\n 5 0.74 0.63 0.68 395\n 6 0.60 0.47 0.53 390\n 7 0.77 0.71 0.74 396\n 8 0.84 0.82 0.83 398\n 9 0.76 0.75 0.76 397\n 10 0.82 0.88 0.85 399\n 11 0.74 0.84 0.78 396\n 12 0.67 0.53 0.59 393\n 13 0.76 0.51 0.61 396\n 14 0.78 0.79 0.78 394\n 15 0.72 0.78 0.75 398\n 16 0.71 0.76 0.74 364\n 17 0.61 0.79 0.69 376\n 18 0.46 0.64 0.53 310\n 19 0.50 0.55 0.52 251\n\navg / total 0.68 0.67 0.67 7532\n\n"
],
[
"# Naive Bayes Classifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MultinomialNB()),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.80 0.52 0.63 319\n 1 0.81 0.65 0.72 389\n 2 0.82 0.65 0.73 394\n 3 0.67 0.78 0.72 392\n 4 0.86 0.77 0.81 385\n 5 0.89 0.75 0.82 395\n 6 0.93 0.69 0.80 390\n 7 0.85 0.92 0.88 396\n 8 0.94 0.93 0.93 398\n 9 0.92 0.90 0.91 397\n 10 0.89 0.97 0.93 399\n 11 0.59 0.97 0.74 396\n 12 0.84 0.60 0.70 393\n 13 0.92 0.74 0.82 396\n 14 0.84 0.89 0.87 394\n 15 0.44 0.98 0.61 398\n 16 0.64 0.94 0.76 364\n 17 0.93 0.91 0.92 376\n 18 0.96 0.42 0.58 310\n 19 0.97 0.14 0.24 251\n\navg / total 0.82 0.77 0.77 7532\n\n"
],
[
"# K-nearest Neighbor\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', KNeighborsClassifier()),\n ])\n\ntext_clf.fit(X_train, y_train)\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.43 0.76 0.55 319\n 1 0.50 0.61 0.55 389\n 2 0.56 0.57 0.57 394\n 3 0.53 0.58 0.56 392\n 4 0.59 0.56 0.57 385\n 5 0.69 0.60 0.64 395\n 6 0.58 0.45 0.51 390\n 7 0.75 0.69 0.72 396\n 8 0.84 0.81 0.82 398\n 9 0.77 0.72 0.74 397\n 10 0.85 0.84 0.84 399\n 11 0.76 0.84 0.80 396\n 12 0.70 0.50 0.58 393\n 13 0.82 0.49 0.62 396\n 14 0.79 0.76 0.78 394\n 15 0.75 0.76 0.76 398\n 16 0.70 0.73 0.72 364\n 17 0.62 0.76 0.69 376\n 18 0.55 0.61 0.58 310\n 19 0.56 0.49 0.52 251\n\navg / total 0.67 0.66 0.66 7532\n\n"
],
[
"# Support Vector Machine (SVM)\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', LinearSVC()),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.82 0.80 0.81 319\n 1 0.76 0.80 0.78 389\n 2 0.77 0.73 0.75 394\n 3 0.71 0.76 0.74 392\n 4 0.84 0.86 0.85 385\n 5 0.87 0.76 0.81 395\n 6 0.83 0.91 0.87 390\n 7 0.92 0.91 0.91 396\n 8 0.95 0.95 0.95 398\n 9 0.92 0.95 0.93 397\n 10 0.96 0.98 0.97 399\n 11 0.93 0.94 0.93 396\n 12 0.81 0.79 0.80 393\n 13 0.90 0.87 0.88 396\n 14 0.90 0.93 0.92 394\n 15 0.84 0.93 0.88 398\n 16 0.75 0.92 0.82 364\n 17 0.97 0.89 0.93 376\n 18 0.82 0.62 0.71 310\n 19 0.75 0.61 0.68 251\n\navg / total 0.85 0.85 0.85 7532\n\n"
],
[
"# Decision Tree\nfrom sklearn import tree\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', tree.DecisionTreeClassifier()),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.49 0.49 0.49 319\n 1 0.40 0.41 0.41 389\n 2 0.50 0.56 0.53 394\n 3 0.46 0.41 0.43 392\n 4 0.52 0.57 0.54 385\n 5 0.48 0.47 0.48 395\n 6 0.68 0.72 0.70 390\n 7 0.62 0.58 0.60 396\n 8 0.72 0.76 0.74 398\n 9 0.52 0.56 0.54 397\n 10 0.66 0.66 0.66 399\n 11 0.78 0.70 0.74 396\n 12 0.34 0.35 0.35 393\n 13 0.49 0.42 0.45 396\n 14 0.66 0.62 0.64 394\n 15 0.70 0.69 0.70 398\n 16 0.47 0.61 0.53 364\n 17 0.78 0.59 0.67 376\n 18 0.41 0.38 0.39 310\n 19 0.32 0.35 0.33 251\n\navg / total 0.56 0.55 0.55 7532\n\n"
],
[
"# Random Forest\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.datasets import fetch_20newsgroups\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\nnewsgroups_test = fetch_20newsgroups(subset='test')\nX_train = newsgroups_train.data\nX_test = newsgroups_test.data\ny_train = newsgroups_train.target\ny_test = newsgroups_test.target\n\ntext_clf = Pipeline([('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', RandomForestClassifier(n_estimators=100)),\n ])\n\ntext_clf.fit(X_train, y_train)\n\n\npredicted = text_clf.predict(X_test)\n\nprint(metrics.classification_report(y_test, predicted))",
" precision recall f1-score support\n\n 0 0.72 0.65 0.68 319\n 1 0.56 0.69 0.62 389\n 2 0.64 0.78 0.70 394\n 3 0.63 0.64 0.64 392\n 4 0.77 0.74 0.76 385\n 5 0.76 0.66 0.71 395\n 6 0.75 0.92 0.83 390\n 7 0.80 0.80 0.80 396\n 8 0.89 0.90 0.89 398\n 9 0.78 0.91 0.84 397\n 10 0.91 0.92 0.92 399\n 11 0.88 0.92 0.90 396\n 12 0.68 0.48 0.57 393\n 13 0.84 0.66 0.74 396\n 14 0.82 0.90 0.86 394\n 15 0.68 0.93 0.78 398\n 16 0.68 0.87 0.76 364\n 17 0.95 0.82 0.88 376\n 18 0.89 0.48 0.62 310\n 19 0.77 0.30 0.43 251\n\navg / total 0.77 0.76 0.75 7532\n\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a82a933a6e9fba79d7ab460aa0afc7c81634422
| 1,494 |
ipynb
|
Jupyter Notebook
|
assets/data/Temperature_Data/converting.ipynb
|
rmoesw01/planeteers
|
a15f71d42ab0ea3718e56002bf4b968e66190065
|
[
"CC-BY-3.0"
] | null | null | null |
assets/data/Temperature_Data/converting.ipynb
|
rmoesw01/planeteers
|
a15f71d42ab0ea3718e56002bf4b968e66190065
|
[
"CC-BY-3.0"
] | null | null | null |
assets/data/Temperature_Data/converting.ipynb
|
rmoesw01/planeteers
|
a15f71d42ab0ea3718e56002bf4b968e66190065
|
[
"CC-BY-3.0"
] | 1 |
2020-10-31T17:31:28.000Z
|
2020-10-31T17:31:28.000Z
| 27.163636 | 125 | 0.572959 |
[
[
[
"from DATAcsv_TO_json import converter",
"_____no_output_____"
],
[
"# converter(\"static/data/dfFilt.csv\",\"static/data/snow_pred_df.csv\",\"static/data/snow.json\",\"Year\",\"SNOW\")\n# converter(\"static/data/dfFilt.csv\",\"static/data/EMXP_pred_df.csv\",\"static/data/EMXP.json\",\"Year\",\"EMXP\")\n# converter(\"static/data/dfFilt.csv\",\"static/data/PRCP_pred_df.csv\",\"static/data/PRCP.json\",\"Year\",\"PRCP\")\nconverter(\"static/data/dfFilt.csv\",\"static/data/TMAX_pred_df.csv\",\"static/data/TMAX.json\",\"Year\",\"TMAX\")\n# converter(\"static/data/dfFilt.csv\",\"static/data/TMIN_pred_df.csv\",\"static/data/TMIN.json\",\"Year\",\"TMIN\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4a82a9b52727ccdd1d1e204614ad671e6ae134af
| 7,602 |
ipynb
|
Jupyter Notebook
|
src/.ipynb_checkpoints/hjb_mdp_05-checkpoint.ipynb
|
songqsh/foo1
|
536bf44cc4fb43a3ac0f2a64695f619ac7526651
|
[
"MIT"
] | 1 |
2020-03-14T03:04:24.000Z
|
2020-03-14T03:04:24.000Z
|
src/.ipynb_checkpoints/hjb_mdp_05-checkpoint.ipynb
|
songqsh/foo1
|
536bf44cc4fb43a3ac0f2a64695f619ac7526651
|
[
"MIT"
] | 1 |
2019-07-01T20:35:39.000Z
|
2019-07-04T22:07:50.000Z
|
src/.ipynb_checkpoints/hjb_mdp_05-checkpoint.ipynb
|
songqsh/foo1
|
536bf44cc4fb43a3ac0f2a64695f619ac7526651
|
[
"MIT"
] | 2 |
2019-08-25T00:50:05.000Z
|
2020-02-25T20:06:32.000Z
| 31.675 | 105 | 0.425941 |
[
[
[
"# MDP from multidimensional HJB\n\nsee [pdf](https://github.com/songqsh/foo1/blob/master/doc/191206HJB.pdf) for its math derivation\n\nsee souce code at \n- [py](hjb_mdp_v05_3.py) for tabular approach and \n- [py](hjb_mdp_nn_v05.py) for deep learning approach",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport time\n#import ipdb\n\nimport itertools\ndef deep_iter(*shape):\n iters = (range(i) for i in shape)\n return itertools.product(*iters)\n \n \n\nclass Pde:\n def __init__(\n self,\n dim=1,\n lam=0.0,\n drift = lambda s,a: a,\n run_cost = lambda s,a: len(s) + np.sum(s**2)*2.+ np.sum(a**2)/2.0,\n term_cost = lambda s: -np.sum(s**2),\n limit_s = 1.0, #l-infinity limit for state\n limit_a = 2.0, #l-infinity limit for action\n verbose=True\n ):\n self.dim = dim\n self.lam = lam\n self.drift = drift\n self.run_cost = run_cost\n self.term_cost = term_cost \n self.limit_s = limit_s\n self.limit_a = limit_a\n\n if verbose:\n print(str(dim) + '-dim HJB')\n \n #domain is a unit hyper cube \n def is_interior(self, s):\n return all(0<s<1)\n \n #cfd2mdp\n def mdp(self, n_mesh_s = 8, n_mesh_a = 16, method='cfd'):\n out = {}\n \n ####domain of mdp\n h_s = self.limit_s/n_mesh_s #mesh size in state\n h_a = self.limit_a/n_mesh_a #mesh size in action\n v_shape = tuple([n_mesh_s + 1]*self.dim)\n a_shape = tuple([n_mesh_a + 1]*self.dim)\n \n def is_interior(*ix_s):\n return all([0<x<n_mesh_s for x in ix_s])\n \n out.update({\n 'v_shape': v_shape,\n 'a_shape': a_shape,\n 'is_interior': is_interior\n })\n ####domain\n \n # convert index(tuple) to state\n def i2s(*ix): \n return np.array([x * h_s for x in ix]) \n out['i2s'] = i2s\n #convert index to action\n def i2a(*ix):\n return np.array([x * h_a for x in ix])\n #out['i2a'] = i2a\n\n\n \n ########running and terminal costs and discount rate\n def run_cost(ix_s,ix_a):\n return self.run_cost(i2s(*ix_s), i2a(*ix_a))*h_s**2/self.dim\n \n def term_cost(ix_s):\n return self.term_cost(i2s(*ix_s))\n \n rate = self.dim/(self.dim+self.lam*(h_s**2))\n out.update({\n 'run_cost': run_cost,\n 'term_cost': term_cost,\n 'rate': rate\n })\n #########\n \n #####transition\n #return:\n # a list of nbd indices\n # a list of prob\n def step(ix_s, ix_a):\n ix_next_s_up = (np.array(ix_s)+np.eye(self.dim)).astype(int).tolist()\n ix_next_s_dn = (np.array(ix_s)-np.eye(self.dim)).astype(int).tolist()\n ix_next_s = [tuple(ix) for ix in ix_next_s_up+ix_next_s_dn]\n \n pr=[]\n if method == 'cfd':\n b = self.drift(i2s(*ix_s), i2a(*ix_a))\n pr_up = ((1+2.*h_s*b)/self.dim/2.0).tolist()\n pr_dn = ((1-2.*h_s*b)/self.dim/2.0).tolist()\n pr = pr_up+pr_dn\n \n return ix_next_s, pr\n out.update({'step': step})\n \n return out\n \n\n\ndef value_iter(v_shape, a_shape, i2s, is_interior, \n run_cost, term_cost, rate, step):\n dim = len(v_shape)\n v0 = np.zeros(v_shape)\n \n # boundary value\n for ix_s in deep_iter(*v_shape):\n if not is_interior(*ix_s):\n v0[ix_s]=term_cost(ix_s)\n v1 = v0.copy()\n\n\n\n for iter_n in range(100):\n for ix_s0 in deep_iter(*v_shape):\n if is_interior(*ix_s0):\n q1 = []\n for ix_a in deep_iter(*a_shape):\n rhs = run_cost(ix_s0, ix_a)\n ix_s1, pr = step(ix_s0, ix_a); \n for k in range(2*dim):\n rhs += v0[ix_s1[k]]*pr[k]\n q1 += [rhs,]\n v1[ix_s0] = rate*min(q1); \n \n\n if np.max(np.abs(v0 - v1)) < 1e-3:\n v0 = v1.copy()\n break\n v0 = v1.copy(); \n \n #iter_n += 1\n return iter_n, v0",
"_____no_output_____"
],
[
"p = Pde(dim=2); m = p.mdp(n_mesh_s=16)\nstart_time = time.time()\nn, v = value_iter(**m)\nend_time = time.time()\nprint('>>>time elapsed is: ' + str(end_time - start_time))\n\ndef true_soln(s):\n return -np.sum(s**2)\nerr = []\nfor ix_s in deep_iter(*m['v_shape']):\n err0 = np.abs(v[ix_s] - true_soln(m['i2s'](*ix_s)))\n err += [err0, ]\nprint('>>> sup norm error is: ' + str(max(err)))\nprint('>>> number of iterations is: ' + str(n))",
"2-dim HJB\n>>>time elapsed is: 346.68731594085693\n>>> sup norm error is: 0.3167323936691848\n>>> number of iterations is: 99\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
]
] |
4a82b0b8a9405c7640b6c6fe0d8475e3920348a9
| 6,200 |
ipynb
|
Jupyter Notebook
|
Week5_C1M5L3_Code_Reuse_V2.ipynb
|
osgalvanm/GoogleCrashCoursePython
|
40f1881749bf4ece29e4cc13becfb774b3384f81
|
[
"Apache-2.0"
] | null | null | null |
Week5_C1M5L3_Code_Reuse_V2.ipynb
|
osgalvanm/GoogleCrashCoursePython
|
40f1881749bf4ece29e4cc13becfb774b3384f81
|
[
"Apache-2.0"
] | null | null | null |
Week5_C1M5L3_Code_Reuse_V2.ipynb
|
osgalvanm/GoogleCrashCoursePython
|
40f1881749bf4ece29e4cc13becfb774b3384f81
|
[
"Apache-2.0"
] | null | null | null | 30.693069 | 383 | 0.559677 |
[
[
[
"# Code Reuse",
"_____no_output_____"
],
[
"Let’s put what we learned about code reuse all together. \n<br><br>\nFirst, let’s look back at **inheritance**. Run the following cell that defines a generic `Animal` class. ",
"_____no_output_____"
]
],
[
[
"class Animal:\n name = \"\"\n category = \"\"\n \n def __init__(self, name):\n self.name = name\n \n def set_category(self, category):\n self.category = category",
"_____no_output_____"
]
],
[
[
"What we have is not enough to do much -- yet. That’s where you come in. \n<br><br>\nIn the next cell, define a `Turtle` class that inherits from the `Animal` class. Then go ahead and set its category. For instance, a turtle is generally considered a reptile. Although modern cladistics call this categorization into question, for purposes of this exercise we will say turtles are reptiles! ",
"_____no_output_____"
]
],
[
[
"class Turtle(Animal):\n category = \"reptile\"",
"_____no_output_____"
]
],
[
[
"Run the following cell to check whether you correctly defined your `Turtle` class and set its category to reptile.",
"_____no_output_____"
]
],
[
[
"print(Turtle.category)",
"reptile\n"
]
],
[
[
"Was the output of the above cell reptile? If not, go back and edit your `Turtle` class making sure that it inherits from the `Animal` class and its category is properly set to reptile. Be sure to re-run that cell once you've finished your edits. Did you get it? If so, great!",
"_____no_output_____"
],
[
"Next, let’s practice **composition** a little bit. This one will require a second type of `Animal` that is in the same category as the first. For example, since you already created a `Turtle` class, go ahead and create a `Snake` class. Don’t forget that it also inherits from the `Animal` class and that its category should be set to reptile.",
"_____no_output_____"
]
],
[
[
"class Snake(Animal):\n category = \"reptile\"",
"_____no_output_____"
]
],
[
[
"Now, let’s say we have a large variety of `Animal`s (such as turtles and snakes) in a Zoo. Below we have the `Zoo` class. We’re going to use it to organize our various `Animal`s. Remember, inheritance says a Turtle is an `Animal`, but a `Zoo` is not an `Animal` and an `Animal` is not a `Zoo` -- though they are related to one another. ",
"_____no_output_____"
],
[
"Fill in the blanks of the `Zoo` class below so that you can use **zoo.add_animal( )** to add instances of the `Animal` subclasses you created above. Once you’ve added them all, you should be able to use **zoo.total_of_category( )** to tell you exactly how many individual `Animal` types the `Zoo` has for each category! Be sure to run the cell once you've finished your edits.",
"_____no_output_____"
]
],
[
[
"class Zoo:\n def __init__(self):\n self.current_animals = {}\n \n def add_animal(self, animal):\n self.current_animals[animal.name] = animal.category\n \n def total_of_category(self, category):\n result = 0\n for animal in self.current_animals.values():\n if animal == category:\n result += 1\n return result\n\nzoo = Zoo()",
"_____no_output_____"
]
],
[
[
"Run the following cell to check whether you properly filled in the blanks of your `Zoo` class.",
"_____no_output_____"
]
],
[
[
"turtle = Turtle(\"Turtle\") #create an instance of the Turtle class\nsnake = Snake(\"Snake\") #create an instance of the Snake class\n\nzoo.add_animal(turtle)\nzoo.add_animal(snake)\n\nprint(zoo.total_of_category(\"reptile\")) #how many zoo animal types in the reptile category",
"2\n"
]
],
[
[
"Was the output of the above cell 2? If not, go back and edit the `Zoo` class making sure to fill in the blanks with the appropriate attributes. Be sure to re-run that cell once you've finished your edits. \n<br>\nDid you get it? If so, perfect! You have successfully defined your `Turtle` and `Snake` subclasses as well as your `Zoo` class. You are all done with this notebook. Great work!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a82c2ee900dade06230bfb43e027c7e80b4477e
| 28,274 |
ipynb
|
Jupyter Notebook
|
nbs/_alum_lm_finetuning_fastai_blurr_nli.ipynb
|
aikindergarten/vat
|
d47eb24e75c0ced25a262b3593708aeb989f11b3
|
[
"Apache-2.0"
] | 1 |
2021-04-04T20:58:16.000Z
|
2021-04-04T20:58:16.000Z
|
nbs/_alum_lm_finetuning_fastai_blurr_nli.ipynb
|
aikindergarten/vat
|
d47eb24e75c0ced25a262b3593708aeb989f11b3
|
[
"Apache-2.0"
] | 5 |
2021-04-25T12:23:07.000Z
|
2021-05-20T17:15:18.000Z
|
nbs/_alum_lm_finetuning_fastai_blurr_nli.ipynb
|
aikindergarten/vat
|
d47eb24e75c0ced25a262b3593708aeb989f11b3
|
[
"Apache-2.0"
] | null | null | null | 31.555804 | 2,292 | 0.488046 |
[
[
[
"!nvidia-smi",
"Mon Apr 19 17:59:32 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Quadro RTX 5000 Off | 00000000:1C:00.0 Off | Off |\n| 35% 29C P8 5W / 230W | 0MiB / 16125MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"import sys\nif 'google.colab' in sys.modules:\n !pip install -Uqq fastcore onnx onnxruntime sentencepiece seqeval rouge-score\n !pip install -Uqq --no-deps fastai ohmeow-blurr\n !pip install -Uqq transformers datasets wandb ",
"_____no_output_____"
],
[
"from fastai.text.all import *\nfrom fastai.callback.wandb import *",
"_____no_output_____"
],
[
"from transformers import *\nfrom datasets import load_dataset, concatenate_datasets\n\nfrom blurr.data.all import *\nfrom blurr.modeling.all import *",
"/opt/conda/lib/python3.6/site-packages/torchaudio/backend/utils.py:54: UserWarning: \"sox\" backend is being deprecated. The default backend will be changed to \"sox_io\" backend in 0.8.0 and \"sox\" backend will be removed in 0.9.0. Please migrate to \"sox_io\" backend. Please refer to https://github.com/pytorch/audio/issues/903 for the detail.\n '\"sox\" backend is being deprecated. '\n[nltk_data] Downloading package wordnet to /home/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n"
]
],
[
[
"## Data preprocessing",
"_____no_output_____"
]
],
[
[
"ds_name = 'snli'",
"_____no_output_____"
],
[
"train_ds = load_dataset(ds_name, split='train')\nvalid_ds = load_dataset(ds_name, split='validation')",
"Reusing dataset snli (/home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c)\nReusing dataset snli (/home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c)\n"
],
[
"len(train_ds), len(valid_ds)",
"_____no_output_____"
],
[
"train_ds.column_names",
"_____no_output_____"
],
[
"train_ds[2]",
"_____no_output_____"
],
[
"from collections import Counter",
"_____no_output_____"
],
[
"Counter(train_ds['label'])",
"_____no_output_____"
],
[
"train_ds = train_ds.filter(lambda sample: sample['label'] in [0,1,2])\nvalid_ds = valid_ds.filter(lambda sample: sample['label'] in [0,1,2])",
"Loading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-18cfe39918caca0a.arrow\nLoading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-45271a826cbdfaba.arrow\n"
]
],
[
[
"## Setup",
"_____no_output_____"
]
],
[
[
"model_name = 'distilbert-base-uncased'\n# data\nmax_len = 512\nbs = 32\nval_bs = bs*2\n# training\nlr = 2e-5",
"_____no_output_____"
]
],
[
[
"## Tracking",
"_____no_output_____"
]
],
[
[
"import wandb\n\nWANDB_NAME = f'{ds_name}-{model_name}-alum'\nGROUP = f'{ds_name}-{model_name}-alum-{lr:.0e}'\nNOTES = f'Simple finetuning {model_name} with RAdam lr={lr:.0e}'\nCONFIG = {}\nTAGS =[model_name,ds_name,'radam','alum']",
"_____no_output_____"
],
[
"wandb.init(reinit=True, project=\"vat\", entity=\"fastai_community\",\n name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS, config=CONFIG);",
"\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mfastai_community\u001b[0m (use `wandb login --relogin` to force relogin)\n"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"def _to_device(e, device):\n if hasattr(e, 'to'): return e.to(device)\n elif isinstance(e, dict):\n for _, v in e.items():\n if hasattr(v, 'to'): v.to(device)\n return {k:(v.to(device) if hasattr(v, 'to') else v) for k, v in e.items()}",
"_____no_output_____"
],
[
"@patch\ndef one_batch(self:Learner, i, b):\n self.iter = i\n b_on_device = tuple(_to_device(e, self.dls.device) for e in b) if self.dls.device is not None else b\n self._split(b_on_device)\n self._with_events(self._do_one_batch, 'batch', CancelBatchException)",
"_____no_output_____"
],
[
"hf_arch, hf_config, hf_tokenizer, hf_model = BLURR_MODEL_HELPER.get_hf_objects(model_name, model_cls=AutoModelForSequenceClassification, tokenizer_cls=AutoTokenizer, \n config_kwargs={'num_labels':3}, tokenizer_kwargs={'max_len':512})",
"_____no_output_____"
],
[
"def get_x(sample):\n return sample['premise'], sample['hypothesis']",
"_____no_output_____"
],
[
"ds = concatenate_datasets([train_ds, valid_ds])\ntrain_idx = list(range(len(train_ds)))\nvalid_idx = list(range(len(train_ds), len(train_ds)+len(valid_ds)))",
"_____no_output_____"
],
[
"# use number of chars as proxy to number of tokens for simplicity\nlens = ds.map(lambda s: {'len': len(s['premise'])+len(s['hypothesis'])}, remove_columns=ds.column_names, num_proc=4)",
"Loading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-5ca5f39f0f347987.arrow\nLoading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-b303872196dbefa3.arrow\nLoading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-22c40595756fbadc.arrow\nLoading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-f8c72ce8c6f484ef.arrow\n"
],
[
"train_lens = lens.select(train_idx)['len']\nvalid_lens = lens.select(valid_idx)['len']",
"_____no_output_____"
],
[
"blocks = (HF_TextBlock(hf_arch, hf_config, hf_tokenizer, hf_model),\n CategoryBlock(vocab={0:'entailment', 1:'neutral', 2:'contradiction'}))\ndblock = DataBlock(blocks=blocks,\n get_x = get_x,\n get_y=ItemGetter('label'),\n splitter=IndexSplitter(list(range(len(train_ds), len(train_ds)+len(valid_ds)))))\n# dblock.summary(train_ds)",
"_____no_output_____"
],
[
"%%time\ndls = dblock.dataloaders(ds, bs=bs, val_bs=val_bs, dl_kwargs=[{'res':train_lens}, {'val_res':valid_lens}], num_workers=4)",
"CPU times: user 1min 12s, sys: 1.68 s, total: 1min 14s\nWall time: 1min 14s\n"
],
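[
"# optional sanity check (a sketch): number of train/validation batches produced\n# with the batch sizes chosen above\nlen(dls.train), len(dls.valid)",
"_____no_output_____"
],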
[
"# b = dls.one_batch()",
"_____no_output_____"
],
[
"model = HF_BaseModelWrapper(hf_model)\nlearn = Learner(dls,\n model,\n opt_func=RAdam,\n metrics=[accuracy],\n cbs=[HF_BaseModelCallback],\n splitter=hf_splitter).to_fp16()\n\n# learn.blurr_summary()",
"_____no_output_____"
]
],
[
[
"### ALUM finetuning",
"_____no_output_____"
]
],
[
[
"# !pip install git+git://github.com/aikindergarten/vat.git --no-deps -q",
"_____no_output_____"
],
[
"from vat.core import ALUMCallback",
"_____no_output_____"
],
[
"learn.add_cb(ALUMCallback(learn.model.hf_model.base_model.embeddings, start_epoch=2, alpha=0.5));",
"_____no_output_____"
],
[
"learn.fit_one_cycle(5, lr, cbs=WandbCallback(log_preds=False, log_model=False))",
"Could not gather input dimensions\n"
],
[
"learn.validate()",
"_____no_output_____"
],
[
"test_ds = load_dataset('snli', split='test')\ntest_ds[0]",
"Reusing dataset snli (/home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c)\n"
],
[
"test_ds = test_ds.filter(lambda s: s['label'] in [0,1,2])\ntest_dl = dls.test_dl(test_ds, with_labels=True)\nlearn.validate(dl=test_dl)",
"Loading cached processed dataset at /home/.cache/huggingface/datasets/snli/plain_text/1.0.0/bb1102591c6230bd78813e229d5dd4c7fbf4fc478cec28f298761eb69e5b537c/cache-ea8100bd89f36a77.arrow\n"
],
[
"wandb.finish()",
"_____no_output_____"
]
],
[
[
"## Validation on adversarial data",
"_____no_output_____"
]
],
[
[
"adv_ds = load_dataset('anli', split='test_r1')\nadv_ds[0]",
"Reusing dataset anli (/home/.cache/huggingface/datasets/anli/plain_text/0.1.0/43fa2c99c10bf8478f1fa0860f7b122c6b277c4c41306255b7641257cf4e3299)\n"
],
[
"test_dl = dls.test_dl(adv_ds, with_labels=True)",
"_____no_output_____"
],
[
"learn.validate(dl=test_dl)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a82d0f8b94f73117bb890909628b3f53870d45f
| 273,580 |
ipynb
|
Jupyter Notebook
|
dhalsim/demand_patterns_handler.ipynb
|
Daveonwave/msc_thesis
|
e06e4d08b7de881350bda18be0c35d6eacebcd98
|
[
"MIT"
] | 2 |
2021-04-28T05:33:14.000Z
|
2021-05-13T09:14:17.000Z
|
dhalsim/demand_patterns_handler.ipynb
|
Daveonwave/msc_thesis
|
e06e4d08b7de881350bda18be0c35d6eacebcd98
|
[
"MIT"
] | 1 |
2021-05-13T10:20:01.000Z
|
2021-05-13T10:20:01.000Z
|
dhalsim/demand_patterns_handler.ipynb
|
Daveonwave/msc_thesis
|
e06e4d08b7de881350bda18be0c35d6eacebcd98
|
[
"MIT"
] | 2 |
2021-05-12T11:01:37.000Z
|
2021-06-25T04:10:11.000Z
| 84.673476 | 159,704 | 0.714127 |
[
[
[
"import pandas as pd\nimport numpy as np\nfrom pathlib import Path",
"_____no_output_____"
],
[
"dir_path = Path().resolve().parent / 'demand_patterns'\n\nlow_patterns = \"demand_patterns_train_low.csv\"\nfullrange_patterns = \"demand_patterns_train_full_range.csv\"",
"_____no_output_____"
],
[
"combined_pattern = 'demand_patterns_train_combined.csv'\ncomb = pd.read_csv(dir_path / combined_pattern)\ncomb",
"_____no_output_____"
],
[
"# FARE PER CREARE MIXED DEMAND_PATTERNS PER TRAINING\nlow_demand = pd.read_csv(dir_path / low_patterns)\nfullrange_demand = pd.read_csv(dir_path / fullrange_patterns)\n\nnew = pd.concat([low_demand, fullrange_demand], axis=1, ignore_index=True)\nnew",
"_____no_output_____"
],
[
"output_file = dir_path / 'demand_patterns_train_combined.csv'\n\nnew.to_csv(output_file, index=False)",
"_____no_output_____"
],
[
"new = pd.read_csv(output_file)\nnew",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nfrom pathlib import Path",
"_____no_output_____"
],
[
"dir_path = Path().resolve().parent / 'demand_patterns'",
"_____no_output_____"
],
[
"test_low_patterns = 'demand_patterns_test_low.csv'\ntest_full_range_patterns = 'demand_patterns_test_full_range.csv'\ntest_high_patterns = \"demand_patterns_test.csv\"\ntest_middle_patterns = \"demand_patterns_test_middle.csv\"\n\ndf_low = pd.read_csv(dir_path / test_low_patterns)\ndf_low",
"_____no_output_____"
],
[
"sum_of_columns = [df_low.loc[:, index].sum() for index in df_low.columns.values]\nmax_column = np.argmax(sum_of_columns)\nmin_column = np.argmin(sum_of_columns)",
"_____no_output_____"
],
[
"print(\"min: \" + str(min_column) + ' --> ' + str(df_low[str(min_column)].sum()))\nprint(\"max: \" + str(max_column) + ' --> ' + str(df_low[str(max_column)].sum()))",
"min: 99 --> 9.934674029334456\nmax: 6 --> 9.798683828387464\n"
],
[
"df_low['4'].sum()",
"_____no_output_____"
],
[
"df_full_range = pd.read_csv(dir_path / test_full_range_patterns)\ndf_full_range",
"_____no_output_____"
],
[
"sum_of_columns = [df_full_range.loc[:, index].sum() for index in df_full_range.columns.values]\nmax_column = np.argmax(sum_of_columns)\nmin_column = np.argmin(sum_of_columns)",
"_____no_output_____"
],
[
"print(\"min: \" + str(min_column) + ' --> ' + str(df_full_range[str(min_column)].sum()))\nprint(\"max: \" + str(max_column) + ' --> ' + str(df_full_range[str(max_column)].sum()))",
"min: 132 --> 53.12540628944592\nmax: 131 --> 57.12255551930929\n"
],
[
"df_high = pd.read_csv(dir_path / test_high_patterns)\ndf_high",
"_____no_output_____"
],
[
"sum_of_columns = [df_high.loc[:, index].sum() for index in df_high.columns.values]\nmax_column = np.argmax(sum_of_columns)\nmin_column = np.argmin(sum_of_columns)",
"_____no_output_____"
],
[
"print(\"min: \" + str(min_column) + ' --> ' + str(df_high[str(min_column)].sum()))\nprint(\"max: \" + str(max_column) + ' --> ' + str(df_high[str(max_column)].sum()))",
"min: 99 --> 61.30124968764071\nmax: 6 --> 66.41244618255905\n"
],
[
"df_middle = pd.read_csv(dir_path / test_middle_patterns)\ndf_middle",
"_____no_output_____"
],
[
"sum_of_columns = [df_middle.loc[:, index].sum() for index in df_middle.columns.values]\nmax_column = np.argmax(sum_of_columns)\nmin_column = np.argmin(sum_of_columns)",
"_____no_output_____"
],
[
"print(\"min: \" + str(min_column) + ' --> ' + str(df_middle[str(min_column)].sum()))\nprint(\"max: \" + str(max_column) + ' --> ' + str(df_middle[str(max_column)].sum()))",
"min: 109 --> 28.70954053066468\nmax: 54 --> 29.953812441093937\n"
],
[
"# Creation of appropriate test dataframe (we take the lower demand patten, a central one and the higher one)\ndf_new = pd.DataFrame(df_low['4'].values)\ndf_new.insert(1, '1', df_middle['54'])\ndf_new.insert(2, '2', df_full_range['132'])\ndf_new.insert(3, '3', df_high['6'])\n\ndf_new",
"_____no_output_____"
],
[
"output_file = dir_path / 'demand_patterns_test_mixed.csv'\n\ndf_new.to_csv(output_file, index=False)",
"_____no_output_____"
],
[
"df = pd.read_csv(output_file)\ndf",
"_____no_output_____"
],
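[
"# quick check (a sketch): total demand of each selected test pattern,\n# mirroring the sum-based comparison used above\ndf.sum()",
"_____no_output_____"
],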
[
"import matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(figsize=(15,7))\n\nax.plot(df['0'].values)\nax.plot(df['1'].values)\nax.plot(df['2'].values)\nax.plot(df['3'].values)\n\nax.set_title(\"Test demand patterns trend\")\nax.legend(('Low', 'Midium', 'Semi-high', 'High' ))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a82d3753b1183389a036637555bfed659338c60
| 3,422 |
ipynb
|
Jupyter Notebook
|
julia/armstrong-numbers/armstrong-numbers.ipynb
|
Rigante-pl/Exercism-Julia
|
002bdb61b8aaeea5d70579a3882870d0175cd36d
|
[
"MIT"
] | null | null | null |
julia/armstrong-numbers/armstrong-numbers.ipynb
|
Rigante-pl/Exercism-Julia
|
002bdb61b8aaeea5d70579a3882870d0175cd36d
|
[
"MIT"
] | null | null | null |
julia/armstrong-numbers/armstrong-numbers.ipynb
|
Rigante-pl/Exercism-Julia
|
002bdb61b8aaeea5d70579a3882870d0175cd36d
|
[
"MIT"
] | null | null | null | 49.594203 | 1,040 | 0.505845 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a82ed72617cd5917bc9420fe91f3ca5e66ad9bd
| 285,934 |
ipynb
|
Jupyter Notebook
|
Computational Physics Practicals/Practical 7.ipynb
|
juancastaneda1/Course-Work
|
e79a692d6a2e9d711fd9aff3864c94a079c5bdb2
|
[
"MIT"
] | null | null | null |
Computational Physics Practicals/Practical 7.ipynb
|
juancastaneda1/Course-Work
|
e79a692d6a2e9d711fd9aff3864c94a079c5bdb2
|
[
"MIT"
] | null | null | null |
Computational Physics Practicals/Practical 7.ipynb
|
juancastaneda1/Course-Work
|
e79a692d6a2e9d711fd9aff3864c94a079c5bdb2
|
[
"MIT"
] | null | null | null | 271.027488 | 66,936 | 0.893164 |
[
[
[
"# Practical 7. Assignment 3.\n\nDue date is March 7, before the class. You can work with a partner\n\nPartner: Mohamed Salama, utorid: salamam5",
"_____no_output_____"
],
[
"## Problem 1. Your first MD simulation.\n\nRead through section 6 and example 6.1-6.2 of the lecture. Run 3 simulations of fully extended polyglycine `data/polyGLY.pdb` for 1 nanosecond in vacuum (no water) with $T_1=100 K$, $T_2=300 K$, and $T_3=500 K$ and visually compare how extended the final structure is at each temperature. Write down your observations. ",
"_____no_output_____"
]
],
[
[
"from simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nimport MDAnalysis as md\nimport nglview as ng\nfrom sys import stdout",
"_____no_output_____"
],
[
"\npdb0_file = 'data/polyGLY.pdb'\nfile0 = open(pdb0_file, 'r')\nfor line in file0:\n print(line)",
"REMARK 1 CREATED WITH OPENMM 7.3, 2019-02-21\n\nATOM 1 N GLY A 1 -36.753 -23.815 -2.134 1.00 0.00 N \n\nATOM 2 H GLY A 1 -37.106 -24.760 -2.199 1.00 0.00 H \n\nATOM 3 H2 GLY A 1 -37.083 -23.276 -2.926 1.00 0.00 H \n\nATOM 4 H3 GLY A 1 -37.100 -23.382 -1.289 1.00 0.00 H \n\nATOM 5 CA GLY A 1 -35.304 -23.815 -2.134 1.00 0.00 C \n\nATOM 6 HA2 GLY A 1 -34.929 -24.337 -1.254 1.00 0.00 H \n\nATOM 7 HA3 GLY A 1 -34.936 -24.310 -3.033 1.00 0.00 H \n\nATOM 8 C GLY A 1 -34.781 -22.386 -2.134 1.00 0.00 C \n\nATOM 9 O GLY A 1 -35.271 -21.544 -2.883 1.00 0.00 O \n\nATOM 10 N GLY A 2 -33.783 -22.114 -1.290 1.00 0.00 N \n\nATOM 11 H GLY A 2 -33.353 -22.837 -0.727 1.00 0.00 H \n\nATOM 12 CA GLY A 2 -33.198 -20.792 -1.195 1.00 0.00 C \n\nATOM 13 HA2 GLY A 2 -33.434 -20.193 -2.076 1.00 0.00 H \n\nATOM 14 HA3 GLY A 2 -33.584 -20.276 -0.316 1.00 0.00 H \n\nATOM 15 C GLY A 2 -31.686 -20.897 -1.065 1.00 0.00 C \n\nATOM 16 O GLY A 2 -31.183 -21.711 -0.294 1.00 0.00 O \n\nATOM 17 N GLY A 3 -30.961 -20.069 -1.821 1.00 0.00 N \n\nATOM 18 H GLY A 3 -31.415 -19.390 -2.416 1.00 0.00 H \n\nATOM 19 CA GLY A 3 -29.513 -20.072 -1.788 1.00 0.00 C \n\nATOM 20 HA2 GLY A 3 -29.144 -20.561 -0.885 1.00 0.00 H \n\nATOM 21 HA3 GLY A 3 -29.124 -20.608 -2.654 1.00 0.00 H \n\nATOM 22 C GLY A 3 -28.987 -18.644 -1.826 1.00 0.00 C \n\nATOM 23 O GLY A 3 -29.458 -17.828 -2.616 1.00 0.00 O \n\nATOM 24 N GLY A 4 -28.007 -18.345 -0.970 1.00 0.00 N \n\nATOM 25 H GLY A 4 -27.636 -19.056 -0.355 1.00 0.00 H \n\nATOM 26 CA GLY A 4 -27.422 -17.021 -0.908 1.00 0.00 C \n\nATOM 27 HA2 GLY A 4 -27.831 -16.476 -0.057 1.00 0.00 H \n\nATOM 28 HA3 GLY A 4 -27.640 -16.456 -1.815 1.00 0.00 H \n\nATOM 29 C GLY A 4 -25.913 -17.123 -0.740 1.00 0.00 C \n\nATOM 30 O GLY A 4 -25.430 -17.911 0.071 1.00 0.00 O \n\nATOM 31 N GLY A 5 -25.170 -16.324 -1.508 1.00 0.00 N \n\nATOM 32 H GLY A 5 -25.613 -15.677 -2.144 1.00 0.00 H \n\nATOM 33 CA GLY A 5 -23.722 -16.327 -1.442 1.00 0.00 C \n\nATOM 34 HA2 GLY A 5 -23.316 -16.895 -2.279 1.00 0.00 H \n\nATOM 35 HA3 GLY A 5 -23.375 -16.785 -0.514 1.00 0.00 H \n\nATOM 36 C GLY A 5 -23.192 -14.903 -1.518 1.00 0.00 C \n\nATOM 37 O GLY A 5 -23.643 -14.115 -2.347 1.00 0.00 O \n\nATOM 38 N GLY A 6 -22.232 -14.575 -0.651 1.00 0.00 N \n\nATOM 39 H GLY A 6 -21.877 -15.264 -0.003 1.00 0.00 H \n\nATOM 40 CA GLY A 6 -21.645 -13.250 -0.622 1.00 0.00 C \n\nATOM 41 HA2 GLY A 6 -22.072 -12.675 0.199 1.00 0.00 H \n\nATOM 42 HA3 GLY A 6 -21.840 -12.717 -1.554 1.00 0.00 H \n\nATOM 43 C GLY A 6 -20.140 -13.349 -0.415 1.00 0.00 C \n\nATOM 44 O GLY A 6 -19.678 -14.109 0.433 1.00 0.00 O \n\nATOM 45 N GLY A 7 -19.378 -12.579 -1.195 1.00 0.00 N \n\nATOM 46 H GLY A 7 -19.805 -11.954 -1.864 1.00 0.00 H \n\nATOM 47 CA GLY A 7 -17.932 -12.582 -1.095 1.00 0.00 C \n\nATOM 48 HA2 GLY A 7 -17.508 -13.179 -1.902 1.00 0.00 H \n\nATOM 49 HA3 GLY A 7 -17.608 -13.007 -0.144 1.00 0.00 H \n\nATOM 50 C GLY A 7 -17.397 -11.162 -1.209 1.00 0.00 C \n\nATOM 51 O GLY A 7 -17.827 -10.403 -2.075 1.00 0.00 O \n\nATOM 52 N GLY A 8 -16.456 -10.805 -0.332 1.00 0.00 N \n\nATOM 53 H GLY A 8 -16.118 -11.471 0.348 1.00 0.00 H \n\nATOM 54 CA GLY A 8 -15.867 -9.481 -0.337 1.00 0.00 C \n\nATOM 55 HA2 GLY A 8 -16.311 -8.876 0.453 1.00 0.00 H \n\nATOM 56 HA3 GLY A 8 -16.039 -8.982 -1.291 1.00 0.00 H \n\nATOM 57 C GLY A 8 -14.367 -9.575 -0.092 1.00 0.00 C \n\nATOM 58 O GLY A 8 -13.926 -10.305 0.793 1.00 0.00 O \n\nATOM 59 N GLY A 9 -13.586 -8.834 -0.880 1.00 0.00 N \n\nATOM 60 H GLY A 9 -13.996 -8.232 -1.580 1.00 0.00 H \n\nATOM 61 CA GLY A 9 -12.143 -8.836 -0.747 1.00 0.00 C \n\nATOM 62 
HA2 GLY A 9 -11.702 -9.462 -1.522 1.00 0.00 H \n\nATOM 63 HA3 GLY A 9 -11.842 -9.228 0.226 1.00 0.00 H \n\nATOM 64 C GLY A 9 -11.602 -7.421 -0.899 1.00 0.00 C \n\nATOM 65 O GLY A 9 -12.010 -6.693 -1.801 1.00 0.00 O \n\nATOM 66 N GLY A 10 -10.680 -7.035 -0.014 1.00 0.00 N \n\nATOM 67 H GLY A 10 -10.359 -7.677 0.697 1.00 0.00 H \n\nATOM 68 CA GLY A 10 -10.088 -5.713 -0.052 1.00 0.00 C \n\nATOM 69 HA2 GLY A 10 -10.549 -5.080 0.706 1.00 0.00 H \n\nATOM 70 HA3 GLY A 10 -10.237 -5.247 -1.027 1.00 0.00 H \n\nATOM 71 C GLY A 10 -8.595 -5.801 0.230 1.00 0.00 C \n\nATOM 72 O GLY A 10 -8.176 -6.499 1.150 1.00 0.00 O \n\nATOM 73 N GLY A 11 -7.794 -5.089 -0.565 1.00 0.00 N \n\nATOM 74 H GLY A 11 -8.187 -4.512 -1.295 1.00 0.00 H \n\nATOM 75 CA GLY A 11 -6.354 -5.088 -0.400 1.00 0.00 C \n\nATOM 76 HA2 GLY A 11 -6.075 -5.446 0.592 1.00 0.00 H \n\nATOM 77 HA3 GLY A 11 -5.897 -5.742 -1.143 1.00 0.00 H \n\nATOM 78 C GLY A 11 -5.806 -3.681 -0.589 1.00 0.00 C \n\nATOM 79 O GLY A 11 -6.192 -2.984 -1.525 1.00 0.00 O \n\nATOM 80 N GLY A 12 -4.905 -3.265 0.304 1.00 0.00 N \n\nATOM 81 H GLY A 12 -4.602 -3.882 1.044 1.00 0.00 H \n\nATOM 82 CA GLY A 12 -4.309 -1.946 0.233 1.00 0.00 C \n\nATOM 83 HA2 GLY A 12 -4.785 -1.286 0.958 1.00 0.00 H \n\nATOM 84 HA3 GLY A 12 -4.435 -1.515 -0.761 1.00 0.00 H \n\nATOM 85 C GLY A 12 -2.823 -2.026 0.552 1.00 0.00 C \n\nATOM 86 O GLY A 12 -2.426 -2.693 1.505 1.00 0.00 O \n\nATOM 87 N GLY A 13 -2.002 -1.344 -0.250 1.00 0.00 N \n\nATOM 88 H GLY A 13 -2.377 -0.792 -1.009 1.00 0.00 H \n\nATOM 89 CA GLY A 13 -0.566 -1.339 -0.051 1.00 0.00 C \n\nATOM 90 HA2 GLY A 13 -0.093 -2.019 -0.759 1.00 0.00 H \n\nATOM 91 HA3 GLY A 13 -0.312 -1.662 0.959 1.00 0.00 H \n\nATOM 92 C GLY A 13 -0.011 0.060 -0.277 1.00 0.00 C \n\nATOM 93 O GLY A 13 -0.374 0.724 -1.246 1.00 0.00 O \n\nATOM 94 N GLY A 14 0.871 0.505 0.621 1.00 0.00 N \n\nATOM 95 H GLY A 14 1.156 -0.086 1.389 1.00 0.00 H \n\nATOM 96 CA GLY A 14 1.472 1.820 0.517 1.00 0.00 C \n\nATOM 97 HA2 GLY A 14 1.370 2.216 -0.494 1.00 0.00 H \n\nATOM 98 HA3 GLY A 14 0.981 2.506 1.207 1.00 0.00 H \n\nATOM 99 C GLY A 14 2.950 1.749 0.873 1.00 0.00 C \n\nATOM 100 O GLY A 14 3.323 1.116 1.858 1.00 0.00 O \n\nATOM 101 N GLY A 15 3.790 2.401 0.066 1.00 0.00 N \n\nATOM 102 H GLY A 15 3.434 2.927 -0.720 1.00 0.00 H \n\nATOM 103 CA GLY A 15 5.221 2.410 0.298 1.00 0.00 C \n\nATOM 104 HA2 GLY A 15 5.708 1.704 -0.374 1.00 0.00 H \n\nATOM 105 HA3 GLY A 15 5.451 2.124 1.325 1.00 0.00 H \n\nATOM 106 C GLY A 15 5.784 3.800 0.035 1.00 0.00 C \n\nATOM 107 O GLY A 15 5.445 4.430 -0.964 1.00 0.00 O \n\nATOM 108 N GLY A 16 6.647 4.275 0.937 1.00 0.00 N \n\nATOM 109 H GLY A 16 6.912 3.711 1.732 1.00 0.00 H \n\nATOM 110 CA GLY A 16 7.252 5.584 0.801 1.00 0.00 C \n\nATOM 111 HA2 GLY A 16 6.747 6.294 1.455 1.00 0.00 H \n\nATOM 112 HA3 GLY A 16 7.173 5.945 -0.225 1.00 0.00 H \n\nATOM 113 C GLY A 16 8.722 5.524 1.192 1.00 0.00 C \n\nATOM 114 O GLY A 16 9.071 4.925 2.208 1.00 0.00 O \n\nATOM 115 N GLY A 17 9.582 6.146 0.383 1.00 0.00 N \n\nATOM 116 H GLY A 17 9.245 6.644 -0.429 1.00 0.00 H \n\nATOM 117 CA GLY A 17 11.007 6.161 0.647 1.00 0.00 C \n\nATOM 118 HA2 GLY A 17 11.508 5.431 0.011 1.00 0.00 H \n\nATOM 119 HA3 GLY A 17 11.213 5.909 1.688 1.00 0.00 H \n\nATOM 120 C GLY A 17 11.580 7.540 0.349 1.00 0.00 C \n\nATOM 121 O GLY A 17 11.265 8.135 -0.680 1.00 0.00 O \n\nATOM 122 N GLY A 18 12.422 8.045 1.252 1.00 0.00 N \n\nATOM 123 H GLY A 18 12.668 7.509 2.072 1.00 0.00 H \n\nATOM 124 CA GLY A 18 13.034 9.348 1.084 1.00 0.00 C \n\nATOM 125 HA2 GLY A 18 12.980 9.673 0.044 1.00 0.00 
H \n\nATOM 126 HA3 GLY A 18 12.516 10.082 1.701 1.00 0.00 H \n\nATOM 127 C GLY A 18 14.494 9.299 1.511 1.00 0.00 C \n\nATOM 128 O GLY A 18 14.818 8.736 2.555 1.00 0.00 O \n\nATOM 129 N GLY A 19 15.374 9.891 0.701 1.00 0.00 N \n\nATOM 130 H GLY A 19 15.057 10.361 -0.135 1.00 0.00 H \n\nATOM 131 CA GLY A 19 16.793 9.913 0.996 1.00 0.00 C \n\nATOM 132 HA2 GLY A 19 16.975 9.698 2.050 1.00 0.00 H \n\nATOM 133 HA3 GLY A 19 17.306 9.161 0.397 1.00 0.00 H \n\nATOM 134 C GLY A 19 17.375 11.279 0.663 1.00 0.00 C \n\nATOM 135 O GLY A 19 17.085 11.838 -0.393 1.00 0.00 O \n\nATOM 136 N GLY A 20 18.198 11.815 1.567 1.00 0.00 N \n\nATOM 137 H GLY A 20 18.424 11.308 2.411 1.00 0.00 H \n\nATOM 138 CA GLY A 20 18.817 13.110 1.367 1.00 0.00 C \n\nATOM 139 HA2 GLY A 20 18.286 13.866 1.945 1.00 0.00 H \n\nATOM 140 HA3 GLY A 20 18.788 13.397 0.315 1.00 0.00 H \n\nATOM 141 C GLY A 20 20.266 13.074 1.829 1.00 0.00 C \n\nATOM 142 O GLY A 20 20.565 12.548 2.899 1.00 0.00 O \n\nATOM 143 N GLY A 21 21.166 13.636 1.019 1.00 0.00 N \n\nATOM 144 H GLY A 21 20.870 14.076 0.159 1.00 0.00 H \n\nATOM 145 CA GLY A 21 22.577 13.666 1.346 1.00 0.00 C \n\nATOM 146 HA2 GLY A 21 22.734 13.488 2.411 1.00 0.00 H \n\nATOM 147 HA3 GLY A 21 23.102 12.892 0.786 1.00 0.00 H \n\nATOM 148 C GLY A 21 23.170 15.019 0.978 1.00 0.00 C \n\nATOM 149 O GLY A 21 22.906 15.541 -0.103 1.00 0.00 O \n\nATOM 150 N GLY A 22 23.974 15.585 1.881 1.00 0.00 N \n\nATOM 151 H GLY A 22 24.180 15.107 2.747 1.00 0.00 H \n\nATOM 152 CA GLY A 22 24.600 16.871 1.650 1.00 0.00 C \n\nATOM 153 HA2 GLY A 22 24.058 17.648 2.189 1.00 0.00 H \n\nATOM 154 HA3 GLY A 22 24.596 17.121 0.588 1.00 0.00 H \n\nATOM 155 C GLY A 22 26.039 16.849 2.147 1.00 0.00 C \n\nATOM 156 O GLY A 22 26.312 16.361 3.241 1.00 0.00 O \n\nATOM 157 N GLY A 23 26.958 17.381 1.338 1.00 0.00 N \n\nATOM 158 H GLY A 23 26.682 17.792 0.457 1.00 0.00 H \n\nATOM 159 CA GLY A 23 28.361 17.421 1.695 1.00 0.00 C \n\nATOM 160 HA2 GLY A 23 28.897 16.628 1.175 1.00 0.00 H \n\nATOM 161 HA3 GLY A 23 28.495 17.280 2.769 1.00 0.00 H \n\nATOM 162 C GLY A 23 28.966 18.759 1.294 1.00 0.00 C \n\nATOM 163 O GLY A 23 28.728 19.243 0.190 1.00 0.00 O \n\nATOM 164 N GLY A 24 29.750 19.355 2.195 1.00 0.00 N \n\nATOM 165 H GLY A 24 29.952 18.904 3.076 1.00 0.00 H \n\nATOM 166 CA GLY A 24 30.384 20.631 1.933 1.00 0.00 C \n\nATOM 167 HA2 GLY A 24 30.410 20.843 0.863 1.00 0.00 H \n\nATOM 168 HA3 GLY A 24 29.834 21.430 2.430 1.00 0.00 H \n\nATOM 169 C GLY A 24 31.811 20.625 2.463 1.00 0.00 C \n\nATOM 170 O GLY A 24 32.058 20.174 3.579 1.00 0.00 O \n\nATOM 171 N GLY A 25 32.750 21.126 1.657 1.00 0.00 N \n\nATOM 172 H GLY A 25 32.511 21.575 0.783 1.00 0.00 H \n\nATOM 173 CA GLY A 25 34.145 21.176 2.045 1.00 0.00 C \n\nATOM 174 HA2 GLY A 25 34.267 21.103 3.125 1.00 0.00 H \n\nATOM 175 HA3 GLY A 25 34.703 20.379 1.562 1.00 0.00 H \n\nATOM 176 C GLY A 25 34.761 22.498 1.611 1.00 0.00 C \n\nATOM 177 O GLY A 25 34.508 22.885 0.472 1.00 0.00 O \n\nATOM 178 OXT GLY A 25 35.466 23.081 2.431 1.00 0.00 O \n\nTER 179 GLY A 25\n\nEND\n\n"
],
[
"u = md.Universe(pdb0_file)\nng.show_mdanalysis(u, gui=True)",
"_____no_output_____"
],
[
"def simulate(temp, fname):\n '''run simulation on polyclicine for 1 nanosecond in vacuum (no water) with given temperature \n and save file as fname\n '''\n ### 1.loading initial coordinates\n pdb = PDBFile(pdb0_file) \n\n ### 2.choosing a forcefield parameters\n ff = ForceField('amber10.xml') \n system = ff.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic)\n\n ### 3. Choose parameters of the experiment: temperature, pressure, box size, solvation, boundary conditions, etc\n temperature = temp*kelvin\n frictionCoeff = 1/picosecond\n time_step = 0.002*picoseconds\n total_steps = 1*nanosecond / time_step\n\n ### 4. Choose an algorithm (integrator)\n integrator = LangevinIntegrator(temperature, frictionCoeff, time_step)\n\n ### 5. Run simulation, saving coordinates time to time:\n\n ### 5a. Create a simulation object\n simulation = Simulation(pdb.topology, system, integrator)\n simulation.context.setPositions(pdb.positions)\n\n ### 5b. Minimize energy\n simulation.minimizeEnergy()\n\n ### 5c. Save coordinates to dcd file and energues to standard output console:\n simulation.reporters.append(DCDReporter(fname, 1000))\n simulation.reporters.append(StateDataReporter(stdout, 5000, step=True, potentialEnergy=True,\\\n temperature=True, progress=True, totalSteps = total_steps))\n\n ### 5d. Run!\n simulation.step(total_steps)",
"_____no_output_____"
],
[
"simulate(500, 'data/polyALA_traj_500K.dcd')",
"#\"Progress (%)\",\"Step\",\"Potential Energy (kJ/mole)\",\"Temperature (K)\"\n1.0%,5000,4358.179036461908,465.9763990211766\n2.0%,10000,4223.854060479385,460.21714901283576\n3.0%,15000,3937.024067835952,503.65865659278165\n4.0%,20000,4004.0270952850406,500.8458505699277\n5.0%,25000,3979.447671496886,499.2825611151692\n6.0%,30000,3936.289657817283,478.5124483993892\n7.0%,35000,3832.0169658056275,495.9921348506074\n8.0%,40000,3756.009188375303,540.937934134266\n9.0%,45000,3726.2788741035924,518.4407199855714\n10.0%,50000,3783.21985384484,500.34548598427443\n11.0%,55000,3661.0862028134698,475.3461776897141\n12.0%,60000,3648.568707072175,442.87225568498775\n13.0%,65000,3638.6090519262675,533.3492961206547\n14.0%,70000,3631.8645348743235,493.04391591101626\n15.0%,75000,3587.0681585021143,497.8558203785094\n16.0%,80000,3574.183732052921,522.1573193768791\n17.0%,85000,3540.210540930802,497.4522968908587\n18.0%,90000,3585.9377165117335,536.036697997991\n19.0%,95000,3526.8276593771548,471.99082832899165\n20.0%,100000,3629.1773128742707,544.8616971495575\n21.0%,105000,3747.3601843188426,501.24278587515124\n22.0%,110000,3661.808981646347,486.61532129950575\n23.0%,115000,3594.3035662932325,494.0870463618621\n24.0%,120000,3548.853338144977,526.6506910668269\n25.0%,125000,3584.143603052941,465.4732794278854\n26.0%,130000,3484.8034063792875,522.3555974724776\n27.0%,135000,3441.9711537507515,576.3613890575409\n28.0%,140000,3640.8079342529018,532.1896046534064\n29.0%,145000,3694.9023517117566,491.23569078973196\n30.0%,150000,3412.5266364348445,485.1252848649139\n31.0%,155000,3567.885002672733,483.06923388932586\n32.0%,160000,3670.0980661347953,501.63109112933813\n33.0%,165000,3642.6337796099624,485.47785567398256\n34.0%,170000,3640.901639594358,491.4856311170284\n35.0%,175000,3523.8052697325984,515.49609764763\n36.0%,180000,3535.98633116787,529.4879910717\n37.0%,185000,3547.557184383245,498.94836052316947\n38.0%,190000,3647.3461871220748,491.972119870177\n39.0%,195000,3698.6318845386536,510.0458543308536\n40.0%,200000,3519.8280155872835,481.7867399470974\n41.0%,205000,3542.6214476583395,506.02624146771893\n42.0%,210000,3674.8519416244044,508.6890444446035\n43.0%,215000,3729.237108469134,518.7051993719515\n44.0%,220000,3573.0511317622495,549.1337481909666\n45.0%,225000,3596.2171338693515,497.2744729162917\n46.0%,230000,3424.6941796575124,497.33080306435954\n47.0%,235000,3651.3240916278073,500.73770394326914\n48.0%,240000,3562.7241080360363,522.4395104224227\n49.0%,245000,3559.9720680782016,536.4817418275624\n50.0%,250000,3660.213931095939,495.38371234074526\n51.0%,255000,3648.208499937924,530.320286292092\n52.0%,260000,3611.6073143850235,561.7463824840181\n53.0%,265000,3550.5451760972514,529.9688869366138\n54.0%,270000,3518.460468677895,466.3271465182797\n55.0%,275000,3491.5820430251024,478.3083129062882\n56.0%,280000,3602.014187923496,493.29543476963966\n57.0%,285000,3434.180733218048,517.3624055973884\n58.0%,290000,3549.569355820946,518.9496066403175\n59.0%,295000,3638.1713675085057,489.94611286253956\n60.0%,300000,3595.4644504585076,539.0664420008119\n61.0%,305000,3524.3395840238004,509.42021657854343\n62.0%,310000,3657.393144719058,487.9594177009654\n63.0%,315000,3660.9457849716564,505.66364950042674\n64.0%,320000,3459.251845227378,536.54178872173\n65.0%,325000,3536.5000192250154,469.2844388960339\n66.0%,330000,3464.7143350551746,511.55653982035096\n67.0%,335000,3519.4991555552606,485.07729103775586\n68.0%,340000,3424.6154106243634,460.8629318752808\n69.0%,345000,3553.988871233926,493.01782234041633\n7
0.0%,350000,3623.3865664690693,458.13778454085497\n71.0%,355000,3428.719963140863,438.3129584319194\n72.0%,360000,3485.2156215378786,567.9951287757722\n73.0%,365000,3556.8921806575854,554.1128895817528\n74.0%,370000,3629.2995632157013,450.4301684038171\n75.0%,375000,3635.0578935962685,553.5494769596487\n76.0%,380000,3659.993722951961,418.288795562618\n77.0%,385000,3642.931779311575,492.6627565782978\n78.0%,390000,3548.4501390952796,475.65028887531616\n79.0%,395000,3588.264685074656,517.9912533113127\n80.0%,400000,3588.054310738682,495.05312896047946\n81.0%,405000,3580.4694813827564,519.1152958582952\n82.0%,410000,3609.6835992859824,542.4051621883394\n83.0%,415000,3524.1704485550667,491.9200166617057\n84.0%,420000,3556.917605666533,514.4719618062038\n85.0%,425000,3474.957782874323,501.19475298138093\n86.0%,430000,3663.9623028940755,497.12412342609036\n87.0%,435000,3609.1290308980033,513.8137834553537\n88.0%,440000,3663.5193894811623,560.868645903472\n89.0%,445000,3640.210972444967,586.4040971119788\n90.0%,450000,3628.7796016521615,532.1778628318418\n91.0%,455000,3526.538761892418,493.11943547834693\n92.0%,460000,3505.164648967223,524.1631316030129\n93.0%,465000,3528.9158084921964,503.9610579724784\n94.0%,470000,3541.0735381016284,513.0639638686043\n95.0%,475000,3625.4320679944926,544.2412988823637\n96.0%,480000,3381.606460881969,489.8830189405692\n97.0%,485000,3735.1758794383636,495.79680436445574\n98.0%,490000,3735.9038364833605,522.8011569675323\n99.0%,495000,3599.372307601103,480.2269256210207\n100.0%,500000,3585.861522862256,533.5030282991384\n"
],
[
"### 6. Visualization\nsys = md.Universe(pdb0_file, 'data/polyALA_traj_100K.dcd')\nng.show_mdanalysis(sys, gui=True)",
"_____no_output_____"
],
[
"### 6. Visualization\nsys = md.Universe(pdb0_file, 'data/polyALA_traj_300K.dcd')\nng.show_mdanalysis(sys, gui=True)",
"_____no_output_____"
],
[
"### 6. Visualization\nsys = md.Universe(pdb0_file, 'data/polyALA_traj_500K.dcd')\nng.show_mdanalysis(sys, gui=True)",
"_____no_output_____"
]
],
[
[
"# At the lowest temperature (100 K) the protein was much more extended after 1 ns compared to the other two temperatures (there was very little folding at 100 K). At 300 K and 500 K the protein folded much more, and apeared to be about equally compact after 1 ns for both temperatures",
"_____no_output_____"
],
[
"## Problem 2. MD simulation analysis.\n\nPerform a quantitative analysis of how extended/collapsed the proteins are in the trajectories obtained from Problem 1. Use, for example, end-to-end distance and/or the function `radius_of_gyration()` from the `MDAnalysis` module, which returns the [radius of gyration](https://en.wikipedia.org/wiki/Radius_of_gyration) of the protein. Present your findings and explain your observations from the physical perspective. \n\n**Hint**. Think about the entropical and energetical contributions to the collapse and how temperature plays role in these processes. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\ndef end2end(sys):\n \n ### analysis of end-to-end distance\n\n ## choose terminal atoms \n N_terminus = sys.select_atoms('resid 1 and name N')\n C_terminus = sys.select_atoms('resid 25 and name C')\n\n ## go through the whole trajectory and compute distance between them dor every frame\n dist = []\n for frame in sys.trajectory:\n dist.append(np.linalg.norm(N_terminus.positions - C_terminus.positions))\n\n ## the result is in the dist array \n dist = np.array(dist) \n \n return dist\n",
"_____no_output_____"
]
],
[
[
"## Plotting end to end distance for each temperature",
"_____no_output_____"
]
],
[
[
"sys1 = md.Universe(pdb0_file, 'data/polyALA_traj_100K.dcd')\nsys2 = md.Universe(pdb0_file, 'data/polyALA_traj_300K.dcd')\nsys3 = md.Universe(pdb0_file, 'data/polyALA_traj_500K.dcd')\n\n\nplt.figure(figsize=(15,5))\n\nplt.subplot(121)\nplt.plot( end2end(sys1), '-k' )\nplt.xlabel('timesteps')\nplt.ylabel('end-to-end distance, A')\nplt.title(\"Speed of Folding at 100 K\")\nplt.show()\n\n\nplt.figure(figsize=(15,5))\n\nplt.subplot(121)\nplt.plot( end2end(sys2), '-k' )\nplt.xlabel('timesteps')\nplt.ylabel('end-to-end distance, A')\nplt.title(\"Speed of Folding at 300 K\")\n\nplt.show()\n\n\nplt.figure(figsize=(15,5))\n\nplt.subplot(121)\nplt.plot( end2end(sys3), '-k' )\nplt.xlabel('timesteps')\nplt.ylabel('end-to-end distance, A')\nplt.title(\"Speed of Folding at 500 K\")\n\nplt.show()\n\n\nprint(\"Final end to end distance at 100 K:\")\nprint(end2end(sys1)[-1])\nprint(\"Final end to end distance at 300 K:\")\nprint(end2end(sys2)[-1])\nprint(\"Final end to end distance at 500 K:\")\nprint(end2end(sys3)[-1])",
"_____no_output_____"
],
[
"from MDAnalysis.analysis import hbonds ## module for analysis of hydrogen bonds\n\n## compute information about hbonds and write it in the 'hb.timeseries'\n\n\ndef plot(num): \n ## go through the 'hb.timeseries' file and calculate number of bonds for each time frame (it's the length of array frame)\n hb_number = []\n hb = hbonds.hbond_analysis.HydrogenBondAnalysis(num)\n hb.run()\n\n for frame in hb.timeseries:\n hb_number.append(len(frame))\n\n ## the result is in the number array \n hb_number = np.array(hb_number)\n\n plt.figure(figsize=(15,5))\n\n plt.plot(hb_number, 'g-')\n plt.ylabel('# of hydrogen bonds')\n plt.xlabel('timesteps')\n\n \nplot(sys1)\nplt.title(\"Forming of Hydrogen Bonds at 100 K\")\nplt.show()\nplot(sys2)\nplt.title(\"Forming of Hydrogen Bonds at 300 K\")\nplt.show()\nplot(sys3)\nplt.title(\"Forming of Hydrogen Bonds at 500 K\")\nplt.show()",
"_____no_output_____"
],
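[
"### extra analysis (a sketch): radius of gyration over the whole trajectory,\n### not just the final frame, for the three temperatures\ndef rgyr_series(sys):\n    rg = []\n    for frame in sys.trajectory:\n        rg.append(sys.atoms.radius_of_gyration())\n    return np.array(rg)\n\nplt.figure(figsize=(15,5))\nfor s, label in [(sys1, '100 K'), (sys2, '300 K'), (sys3, '500 K')]:\n    plt.plot(rgyr_series(s), label=label)\nplt.xlabel('timesteps')\nplt.ylabel('radius of gyration, A')\nplt.title('Radius of Gyration over the Trajectory')\nplt.legend()\nplt.show()",
"_____no_output_____"
],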
[
"# Radii of Gyration\nprint(\"Radius of gyration after 1 ns, at 100 K:\")\nprint(sys1.atoms.radius_of_gyration())\nprint(\"\\nRadius of gyration after 1 ns, at 300 K:\")\nprint(sys2.atoms.radius_of_gyration())\nprint(\"\\nRadius of gyration after 1 ns, at 500 K:\")\nprint(sys3.atoms.radius_of_gyration())\n",
"Radius of gyration after 1 ns, at 100 K:\n26.693876665403366\n\nRadius of gyration after 1 ns, at 300 K:\n26.429826670152487\n\nRadius of gyration after 1 ns, at 500 K:\n26.01812597165474\n"
]
],
[
[
"As shown by the first set of plots, the speed of folding (considering end to end distances) increases at higher temperatures, and the final radius of gyration is also inversely proportional to the temperature, suggesting that the protein folds faster, and reaches a more compact state at higher temperatures.\n \nAt 100K there may be too little kinetic energy for the protein to fold. At 300 K, the protein can perhaps move more due to the higher kinetic energy, but at 500 K, the kinetic energy is maybe so high that the hydrogen bonds break more freely (which is why the final number of hydrogen bonds seems to be higher at 300 K than at 500 K).\n\nPerhaps the fewer number of hydrogen bonds at 500 K allows the protein to be more flexible and thus reach its most compact state. (ie. at this temperature, the molecule has the most kinetic energy, so it can move around more and try more configurations until the most energetically favourable configuration is reached - it will not get stuck in any local minima that perhaps the 300 K simulation was stuck at because it has enough energy to break out of those configurations).",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a82f81c7168c3aa847ee1f40631ea60457bef42
| 323,062 |
ipynb
|
Jupyter Notebook
|
Handling Bad, missing and duplicate data.ipynb
|
iambalakrishnan/Pandas
|
4e70f78f450c71e17226bf8658c7dd7b07568a89
|
[
"MIT"
] | null | null | null |
Handling Bad, missing and duplicate data.ipynb
|
iambalakrishnan/Pandas
|
4e70f78f450c71e17226bf8658c7dd7b07568a89
|
[
"MIT"
] | null | null | null |
Handling Bad, missing and duplicate data.ipynb
|
iambalakrishnan/Pandas
|
4e70f78f450c71e17226bf8658c7dd7b07568a89
|
[
"MIT"
] | null | null | null | 41.212144 | 226 | 0.386873 |
[
[
[
"<h3>Cleaning Bad data\n",
"_____no_output_____"
],
[
"-Strip white space \n-Replace bad data \n-Fill missing data \n-Drop bad data \n-Drop duplicate",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"data = pd.read_csv('artwork_data.csv',low_memory=False)",
"_____no_output_____"
],
[
"data.head(2)",
"_____no_output_____"
],
[
"#finding data which has any white space\n\ndata.loc[data['title'].str.contains('\\s$',regex=True )]",
"_____no_output_____"
],
[
"#Str.strip method will remove whitespace at the end of the string\ndata['title'].str.strip()",
"_____no_output_____"
],
[
"#Need to make changes in the dataframe\ndata['title']=data['title'].str.strip()",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"#Now we can run and check filter whether it has whitespace it it\n\ndata.loc[data['title'].str.contains('\\s$', regex=True)]",
"_____no_output_____"
],
[
"#We can also use lstrip and rstrip\ndata['title'].str.rstrip()\ndata['title'].str.lstrip()",
"_____no_output_____"
],
[
"#we can also use transform method instead of string methods\ndata['title'].transform(lambda x: x.strip())",
"_____no_output_____"
]
],
[
[
"<h4> Replace Bad data with NaN",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"data = pd.read_csv('artwork_data.csv', low_memory=False)",
"_____no_output_____"
],
[
"data.head(2)",
"_____no_output_____"
],
[
"pd.isna(data.loc[:, 'dateText'])",
"_____no_output_____"
],
[
"#Without loc method\npd.isna(data['dateText'])",
"_____no_output_____"
],
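[
"# a quick check before replacing (a sketch): how many rows actually contain the placeholder string?\n(data['dateText'] == 'date not known').sum()",
"_____no_output_____"
],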
[
"data.replace({'dateText':{'date not known':np.nan}})",
"_____no_output_____"
],
[
"data.replace({'dateText':{'date not known':np.nan}}, inplace=True)",
"_____no_output_____"
],
[
"\ndata = pd.read_csv('artwork_data.csv', low_memory = False)",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"#Instead of loc we can also use below method in some circumstances\ndata.loc[data['dateText'] == 'date not known',['dateText']] = np.nan",
"_____no_output_____"
],
[
"data.head(3)",
"_____no_output_____"
],
[
"data.loc[data['year'].notnull() & data['year'].astype(str).str.contains('[^0-9]')]",
"_____no_output_____"
],
[
"data.loc[data['year'].notnull() & data['year'].astype(str).str.contains('[^0-9]'),['year']] = np.nan",
"_____no_output_____"
],
[
"data.iloc[67968:67969]",
"_____no_output_____"
]
],
[
[
"<h4>Filling missing data with a value",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('artwork_data.csv', low_memory=False)",
"_____no_output_____"
],
[
"data.head(3)",
"_____no_output_____"
],
[
"data.fillna(0)",
"_____no_output_____"
],
[
"#Fillna method will replace all nan value to 0\n#Lets specify with column and respected value in dictonary format\n\ndata.fillna(value = {'depth' : 0, 'inscription' : 0})",
"_____no_output_____"
],
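[
"# another common option (a sketch): fill a numeric-like column with a statistic such as its mean\n# (to_numeric guards against the column having been read as strings)\ndata['depth'].fillna(pd.to_numeric(data['depth'], errors='coerce').mean())",
"_____no_output_____"
],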
[
"#Lets make changes in original dataframe\n\ndata.fillna(value = {'depth' : 0,}, inplace = True)",
"_____no_output_____"
],
[
"data.head(3)",
"_____no_output_____"
]
],
[
[
"<h4>Dropping data",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('artwork_data.csv', low_memory = False)",
"_____no_output_____"
],
[
"data.head(2)",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"data.dropna()",
"_____no_output_____"
],
[
"data.dropna().shape",
"_____no_output_____"
],
[
"data.dropna(how = 'all')#Drop rows if any rows contains nan\n",
"_____no_output_____"
],
[
"data.dropna(how = 'all')",
"_____no_output_____"
],
[
"data.dropna(how = 'all').shape",
"_____no_output_____"
],
[
"#We can set thresh to drop rowns from dataframe \n#setting up thresh = 15, it will drop rows if any rows had atleast 15 nan \n\ndata.dropna(thresh=15).shape",
"_____no_output_____"
],
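[
"# dropna can also work column-wise (a sketch, not used below): axis='columns' drops\n# the columns that contain missing values instead of the rows\ndata.dropna(axis='columns').shape",
"_____no_output_____"
],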
[
"#we can also set specific columns data to drop\n\ndata.dropna(subset=['year','acquisitionYear']).shape",
"_____no_output_____"
],
[
"# We can also set how = any or all\n\ndata.dropna(subset = ['year','acquisitionYear'], how = 'all').shape\n\n# Will drop rows if both year and acquisitionYear columns has nan value.",
"_____no_output_____"
],
[
"data.dropna(subset = ['year','acquisitionYear'], inplace=True)",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
]
],
[
[
"<h4> Identifying and Dropping Duplicate Data",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('artwork_sample.csv') \ndata.head(3)",
"_____no_output_____"
],
[
"# Foe some machine Learning models it okay to have duplicate but some models doensot need it\ndata.drop_duplicates()",
"_____no_output_____"
],
[
"# drop_duplicated method will drop if every single value has duplicate\n# Instead we can pass subset of columns\n\ndata.drop_duplicates(subset = ['artist'])",
"_____no_output_____"
],
[
"# We can also keep first or last duplicate values by passing keep argument \n#Keep first rows\n\ndata.drop_duplicates(subset = ['artist'], keep = 'first')",
"_____no_output_____"
],
[
"#Keep last rows\n\ndata.drop_duplicates(subset = ['artist'], keep = 'last')",
"_____no_output_____"
],
[
"#If we don't want to keep none of the duplicate row pass keep = False\n\n#Keep first rows\n\ndata.drop_duplicates(subset = ['artist'], keep = False)",
"_____no_output_____"
],
[
"# Drop duplicate doesn't change in original dataframe we can pass inplace = True\n\n#Keep first rows\n\ndata.drop_duplicates(subset = ['artist'], keep = 'first', inplace = True)",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"#lets read large one\n\ndata = pd.read_csv('artwork_data.csv', low_memory = False)",
"_____no_output_____"
],
[
"data.head(2)",
"_____no_output_____"
],
[
"# Lets check which rows are duplicated\n\ndata.duplicated()",
"_____no_output_____"
],
[
"data.loc[data.duplicated()]\n\n#None of the rows are fully duplicated ",
"_____no_output_____"
],
[
"#Lets add some parm, duplicated will take same subset parm\n\ndata.duplicated(subset = ['artist','title'], keep = False)",
"_____no_output_____"
],
[
"data.loc[data.duplicated(subset = ['artist','title'], keep = False)]",
"_____no_output_____"
],
[
"data.loc[data.duplicated(subset = ['artist','title'], keep = False)].shape",
"_____no_output_____"
],
[
"#Lets take some more example\n\ndata.loc[data['title'].str.contains('The Circle of the Lustful')]",
"_____no_output_____"
],
[
"#same title has 2 different year in term of acquisition probably reprint of the album",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a83133b7d0357957f2b3614e125c242a41851c7
| 25,190 |
ipynb
|
Jupyter Notebook
|
notebooks/basic_interfaces.ipynb
|
ccbrain/nipype_tutorial
|
2f7f17bf0a902e273839f0c0dc8eaf61eee13351
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/basic_interfaces.ipynb
|
ccbrain/nipype_tutorial
|
2f7f17bf0a902e273839f0c0dc8eaf61eee13351
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/basic_interfaces.ipynb
|
ccbrain/nipype_tutorial
|
2f7f17bf0a902e273839f0c0dc8eaf61eee13351
|
[
"BSD-3-Clause"
] | null | null | null | 33.903096 | 455 | 0.56975 |
[
[
[
"# Interfaces\n\nIn Nipype, interfaces are python modules that allow you to use various external packages (e.g. FSL, SPM or FreeSurfer), even if they themselves are written in another programming language than python. Such an interface knows what sort of options an external program has and how to execute it.",
"_____no_output_____"
],
[
"## Interfaces vs. Workflows\n\nInterfaces are the building blocks that solve well-defined tasks. We solve more complex tasks by combining interfaces with workflows:\n\n<table style=\"width: 100%; font-size: 14px;\">\n <thead>\n <th style=\"text-align:left\">Interfaces</th>\n <th style=\"text-align:left\">Workflows</th>\n </thead>\n <tbody>\n <tr>\n <td style=\"text-align:left\">Wrap *unitary* tasks</td>\n <td style=\"text-align:left\">Wrap *meta*-tasks\n <li style=\"text-align:left\">implemented with nipype interfaces wrapped inside ``Node`` objects</li>\n <li style=\"text-align:left\">subworkflows can also be added to a workflow without any wrapping</li>\n </td>\n </tr>\n <tr>\n <td style=\"text-align:left\">Keep track of the inputs and outputs, and check their expected types</td>\n <td style=\"text-align:left\">Do not have inputs/outputs, but expose them from the interfaces wrapped inside</td>\n </tr>\n <tr>\n <td style=\"text-align:left\">Do not cache results (unless you use [interface caching](advanced_interfaces_caching.ipynb))</td>\n <td style=\"text-align:left\">Cache results</td>\n </tr>\n <tr>\n <td style=\"text-align:left\">Run by a nipype plugin</td>\n <td style=\"text-align:left\">Run by a nipype plugin</td>\n </tr>\n </tbody>\n</table>",
"_____no_output_____"
],
[
"To illustrate why interfaces are so useful, let's have a look at the brain extraction algorithm [BET](http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/BET) from FSL. Once in its original framework and once in the Nipype framework.",
"_____no_output_____"
],
[
"## BET in the origional framework\n\nLet's take a look at one of the T1 images we have in our dataset on which we want to run BET.",
"_____no_output_____"
]
],
[
[
"from nilearn.plotting import plot_anat\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplot_anat('/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz', title='original',\n display_mode='ortho', dim=-1, draw_cross=False, annotate=False);",
"_____no_output_____"
]
],
[
[
"In its simplest form, you can run BET by just specifying the input image and tell it what to name the output image:\n\n bet <input> <output>",
"_____no_output_____"
]
],
[
[
"%%bash\n\nFILENAME=/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w\n\nbet ${FILENAME}.nii.gz /output/sub-01_ses-test_T1w_bet.nii.gz",
"_____no_output_____"
]
],
[
[
"Let's take a look at the results:",
"_____no_output_____"
]
],
[
[
"plot_anat('/output/sub-01_ses-test_T1w_bet.nii.gz', title='original',\n display_mode='ortho', dim=-1, draw_cross=False, annotate=False);",
"_____no_output_____"
]
],
[
[
"Perfect! Exactly what we want. Hmm... what else could we want from BET? Well, it's actually a fairly complicated program. As is the case for all FSL binaries, just call it with no arguments to see all its options.",
"_____no_output_____"
]
],
[
[
"%%bash\nbet",
"_____no_output_____"
]
],
[
[
"We see that BET can also return a binary brain mask as a result of the skull-strip, which can be useful for masking our GLM analyses (among other things). Let's run it again including that option and see the result.",
"_____no_output_____"
]
],
[
[
"%%bash\n\nFILENAME=/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w\n\nbet ${FILENAME}.nii.gz /output/sub-01_ses-test_T1w_bet.nii.gz -m",
"_____no_output_____"
],
[
"plot_anat('/output/sub-01_ses-test_T1w_bet_mask.nii.gz', title='original',\n display_mode='ortho', dim=-1, draw_cross=False, annotate=False);",
"_____no_output_____"
]
],
[
[
"Now let's look at the BET interface in Nipype. First, we have to import it.",
"_____no_output_____"
],
[
"## BET in the Nipype framework\n\nSo how can we run BET in the Nipype framework?\n\nFirst things first, we need to import the ``BET`` class from Nipype's ``interfaces`` module:",
"_____no_output_____"
]
],
[
[
"from nipype.interfaces.fsl import BET",
"_____no_output_____"
]
],
[
[
"Now that we have the BET function accessible, we just have to specify the input and output file. And finally we have to run the command. So exactly like in the original framework.",
"_____no_output_____"
]
],
[
[
"skullstrip = BET()\nskullstrip.inputs.in_file = \"/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz\"\nskullstrip.inputs.out_file = \"/output/T1w_nipype_bet.nii.gz\"\nres = skullstrip.run()",
"_____no_output_____"
]
],
[
[
"If we now look at the results from Nipype, we see that it is exactly the same as before.",
"_____no_output_____"
]
],
[
[
"plot_anat('/output/T1w_nipype_bet.nii.gz', title='original',\n display_mode='ortho', dim=-1, draw_cross=False, annotate=False);",
"_____no_output_____"
]
],
[
[
"This is not surprising, because Nipype used exactly the same bash code that we were using in the original framework example above. To verify this, we can call the ``cmdline`` function of the constructed BET instance.",
"_____no_output_____"
]
],
[
[
"print(skullstrip.cmdline)",
"_____no_output_____"
]
],
[
[
"Another way to set the inputs on an interface object is to use them as keyword arguments when you construct the interface instance. Let's write the Nipype code from above in this way, but let's also add the option to create a brain mask.",
"_____no_output_____"
]
],
[
[
"skullstrip = BET(in_file=\"/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz\",\n out_file=\"/output/T1w_nipype_bet.nii.gz\",\n mask=True)\nres = skullstrip.run()",
"_____no_output_____"
]
],
[
[
"Now if we plot this, we see again that this worked exactly as before. No surprise there.",
"_____no_output_____"
]
],
[
[
"plot_anat('/output/T1w_nipype_bet_mask.nii.gz', title='after skullstrip',\n display_mode='ortho', dim=-1, draw_cross=False, annotate=False);",
"_____no_output_____"
]
],
[
[
"## Help Function\n\nBut how did we know what the names of the input parameters are? In the original framework we were able to just run ``BET``, without any additional parameters to get an information page. In the Nipype framework we can achieve the same thing by using the ``help()`` function on an interface class. For the BET example, this is:",
"_____no_output_____"
]
],
[
[
"BET.help()",
"_____no_output_____"
]
],
[
[
"As you can see, we get three different informations. ***First***, a general explanation of the class.\n\n Wraps command **bet**\n\n Use FSL BET command for skull stripping.\n\n For complete details, see the `BET Documentation.\n <http://www.fmrib.ox.ac.uk/fsl/bet2/index.html>`_\n\n Examples\n --------\n >>> from nipype.interfaces import fsl\n >>> from nipype.testing import example_data\n >>> btr = fsl.BET()\n >>> btr.inputs.in_file = example_data('structural.nii')\n >>> btr.inputs.frac = 0.7\n >>> res = btr.run() # doctest: +SKIP\n\n***Second***, a list of all possible input parameters.\n\n Inputs:\n\n [Mandatory]\n in_file: (an existing file name)\n input file to skull strip\n flag: %s, position: 0\n\n [Optional]\n args: (a string)\n Additional parameters to the command\n flag: %s\n center: (a list of at most 3 items which are an integer (int or\n long))\n center of gravity in voxels\n flag: -c %s\n environ: (a dictionary with keys which are a value of type 'str' and\n with values which are a value of type 'str', nipype default value:\n {})\n Environment variables\n frac: (a float)\n fractional intensity threshold\n flag: -f %.2f\n functional: (a boolean)\n apply to 4D fMRI data\n flag: -F\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n ignore_exception: (a boolean, nipype default value: False)\n Print an error message instead of throwing an exception in case the\n interface fails to run\n mask: (a boolean)\n create binary mask image\n flag: -m\n mesh: (a boolean)\n generate a vtk mesh brain surface\n flag: -e\n no_output: (a boolean)\n Don't generate segmented output\n flag: -n\n out_file: (a file name)\n name of output skull stripped image\n flag: %s, position: 1\n outline: (a boolean)\n create surface outline image\n flag: -o\n output_type: ('NIFTI_PAIR' or 'NIFTI_PAIR_GZ' or 'NIFTI_GZ' or\n 'NIFTI')\n FSL output type\n padding: (a boolean)\n improve BET if FOV is very small in Z (by temporarily padding end\n slices)\n flag: -Z\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n radius: (an integer (int or long))\n head radius\n flag: -r %d\n reduce_bias: (a boolean)\n bias field and neck cleanup\n flag: -B\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n remove_eyes: (a boolean)\n eye & optic nerve cleanup (can be useful in SIENA)\n flag: -S\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n robust: (a boolean)\n robust brain centre estimation (iterates BET several times)\n flag: -R\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n skull: (a boolean)\n create skull image\n flag: -s\n surfaces: (a boolean)\n run bet2 and then betsurf to get additional skull and scalp surfaces\n (includes registrations)\n flag: -A\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n t2_guided: (a file name)\n as with creating surfaces, when also feeding in non-brain-extracted\n T2 (includes registrations)\n flag: -A2 %s\n mutually_exclusive: functional, reduce_bias, robust, padding,\n remove_eyes, surfaces, t2_guided\n terminal_output: ('stream' or 'allatonce' or 'file' or 'none')\n Control terminal output: `stream` - displays to terminal immediately\n (default), `allatonce` - waits till command is finished to display\n output, `file` - writes output to file, `none` - output is ignored\n threshold: (a 
boolean)\n apply thresholding to segmented brain image and mask\n flag: -t\n vertical_gradient: (a float)\n vertical gradient in fractional intensity threshold (-1, 1)\n flag: -g %.2f\n\nAnd ***third***, a list of all possible output parameters.\n\n Outputs:\n\n inskull_mask_file: (a file name)\n path/name of inskull mask (if generated)\n inskull_mesh_file: (a file name)\n path/name of inskull mesh outline (if generated)\n mask_file: (a file name)\n path/name of binary brain mask (if generated)\n meshfile: (a file name)\n path/name of vtk mesh file (if generated)\n out_file: (a file name)\n path/name of skullstripped file (if generated)\n outline_file: (a file name)\n path/name of outline file (if generated)\n outskin_mask_file: (a file name)\n path/name of outskin mask (if generated)\n outskin_mesh_file: (a file name)\n path/name of outskin mesh outline (if generated)\n outskull_mask_file: (a file name)\n path/name of outskull mask (if generated)\n outskull_mesh_file: (a file name)\n path/name of outskull mesh outline (if generated)\n skull_mask_file: (a file name)\n path/name of skull mask (if generated)",
"_____no_output_____"
],
[
"So here we see that Nipype also has output parameters. This is very practical. Because instead of typing the full path name to the mask volume, we can also more directly use the ``mask_file`` parameter.",
"_____no_output_____"
]
],
[
[
"print(res.outputs.mask_file)",
"_____no_output_____"
]
],
[
[
"## Interface errors",
"_____no_output_____"
],
[
"To execute any interface class we use the ``run`` method on that object. For FSL, Freesurfer, and other programs, this will just make a system call with the command line we saw above. For MATLAB-based programs like SPM, it will actually generate a ``.m`` file and run a MATLAB process to execute it. All of that is handled in the background.\n\nBut what happens if we didn't specify all necessary inputs? For instance, you need to give BET a file to work on. If you try and run it without setting the input ``in_file``, you'll get a Python exception before anything actually gets executed:",
"_____no_output_____"
]
],
[
[
"skullstrip2 = BET()\ntry:\n skullstrip2.run()\nexcept(ValueError) as err:\n print(\"ValueError:\", err)\nelse:\n raise",
"_____no_output_____"
]
],
[
[
"Nipype also knows some things about what sort of values should get passed to the inputs, and will raise (hopefully) informative exceptions when they are violated -- before anything gets processed. For example, BET just lets you say \"create a mask,\" it doesn't let you name it. You may forget this, and try to give it a name. In this case, Nipype will raise a ``TraitError`` telling you what you did wrong:",
"_____no_output_____"
]
],
[
[
"try:\n skullstrip.inputs.mask = \"mask_file.nii\"\nexcept(Exception) as err:\n if \"TraitError\" in str(err.__class__):\n print(\"TraitError:\", err)\n else:\n raise\nelse:\n raise",
"_____no_output_____"
]
],
[
[
"Additionally, Nipype knows that, for inputs corresponding to files you are going to process, they should exist in your file system. If you pass a string that doesn't correspond to an existing file, it will error and let you know:",
"_____no_output_____"
]
],
[
[
"try:\n skullstrip.inputs.in_file = \"/data/oops_a_typo.nii\"\nexcept(Exception) as err:\n if \"TraitError\" in str(err.__class__):\n print(\"TraitError:\", err)\n else:\n raise\nelse:\n raise",
"_____no_output_____"
]
],
[
[
"It turns out that for default output files, you don't even need to specify a name. Nipype will know what files are going to be created and will generate a name for you:",
"_____no_output_____"
]
],
[
[
"skullstrip = BET(in_file=\"/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz\")\nprint(skullstrip.cmdline)",
"_____no_output_____"
]
],
[
[
"Note that it is going to write the output file to the local directory.\n\nWhat if you just ran this interface and wanted to know what it called the file that was produced? As you might have noticed before, calling the ``run`` method returned an object called ``InterfaceResult`` that we saved under the variable ``res``. Let's inspect that object:",
"_____no_output_____"
]
],
[
[
"res = skullstrip.run()\nprint(res.outputs)",
"_____no_output_____"
]
],
[
[
"We see that four possible files can be generated by BET. Here we ran it in the most simple way possible, so it just generated an ``out_file``, which is the skull-stripped image. Let's see what happens when we generate a mask. By the way, you can also set inputs at runtime by including them as arguments to the ``run`` method:",
"_____no_output_____"
]
],
[
[
"res2 = skullstrip.run(mask=True)\nprint(res2.outputs)",
"_____no_output_____"
]
],
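[
[
"# optional sanity check (not required by the tutorial): the path of the generated\n# mask can be read straight from the outputs object, without having to know how\n# BET names its files\nimport os\nprint(res2.outputs.mask_file)\nprint(os.path.exists(res2.outputs.mask_file))",
"_____no_output_____"
]
],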
[
[
"Nipype knows that if you ask for a mask, BET is going to generate it in a particular way and makes that information available to you.",
"_____no_output_____"
],
[
"## Why this is amazing!\n\n**A major motivating objective for Nipype is to streamline the integration of different analysis packages, so that you can use the algorithms you feel are best suited to your particular problem.**\n\nSay that you want to use BET, as SPM does not offer a way to create an explicit mask from functional data, but that otherwise you want your processing to occur in SPM. Although possible to do this in a MATLAB script, it might not be all that clean, particularly if you want your skullstrip to happen in the middle of your workflow (for instance, after realignment). Nipype provides a unified representation of interfaces across analysis packages.\n\nFor more on this, check out the [Interfaces](basic_interfaces.ipynb) and the [Workflow](basic_workflow.ipynb) tutorial.",
"_____no_output_____"
],
[
"### Exercise 1\nImport `IsotropicSmooth` from `nipype.interfaces.fsl` and find the `FSL` command that is being run. What are the mandatory inputs for this interface?",
"_____no_output_____"
]
],
[
[
"# write your solution here",
"_____no_output_____"
],
[
"from nipype.interfaces.fsl import IsotropicSmooth\n# all this information can be found when we run `help` method. \n# note that you can either provide `in_file` and `fwhm` or `in_file` and `sigma`\nIsotropicSmooth.help()",
"_____no_output_____"
]
],
[
[
"### Exercise 2\nRun the `IsotropicSmooth` for `/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz` file with a smoothing kernel 4mm:",
"_____no_output_____"
]
],
[
[
"# write your solution here",
"_____no_output_____"
],
[
"smoothing = IsotropicSmooth()\nsmoothing.inputs.in_file = \"/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz\"\nsmoothing.inputs.fwhm = 4\nsmoothing.inputs.out_file = \"/output/T1w_nipype_smooth.nii.gz\"\nsmoothing.run()",
"_____no_output_____"
]
],
[
[
"### Exercise 3\nPlot the output of your interface.",
"_____no_output_____"
]
],
[
[
"# write your solution here",
"_____no_output_____"
],
[
"# we will be using plot_anat from nilearn package\nfrom nilearn.plotting import plot_anat\n%matplotlib inline\nplot_anat('/output/T1w_nipype_smooth.nii.gz', title='after smoothing',\n display_mode='ortho', dim=-1, draw_cross=False, annotate=False);",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
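[
"code"
],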
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a83182523b149d172355f5b336cd9d53bb90cf7
| 5,614 |
ipynb
|
Jupyter Notebook
|
weather_csv_to_html.ipynb
|
SOUMYA-MURALI/Web-Design-Challenge
|
176547e93808370a074c971170687c82ffa33a78
|
[
"ADSL"
] | null | null | null |
weather_csv_to_html.ipynb
|
SOUMYA-MURALI/Web-Design-Challenge
|
176547e93808370a074c971170687c82ffa33a78
|
[
"ADSL"
] | null | null | null |
weather_csv_to_html.ipynb
|
SOUMYA-MURALI/Web-Design-Challenge
|
176547e93808370a074c971170687c82ffa33a78
|
[
"ADSL"
] | null | null | null | 26.606635 | 89 | 0.362309 |
[
[
[
"\n# DEPENDENCIES AND SETUP \nimport pandas as pd",
"_____no_output_____"
],
[
"# specifying csv path\ncsv_path = \"./Resources/cities.csv\"",
"_____no_output_____"
],
[
"# read_csv reads the csv file\nread_csv = pd.read_csv(csv_path)",
"_____no_output_____"
],
[
"# creating data frame \ndf = pd.DataFrame(read_csv)\ndf\ndf.head()",
"_____no_output_____"
],
[
"# converting data frame to html file called \"table.html\"\n# set index as false\ndf.to_html('table.html',index=False)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a8320da6ad0045c3e4896ce82f00a43b7c14ec8
| 275,985 |
ipynb
|
Jupyter Notebook
|
Module 2/10- Multiple_linear_regression_training.ipynb
|
anandgokul18/IBM-Deep-Learning-with-Python-and-PyTorch
|
3357bda38bed3e0ff16519181996c61baab0223d
|
[
"MIT"
] | 1 |
2019-02-18T07:16:00.000Z
|
2019-02-18T07:16:00.000Z
|
Module 2/10- Multiple_linear_regression_training.ipynb
|
anandgokul18/IBM-Deep-Learning-with-Python-and-PyTorch
|
3357bda38bed3e0ff16519181996c61baab0223d
|
[
"MIT"
] | null | null | null |
Module 2/10- Multiple_linear_regression_training.ipynb
|
anandgokul18/IBM-Deep-Learning-with-Python-and-PyTorch
|
3357bda38bed3e0ff16519181996c61baab0223d
|
[
"MIT"
] | null | null | null | 408.261834 | 83,588 | 0.939214 |
[
[
[
"<a href=\"http://cocl.us/pytorch_link_top\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png\" width=\"750\" alt=\"IBM Product \" />\n</a> ",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png\" width=\"200\" alt=\"cognitiveclass.ai logo\" />",
"_____no_output_____"
],
[
"<h1>Linear Regression Multiple Outputs</h1> ",
"_____no_output_____"
],
[
"<h2>Table of Contents</h2>\n<p>In this lab, you will create a model the PyTroch way. This will help you more complicated models.</p>\n\n<ul>\n <li><a href=\"#Makeup_Data\">Make Some Data</a></li>\n <li><a href=\"#Model_Cost\">Create the Model and Cost Function the PyTorch way</a></li>\n <li><a href=\"#BGD\">Train the Model: Batch Gradient Descent</a></li>\n</ul>\n<p>Estimated Time Needed: <strong>20 min</strong></p>\n\n<hr>",
"_____no_output_____"
],
[
"<h2>Preparation</h2>",
"_____no_output_____"
],
[
"We'll need the following libraries:",
"_____no_output_____"
]
],
[
[
"# Import the libraries we need for this lab\n\nfrom torch import nn,optim\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom torch.utils.data import Dataset, DataLoader",
"_____no_output_____"
]
],
[
[
"Set the random seed:",
"_____no_output_____"
]
],
[
[
"# Set the random seed.\n\ntorch.manual_seed(1)",
"_____no_output_____"
]
],
[
[
"Use this function for plotting: ",
"_____no_output_____"
]
],
[
[
"# The function for plotting 2D\n\ndef Plot_2D_Plane(model, dataset, n=0):\n w1 = model.state_dict()['linear.weight'].numpy()[0][0]\n w2 = model.state_dict()['linear.weight'].numpy()[0][0]\n b = model.state_dict()['linear.bias'].numpy()\n\n # Data\n x1 = data_set.x[:, 0].view(-1, 1).numpy()\n x2 = data_set.x[:, 1].view(-1, 1).numpy()\n y = data_set.y.numpy()\n\n # Make plane\n X, Y = np.meshgrid(np.arange(x1.min(), x1.max(), 0.05), np.arange(x2.min(), x2.max(), 0.05))\n yhat = w1 * X + w2 * Y + b\n\n # Plotting\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.plot(x1[:, 0], x2[:, 0], y[:, 0],'ro', label='y') # Scatter plot\n \n ax.plot_surface(X, Y, yhat) # Plane plot\n \n ax.set_xlabel('x1 ')\n ax.set_ylabel('x2 ')\n ax.set_zlabel('y')\n plt.title('estimated plane iteration:' + str(n))\n ax.legend()\n\n plt.show()",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\"Makeup_Data\"r>Make Some Data </h2>",
"_____no_output_____"
],
[
"Create a dataset class with two-dimensional features:",
"_____no_output_____"
]
],
[
[
"# Create a 2D dataset\n\nclass Data2D(Dataset):\n \n # Constructor\n def __init__(self):\n self.x = torch.zeros(20, 2)\n self.x[:, 0] = torch.arange(-1, 1, 0.1)\n self.x[:, 1] = torch.arange(-1, 1, 0.1)\n self.w = torch.tensor([[1.0], [1.0]])\n self.b = 1\n self.f = torch.mm(self.x, self.w) + self.b \n self.y = self.f + 0.1 * torch.randn((self.x.shape[0],1))\n self.len = self.x.shape[0]\n\n # Getter\n def __getitem__(self, index): \n return self.x[index], self.y[index]\n \n # Get Length\n def __len__(self):\n return self.len",
"_____no_output_____"
]
],
[
[
"Create a dataset object:",
"_____no_output_____"
]
],
[
[
"# Create the dataset object\n\ndata_set = Data2D()",
"_____no_output_____"
]
],
[
[
"<h2 id=\"Model_Cost\">Create the Model, Optimizer, and Total Loss Function (Cost)</h2>",
"_____no_output_____"
],
[
"Create a customized linear regression module: ",
"_____no_output_____"
]
],
[
[
"# Create a customized linear\n\nclass linear_regression(nn.Module):\n \n # Constructor\n def __init__(self, input_size, output_size):\n super(linear_regression, self).__init__()\n self.linear = nn.Linear(input_size, output_size)\n \n # Prediction\n def forward(self, x):\n yhat = self.linear(x)\n return yhat",
"_____no_output_____"
]
],
[
[
"Create a model. Use two features: make the input size 2 and the output size 1: ",
"_____no_output_____"
]
],
[
[
"# Create the linear regression model and print the parameters\n\nmodel = linear_regression(2,1)\nprint(\"The parameters: \", list(model.parameters()))",
"The parameters: [Parameter containing:\ntensor([[ 0.6209, -0.1178]], requires_grad=True), Parameter containing:\ntensor([0.3026], requires_grad=True)]\n"
]
],
[
[
"Create an optimizer object. Set the learning rate to 0.1. <b>Don't forget to enter the model parameters in the constructor.</b>",
"_____no_output_____"
],
[
"<img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/chapter2/2.6.2paramater_hate.png\" width = \"100\" alt=\"How the optimizer works\" />",
"_____no_output_____"
]
],
[
[
"# Create the optimizer\n\noptimizer = optim.SGD(model.parameters(), lr=0.1)",
"_____no_output_____"
]
],
[
[
"Create the criterion function that calculates the total loss or cost:",
"_____no_output_____"
]
],
[
[
"# Create the cost function\n\ncriterion = nn.MSELoss()",
"_____no_output_____"
]
],
[
[
"Create a data loader object. Set the batch_size equal to 2: ",
"_____no_output_____"
]
],
[
[
"# Create the data loader\n\ntrain_loader = DataLoader(dataset=data_set, batch_size=2)",
"_____no_output_____"
]
],
[
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<h2 id=\"BGD\">Train the Model via Mini-Batch Gradient Descent</h2>",
"_____no_output_____"
],
[
"Run 100 epochs of Mini-Batch Gradient Descent and store the total loss or cost for every iteration. Remember that this is an approximation of the true total loss or cost:",
"_____no_output_____"
]
],
[
[
"# Train the model\n\nLOSS = []\nprint(\"Before Training: \")\nPlot_2D_Plane(model, data_set) \nepochs = 100\n \ndef train_model(epochs): \n for epoch in range(epochs):\n for x,y in train_loader:\n yhat = model(x)\n loss = criterion(yhat, y)\n LOSS.append(loss.item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \ntrain_model(epochs)\nprint(\"After Training: \")\nPlot_2D_Plane(model, data_set, epochs) ",
"Before Training: \n"
],
[
"# Plot out the Loss and iteration diagram\n\nplt.plot(LOSS)\nplt.xlabel(\"Iterations \")\nplt.ylabel(\"Cost/total loss \")",
"_____no_output_____"
]
],
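[
[
"# optional check: the per-batch losses stored in LOSS only approximate the cost\n# over the whole dataset; evaluating the criterion on all samples at once gives\n# the exact total loss for the current parameters\nwith torch.no_grad():\n full_loss = criterion(model(data_set.x), data_set.y)\nprint(\"Total loss over the full dataset: \", full_loss.item())",
"_____no_output_____"
]
],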
[
[
"<h3>Practice</h3>",
"_____no_output_____"
],
[
"Create a new <code>model1</code>. Train the model with a batch size 30 and learning rate 0.1, store the loss or total cost in a list <code>LOSS1</code>, and plot the results.",
"_____no_output_____"
]
],
[
[
"# Practice create model1. Train the model with batch size 30 and learning rate 0.1, store the loss in a list <code>LOSS1</code>. Plot the results.\n\ndata_set = Data2D()\nmodel1=linear_regression(2,1)\ntrainloader=DataLoader(dataset=data_set, batch_size=30)\noptimizer1=optim.SGD(model.parameters(),lr=0.1)\n\n\nLOSS1=[]\n\nfor epoch in range(epochs):\n for x,y in trainloader:\n yhat=model1(x)\n loss=criterion(yhat,y)\n LOSS1.append(loss)\n optimizer1.zero_grad()\n loss.backward()\n optimizer.step()\n \nprint(\"After Training: \")\nPlot_2D_Plane(model, data_set, epochs) ",
"After Training: \n"
]
],
[
[
"Double-click <b>here</b> for the solution.\n\n<!-- Your answer is below:\ntrain_loader = DataLoader(dataset = data_set, batch_size = 30)\nmodel1 = linear_regression(2, 1)\noptimizer = optim.SGD(model1.parameters(), lr = 0.1)\nLOSS1 = []\nepochs = 100\ndef train_model(epochs): \n for epoch in range(epochs):\n for x,y in train_loader:\n yhat = model1(x)\n loss = criterion(yhat,y)\n LOSS1.append(loss.item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \ntrain_model(epochs)\nPlot_2D_Plane(model1 , data_set) \nplt.plot(LOSS1)\nplt.xlabel(\"iterations \")\nplt.ylabel(\"Cost/total loss \")\n-->",
"_____no_output_____"
],
[
"Use the following validation data to calculate the total loss or cost for both models:",
"_____no_output_____"
]
],
[
[
"torch.manual_seed(2)\n\nvalidation_data = Data2D()\nY = validation_data.y\nX = validation_data.x\n\nprint(\"For model:\")\ntotalloss=criterion(model(X),Y)\nprint(totalloss)\nprint(\"For model1:\")\ntotalloss=criterion(model1(X),Y)\nprint(totalloss)",
"For model:\ntensor(0.0081, grad_fn=<MseLossBackward>)\nFor model1:\ntensor(1.2991, grad_fn=<MseLossBackward>)\n"
]
],
[
[
"Double-click <b>here</b> for the solution.\n<!-- Your answer is below:\nprint(\"total loss or cost for model: \",criterion(model(X),Y))\nprint(\"total loss or cost for model: \",criterion(model1(X),Y))\n-->",
"_____no_output_____"
],
[
"<!--Empty Space for separating topics-->",
"_____no_output_____"
],
[
"<a href=\"http://cocl.us/pytorch_link_bottom\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png\" width=\"750\" alt=\"PyTorch Bottom\" />\n</a>",
"_____no_output_____"
],
[
"<h2>About the Authors:</h2> \n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/\">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. ",
"_____no_output_____"
],
[
"Other contributors: <a href=\"https://www.linkedin.com/in/michelleccarey/\">Michelle Carey</a>, <a href=\"www.linkedin.com/in/jiahui-mavis-zhou-a4537814a\">Mavis Zhou</a>",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"Copyright © 2018 <a href=\"cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu\">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href=\"https://bigdatauniversity.com/mit-license/\">MIT License</a>.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
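[
"code"
],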
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a8328e88f17b9f2a1dcd35a6873e74da1b09835
| 46,170 |
ipynb
|
Jupyter Notebook
|
Chapter09/09_Accessing_Data.ipynb
|
xxgw/Learning-Pandas-Second-Edition
|
7ad718333c69ec28911318fcd6b037e9eb80ca3a
|
[
"MIT"
] | 2 |
2020-01-05T06:23:35.000Z
|
2020-04-25T07:00:37.000Z
|
Chapter09/09_Accessing_Data.ipynb
|
79458627/Learning-Pandas-Second-Edition
|
9cba16a46512d613bf7a64bb26f92a0a98d76590
|
[
"MIT"
] | null | null | null |
Chapter09/09_Accessing_Data.ipynb
|
79458627/Learning-Pandas-Second-Edition
|
9cba16a46512d613bf7a64bb26f92a0a98d76590
|
[
"MIT"
] | null | null | null | 26.413043 | 492 | 0.478406 |
[
[
[
"# Configuring pandas",
"_____no_output_____"
]
],
[
[
"# import numpy and pandas\nimport numpy as np\nimport pandas as pd\n\n# used for dates\nimport datetime\nfrom datetime import datetime, date\n\n# Set some pandas options controlling output format\npd.set_option('display.notebook_repr_html', False)\npd.set_option('display.max_columns', 8)\npd.set_option('display.max_rows', 10)\npd.set_option('display.width', 90)\n\n# bring in matplotlib for graphics\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"# view the first five lines of data/msft.csv\n!head -n 5 data/msft.csv # mac or Linux\n# type data/msft.csv # on windows, but shows the entire file",
"Date,Open,High,Low,Close,Volume\r\r\n7/21/2014,83.46,83.53,81.81,81.93,2359300\r\r\n7/18/2014,83.3,83.4,82.52,83.35,4020800\r\r\n7/17/2014,84.35,84.63,83.33,83.63,1974000\r\r\n7/16/2014,83.77,84.91,83.66,84.91,1755600\r\r\n"
]
],
[
[
"# Reading a CSV into a DataFrame",
"_____no_output_____"
]
],
[
[
"# read in msft.csv into a DataFrame\nmsft = pd.read_csv(\"data/msft.csv\")\nmsft[:5]",
"_____no_output_____"
]
],
[
[
"# Specifying the index column when reading a CSV file",
"_____no_output_____"
]
],
[
[
"# use column 0 as the index\nmsft = pd.read_csv(\"data/msft.csv\", index_col=0)\nmsft[:5]",
"_____no_output_____"
]
],
[
[
"# Data type inference and specification",
"_____no_output_____"
]
],
[
[
"# examine the types of the columns in this DataFrame\nmsft.dtypes",
"_____no_output_____"
],
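[
"# the Date column above was inferred as a plain object (string) column;\n# one possible way to have it parsed as datetime64 instead is the parse_dates\n# argument (msft_dates is just an illustrative name)\nmsft_dates = pd.read_csv(\"data/msft.csv\", parse_dates=['Date'])\nmsft_dates.dtypes",
"_____no_output_____"
],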
[
"# specify that the Volume column should be a float64\nmsft = pd.read_csv(\"data/msft.csv\", \n dtype = { 'Volume' : np.float64})\nmsft.dtypes",
"_____no_output_____"
]
],
[
[
"# Specifying column names",
"_____no_output_____"
]
],
[
[
"# specify a new set of names for the columns\n# all lower case, remove space in Adj Close\n# also, header=0 skips the header row\ndf = pd.read_csv(\"data/msft.csv\", \n header=0,\n names=['date', 'open', 'high', 'low', \n 'close', 'volume'])\ndf[:5]",
"_____no_output_____"
]
],
[
[
"# Specifying specific columns to load",
"_____no_output_____"
]
],
[
[
"# read in data only in the Date and Close columns\n# and index by the Date column\ndf2 = pd.read_csv(\"data/msft.csv\", \n usecols=['Date', 'Close'], \n index_col=['Date'])\ndf2[:5]",
"_____no_output_____"
]
],
[
[
"# Saving a DataFrame to a CSV",
"_____no_output_____"
]
],
[
[
"# save df2 to a new csv file\n# also specify naming the index as date\ndf2.to_csv(\"data/msft_modified.csv\", index_label='date')",
"_____no_output_____"
],
[
"# view the start of the file just saved\n!head -n 5 data/msft_modified.csv\n#type data/msft_modified.csv # windows",
"date,Close\r\n7/21/2014,81.93\r\n7/18/2014,83.35\r\n7/17/2014,83.63\r\n7/16/2014,84.91\r\n"
]
],
[
[
"# General field-delimited data",
"_____no_output_____"
]
],
[
[
"# use read_table with sep=',' to read a CSV\ndf = pd.read_table(\"data/msft.csv\", sep=',')\ndf[:5]",
"_____no_output_____"
],
[
"# save as pipe delimited\ndf.to_csv(\"data/msft_piped.txt\", sep='|')\n# check that it worked\n!head -n 5 data/msft_piped.txt # osx or Linux\n# type data/psft_piped.txt # on windows",
"|Date|Open|High|Low|Close|Volume\r\n0|7/21/2014|83.46|83.53|81.81|81.93|2359300\r\n1|7/18/2014|83.3|83.4|82.52|83.35|4020800\r\n2|7/17/2014|84.35|84.63|83.33|83.63|1974000\r\n3|7/16/2014|83.77|84.91|83.66|84.91|1755600\r\n"
]
],
[
[
"# Handling variants of formats in field-delimited data",
"_____no_output_____"
]
],
[
[
"# messy file\n!head -n 6 data/msft2.csv # osx or Linux\n# type data/msft2.csv # windows",
"This is fun because the data does not start on the first line,,,,,\r\r\nDate,Open,High,Low,Close,Volume\r\r\n,,,,,\r\r\nAnd there is space between the header row and data,,,,,\r\r\n7/21/2014,83.46,83.53,81.81,81.93,2359300\r\r\n7/18/2014,83.3,83.4,82.52,83.35,4020800\r\r\n"
],
[
"# read, but skip rows 0, 2 and 3\ndf = pd.read_csv(\"data/msft2.csv\", skiprows=[0, 2, 3])\ndf[:5]",
"_____no_output_____"
],
[
"# another messy file, with the mess at the end\n!cat data/msft_with_footer.csv # osx or Linux\n# type data/msft_with_footer.csv # windows",
"Date,Open,High,Low,Close,Volume\r\r\n7/21/2014,83.46,83.53,81.81,81.93,2359300\r\r\n7/18/2014,83.3,83.4,82.52,83.35,4020800\r\r\n\r\r\nUh oh, there is stuff at the end.\r\r\n"
],
[
"# skip only two lines at the end\ndf = pd.read_csv(\"data/msft_with_footer.csv\", \n skipfooter=2,\n engine = 'python')\ndf",
"_____no_output_____"
],
[
"# only process the first three rows\npd.read_csv(\"data/msft.csv\", nrows=3)",
"_____no_output_____"
],
[
"# skip 100 lines, then only process the next five\npd.read_csv(\"data/msft.csv\", skiprows=100, nrows=5, \n header=0,\n names=['date', 'open', 'high', 'low', \n 'close', 'vol']) ",
"_____no_output_____"
]
],
[
[
"# Reading and writing data in Excel format",
"_____no_output_____"
]
],
[
[
"# read excel file\n# only reads first sheet (msft in this case)\ndf = pd.read_excel(\"data/stocks.xlsx\")\ndf[:5]",
"_____no_output_____"
],
[
"# read from the aapl worksheet\naapl = pd.read_excel(\"data/stocks.xlsx\", sheetname='aapl')\naapl[:5]",
"_____no_output_____"
],
[
"# save to an .XLS file, in worksheet 'Sheet1'\ndf.to_excel(\"data/stocks2.xls\")",
"_____no_output_____"
],
[
"# write making the worksheet name MSFT\ndf.to_excel(\"data/stocks_msft.xls\", sheet_name='MSFT')",
"_____no_output_____"
],
[
"# write multiple sheets\n# requires use of the ExcelWriter class\nfrom pandas import ExcelWriter\nwith ExcelWriter(\"data/all_stocks.xls\") as writer:\n aapl.to_excel(writer, sheet_name='AAPL')\n df.to_excel(writer, sheet_name='MSFT')",
"_____no_output_____"
],
[
"# write to xlsx\ndf.to_excel(\"data/msft2.xlsx\")",
"_____no_output_____"
]
],
[
[
"# Reading and writing JSON files",
"_____no_output_____"
]
],
[
[
"# wirite the excel data to a JSON file\ndf[:5].to_json(\"data/stocks.json\")\n!cat data/stocks.json # osx or Linux\n#type data/stocks.json # windows",
"{\"Date\":{\"0\":1405900800000,\"1\":1405641600000,\"2\":1405555200000,\"3\":1405468800000,\"4\":1405382400000},\"Open\":{\"0\":83.46,\"1\":83.3,\"2\":84.35,\"3\":83.77,\"4\":84.3},\"High\":{\"0\":83.53,\"1\":83.4,\"2\":84.63,\"3\":84.91,\"4\":84.38},\"Low\":{\"0\":81.81,\"1\":82.52,\"2\":83.33,\"3\":83.66,\"4\":83.2},\"Close\":{\"0\":81.93,\"1\":83.35,\"2\":83.63,\"3\":84.91,\"4\":83.58},\"Volume\":{\"0\":2359300,\"1\":4020800,\"2\":1974000,\"3\":1755600,\"4\":1874700}}"
],
[
"# read data in from JSON\ndf_from_json = pd.read_json(\"data/stocks.json\")\ndf_from_json[:5]",
"_____no_output_____"
],
[
"# the URL to read\nurl = \"http://www.fdic.gov/bank/individual/failed/banklist.html\"\n# read it\nbanks = pd.read_html(url)",
"_____no_output_____"
],
[
"# examine a subset of the first table read\nbanks[0][0:5].iloc[:,0:2]",
"_____no_output_____"
],
[
"# read the stock data\ndf = pd.read_excel(\"data/stocks.xlsx\")\n# write the first two rows to HTML\ndf.head(2).to_html(\"data/stocks.html\")\n# check the first 28 lines of the output\n!head -n 10 data/stocks.html # max or Linux\n# type data/stocks.html # window, but prints the entire file",
"<table border=\"1\" class=\"dataframe\">\r\n <thead>\r\n <tr style=\"text-align: right;\">\r\n <th></th>\r\n <th>Date</th>\r\n <th>Open</th>\r\n <th>High</th>\r\n <th>Low</th>\r\n <th>Close</th>\r\n <th>Volume</th>\r\n"
]
],
[
[
"# Reading and writing HDF5 format files",
"_____no_output_____"
]
],
[
[
"# seed for replication\nnp.random.seed(123456)\n# create a DataFrame of dates and random numbers in three columns\ndf = pd.DataFrame(np.random.randn(8, 3), \n index=pd.date_range('1/1/2000', periods=8),\n columns=['A', 'B', 'C'])\n\n# create HDF5 store\nstore = pd.HDFStore('data/store.h5')\nstore['df'] = df # persisting happened here\nstore",
"_____no_output_____"
],
[
"# read in data from HDF5\nstore = pd.HDFStore(\"data/store.h5\")\ndf = store['df']\ndf[:5]",
"_____no_output_____"
],
[
"# this changes the DataFrame, but did not persist\ndf.iloc[0].A = 1 \n# to persist the change, assign the DataFrame to the \n# HDF5 store object\nstore['df'] = df\n# it is now persisted\n# the following loads the store and \n# shows the first two rows, demonstrating\n# the the persisting was done\npd.HDFStore(\"data/store.h5\")['df'][:5] # it's now in there",
"_____no_output_____"
]
],
[
[
"# Accessing data on the web and in the cloud",
"_____no_output_____"
]
],
[
[
"# read csv directly from Yahoo! Finance from a URL\nmsft_hist = pd.read_csv(\n \"http://www.google.com/finance/historical?\" +\n \"q=NASDAQ:MSFT&startdate=Apr+01%2C+2017&\" +\n \"enddate=Apr+30%2C+2017&output=csv\")\nmsft_hist[:5]",
"_____no_output_____"
]
],
[
[
"# Reading and writing from/to SQL databases",
"_____no_output_____"
]
],
[
[
"# reference SQLite\nimport sqlite3\n\n# read in the stock data from CSV\nmsft = pd.read_csv(\"data/msft.csv\")\nmsft[\"Symbol\"]=\"MSFT\"\naapl = pd.read_csv(\"data/aapl.csv\")\naapl[\"Symbol\"]=\"AAPL\"\n\n# create connection\nconnection = sqlite3.connect(\"data/stocks.sqlite\")\n# .to_sql() will create SQL to store the DataFrame\n# in the specified table. if_exists specifies\n# what to do if the table already exists\nmsft.to_sql(\"STOCK_DATA\", connection, if_exists=\"replace\")\naapl.to_sql(\"STOCK_DATA\", connection, if_exists=\"append\")\n\n# commit the SQL and close the connection\nconnection.commit()\nconnection.close()",
"_____no_output_____"
],
[
"# connect to the database file\nconnection = sqlite3.connect(\"data/stocks.sqlite\")\n\n# query all records in STOCK_DATA\n# returns a DataFrame\n# inde_col specifies which column to make the DataFrame index\nstocks = pd.io.sql.read_sql(\"SELECT * FROM STOCK_DATA;\", \n connection, index_col='index')\n\n# close the connection\nconnection.close()\n\n# report the head of the data retrieved\nstocks[:5]",
"_____no_output_____"
],
[
"# open the connection\nconnection = sqlite3.connect(\"data/stocks.sqlite\")\n# construct the query string\nquery = \"SELECT * FROM STOCK_DATA WHERE \" + \\\n \"Volume>29200100 AND Symbol='MSFT';\"\n# execute and close connection\nitems = pd.io.sql.read_sql(query, connection, index_col='index')\nconnection.close()\n# report the query result\nitems",
"_____no_output_____"
]
],
[
[
"# Reading stock data from Google Finance",
"_____no_output_____"
]
],
[
[
"# import data reader package\nimport pandas_datareader as pdr",
"_____no_output_____"
],
[
"# read from google and display the head of the data\nstart = datetime(2017, 4, 1)\nend = datetime(2017, 4, 30)\ngoog = pdr.data.DataReader(\"MSFT\", 'google', start, end)\ngoog[:5]",
"_____no_output_____"
]
],
[
[
"# Retrieving options data from Google Finance",
"_____no_output_____"
]
],
[
[
"# read options for MSFT\noptions = pdr.data.Options('MSFT', 'google')",
"_____no_output_____"
],
[
"options.expiry_dates",
"_____no_output_____"
],
[
"data = options.get_options_data(expiry=options.expiry_dates[0])\ndata.iloc[:5,:3]",
"_____no_output_____"
],
[
"# get all puts at strike price of $30 (first four columns only)\ndata.loc[(30, slice(None), 'put'), :].iloc[0:5, 0:3]",
"_____no_output_____"
],
[
"# put options at strike of $80, between 2017-06-01 and 2017-06-30\ndata.loc[(30, slice('20180119','20180130'), 'put'), :] \\\n .iloc[:, 0:3]",
"_____no_output_____"
]
],
[
[
"# Reading economic data from the Federal Reserve Bank of St. Louis",
"_____no_output_____"
]
],
[
[
"# read GDP data from FRED\ngdp = pdr.data.FredReader(\"GDP\",\n date(2012, 1, 1), \n date(2014, 1, 27))\ngdp.read()[:5]",
"/Users/michaelheydt/anaconda/lib/python3.6/site-packages/ipykernel/__main__.py:5: DeprecationWarning: pandas.core.common.is_list_like is deprecated. import from the public API: pandas.api.types.is_list_like instead\n"
],
[
"# Get Compensation of employees: Wages and salaries\npdr.data.FredReader(\"A576RC1A027NBEA\",\n date(1929, 1, 1),\n date(2013, 1, 1)).read()[:5]",
"/Users/michaelheydt/anaconda/lib/python3.6/site-packages/ipykernel/__main__.py:4: DeprecationWarning: pandas.core.common.is_list_like is deprecated. import from the public API: pandas.api.types.is_list_like instead\n"
]
],
[
[
"# Accessing Kenneth French data",
"_____no_output_____"
]
],
[
[
"# read from Kenneth French fama global factors data set\nfactors = pdr.data.FamaFrenchReader(\"Global_Factors\").read()\nfactors[0][:5]",
"_____no_output_____"
]
],
[
[
"# Reading from the World Bank",
"_____no_output_____"
]
],
[
[
"# get all indicators\nfrom pandas_datareader import wb\nall_indicators = pdr.wb.get_indicators()\nall_indicators.iloc[:5,:2]",
"_____no_output_____"
],
[
"# search of life expectancy indicators\nle_indicators = pdr.wb.search(\"life expectancy\")\n# report first three rows, first two columns\nle_indicators.iloc[:5,:2]",
"_____no_output_____"
],
[
"# get countries and show the 3 digit code and name\ncountries = pdr.wb.get_countries()\n# show a subset of the country data\ncountries.loc[0:5,['name', 'capitalCity', 'iso2c']]",
"_____no_output_____"
],
[
"# get life expectancy at birth for all countries from 1980 to 2014\nle_data_all = pdr.wb.download(indicator=\"SP.DYN.LE00.IN\", \n start='1980', \n end='2014')\nle_data_all",
"_____no_output_____"
],
[
"# only US, CAN, and MEX are returned by default\nle_data_all.index.levels[0]",
"_____no_output_____"
],
[
"# retrieve life expectancy at birth for all countries \n# from 1980 to 2014\nle_data_all = wb.download(indicator=\"SP.DYN.LE00.IN\", \n country = countries['iso2c'],\n start='1980', \n end='2012')\nle_data_all",
"/Users/michaelheydt/anaconda/lib/python3.6/site-packages/pandas_datareader/wb.py:145: UserWarning: Non-standard ISO country codes: 1A, 1W, 4E, 6D, 6F, 6L, 6N, 6X, 7E, 8S, A4, A5, A9, B1, B2, B3, B4, B6, B7, B8, C4, C5, C6, C7, C8, C9, D2, D3, D4, D5, D6, D7, D8, D9, EU, F1, F6, JG, L4, L5, L6, L7, M1, M2, N6, O6, OE, R6, S1, S2, S3, S4, T2, T3, T4, T5, T6, T7, V1, V2, V3, V4, XC, XD, XE, XF, XG, XH, XI, XJ, XK, XL, XM, XN, XO, XP, XQ, XT, XU, XY, Z4, Z7, ZB, ZF, ZG, ZJ, ZQ, ZT\n 'country codes: %s' % tmp, UserWarning)\n"
],
[
"#le_data_all.pivot(index='country', columns='year')\nle_data = le_data_all.reset_index().pivot(index='country', \n columns='year')\n# examine pivoted data\nle_data.iloc[:5,0:3]",
"_____no_output_____"
],
[
"# ask what is the name of country for each year\n# with the least life expectancy\ncountry_with_least_expectancy = le_data.idxmin(axis=0)\ncountry_with_least_expectancy[:5]",
"_____no_output_____"
],
[
"# and what is the minimum life expectancy for each year\nexpectancy_for_least_country = le_data.min(axis=0)\nexpectancy_for_least_country[:5]",
"_____no_output_____"
],
[
"# this merges the two frames together and gives us\n# year, country and expectancy where there minimum exists\nleast = pd.DataFrame(\n data = {'Country': country_with_least_expectancy.values,\n 'Expectancy': expectancy_for_least_country.values},\n index = country_with_least_expectancy.index.levels[1])\nleast[:5]",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a832eb8887fa54d6c6dfcdac315e570f927f9e0
| 150,406 |
ipynb
|
Jupyter Notebook
|
06_Stats/Wind_Stats/Solutions.ipynb
|
coderhh/pandas_exercises
|
b4f10151d30a0d765d130b41a2d8050a09538c76
|
[
"BSD-3-Clause"
] | null | null | null |
06_Stats/Wind_Stats/Solutions.ipynb
|
coderhh/pandas_exercises
|
b4f10151d30a0d765d130b41a2d8050a09538c76
|
[
"BSD-3-Clause"
] | null | null | null |
06_Stats/Wind_Stats/Solutions.ipynb
|
coderhh/pandas_exercises
|
b4f10151d30a0d765d130b41a2d8050a09538c76
|
[
"BSD-3-Clause"
] | null | null | null | 37.733567 | 346 | 0.379466 |
[
[
[
"# Wind Statistics",
"_____no_output_____"
],
[
"### Introduction:\n\nThe data have been modified to contain some missing values, identified by NaN. \nUsing pandas should make this exercise\neasier, in particular for the bonus question.\n\nYou should be able to perform all of these operations without using\na for loop or other looping construct.\n\n\n1. The data in 'wind.data' has the following format:",
"_____no_output_____"
]
],
[
[
"\"\"\"\nYr Mo Dy RPT VAL ROS KIL SHA BIR DUB CLA MUL CLO BEL MAL\n61 1 1 15.04 14.96 13.17 9.29 NaN 9.87 13.67 10.25 10.83 12.58 18.50 15.04\n61 1 2 14.71 NaN 10.83 6.50 12.62 7.67 11.50 10.04 9.79 9.67 17.54 13.83\n61 1 3 18.50 16.88 12.33 10.13 11.17 6.17 11.25 NaN 8.50 7.67 12.75 12.71\n\"\"\"",
"_____no_output_____"
]
],
[
[
" The first three columns are year, month and day. The\n remaining 12 columns are average windspeeds in knots at 12\n locations in Ireland on that day. \n\n More information about the dataset go [here](wind.desc).",
"_____no_output_____"
],
[
"### Step 1. Import the necessary libraries",
"_____no_output_____"
],
[
"### Step 2. Import the dataset from this [address](https://github.com/guipsamora/pandas_exercises/blob/master/06_Stats/Wind_Stats/wind.data)",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called data and replace the first 3 columns by a proper datetime index.",
"_____no_output_____"
],
[
"### Step 4. Year 2061? Do we really have data from this year? Create a function to fix it and apply it.",
"_____no_output_____"
],
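[
"A possible sketch for Steps 2-4 (one approach among several; here `url` stands for the raw wind.data address given in Step 2):\n\n    import pandas as pd\n    data = pd.read_csv(url, delim_whitespace=True, parse_dates=[[0, 1, 2]])\n    # two-digit years are parsed as 20xx, so shift any future date back a century\n    data['Yr_Mo_Dy'] = data['Yr_Mo_Dy'].apply(lambda d: d.replace(year=d.year - 100) if d.year > 1989 else d)",
"_____no_output_____"
],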
[
"### Step 5. Set the right dates as the index. Pay attention at the data type, it should be datetime64[ns].",
"_____no_output_____"
],
[
"### Step 6. Compute how many values are missing for each location over the entire record. \n#### They should be ignored in all calculations below. ",
"_____no_output_____"
],
[
"### Step 7. Compute how many non-missing values there are in total.",
"_____no_output_____"
],
[
"### Step 8. Calculate the mean windspeeds of the windspeeds over all the locations and all the times.\n#### A single number for the entire dataset.",
"_____no_output_____"
],
[
"### Step 9. Create a DataFrame called loc_stats and calculate the min, max and mean windspeeds and standard deviations of the windspeeds at each location over all the days \n\n#### A different set of numbers for each location.",
"_____no_output_____"
],
[
"### Step 10. Create a DataFrame called day_stats and calculate the min, max and mean windspeed and standard deviations of the windspeeds across all the locations at each day.\n\n#### A different set of numbers for each day.",
"_____no_output_____"
],
[
"### Step 11. Find the average windspeed in January for each location. \n#### Treat January 1961 and January 1962 both as January.",
"_____no_output_____"
],
[
"### Step 12. Downsample the record to a yearly frequency for each location.",
"_____no_output_____"
],
[
"### Step 13. Downsample the record to a monthly frequency for each location.",
"_____no_output_____"
],
[
"### Step 14. Downsample the record to a weekly frequency for each location.",
"_____no_output_____"
],
[
"### Step 15. Calculate the min, max and mean windspeeds and standard deviations of the windspeeds across all locations for each week (assume that the first week starts on January 2 1961) for the first 52 weeks.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a8349a8cc85bacc1bc4692644f0faa9977ace9b
| 7,943 |
ipynb
|
Jupyter Notebook
|
SociologyEmbedding.ipynb
|
TahaVahedi/Sociology_wordEmbedding-persian-lang
|
b82a7638e1d04ee4c668c0566719290f9bff32d9
|
[
"MIT"
] | null | null | null |
SociologyEmbedding.ipynb
|
TahaVahedi/Sociology_wordEmbedding-persian-lang
|
b82a7638e1d04ee4c668c0566719290f9bff32d9
|
[
"MIT"
] | null | null | null |
SociologyEmbedding.ipynb
|
TahaVahedi/Sociology_wordEmbedding-persian-lang
|
b82a7638e1d04ee4c668c0566719290f9bff32d9
|
[
"MIT"
] | null | null | null | 30.906615 | 283 | 0.5017 |
[
[
[
"# install dependencies\n!git clone https://github.com/facebookresearch/fastText.git\n%cd fastText\n!pip install .\n%cd ..\n!pip install numpy",
"Cloning into 'fastText'...\nremote: Enumerating objects: 3854, done.\u001b[K\nremote: Total 3854 (delta 0), reused 0 (delta 0), pack-reused 3854\u001b[K\nReceiving objects: 100% (3854/3854), 8.22 MiB | 21.54 MiB/s, done.\nResolving deltas: 100% (2417/2417), done.\n/content/fastText\nProcessing /content/fastText\n\u001b[33m DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\u001b[0m\nCollecting pybind11>=2.2\n Using cached pybind11-2.8.1-py2.py3-none-any.whl (208 kB)\nRequirement already satisfied: setuptools>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from fasttext==0.9.2) (57.4.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from fasttext==0.9.2) (1.19.5)\nBuilding wheels for collected packages: fasttext\n Building wheel for fasttext (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for fasttext: filename=fasttext-0.9.2-cp37-cp37m-linux_x86_64.whl size=3116502 sha256=f04851944f9ad90730468ff495a1988887a3941e30c63c98d3f864c0a0011f94\n Stored in directory: /tmp/pip-ephem-wheel-cache-nylgklpd/wheels/22/04/6e/b3aba25c1a5845898b5871a0df37c2126cb0cc9326ad0c08e7\nSuccessfully built fasttext\nInstalling collected packages: pybind11, fasttext\nSuccessfully installed fasttext-0.9.2 pybind11-2.8.1\n/content\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (1.19.5)\n"
],
[
"import fasttext\nimport fasttext.util\nimport numpy as np",
"_____no_output_____"
],
[
"# download embedding model\n# fasttext.util.download_model('fa', if_exists='ignore') # its took long time so I decide to download it from my drive\n# https://drive.google.com/file/d/1GrJ_qknqfLQJ436zBt9dQwY-6WZWKpP9/view\n!gdown --id 1GrJ_qknqfLQJ436zBt9dQwY-6WZWKpP9",
"Downloading...\nFrom: https://drive.google.com/uc?id=1GrJ_qknqfLQJ436zBt9dQwY-6WZWKpP9\nTo: /content/cc.fa.300.bin\n100% 7.25G/7.25G [01:47<00:00, 67.5MB/s]\n"
]
],
[
[
"This model trained on persian wikipedia and some common words.",
"_____no_output_____"
]
],
[
[
"# load model\nmodel = fasttext.load_model('cc.fa.300.bin')",
"_____no_output_____"
],
[
"print(model.get_dimension()) # number of dimensions",
"300\n"
],
[
"def singleword2vec(wd):\n wd = wd.strip()\n vec = model.get_word_vector(wd)\n return vec",
"_____no_output_____"
],
[
"def difference(w1, w2):\n vec1 = np.array(singleword2vec(w1))\n vec2 = np.array(singleword2vec(w2))\n r = vec1 - vec2\n return np.linalg.norm(r)",
"_____no_output_____"
],
[
"# racism example\nnum1 = difference(\"سفید\",\"انسان\")\nnum2 = difference(\"سیاه\",\"انسان\")\ndif = np.absolute(num1 - num2)\nprint(\"white and human: \", num1)\nprint(\"black and human: \", num2)\nprint(\"diference(zero mean no racism): \", dif)",
"white and human: 1.2154062\nblack and human: 1.2712024\ndiference(zero mean no racism): 0.055796266\n"
],
[
"# sexism example\nnum1 = difference(\"مرد\",\"سنگ\")\nnum2 = difference(\"زن\",\"سنگ\")\ndif = np.absolute(num1 - num2)\nprint(\"white and human: \", num1)\nprint(\"black and human: \", num2)\nprint(\"diference(zero mean no sexism): \", dif)",
"white and human: 2.0089982\nblack and human: 3.1639984\ndiference(zero mean no sexism): 1.1550002\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a834a7dee094585444a5fb380df68dfe974f715
| 700,718 |
ipynb
|
Jupyter Notebook
|
Data Visualization with Python/final_assessment_data_visual_python.ipynb
|
premalnayee/IBM-Data-science-coursera
|
09d611fa62e2abd55a9d2de40adae9b5024ec383
|
[
"MIT"
] | 2 |
2019-12-17T13:18:22.000Z
|
2020-05-19T14:03:37.000Z
|
Data Visualization with Python/final_assessment_data_visual_python.ipynb
|
premalnayee/IBM-Data-science-coursera
|
09d611fa62e2abd55a9d2de40adae9b5024ec383
|
[
"MIT"
] | null | null | null |
Data Visualization with Python/final_assessment_data_visual_python.ipynb
|
premalnayee/IBM-Data-science-coursera
|
09d611fa62e2abd55a9d2de40adae9b5024ec383
|
[
"MIT"
] | 2 |
2020-03-29T16:51:34.000Z
|
2021-10-04T07:56:07.000Z
| 1,018.485465 | 553,688 | 0.975452 |
[
[
[
"# Final assessment: Data Visualisations with Python",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl",
"_____no_output_____"
],
[
"mpl.style.use(['default']) # optional: for ggplot-like style ",
"_____no_output_____"
],
[
"url= 'https://cocl.us/datascience_survey_data'\ndf_survey = pd.read_csv(url,index_col=0)\ndf_survey",
"_____no_output_____"
],
[
"df_survey.sort_values(by='Very interested',ascending=False,inplace=True)\ndf_survey_tot = df_survey.sum(axis=1)\nprint(df_survey_tot)\n\nfor columns in df_survey.columns:\n df_survey[columns] = df_survey[columns]/22.33 # divide by the total number of people *100\ndf_survey = df_survey.round(decimals=2)\n\ndf_survey",
"Data Analysis / Statistics 2192\nMachine Learning 2180\nData Visualization 2176\nBig Data (Spark / Hadoop) 2188\nDeep Learning 2169\nData Journalism 2120\ndtype: int64\n"
],
[
"ax = df_survey.plot(kind='bar', \n figsize=(20,8),\n fontsize=14,\n color=['#5cb85c','#5bc0de','#d9534f'],\n width=0.8\n )\n\nfor p in ax.patches:\n ax.annotate('{}%'.format(p.get_height()),\n xy = (p.get_x(),p.get_height() + 1),\n fontsize=14)\n\nax.legend(fontsize=14)\nax.set_title('Percentage of Respondants\\' Interest in Data Science Areas',fontsize=16)\nplt.show()\n\n# autolabel(ax)\n#5cb85c for the Very interested bars, color #5bc0de for the Somewhat interested bars, and color #d9534f for the Not interested bars,",
"_____no_output_____"
]
],
[
[
"## Part II: Let's now deal with the next part of the assignment",
"_____no_output_____"
]
],
[
[
"url = 'https://cocl.us/sanfran_crime_dataset'\ndf_crime = pd.read_csv(url,index_col=0)\ndf_crime.head()",
"_____no_output_____"
],
[
"df_crime = pd.DataFrame(df_crime.groupby(['PdDistrict']).size())\ndf_crime.reset_index(inplace=True)\ndf_crime.columns = ['Neighbourhood','Number of crimes']",
"_____no_output_____"
],
[
"df_crime",
"_____no_output_____"
]
],
[
[
"### Cholorpleth map",
"_____no_output_____"
]
],
[
[
"import folium",
"_____no_output_____"
],
[
"san_geo = r'san_francisco_geo.json' # geojson file\n\n# create a plain world map\nsan_map = folium.Map(location=[37.7749, -122.4194], zoom_start=12)",
"_____no_output_____"
],
[
"# generate choropleth map using the total immigration of each country to Canada from 1980 to 2013\nsan_map.choropleth(\n geo_data=san_geo,\n data=df_crime,\n columns=['Neighbourhood', 'Number of crimes'],\n key_on='feature.properties.DISTRICT',\n fill_color='YlOrRd', \n fill_opacity=0.7, \n line_opacity=0.2,\n legend_name='Immigration to Canada',\n)\n\n# display map\nsan_map",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a83514dc7ad93076043e5fa99126e8234582a03
| 3,550 |
ipynb
|
Jupyter Notebook
|
UNetMain_Notebook.ipynb
|
maxgraf96/DLAM_Assignment
|
cbd2ce1fbc39c187ff2b1a4259a36559dd50e772
|
[
"MIT"
] | 2 |
2020-09-19T08:17:46.000Z
|
2021-07-16T08:25:57.000Z
|
UNetMain_Notebook.ipynb
|
maxgraf96/DLAM_Assignment
|
cbd2ce1fbc39c187ff2b1a4259a36559dd50e772
|
[
"MIT"
] | null | null | null |
UNetMain_Notebook.ipynb
|
maxgraf96/DLAM_Assignment
|
cbd2ce1fbc39c187ff2b1a4259a36559dd50e772
|
[
"MIT"
] | null | null | null | 31.415929 | 142 | 0.568732 |
[
[
[
"# Train model / import pretrained model\n%run UNetMain.py",
"_____no_output_____"
],
[
"import librosa\nimport IPython.display as ipd\nimport numpy as np\nfrom UNet import generate_sample\nfrom Util import map_to_range\nfrom DatasetCreator import create_spectrogram\nfrom Hyperparameters import sep, sample_rate, hop_size, n_fft, top_db\n\nfrom Util import map_to_range, plot_mel\n\n# Helper method to put an unknown *.wav file through the model\ndef gen_unknown(path):\n spec = create_spectrogram(path)\n # Expand dimensions by 2\n unet_input = np.expand_dims(spec, axis=0)\n unet_input = np.expand_dims(unet_input, axis=0)\n \n output = generate_sample(model, unet_input).cpu().numpy()[0][0]\n db = map_to_range(output, 0, 1, -top_db, 0)\n print(\"Final output\")\n plot_mel(db)\n \n # convert back to *.wav\n power = librosa.db_to_power(db)\n sig = librosa.feature.inverse.mel_to_audio(power, sample_rate, n_fft, hop_size, n_fft)\n return sig\n \nsig = gen_unknown(\"Clair_de_lune__Claude_Debussy__Suite_bergamasque.wav\")\nipd.Audio(sig, rate=sample_rate)",
"_____no_output_____"
],
[
"# Helper method to put a *.wav file from the training/validation dataset through the model\n# Note: This requires that the autoencoder was trained before \ndef gen(path):\n filename = str(path).split(sep)[-1][:-4]\n if not os.path.exists(\"data\" + sep + \"generated\"):\n print(\"'data/generated' folder does not exist. Train autoencoder first.\")\n return\n ground_truth = np.load(\"data\" + sep + \"generated\" + sep + filename + sep + \"synth\" + sep + filename + \"_0_synth_mel.npy\")\n ae_output = np.load(\"data\" + sep + \"ae_output\" + sep + filename + \"_output.npy\")\n gen = generate(model, ae_output, ground_truth, plot_original=True)\n \n # Map to dB\n gen = map_to_range(gen, 0, 1, -top_db, 0)\n # Convert back to power\n power = librosa.db_to_power(gen)\n sig = librosa.feature.inverse.mel_to_audio(power, sample_rate, n_fft, hop_size, n_fft)\n return librosa.util.normalize(sig)\n \n# Put a file through the U-Net model for testing\ntest = gen(\"data\" + sep + \"synth\" + sep + \"chpn_op7_1.wav\")\nipd.Audio(test, rate=sample_rate)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a8371b8df98a5ffbd0cb12d0528d796c4d98044
| 55,917 |
ipynb
|
Jupyter Notebook
|
Transformer Models/MPNet.ipynb
|
04mayukh/Comparison-of-Neutrosophic-Approach-to-various-Deep-Learning-Models-for-Sentiment-Analysis
|
763641a2987d0b9f81e45b21e46874477fce5ddb
|
[
"MIT"
] | null | null | null |
Transformer Models/MPNet.ipynb
|
04mayukh/Comparison-of-Neutrosophic-Approach-to-various-Deep-Learning-Models-for-Sentiment-Analysis
|
763641a2987d0b9f81e45b21e46874477fce5ddb
|
[
"MIT"
] | null | null | null |
Transformer Models/MPNet.ipynb
|
04mayukh/Comparison-of-Neutrosophic-Approach-to-various-Deep-Learning-Models-for-Sentiment-Analysis
|
763641a2987d0b9f81e45b21e46874477fce5ddb
|
[
"MIT"
] | null | null | null | 29.461012 | 250 | 0.472254 |
[
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
],
[
"pip install keras-self-attention",
"_____no_output_____"
],
[
"!pip install emoji",
"_____no_output_____"
],
[
"!pip install ekphrasis",
"_____no_output_____"
],
[
"!pip install transformers==4.2.1",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport string\nfrom nltk.corpus import stopwords\nimport re\nimport os\nfrom collections import Counter\n\nfrom ekphrasis.classes.preprocessor import TextPreProcessor\nfrom ekphrasis.classes.tokenizer import SocialTokenizer\nfrom ekphrasis.dicts.emoticons import emoticons",
"_____no_output_____"
],
[
"text_processor = TextPreProcessor(\n # terms that will be normalized\n normalize=['url', 'email', 'percent', 'money', 'phone', 'user',\n 'time', 'url', 'date', 'number'],\n # terms that will be annotated\n annotate={\"hashtag\", \"allcaps\", \"elongated\", \"repeated\",\n 'emphasis', 'censored'},\n fix_html=True, # fix HTML tokens\n \n # corpus from which the word statistics are going to be used \n # for word segmentation \n segmenter=\"twitter\", \n \n # corpus from which the word statistics are going to be used \n # for spell correction\n corrector=\"twitter\", \n \n unpack_hashtags=True, # perform word segmentation on hashtags\n unpack_contractions=True, # Unpack contractions (can't -> can not)\n spell_correct_elong=True, # spell correction for elongated words\n \n # select a tokenizer. You can use SocialTokenizer, or pass your own\n # the tokenizer, should take as input a string and return a list of tokens\n tokenizer=SocialTokenizer(lowercase=True).tokenize,\n \n # list of dictionaries, for replacing tokens extracted from the text,\n # with other expressions. You can pass more than one dictionaries.\n dicts=[emoticons]\n)",
"_____no_output_____"
],
[
"def print_text(texts,i,j):\n for u in range(i,j):\n print(texts[u])\n print()",
"_____no_output_____"
],
[
"df_1 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2016train-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_1.head(5)) #last N rows\n# print(len(df_1))\n\ndf_2 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2016test-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_2.head(5)) #last N rows\n# print(len(df_2))\n\ndf_3 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2016devtest-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_3.head(5)) #last N rows\n# print(len(df_3))\n\ndf_4 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2016dev-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_4.head(5)) #last N rows\n# print(len(df_4))\n\ndf_5 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2015train-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_5.head(5)) #last N rows\n# print(len(df_5))\n\ndf_6 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2015test-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_6.head(5)) #last N rows\n# print(len(df_6))\n\ndf_7 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2014test-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_7.head(5)) #last N rows\n# print(len(df_7))\n\ndf_8 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2014sarcasm-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_8.head(5)) #last N rows\n# print(len(df_8))\n\ndf_9 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2013train-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_9.head(5)) #last N rows\n# print(len(df_9))\n\ndf_10 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2013test-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_10.head(5)) #last N rows\n# print(len(df_10))\n\ndf_11 = pd.read_csv('/content/drive/My Drive/Semeval 2017/twitter-2013dev-A.txt', delimiter='\\t', encoding='utf-8', header=None)\n# print(df_11.head(5)) #last N rows\n# print(len(df_11))\n",
"_____no_output_____"
]
],
[
[
"<h2>Balancing the data</h2>",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame()\ndf = df.append(df_1, ignore_index = True)\ndf = df.append(df_2, ignore_index = True)\ndf = df.append(df_3, ignore_index = True)\ndf = df.append(df_4, ignore_index = True)\n\ndf = df.append(df_5, ignore_index = True)\ndf = df.append(df_6, ignore_index = True)\ndf = df.append(df_7, ignore_index = True)\ndf = df.append(df_8, ignore_index = True)\n\ndf = df.append(df_9, ignore_index = True)\ndf = df.append(df_10, ignore_index = True)\ndf = df.append(df_11, ignore_index = True)\n\nprint(df.head(5))\nprint(len(df))",
"_____no_output_____"
],
[
"# Testing for null values\n# lol = np.asarray(df_[1].isnull())\n\n# for i in range(0,len(lol)):\n# if lol[i]:\n# print(i)",
"_____no_output_____"
],
[
"print(len(df))",
"_____no_output_____"
],
[
"text_array = df[2]\nlabels = df[1]\nprint(\"Length of training data: \",len(text_array))\nprint_text(text_array,0,10)",
"_____no_output_____"
],
[
"df_val = pd.read_csv('/content/drive/My Drive/Semeval 2017/Test/SemEval2017-task4-test.subtask-A.english.txt', delimiter='\\n', encoding='utf-8', header=None)\nprint(df_val.tail(5)) #last N rows\nprint(len(df_val))",
"_____no_output_____"
],
[
"lol = []\ntest_set = np.asarray(df_val[0])\nfor i in range(0,len(df_val)):\n temp = np.asarray(test_set[i].split(\"\\t\"))\n temp = temp.reshape((3))\n lol.append(temp)",
"_____no_output_____"
],
[
"df_val = pd.DataFrame(lol)\ndf_val.head(5)",
"_____no_output_____"
],
[
"text_array_val = df_val[2]\nlabels_val = df_val[1]\nprint(\"Length of validation data: \",len(text_array_val))\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"print(Counter(labels))\nprint(Counter(labels_val))",
"_____no_output_____"
],
[
"#removing website names\ndef remove_website(text):\n return \" \".join([word if re.search(\"r'https?://\\S+|www\\.\\S+'|((?i).com$|.co|.net)\",word)==None else \"\" for word in text.split(\" \") ])\n\n# Training set \ntext_array = text_array.apply(lambda text: remove_website(text))\nprint_text(text_array,0,10)\n\nprint(\"**************************************************************************\")\n\n# Validation set \ntext_array_val = text_array_val.apply(lambda text: remove_website(text))\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"# Functions for chat word conversion\nf = open(\"/content/drive/My Drive/Semeval 2017/slang.txt\", \"r\")\nchat_words_str = f.read()\nchat_words_map_dict = {}\nchat_words_list = []\n\nfor line in chat_words_str.split(\"\\n\"):\n if line != \"\":\n cw = line.split(\"=\")[0]\n cw_expanded = line.split(\"=\")[1]\n chat_words_list.append(cw)\n chat_words_map_dict[cw] = cw_expanded\nchat_words_list = set(chat_words_list)\n\ndef chat_words_conversion(text):\n new_text = []\n for w in text.split():\n if w.upper() in chat_words_list:\n new_text.append(chat_words_map_dict[w.upper()])\n else:\n new_text.append(w)\n return \" \".join(new_text)",
"_____no_output_____"
],
[
"# Chat word conversion\n# Training set\ntext_array = text_array.apply(lambda text: chat_words_conversion(text))\nprint_text(text_array,0,10)\n\nprint(\"********************************************************************************\")\n\n# Validation set\ntext_array_val = text_array_val.apply(lambda text: chat_words_conversion(text))\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"os.chdir(\"/content/drive/My Drive/Semeval 2017\")",
"_____no_output_____"
],
[
"#Function for emoticon conversion\nfrom emoticons import EMOTICONS\n\ndef convert_emoticons(text):\n for emot in EMOTICONS:\n text = re.sub(u'('+emot+')', \" \".join(EMOTICONS[emot].replace(\",\",\"\").split()), text)\n return text\n\n\n#testing the emoticon function\ntext = \"Hello :-) :-)\"\ntext = convert_emoticons(text)\nprint(text + \"\\n\")",
"_____no_output_____"
],
[
"# Emoticon conversion\n# Training set\ntext_array = text_array.apply(lambda text: convert_emoticons(text))\nprint_text(text_array,0,10)\n\nprint(\"**********************************************************************************\")\n\n# Validation set\ntext_array_val = text_array_val.apply(lambda text: convert_emoticons(text))\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"os.chdir(\"/content\")",
"_____no_output_____"
],
[
"# FUnction for removal of emoji\nimport emoji\n\ndef convert_emojis(text):\n text = emoji.demojize(text, delimiters=(\" \", \" \"))\n text = re.sub(\"_|-\",\" \",text)\n return text\n\n# Training set\ntext_array = text_array.apply(lambda text: convert_emojis(text))\nprint_text(text_array,0,10)\n\nprint(\"**************************************************************************\")\n\n# Validation set\ntext_array_val = text_array_val.apply(lambda text: convert_emojis(text))\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"# Ekphrasis pipe for text pre-processing\ndef ekphrasis_pipe(sentence):\n cleaned_sentence = \" \".join(text_processor.pre_process_doc(sentence))\n return cleaned_sentence\n\n# Training set\ntext_array = text_array.apply(lambda text: ekphrasis_pipe(text))\nprint(\"Training set completed.......\")\n#Validation set\ntext_array_val = text_array_val.apply(lambda text: ekphrasis_pipe(text))\nprint(\"Test set completed.......\")",
"_____no_output_____"
],
[
"print_text(text_array,0,10)\nprint(\"************************************************************************\")\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"# Removing unnecessary punctuations\nPUNCT_TO_REMOVE = \"\\\"$%&'()+,-./;=[\\]^_`{|}~\"\ndef remove_punctuation(text):\n return text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))\n\n# Training set\ntext_array = text_array.apply(lambda text: remove_punctuation(text))\nprint_text(text_array,0,10)\n\nprint(\"********************************************************************\")\n\n# Validation set\ntext_array_val = text_array_val.apply(lambda text: remove_punctuation(text))\nprint_text(text_array_val,0,10)",
"_____no_output_____"
],
[
"# Finding length of longest array\nmaxLen = len(max(text_array,key = lambda text: len(text.split(\" \"))).split(\" \"))\nprint(maxLen)",
"_____no_output_____"
],
[
"u = lambda text: len(text.split(\" \"))\nsentence_lengths = []\nfor x in text_array:\n sentence_lengths.append(u(x))\nprint(sorted(sentence_lengths)[-800:])\nprint(len(sentence_lengths))",
"_____no_output_____"
],
[
"# Count of each label in dataset\nfrom collections import Counter\n\n# Printing training set counts for analysis\nprint(\"Elements: \",set(labels))\nprint(\"Length: \",len(labels))\nprint(Counter(labels))\n\nprint(\"**************************************************************************\")\n\n# Printing validation set counts for analysis\nprint(\"Elements: \",set(labels_val))\nprint(\"Length: \",len(labels_val))\nprint(Counter(labels_val))",
"_____no_output_____"
],
[
"Y = []\nY_val = []\n\n# Training set \nfor i in range(0,len(labels)):\n if(labels[i] == 'neutral'):\n Y.append(0)\n if(labels[i] == 'positive'):\n Y.append(1)\n if(labels[i] == 'negative'):\n Y.append(2)\n\n\n# Validation set\nfor i in range(0,len(labels_val)):\n if(labels_val[i] == 'neutral'):\n Y_val.append(0)\n if(labels_val[i] == 'positive'):\n Y_val.append(1)\n if(labels_val[i] == 'negative'):\n Y_val.append(2)",
"_____no_output_____"
],
[
"print(len(Y),len(Y_val))",
"_____no_output_____"
],
[
"print(Counter(Y))\nprint(Counter(Y_val))",
"_____no_output_____"
],
[
"# Testing the conversion into integers\nfor i in range(310,320):\n print(text_array_val[i])\n print(labels_val[i],Y_val[i])",
"_____no_output_____"
],
[
"# Verifying train set \nX = np.asarray(list(text_array))\nY = np.asarray(list(Y))\nlabels = np.asarray(list(labels))\nprint(type(X))\nprint(type(Y))\nprint(type(labels))\nprint(np.shape(X),np.shape(Y),np.shape(labels))\n\n# Verifying validation set\nX_val = np.asarray(list(text_array_val))\nY_val = np.asarray(list(Y_val))\nlabels_val = np.asarray(list(labels_val))\nprint(type(X_val))\nprint(type(Y_val))\nprint(type(labels_val))\nprint(np.shape(X_val),np.shape(Y_val),np.shape(labels_val))",
"_____no_output_____"
],
[
"index = 824\nprint(X[index])\nprint(labels[index])\nprint(Y[index])",
"_____no_output_____"
],
[
"print(type(X))\nprint(type(Y))\nprint(np.shape(X),np.shape(Y),np.shape(labels))\nprint(np.shape(X_val),np.shape(Y_val),np.shape(labels_val))",
"_____no_output_____"
],
[
"# Converting to one hot vectors\ndef convert_to_one_hot(Y, C):\n Y = np.eye(C)[Y.reshape(-1)] #u[Y] helps to index each element of Y index at u. U here is a class array\n return Y",
"_____no_output_____"
],
[
"Y_oh_train = convert_to_one_hot(np.array(Y), C = 3)\nY_oh_val = convert_to_one_hot(np.array(Y_val), C = 3)\n\nprint(np.shape(Y_oh_train))\nindex = 310\nprint(labels[index], Y[index], \"is converted into one hot\", Y_oh_train[index])",
"_____no_output_____"
]
],
[
[
"\n\n<h2>Tensorflow Model</h2>",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nimport re\r\nimport os\r\nfrom collections import Counter",
"_____no_output_____"
],
[
"from transformers import RobertaTokenizerFast, TFRobertaModel, TFBertModel, BertTokenizerFast, ElectraTokenizerFast, TFElectraModel, AlbertTokenizerFast, TFAlbertModel, XLNetTokenizerFast, TFXLNetModel, MPNetTokenizerFast, TFMPNetModel\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\n\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import f1_score\r\nfrom keras_self_attention import SeqSelfAttention",
"_____no_output_____"
],
[
"print(tf.__version__)",
"_____no_output_____"
],
[
"\r\nresolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])\r\n\r\ntf.config.experimental_connect_to_cluster(resolver)\r\ntf.tpu.experimental.initialize_tpu_system(resolver)\r\nprint(\"All devices: \", tf.config.list_logical_devices('TPU'))",
"_____no_output_____"
],
[
"tokenizer = MPNetTokenizerFast.from_pretrained(\"microsoft/mpnet-base\")",
"_____no_output_____"
],
[
"X = list(X)\r\nX_val = list(X_val)",
"_____no_output_____"
],
[
"train_encodings = tokenizer(X, max_length=80, truncation=True, padding=\"max_length\", return_tensors='tf')\r\nval_encodings = tokenizer(X_val, max_length=80, truncation=True, padding=\"max_length\", return_tensors='tf')",
"_____no_output_____"
],
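[
"# Illustrative check (not in the original notebook): decode the first encoded\n# tweet back to text to see the special tokens and the padding added by the\n# tokenizer.\nprint(tokenizer.decode(train_encodings[\"input_ids\"][0].numpy().tolist()))",
"_____no_output_____"
],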
[
"print(np.shape(train_encodings[\"input_ids\"]))\r\nprint(np.shape(val_encodings[\"input_ids\"]))",
"_____no_output_____"
],
[
"print(train_encodings[\"input_ids\"][0])\r\nprint(\"***************************************************************************\")\r\nprint(val_encodings[\"input_ids\"][0])",
"_____no_output_____"
],
[
"# This is the best model\ndef Offense_classifier(input_shape):\n \"\"\"\n Function creating the Emojify-v2 model's graph.\n \n Arguments:\n input_shape -- shape of the input, usually (max_len,)\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\n\n Returns:\n model -- a model instance in Keras\n \"\"\"\n\n model = TFMPNetModel.from_pretrained('microsoft/mpnet-base')\n layer = model.layers[0]\n\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\n inputs = keras.Input(shape=input_shape, dtype='int32')\n input_masks = keras.Input(shape=input_shape, dtype='int32')\n \n embeddings = layer([inputs, input_masks])[0][:,0,:]\n \n # embeddings = keras.layers.GaussianNoise(0.2)(embeddings)\n\n # embeddings = keras.layers.Dropout(0.3)(embeddings)\n\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # Be careful, the returned output should be a batch of sequences.\n # lstm_one = keras.layers.Bidirectional(keras.layers.LSTM(150, return_sequences=True, recurrent_dropout=0.25, dropout=0.2)) \n # X = lstm_one(embeddings)\n # X = keras.layers.Dropout(0.2)(X)\n\n # lstm_two = keras.layers.Bidirectional(keras.layers.LSTM(150, return_sequences=True, recurrent_dropout=0.25, dropout=0.2)) \n # X = lstm_two(X)\n # X = keras.layers.Dropout(0.2)(X)\n\n # # *************Attention*******************\n # X = SeqSelfAttention(attention_activation='elu')(X)\n # # ****************Attention*******************\n\n # post_activation_GRU_cell = keras.layers.GRU(64, return_sequences = False, recurrent_dropout=0.25, dropout=0.2)\n # X = post_activation_GRU_cell(X)\n\n X = keras.layers.Dense(32,activation='elu',kernel_regularizer=keras.regularizers.l2(0.0001))(embeddings)\n\n X = keras.layers.BatchNormalization(momentum=0.99, epsilon=0.001, center=True, scale=True)(X)\n\n X = keras.layers.Dense(3,activation='tanh',kernel_regularizer=keras.regularizers.l2(0.0001))(X)\n\n \n # Add a sigmoid activation\n X = keras.layers.Activation('softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = keras.Model(inputs=[inputs,input_masks], outputs=[X])\n \n \n return model",
"_____no_output_____"
],
[
"model = Offense_classifier((80,))\r\nmodel.summary()",
"_____no_output_____"
],
[
"strategy = tf.distribute.TPUStrategy(resolver)",
"_____no_output_____"
],
[
"class EvaluationMetric(keras.callbacks.Callback): \r\n \r\n def __init__(self, trial_encodings, trial_masks, Y_val):\r\n super(EvaluationMetric, self).__init__()\r\n self.trial_encodings = trial_encodings\r\n self.trial_masks = trial_masks\r\n self.Y_val = Y_val\r\n \r\n def on_epoch_begin(self, epoch, logs={}):\r\n print(\"\\nTraining...\")\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n print(\"\\nEvaluating...\")\r\n trial_prediction = self.model.predict([self.trial_encodings,self.trial_masks])\r\n \r\n pred = []\r\n for i in range(0,len(self.Y_val)):\r\n num = np.argmax(trial_prediction[i])\r\n pred.append(num)\r\n \r\n from sklearn.metrics import classification_report\r\n print(classification_report(Y_val, pred, digits=3))\r\n \r\nevaluation_metric = EvaluationMetric(val_encodings[\"input_ids\"], val_encodings[\"attention_mask\"], Y_val)",
"_____no_output_____"
],
[
"with strategy.scope():\r\n model = Offense_classifier((80,))\r\n optimizer = keras.optimizers.Adam(learning_rate=1e-5)\r\n loss_fun = [\r\n tf.keras.losses.CategoricalCrossentropy(from_logits=True)\r\n ]\r\n metric = ['acc']\r\n model.compile(optimizer=optimizer, loss=loss_fun, metrics=metric)",
"_____no_output_____"
],
[
"model.summary()",
"_____no_output_____"
],
[
"checkpoint = ModelCheckpoint(filepath='/content/neutro-mpnet.{epoch:03d}.h5',\n verbose = 0,\n save_weights_only=True,\n epoch=4)",
"_____no_output_____"
],
[
"c = Counter(Y)\nprint(c)\nprint(c.keys())\nneutral = c[0]\npos = c[1]\nneg = c[2]\ntotal = pos+neg+neutral\nprint(neutral,pos,neg,total)",
"_____no_output_____"
],
[
"# Scaling by total/2 helps keep the loss to a similar magnitude.\n# The sum of the weights of all examples stays the same.\nmaxi = max(pos,neg,neutral)\nweight_for_0 = (maxi / (maxi+neutral))\nweight_for_1 = (maxi / (maxi+pos))\nweight_for_2 = (maxi / (maxi+neg))\n\nclass_weight_ = {0: weight_for_0, 1: weight_for_1, 2: weight_for_2}\n\nprint('Weight for class 0: {:.2f}'.format(weight_for_0))\nprint('Weight for class 1: {:.2f}'.format(weight_for_1))\nprint('Weight for class 2: {:.2f}'.format(weight_for_2))",
"_____no_output_____"
],
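[
"# Illustrative alternative (not used for training below): scikit-learn can\n# derive \"balanced\" class weights directly from the label array, a common\n# substitute for the hand-rolled weighting above.\nfrom sklearn.utils.class_weight import compute_class_weight\n\nbalanced = compute_class_weight(class_weight='balanced', classes=np.unique(Y), y=Y)\nprint(dict(zip(np.unique(Y), balanced)))",
"_____no_output_____"
],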
[
"history = model.fit(\r\n x = [train_encodings[\"input_ids\"], train_encodings[\"attention_mask\"]],\r\n y = Y_oh_train,\r\n validation_data = ([val_encodings[\"input_ids\"],val_encodings[\"attention_mask\"]],Y_oh_val),\r\n callbacks = [evaluation_metric, checkpoint],\r\n batch_size = 32,\r\n shuffle=True,\r\n epochs=6,\r\n class_weight = class_weight_\r\n)",
"_____no_output_____"
],
[
"# plot_model(model, to_file=\"model.png\", show_shapes=True, show_layer_names=False)",
"_____no_output_____"
],
[
"model.load_weights(\"/content/drive/MyDrive/semeval 17 transformer weights/neutro-mpnet.004.h5\")\n# model.save_weights(\"/content/drive/MyDrive/semeval 17 transformer weights/neutro-mpnet.004.h5\")",
"_____no_output_____"
],
[
"answer = model.predict([val_encodings[\"input_ids\"],val_encodings[\"attention_mask\"]])",
"_____no_output_____"
],
[
"print(X_val[0])\nprint(Y_oh_val[0])\nprint(labels_val[0])\nprint(\"******************************************\")\nprint(len(answer),len(answer))",
"_____no_output_____"
],
[
"Counter(Y_val)",
"_____no_output_____"
],
[
"# used for querying\ncount_sl = 0\ncount_pos = 0\ncount_not = 0\npred = []\ntext = df_val[2]\n\ntemp = 0\nfor i in range(0,len(X_val)):\n num = np.argmax(answer[i])\n pred.append(num)\n\nprint(temp)",
"0\n"
],
[
"Counter(pred)",
"_____no_output_____"
],
[
"Counter(Y_val)",
"_____no_output_____"
],
[
"con_mat = tf.math.confusion_matrix(labels=Y_val, predictions=pred, dtype=tf.dtypes.int32)\nprint(con_mat)",
"_____no_output_____"
],
[
"import seaborn as sns\nimport matplotlib.pyplot as plt\n\nfigure = plt.figure(figsize=(8, 8))\nsns.heatmap(con_mat, annot=True,cmap=plt.cm.Spectral,fmt='d',xticklabels=[\"Neutral\",\"Positive\",\"Negative\"], yticklabels=[\"Neutral\",\"Positive\",\"Negative\"])\nplt.tight_layout()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.metrics import f1_score\nf1_score(Y_val, pred, average='macro')",
"_____no_output_____"
],
[
"from sklearn.metrics import recall_score\nrecall_score(Y_val, pred, average='macro')",
"_____no_output_____"
],
[
"from sklearn.metrics import classification_report\ntarget_names = ['Neutral', 'Positive', 'Negative']\nprint(classification_report(Y_val, pred, digits=3))",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(Y_val, pred, normalize=True)",
"_____no_output_____"
]
],
[
[
"<h3>Clustering</h3>",
"_____no_output_____"
]
],
[
[
"pip install plotly==4.5.4",
"_____no_output_____"
],
[
"import plotly\nimport plotly.graph_objs as go\nimport plotly.express as px",
"_____no_output_____"
],
[
"flag = []\ncount = 0\n\npositive = []\nnegative = []\nneutral = []\n\n\nfor i in range(0,len(pred)):\n count = count + 1\n neutral.append(answer[i][0])\n positive.append(answer[i][1])\n negative.append(answer[i][2])\n\n\nprint(count)",
"_____no_output_____"
],
[
"pred_colour = []\nfor i in range(0,len(pred)):\n if pred[i] == 0:\n pred_colour.append(\"Neutral\")\n if pred[i] == 1:\n pred_colour.append(\"Positive\")\n if pred[i] == 2:\n pred_colour.append(\"Negative\")\n\ntest_df = pd.DataFrame({'positive':positive, 'negative':negative, 'neutral':neutral, 'Prediction':pred_colour})\n\nfig = px.scatter_3d(test_df, x='positive', y='negative', z='neutral', color='Prediction')\nfig.update_traces(\n marker={\n 'size': 0.7,\n 'opacity': 1,\n 'colorscale' : 'viridis',\n }\n)\nfig.update_layout(legend= {'itemsizing': 'constant'})\nfig.update_layout(width = 700)\nfig.update_layout(margin=dict(l=0, r=0, b=0, t=0))",
"_____no_output_____"
],
[
"from sklearn.preprocessing import normalize\nfrom sklearn.cluster import KMeans",
"_____no_output_____"
],
[
"from sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial.distance import cosine",
"_____no_output_____"
]
],
[
[
"<h5>SVNS</h5>",
"_____no_output_____"
],
[
"<h3>Middle Layer</h3>",
"_____no_output_____"
]
],
[
[
"model.layers[-3]",
"_____no_output_____"
],
[
"with strategy.scope():\n cl_model = keras.Model(model.input, model.layers[-3].output)",
"_____no_output_____"
],
[
"cl_32 = cl_model.predict([val_encodings[\"input_ids\"],val_encodings[\"attention_mask\"]])",
"_____no_output_____"
],
[
"kmeans = KMeans(n_clusters=3, random_state=4).fit(cl_32)\ny_kmeans_batchnorm = kmeans.predict(cl_32)",
"_____no_output_____"
],
[
"for i in range(0,len(y_kmeans_batchnorm)):\n if(y_kmeans_batchnorm[i] == 0):\n y_kmeans_batchnorm[i] = 1\n elif(y_kmeans_batchnorm[i] == 1):\n y_kmeans_batchnorm[i] = 2\n else:\n y_kmeans_batchnorm[i] = 0",
"_____no_output_____"
],
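[
"# Hedged sketch (not part of the original workflow): instead of hard-coding the\n# cluster-to-class re-labelling above, the mapping can be derived by majority\n# vote of the classifier predictions within each raw K-means cluster.\nraw_clusters = kmeans.predict(cl_32)\ncluster_to_class = {}\nfor c in set(raw_clusters):\n    votes = Counter(np.asarray(pred)[raw_clusters == c])\n    cluster_to_class[c] = votes.most_common(1)[0][0]\nprint(cluster_to_class)  # expected to reproduce the manual mapping used above",
"_____no_output_____"
],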
[
"centers_batchnorm = kmeans.cluster_centers_",
"_____no_output_____"
],
[
"con_mat = tf.math.confusion_matrix(labels=Y_val, predictions=y_kmeans_batchnorm)\nprint(con_mat)",
"_____no_output_____"
],
[
"from sklearn.metrics import classification_report\ntarget_names = ['Neutral', 'Positive', 'Negative']\nprint(classification_report(Y_val, y_kmeans_batchnorm, digits=3, target_names=target_names))",
"_____no_output_____"
],
[
"import seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nfigure = plt.figure(figsize=(8, 8))\r\nsns.heatmap(con_mat, annot=True,cmap=plt.cm.Spectral,fmt='d',xticklabels=[\"Neutral\",\"Positive\",\"Negative\"], yticklabels=[\"Neutral\",\"Positive\",\"Negative\"])\r\nplt.tight_layout()\r\nplt.ylabel('True label')\r\nplt.xlabel('Predicted label')\r\nplt.show()",
"_____no_output_____"
],
[
"svns_neu_bn = []\nfor i in range(0,len(Y_val)):\n neu = cosine(cl_32[i], centers_batchnorm[2])/2\n svns_neu_bn.append(1-neu)\nprint(len(svns_neu_bn))",
"_____no_output_____"
],
[
"svns_pos_bn = []\nfor i in range(0,len(Y_val)):\n pos = cosine(cl_32[i], centers_batchnorm[0])/2\n svns_pos_bn.append(1-pos)\nprint(len(svns_pos_bn))",
"_____no_output_____"
],
[
"svns_neg_bn = []\nfor i in range(0,len(Y_val)):\n neg = cosine(cl_32[i], centers_batchnorm[1])/2\n svns_neg_bn.append(1-neg)\nprint(len(svns_neg_bn))",
"_____no_output_____"
],
[
"pred_colour = []\nfor i in range(0,len(pred)):\n if y_kmeans_batchnorm[i] == 0:\n pred_colour.append(\"Neutral\")\n if y_kmeans_batchnorm[i] == 1:\n pred_colour.append(\"Positive\")\n if y_kmeans_batchnorm[i] == 2:\n pred_colour.append(\"Negative\")\n\ntest_df = pd.DataFrame({'SVNS Positive':svns_pos_bn, 'SVNS Negative':svns_neg_bn, 'SVNS Neutral':svns_neu_bn, 'Labels:':pred_colour})\n\nfig = px.scatter_3d(test_df, x='SVNS Positive', y='SVNS Negative', z='SVNS Neutral', color='Labels:')\nfig.update_traces(\n marker={\n 'size': 1,\n 'opacity': 1,\n 'colorscale' : 'viridis',\n }\n)\nfig.update_layout(legend= {'itemsizing': 'constant'})\nfig.update_layout(width = 850, height = 750)\nfig.update_layout(margin=dict(l=0, r=0, b=0, t=0))",
"_____no_output_____"
]
],
[
[
"<h3>GRU</h3>",
"_____no_output_____"
]
],
[
[
"model.layers[-5]",
"_____no_output_____"
],
[
"with strategy.scope():\n cl_model = keras.Model(model.input, (model.layers[-5].output))",
"_____no_output_____"
],
[
"cl_32 = cl_model.predict([val_encodings[\"input_ids\"],val_encodings[\"attention_mask\"]])",
"_____no_output_____"
],
[
"kmeans = KMeans(n_clusters=3, random_state=4).fit(cl_32)\ny_kmeans_gru = kmeans.predict(cl_32)",
"_____no_output_____"
],
[
"for i in range(0,len(y_kmeans_gru)):\n if(y_kmeans_gru[i] == 0):\n y_kmeans_gru[i] = 1\n elif(y_kmeans_gru[i] == 1):\n y_kmeans_gru[i] = 2\n else:\n y_kmeans_gru[i] = 0",
"_____no_output_____"
],
[
"centers_gru = kmeans.cluster_centers_",
"_____no_output_____"
],
[
"con_mat = tf.math.confusion_matrix(labels=Y_val, predictions=y_kmeans_gru)\nprint(con_mat)",
"_____no_output_____"
],
[
"import seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nfigure = plt.figure(figsize=(8, 8))\r\nsns.set(font_scale=1.5)\r\nsns.heatmap(con_mat, annot=True,cmap=plt.cm.Spectral,fmt='d',xticklabels=[\"Neutral\",\"Positive\",\"Negative\"], yticklabels=[\"Neutral\",\"Positive\",\"Negative\"])\r\nplt.tight_layout()\r\nplt.ylabel('True label')\r\nplt.xlabel('Predicted label')\r\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.metrics import classification_report\ntarget_names = ['Neutral', 'Positive', 'Negative']\nprint(classification_report(Y_val, y_kmeans_gru, digits=3, target_names=target_names))",
"_____no_output_____"
],
[
"svns_neu_gru = []\nfor i in range(0,len(Y_val)):\n neu = cosine(cl_32[i], centers_gru[2])/2\n svns_neu_gru.append(1-neu)\nprint(len(svns_neu_gru))",
"_____no_output_____"
],
[
"svns_pos_gru = []\nfor i in range(0,len(Y_val)):\n pos = cosine(cl_32[i], centers_gru[0])/2\n svns_pos_gru.append(1-pos)\nprint(len(svns_pos_gru))",
"_____no_output_____"
],
[
"svns_neg_gru = []\nfor i in range(0,len(Y_val)):\n neg = cosine(cl_32[i], centers_gru[1])/2\n svns_neg_gru.append(1-neg)\nprint(len(svns_neg_gru))",
"_____no_output_____"
],
[
"pred_colour = []\nfor i in range(0,len(pred)):\n if y_kmeans_gru[i] == 0:\n pred_colour.append(\"Neutral\")\n if y_kmeans_gru[i] == 1:\n pred_colour.append(\"Positive\")\n if y_kmeans_gru[i] == 2:\n pred_colour.append(\"Negative\")\n\ntest_df = pd.DataFrame({'SVNS Positive':svns_pos_gru, 'SVNS Negative':svns_neg_gru, 'SVNS Neutral':svns_neu_gru, 'Labels:':pred_colour})\n\nfig = px.scatter_3d(test_df, x='SVNS Positive', y='SVNS Negative', z='SVNS Neutral', color='Labels:')\nfig.update_traces(\n marker={\n 'size': 1,\n 'opacity': 1,\n 'colorscale' : 'viridis',\n }\n)\nfig.update_layout(legend= {'itemsizing': 'constant'})\nfig.update_layout(width = 850, height = 750)\nfig.update_layout(margin=dict(l=0, r=0, b=0, t=0))",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a837bd3aa6bca42e2cc633df667e80f78bb1262
| 17,797 |
ipynb
|
Jupyter Notebook
|
docs/tutorials/qsimcirq.ipynb
|
lilies/qsim
|
aa9126c8c3b3d8118fd1c59e62ff89780065b0ee
|
[
"Apache-2.0"
] | null | null | null |
docs/tutorials/qsimcirq.ipynb
|
lilies/qsim
|
aa9126c8c3b3d8118fd1c59e62ff89780065b0ee
|
[
"Apache-2.0"
] | null | null | null |
docs/tutorials/qsimcirq.ipynb
|
lilies/qsim
|
aa9126c8c3b3d8118fd1c59e62ff89780065b0ee
|
[
"Apache-2.0"
] | null | null | null | 30.267007 | 369 | 0.592965 |
[
[
[
"##### Copyright 2020 Google",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Get started with qsimcirq",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.example.org/qsim/tutorials/qsimcirq\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on QuantumLib</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/quantumlib/qsim/blob/master/docs/tutorials/qsimcirq.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/quantumlib/qsim/blob/master/docs/tutorials/qsimcirq.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/qsim/docs/tutorials/qsimcirq.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"The qsim library provides a Python interface to Cirq in the **qsimcirq** PyPI package.",
"_____no_output_____"
],
[
"## Setup\n\nInstall the Cirq and qsimcirq packages:",
"_____no_output_____"
]
],
[
[
"try:\n import cirq\nexcept ImportError:\n !pip install cirq --quiet\n import cirq\n\ntry:\n import qsimcirq\nexcept ImportError:\n !pip install qsimcirq --quiet\n import qsimcirq",
"_____no_output_____"
]
],
[
[
"Simulating Cirq circuits with qsim is easy: just define the circuit as you normally would, then create a `QSimSimulator` to perform the simulation. This object implements Cirq's [simulator.py](https://github.com/quantumlib/Cirq/blob/master/cirq/sim/simulator.py) interfaces, so you can drop it in anywhere the basic Cirq simulator is used.",
"_____no_output_____"
],
[
"## Full state-vector simulation\n\nqsim is optimized for computing the final state vector of a circuit. Try it by running the example below.",
"_____no_output_____"
]
],
[
[
"# Define qubits and a short circuit.\nq0, q1 = cirq.LineQubit.range(2)\ncircuit = cirq.Circuit(cirq.H(q0), cirq.CX(q0, q1))\nprint(\"Circuit:\")\nprint(circuit)\nprint()\n\n# Simulate the circuit with Cirq and return the full state vector.\nprint('Cirq results:')\ncirq_simulator = cirq.Simulator()\ncirq_results = cirq_simulator.simulate(circuit)\nprint(cirq_results)\nprint()\n\n# Simulate the circuit with qsim and return the full state vector.\nprint('qsim results:')\nqsim_simulator = qsimcirq.QSimSimulator()\nqsim_results = qsim_simulator.simulate(circuit)\nprint(qsim_results)",
"_____no_output_____"
]
],
[
[
"To sample from this state, you can invoke Cirq's `sample_state_vector` method:",
"_____no_output_____"
]
],
[
[
"samples = cirq.sample_state_vector(\n qsim_results.state_vector(), indices=[0, 1], repetitions=10)\nprint(samples)",
"_____no_output_____"
]
],
[
[
"## Measurement sampling\n\nqsim also supports sampling from user-defined measurement gates. \n\n> *Note*: Since qsim and Cirq use different random number generators, identical runs on both simulators may give different results, even if they use the same seed.",
"_____no_output_____"
]
],
[
[
"# Define a circuit with measurements.\nq0, q1 = cirq.LineQubit.range(2)\ncircuit = cirq.Circuit(\n cirq.H(q0), cirq.X(q1), cirq.CX(q0, q1),\n cirq.measure(q0, key='qubit_0'),\n cirq.measure(q1, key='qubit_1'),\n)\nprint(\"Circuit:\")\nprint(circuit)\nprint()\n\n# Simulate the circuit with Cirq and return just the measurement values.\nprint('Cirq results:')\ncirq_simulator = cirq.Simulator()\ncirq_results = cirq_simulator.run(circuit, repetitions=5)\nprint(cirq_results)\nprint()\n\n# Simulate the circuit with qsim and return just the measurement values.\nprint('qsim results:')\nqsim_simulator = qsimcirq.QSimSimulator()\nqsim_results = qsim_simulator.run(circuit, repetitions=5)\nprint(qsim_results)",
"_____no_output_____"
]
],
[
[
"The warning above highlights an important distinction between the `simulate` and `run` methods:\n\n* `simulate` only executes the circuit once. \n - Sampling from the resulting state is fast, but if there are intermediate measurements the final state vector depends on the results of those measurements.\n* `run` will execute the circuit once for each repetition requested. \n - As a result, sampling is much slower, but intermediate measurements are re-sampled for each repetition. If there are no intermediate measurements, `run` redirects to `simulate` for faster execution.\n\nThe warning goes away if intermediate measurements are present:",
"_____no_output_____"
]
],
[
[
"# Define a circuit with intermediate measurements.\nq0 = cirq.LineQubit(0)\ncircuit = cirq.Circuit(\n cirq.X(q0)**0.5, cirq.measure(q0, key='m0'),\n cirq.X(q0)**0.5, cirq.measure(q0, key='m1'),\n cirq.X(q0)**0.5, cirq.measure(q0, key='m2'),\n)\nprint(\"Circuit:\")\nprint(circuit)\nprint()\n\n# Simulate the circuit with qsim and return just the measurement values.\nprint('qsim results:')\nqsim_simulator = qsimcirq.QSimSimulator()\nqsim_results = qsim_simulator.run(circuit, repetitions=5)\nprint(qsim_results)",
"_____no_output_____"
]
],
[
[
"## Amplitude evaluation\n\nqsim can also calculate amplitudes for specific output bitstrings.",
"_____no_output_____"
]
],
[
[
"# Define a simple circuit.\nq0, q1 = cirq.LineQubit.range(2)\ncircuit = cirq.Circuit(cirq.H(q0), cirq.CX(q0, q1))\nprint(\"Circuit:\")\nprint(circuit)\nprint()\n\n# Simulate the circuit with qsim and return the amplitudes for |00) and |01).\nprint('Cirq results:')\ncirq_simulator = cirq.Simulator()\ncirq_results = cirq_simulator.compute_amplitudes(\n circuit, bitstrings=[0b00, 0b01])\nprint(cirq_results)\nprint()\n\n# Simulate the circuit with qsim and return the amplitudes for |00) and |01).\nprint('qsim results:')\nqsim_simulator = qsimcirq.QSimSimulator()\nqsim_results = qsim_simulator.compute_amplitudes(\n circuit, bitstrings=[0b00, 0b01])\nprint(qsim_results)",
"_____no_output_____"
]
],
[
[
"## Performance benchmark\n\nThe code below generates a depth-16 circuit on a 4x5 qubit grid, then runs it against the basic Cirq simulator. For a circuit of this size, the difference in runtime can be significant - try it out!",
"_____no_output_____"
]
],
[
[
"import time\n\n# Get a rectangular grid of qubits.\nqubits = cirq.GridQubit.rect(4, 5)\n\n# Generates a random circuit on the provided qubits.\ncircuit = cirq.experiments.random_rotations_between_grid_interaction_layers_circuit(\n qubits=qubits, depth=16)\n\n# Simulate the circuit with Cirq and print the runtime.\ncirq_simulator = cirq.Simulator()\ncirq_start = time.time()\ncirq_results = cirq_simulator.simulate(circuit)\ncirq_elapsed = time.time() - cirq_start\nprint(f'Cirq runtime: {cirq_elapsed} seconds.')\nprint()\n\n# Simulate the circuit with qsim and print the runtime.\nqsim_simulator = qsimcirq.QSimSimulator()\nqsim_start = time.time()\nqsim_results = qsim_simulator.simulate(circuit)\nqsim_elapsed = time.time() - qsim_start\nprint(f'qsim runtime: {qsim_elapsed} seconds.')",
"_____no_output_____"
]
],
[
[
"qsim performance can be tuned further by passing options to the simulator constructor. These options use the same format as the qsim_base binary - a full description can be found in the qsim [usage doc](https://github.com/quantumlib/qsim/blob/master/docs/usage.md).",
"_____no_output_____"
]
],
[
[
"# Use eight threads to parallelize simulation.\noptions = {'t': 8}\n\nqsim_simulator = qsimcirq.QSimSimulator(options)\nqsim_start = time.time()\nqsim_results = qsim_simulator.simulate(circuit)\nqsim_elapsed = time.time() - qsim_start\nprint(f'qsim runtime: {qsim_elapsed} seconds.')",
"_____no_output_____"
]
],
[
[
"## Advanced applications: Distributed execution\n\nqsimh (qsim-hybrid) is a second library in the qsim repository that takes a slightly different approach to circuit simulation. When simulating a quantum circuit, it's possible to simplify the execution by decomposing a subset of two-qubit gates into pairs of one-qubit gates with shared indices. This operation is called \"slicing\" (or \"cutting\") the gates.\n\nqsimh takes advantage of the \"slicing\" operation by selecting a set of gates to \"slice\" and assigning each possible value of the shared indices across a set of executors running in parallel. By adding up the results afterwards, the total state can be recovered.",
"_____no_output_____"
]
],
[
[
"# Pick a pair of qubits.\nq0 = cirq.GridQubit(0, 0)\nq1 = cirq.GridQubit(0, 1)\n\n# Create a circuit that entangles the pair.\ncircuit = cirq.Circuit(\n cirq.H(q0), cirq.CX(q0, q1), cirq.X(q1)\n)\nprint(\"Circuit:\")\nprint(circuit)",
"_____no_output_____"
]
],
[
[
"In order to let qsimh know how we want to split up the circuit, we need to pass it some additional options. More detail on these can be found in the qsim [usage doc](https://github.com/quantumlib/qsim/blob/master/docs/usage.md), but the fundamentals are explained below.",
"_____no_output_____"
]
],
[
[
"options = {}\n\n# 'k' indicates the qubits on one side of the cut.\n# We'll use qubit 0 for this.\noptions['k'] = [0]\n\n# 'p' and 'r' control when values are assigned to cut indices.\n# There are some intricacies in choosing values for these options,\n# but for now we'll set p=1 and r=0.\n# This allows us to pre-assign the value of the CX indices\n# and distribute its execution to multiple jobs.\noptions['p'] = 1\noptions['r'] = 0\n\n# 'w' indicates the value pre-assigned to the cut.\n# This should change for each execution.\noptions['w'] = 0\n\n# Create the qsimh simulator with those options.\nqsimh_simulator = qsimcirq.QSimhSimulator(options)\nresults_0 = qsimh_simulator.compute_amplitudes(\n circuit, bitstrings=[0b00, 0b01, 0b10, 0b11])\nprint(results_0)",
"_____no_output_____"
]
],
[
[
"Now to run the other side of the cut...",
"_____no_output_____"
]
],
[
[
"options['w'] = 1\n\nqsimh_simulator = qsimcirq.QSimhSimulator(options)\nresults_1 = qsimh_simulator.compute_amplitudes(\n circuit, bitstrings=[0b00, 0b01, 0b10, 0b11])\nprint(results_1)",
"_____no_output_____"
]
],
[
[
"...and add the two together. The results of a normal qsim simulation are shown for comparison.",
"_____no_output_____"
]
],
[
[
"results = [r0 + r1 for r0, r1 in zip(results_0, results_1)]\nprint(\"qsimh results:\")\nprint(results)\n\nqsim_simulator = qsimcirq.QSimSimulator()\nqsim_simulator.compute_amplitudes(circuit, bitstrings=[0b00, 0b01, 0b10, 0b11])\nprint(\"qsim results:\")\nprint(results)",
"_____no_output_____"
]
],
[
[
"The key point to note here is that `results_0` and `results_1` are completely independent - they can be run in parallel on two separate machines, with no communication between the two. Getting the full result requires `2^p` executions, but each individual result is much cheaper to calculate than trying to do the whole circuit at once.",
"_____no_output_____"
]
]
] |
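,
[
[
"# A hedged sketch, not part of the original tutorial: run both slices of the\n# cut \"in parallel\" with a thread pool and sum the partial amplitudes. In a\n# real distributed setting each value of 'w' would run on a separate machine.\nfrom concurrent.futures import ThreadPoolExecutor\n\ndef run_slice(w):\n    opts = dict(options, w=w)  # reuse the qsimh options defined above\n    sim = qsimcirq.QSimhSimulator(opts)\n    return sim.compute_amplitudes(circuit, bitstrings=[0b00, 0b01, 0b10, 0b11])\n\nwith ThreadPoolExecutor(max_workers=2) as pool:\n    partials = list(pool.map(run_slice, [0, 1]))\n\ncombined = [sum(vals) for vals in zip(*partials)]\nprint(combined)",
"_____no_output_____"
]
]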
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a838ac052cc9ae4a4b6d8e709cbe71d53592ed3
| 19,589 |
ipynb
|
Jupyter Notebook
|
Computing Volumes.ipynb
|
4dsolutions/Python5
|
8d80753e823441a571b827d24d21577446409b52
|
[
"MIT"
] | 11 |
2016-08-17T00:15:26.000Z
|
2020-07-17T21:31:10.000Z
|
Computing Volumes.ipynb
|
4dsolutions/Python5
|
8d80753e823441a571b827d24d21577446409b52
|
[
"MIT"
] | null | null | null |
Computing Volumes.ipynb
|
4dsolutions/Python5
|
8d80753e823441a571b827d24d21577446409b52
|
[
"MIT"
] | 5 |
2017-02-22T05:15:52.000Z
|
2019-11-08T06:17:34.000Z
| 37.52682 | 393 | 0.528868 |
[
[
[
"Synergetics<br/>[Oregon Curriculum Network](http://4dsolutions.net/ocn/)\n<h3 align=\"center\">Computing Volumes in XYZ and IVM units</h3>\n<h4 align=\"center\">by Kirby Urner, July 2016</h4>\n\n\n\n\nA cube is composed of 24 identical not-regular tetrahedrons, each with a corner at the cube's center, an edge from cube's center to a face center, and two more to adjacent cube corners on that face, defining six edges in all (Fig. 1). \n\nIf we define the cube's edges to be √2 then the whole cube would have volume √2 * √2 * √2 in XYZ units. \n\nHowever, in IVM units, the very same cube has a volume of 3, owing to the differently-shaped volume unit, a tetrahedron of edges 2, inscribed in this same cube. [Fig. 986.210](http://www.rwgrayprojects.com/synergetics/findex/fx0900.html) from *Synergetics*:\n\n\n\nThose lengths would be in R-units, where R is the radius of a unit sphere. In D-units, twice as long (D = 2R), the tetrahedron has edges 1 and the cube has edges √2/2.\n\nBy XYZ we mean the XYZ coordinate system of René Descartes (1596 – 1650). \n\nBy IVM we mean the \"octet-truss\", a space-frame consisting of tetrahedrons and octahedrons in a space-filling matrix, with twice as many tetrahedrons as octahedrons. \n\n\n\nThe tetrahedron and octahedron have relative volumes of 1:4. The question then becomes, how to superimpose the two.\n\nThe canonical solution is to start with unit-radius balls (spheres) of radius R. R = 1 in other words, whereas D, the diameter, is 2. Alternatively, we may set D = 1 and R = 0.5, keeping the same 2:1 ratio for D:R. \n\nThe XYZ cube has edges R, whereas the IVM tetrahedron has edges D. That relative sizing convention brings their respective volumes fairly close together, with the cube's volume exceeding the tetrahedron's by about six percent.",
"_____no_output_____"
]
],
[
[
"import math\nxyz_volume = math.sqrt(2)**3\nivm_volume = 3\nprint(\"XYZ units:\", xyz_volume)\nprint(\"IVM units:\", ivm_volume)\nprint(\"Conversion constant:\", ivm_volume/xyz_volume)",
"XYZ units: 2.8284271247461907\nIVM units: 3\nConversion constant: 1.060660171779821\n"
]
],
[
[
"The Python code below encodes a Tetrahedron type based solely on its six edge lengths. The code makes no attempt to determine the consequent angles. \n\nA complicated volume formula, mined from the history books and streamlined by mathematician Gerald de Jong, outputs the volume of said tetrahedron in both IVM and XYZ units. \n\n<a data-flickr-embed=\"true\" href=\"https://www.flickr.com/photos/kirbyurner/45589318711/in/dateposted-public/\" title=\"dejong\"><img src=\"https://farm2.staticflickr.com/1935/45589318711_677d272397.jpg\" width=\"417\" height=\"136\" alt=\"dejong\"></a><script async src=\"//embedr.flickr.com/assets/client-code.js\" charset=\"utf-8\"></script>\n\n\nThe [unittests](http://pythontesting.net/framework/unittest/unittest-introduction/) that follow assure it's producing the expected results. The formula bears great resemblance to the one by [Piero della Francesca](https://mathpages.com/home/kmath424/kmath424.htm).",
"_____no_output_____"
]
],
[
[
"from math import sqrt as rt2\nfrom qrays import Qvector, Vector\n\nR =0.5\nD =1.0\n\nS3 = pow(9/8, 0.5)\nroot2 = rt2(2)\nroot3 = rt2(3)\nroot5 = rt2(5)\nroot6 = rt2(6)\nPHI = (1 + root5)/2.0\n\nclass Tetrahedron:\n \"\"\"\n Takes six edges of tetrahedron with faces\n (a,b,d)(b,c,e)(c,a,f)(d,e,f) -- returns volume\n in ivm and xyz units\n \"\"\"\n\n def __init__(self, a,b,c,d,e,f):\n self.a, self.a2 = a, a**2\n self.b, self.b2 = b, b**2\n self.c, self.c2 = c, c**2\n self.d, self.d2 = d, d**2\n self.e, self.e2 = e, e**2\n self.f, self.f2 = f, f**2\n\n def ivm_volume(self):\n ivmvol = ((self._addopen() - self._addclosed() - self._addopposite())/2) ** 0.5\n return ivmvol\n\n def xyz_volume(self):\n xyzvol = rt2(8/9) * self.ivm_volume()\n return xyzvol\n\n def _addopen(self):\n a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2\n sumval = f2*a2*b2\n sumval += d2 * a2 * c2\n sumval += a2 * b2 * e2\n sumval += c2 * b2 * d2\n sumval += e2 * c2 * a2\n sumval += f2 * c2 * b2\n sumval += e2 * d2 * a2\n sumval += b2 * d2 * f2\n sumval += b2 * e2 * f2\n sumval += d2 * e2 * c2\n sumval += a2 * f2 * e2\n sumval += d2 * f2 * c2\n return sumval\n\n def _addclosed(self):\n a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2\n sumval = a2 * b2 * d2\n sumval += d2 * e2 * f2\n sumval += b2 * c2 * e2\n sumval += a2 * c2 * f2\n return sumval\n\n def _addopposite(self):\n a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2\n sumval = a2 * e2 * (a2 + e2)\n sumval += b2 * f2 * (b2 + f2)\n sumval += c2 * d2 * (c2 + d2)\n return sumval\n \ndef make_tet(v0,v1,v2):\n \"\"\"\n three edges from any corner, remaining three edges computed\n \"\"\"\n tet = Tetrahedron(v0.length(), v1.length(), v2.length(), \n (v0-v1).length(), (v1-v2).length(), (v2-v0).length())\n return tet.ivm_volume(), tet.xyz_volume()\n\ntet = Tetrahedron(D, D, D, D, D, D)\nprint(tet.ivm_volume())",
"1.0\n"
]
],
[
[
"The ```make_tet``` function takes three vectors from a common corner, in terms of vectors with coordinates, and computes the remaining missing lengths, thereby getting the information it needs to use the Tetrahedron class as before.",
"_____no_output_____"
]
],
[
[
"import unittest\nfrom qrays import Vector, Qvector\n\nclass Test_Tetrahedron(unittest.TestCase):\n\n def test_unit_volume(self):\n tet = Tetrahedron(D, D, D, D, D, D)\n self.assertEqual(tet.ivm_volume(), 1, \"Volume not 1\")\n\n def test_e_module(self):\n e0 = D\n e1 = root3 * PHI**-1\n e2 = rt2((5 - root5)/2)\n e3 = (3 - root5)/2\n e4 = rt2(5 - 2*root5)\n e5 = 1/PHI\n tet = Tetrahedron(e0, e1, e2, e3, e4, e5)\n self.assertTrue(1/23 > tet.ivm_volume()/8 > 1/24, \"Wrong E-mod\")\n \n def test_unit_volume2(self):\n tet = Tetrahedron(R, R, R, R, R, R)\n self.assertAlmostEqual(float(tet.xyz_volume()), 0.117851130)\n\n def test_phi_edge_tetra(self):\n tet = Tetrahedron(D, D, D, D, D, PHI)\n self.assertAlmostEqual(float(tet.ivm_volume()), 0.70710678)\n\n def test_right_tetra(self):\n e = pow((root3/2)**2 + (root3/2)**2, 0.5) # right tetrahedron\n tet = Tetrahedron(D, D, D, D, D, e)\n self.assertAlmostEqual(tet.xyz_volume(), 1)\n\n def test_quadrant(self):\n qA = Qvector((1,0,0,0))\n qB = Qvector((0,1,0,0))\n qC = Qvector((0,0,1,0))\n tet = make_tet(qA, qB, qC) \n self.assertAlmostEqual(tet[0], 0.25) \n\n def test_octant(self):\n x = Vector((0.5, 0, 0))\n y = Vector((0 , 0.5, 0))\n z = Vector((0 , 0 , 0.5))\n tet = make_tet(x,y,z)\n self.assertAlmostEqual(tet[1], 1/6, 5) # good to 5 places\n\n def test_quarter_octahedron(self):\n a = Vector((1,0,0))\n b = Vector((0,1,0))\n c = Vector((0.5,0.5,root2/2))\n tet = make_tet(a, b, c)\n self.assertAlmostEqual(tet[0], 1, 5) # good to 5 places \n\n def test_xyz_cube(self):\n a = Vector((0.5, 0.0, 0.0))\n b = Vector((0.0, 0.5, 0.0))\n c = Vector((0.0, 0.0, 0.5))\n R_octa = make_tet(a,b,c) \n self.assertAlmostEqual(6 * R_octa[1], 1, 4) # good to 4 places \n\n def test_s3(self):\n D_tet = Tetrahedron(D, D, D, D, D, D)\n a = Vector((0.5, 0.0, 0.0))\n b = Vector((0.0, 0.5, 0.0))\n c = Vector((0.0, 0.0, 0.5))\n R_cube = 6 * make_tet(a,b,c)[1]\n self.assertAlmostEqual(D_tet.xyz_volume() * S3, R_cube, 4)\n\n def test_martian(self):\n p = Qvector((2,1,0,1))\n q = Qvector((2,1,1,0))\n r = Qvector((2,0,1,1))\n result = make_tet(5*q, 2*p, 2*r)\n self.assertAlmostEqual(result[0], 20, 7)\n \n def test_phi_tet(self):\n \"edges from common vertex: phi, 1/phi, 1\"\n p = Vector((1, 0, 0))\n q = Vector((1, 0, 0)).rotz(60) * PHI\n r = Vector((0.5, root3/6, root6/3)) * 1/PHI\n result = make_tet(p, q, r)\n self.assertAlmostEqual(result[0], 1, 7)\n \n def test_phi_tet_2(self):\n p = Qvector((2,1,0,1))\n q = Qvector((2,1,1,0))\n r = Qvector((2,0,1,1))\n result = make_tet(PHI*q, (1/PHI)*p, r)\n self.assertAlmostEqual(result[0], 1, 7)\n \n def test_phi_tet_3(self):\n T = Tetrahedron(PHI, 1/PHI, 1.0, \n root2, root2/PHI, root2)\n result = T.ivm_volume()\n self.assertAlmostEqual(result, 1, 7)\n\n def test_koski(self):\n a = 1 \n b = PHI ** -1\n c = PHI ** -2\n d = (root2) * PHI ** -1 \n e = (root2) * PHI ** -2\n f = (root2) * PHI ** -1 \n T = Tetrahedron(a,b,c,d,e,f)\n result = T.ivm_volume()\n self.assertAlmostEqual(result, PHI ** -3, 7) \n \na = Test_Tetrahedron()\n\nR =0.5\nD =1.0\n\nsuite = unittest.TestLoader().loadTestsFromModule(a)\nunittest.TextTestRunner().run(suite)",
"...............\n----------------------------------------------------------------------\nRan 15 tests in 0.025s\n\nOK\n"
]
],
[
[
"<a data-flickr-embed=\"true\" href=\"https://www.flickr.com/photos/kirbyurner/41211295565/in/album-72157624750749042/\" title=\"Martian Multiplication\"><img src=\"https://farm1.staticflickr.com/907/41211295565_59145e2f63.jpg\" width=\"500\" height=\"312\" alt=\"Martian Multiplication\"></a><script async src=\"//embedr.flickr.com/assets/client-code.js\" charset=\"utf-8\"></script>\n\nThe above tetrahedron has a=2, b=2, c=5, for a volume of 20. The remaining three lengths have not been computed as it's sufficient to know only a, b, c if the angles between them are those of the regular tetrahedron. \n\nThat's how IVM volume is computed: multiply a * b * c from a regular tetrahedron corner, then \"close the lid\" to see the volume.",
"_____no_output_____"
]
],
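[
[
"# A quick check of the figure's claim (a=2, b=2, c=5 gives volume 20), using\n# the same Qvector corner directions as in test_martian above.\np = Qvector((2,1,0,1))\nq = Qvector((2,1,1,0))\nr = Qvector((2,0,1,1))\nivm, xyz = make_tet(2*p, 2*q, 5*r)\nprint(\"IVM volume:\", round(ivm, 5))",
"_____no_output_____"
]
],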
[
[
"a = 2\nb = 4\nc = 5\nd = 3.4641016151377544\ne = 4.58257569495584\nf = 4.358898943540673\ntetra = Tetrahedron(a,b,c,d,e,f)\nprint(\"IVM volume of tetra:\", round(tetra.ivm_volume(),5))",
"IVM volume of tetra: 40.0\n"
]
],
[
[
"Lets define a MITE, one of these 24 identical space-filling tetrahedrons, with reference to D=1, R=0.5, as this is how our Tetrahedron class is calibrated. The cubes 12 edges will all be √2/2.\n\nEdges 'a' 'b' 'c' fan out from the cube center, with 'b' going up to a face center, with 'a' and 'c' to adjacent ends of the face's edge. \n\nFrom the cube's center to mid-face is √2/4 (half an edge), our 'b'. 'a' and 'c' are both half the cube's body diagonal of √(3/2)/2 or √(3/8). \n\nEdges 'd', 'e' and 'f' define the facet opposite the cube's center. \n\n'd' and 'e' are both half face diagonals or 0.5, whereas 'f' is a cube edge, √2/2. This gives us our tetrahedron:",
"_____no_output_____"
]
],
[
[
"b = rt2(2)/4\na = c = rt2(3/8)\nd = e = 0.5\nf = rt2(2)/2\nmite = Tetrahedron(a, b, c, d, e, f)\nprint(\"IVM volume of Mite:\", round(mite.ivm_volume(),5))\nprint(\"XYZ volume of Mite:\", round(mite.xyz_volume(),5))",
"IVM volume of Mite: 0.125\nXYZ volume of Mite: 0.11785\n"
]
],
[
[
"Allowing for floating point error, this space-filling right tetrahedron has a volume of 0.125 or 1/8. Since 24 of them form a cube, said cube has a volume of 3. The XYZ volume, on the other hand, is what we'd expect from a regular tetrahedron of edges 0.5 in the current calibration system.",
"_____no_output_____"
]
],
[
[
"regular = Tetrahedron(0.5, 0.5, 0.5, 0.5, 0.5, 0.5)\nprint(\"MITE volume in XYZ units:\", round(regular.xyz_volume(),5))\nprint(\"XYZ volume of 24-Mite Cube:\", round(24 * regular.xyz_volume(),5))",
"MITE volume in XYZ units: 0.11785\nXYZ volume of 24-Mite Cube: 2.82843\n"
]
],
[
[
"The MITE (minimum tetrahedron) further dissects into component modules, a left and right A module, then either a left or right B module. Outwardly, the positive and negative MITEs look the same. Here are some drawings from R. Buckminster Fuller's research, the chief popularizer of the A and B modules.\n\n\n\nIn a different Jupyter Notebook, we could run these tetrahedra through our volume computer to discover both As and Bs have a volume of 1/24 in IVM units.\n\nInstead, lets take a look at the E-module and compute its volume.\n\n<br />\nThe black hub is at the center of the RT, as shown here...\n\n<br />\n\n<div style=\"text-align: center\">\n<a data-flickr-embed=\"true\" href=\"https://www.flickr.com/photos/kirbyurner/24971714468/in/dateposted-public/\" title=\"E module with origin\"><img src=\"https://farm5.staticflickr.com/4516/24971714468_46e14ce4b5_z.jpg\" width=\"640\" height=\"399\" alt=\"E module with origin\"></a><script async src=\"//embedr.flickr.com/assets/client-code.js\" charset=\"utf-8\"></script>\n<b>RT center is the black hub (Koski with vZome)</b>\n</div>",
"_____no_output_____"
]
],
[
[
"from math import sqrt as rt2\nfrom tetravolume import make_tet, Vector\n\nø = (rt2(5)+1)/2\ne0 = Black_Yellow = rt2(3)*ø**-1\ne1 = Black_Blue = 1\ne3 = Yellow_Blue = (3 - rt2(5))/2\ne6 = Black_Red = rt2((5 - rt2(5))/2)\ne7 = Blue_Red = 1/ø\n\n# E-mod is a right tetrahedron, so xyz is easy\nv0 = Vector((Black_Blue, 0, 0))\nv1 = Vector((Black_Blue, Yellow_Blue, 0))\nv2 = Vector((Black_Blue, 0, Blue_Red))\n\n# assumes R=0.5 so computed result is 8x needed\n# volume, ergo divide by 8.\nivm, xyz = make_tet(v0,v1,v2)\n\nprint(\"IVM volume:\", round(ivm/8, 5))\nprint(\"XYZ volume:\", round(xyz/8, 5))",
"IVM volume: 0.04173\nXYZ volume: 0.03934\n"
]
],
[
[
"This information is being shared around Portland in various contexts. Below, an image from a hands-on workshop in 2010 organized by the Portland Free School.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a838cb8c74c3ea33ec11e366f98b0a664df5715
| 7,678 |
ipynb
|
Jupyter Notebook
|
Python/Intro to Python - Easy.ipynb
|
28433cafu/Exercises
|
8e7ab7383b78535649373f22d4aea722f595be2f
|
[
"Unlicense"
] | null | null | null |
Python/Intro to Python - Easy.ipynb
|
28433cafu/Exercises
|
8e7ab7383b78535649373f22d4aea722f595be2f
|
[
"Unlicense"
] | null | null | null |
Python/Intro to Python - Easy.ipynb
|
28433cafu/Exercises
|
8e7ab7383b78535649373f22d4aea722f595be2f
|
[
"Unlicense"
] | null | null | null | 31.858921 | 938 | 0.55939 |
[
[
[
"Here you have a collection of guided exercises for the first class on Python. <br>\nThe exercises are divided by topic, following the topics reviewed during the theory session, and for each topic you have some mandatory exercises, and other optional exercises, which you are invited to do if you still have time after the mandatory exercises. <br>\n\nRemember that you have 5 hours to solve these exercises, after which we will review the most interesting exercises together. If you don't finish all the exercises, you can work on them tonightor tomorrow. \n\nAt the end of the class, we will upload the code with the solutions of the exercises so that you can review them again if needed. If you still have not finished some exercises, try to do them first by yourself, before taking a look at the solutions: you are doing these exercises for yourself, so it is always the best to do them your way first, as it is the fastest way to learn!",
"_____no_output_____"
],
[
"**Exercise 1.1:** The cover price of a book is 24.95 EUR, but bookstores get a 40 percent discount. Shipping costs 3 EUR for the first copy and 75 cents for each additional copy. **Calculate the total wholesale costs for 60 copies**. ",
"_____no_output_____"
]
],
[
[
"#Your Code Here\n#i will firstly define the variables as:\nbookPrice = 24.95\ndiscount = 40/100\ntotalNumberOfBooks= 60\nshippingFirstCopy = 3\nshippingSubsequentCopies = 0.75\n\nwholeSalePrice = bookPrice - (bookPrice*discount)\ntotalBookCost = wholeSalePrice*totalNumberOfBooks\ntotalShippingCost = shippingFirstCopy + (shippingSubsequentCopies*59)\n\n\n#i will now create a function for calculating the total wholesale cost\ndef wholeSaleCost():\n totalWholeSaleCost = totalBookCost + totalShippingCost\n return totalWholeSaleCost\n\nwholeSaleCost()",
"_____no_output_____"
]
],
[
[
"**Exercise 1.2:** When something is wrong with your code, Python will raise errors. Often these will be \"syntax errors\" that signal that something is wrong with the form of your code (e.g., the code in the previous exercise raised a `SyntaxError`). There are also \"runtime errors\", which signal that your code was in itself formally correct, but that something went wrong during the code's execution. A good example is the `ZeroDivisionError`, which indicates that you tried to divide a number by zero (which, as you may know, is not allowed). Try to make Python **raise such a `ZeroDivisionError`.**",
"_____no_output_____"
]
],
[
[
"#Your Code Here\nclassAge = 28\nnumberInClass = 0\naverageAge = classAge/numberInClass\n\ndef myClassAge (averageAge): \n return averageAge\n\nprint(averageAge)\n",
"_____no_output_____"
]
],
[
[
"**Exercise 5.1**: Create a countdown function that starts at a certain count, and counts down to zero. Instead of zero, print \"Blast off!\". Use a `for` loop. \n",
"_____no_output_____"
]
],
[
[
"# Countdown\ndef countdown():\n \"\"\"\n 20\n 19\n 18\n 17\n 16\n 15\n 14\n 13\n 12\n 11\n 10\n 9\n 8\n 7\n 6\n 5\n 4\n 3\n 2\n 1\n Blast off!\n \"\"\"\n i = 20\n while i > 0:\n print(i)\n i -= 1\n else:\n print(\"Blast off!\") \n\n\ncountdown()",
"20\n19\n18\n17\n16\n15\n14\n13\n12\n11\n10\n9\n8\n7\n6\n5\n4\n3\n2\n1\nBlast off!\n"
]
],
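[
[
"# Alternative sketch using a for loop, as the prompt asks; range() counts down\n# from the start value to 1 and then we print the final message.\ndef countdown_for(start=20):\n    for i in range(start, 0, -1):\n        print(i)\n    print(\"Blast off!\")\n\ncountdown_for()",
"_____no_output_____"
]
],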
[
[
"**Exercise 5.2:** Write and test three functions that return the largest, the smallest, and the number of dividables by 3 in a given collection of numbers. Use the algorithm described earlier in the Part 5 lecture :)",
"_____no_output_____"
]
],
[
[
"# Your functions\ndef main():\n \"\"\"\n a = [2, 4, 6, 12, 15, 99, 100]\n 100\n 2\n 4\n \"\"\"\n a = [2, 4, 6, 12, 15, 99, 100]\n count = 0\n print(max(a))\n print(min(a))\n for num in a :\n if num%3 == 0:\n count += 1\n print(count)\n \nmain()\n",
"100\n2\n4\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a83968a634b6b1ea05d91597d66a11310baca1b
| 656 |
ipynb
|
Jupyter Notebook
|
nbgrader/tests/nbextensions/files/open_relative_file.ipynb
|
datalayer-contrib/jupyter-nbgrader
|
a409a1c10e92df16f446cdc18ed93c85ac174f56
|
[
"BSD-3-Clause"
] | 1,116 |
2015-01-20T19:22:24.000Z
|
2022-03-31T22:05:10.000Z
|
nbgrader/tests/nbextensions/files/open_relative_file.ipynb
|
datalayer-contrib/jupyter-nbgrader
|
a409a1c10e92df16f446cdc18ed93c85ac174f56
|
[
"BSD-3-Clause"
] | 1,166 |
2015-01-08T21:50:31.000Z
|
2022-03-31T05:15:01.000Z
|
nbgrader/tests/nbextensions/files/open_relative_file.ipynb
|
datalayer-externals/jupyter-notebook-grader
|
a409a1c10e92df16f446cdc18ed93c85ac174f56
|
[
"BSD-3-Clause"
] | 337 |
2015-02-06T01:28:00.000Z
|
2022-03-29T06:52:38.000Z
| 17.263158 | 45 | 0.474085 |
[
[
[
"with open(\"data.txt\", \"r\") as f:\n data = f.read()\n \nassert len(data.split(\"\\n\")) == 4",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a839df897c52c23d2a2f5077dba407d8a084178
| 2,245 |
ipynb
|
Jupyter Notebook
|
migrate_v1_to_v2/Introduction.ipynb
|
quantrocket-codeload/migrate-v1-to-v2
|
a83bed8fae5a1e0857cb34bbbee3e7c0492ba13f
|
[
"Apache-2.0"
] | null | null | null |
migrate_v1_to_v2/Introduction.ipynb
|
quantrocket-codeload/migrate-v1-to-v2
|
a83bed8fae5a1e0857cb34bbbee3e7c0492ba13f
|
[
"Apache-2.0"
] | null | null | null |
migrate_v1_to_v2/Introduction.ipynb
|
quantrocket-codeload/migrate-v1-to-v2
|
a83bed8fae5a1e0857cb34bbbee3e7c0492ba13f
|
[
"Apache-2.0"
] | null | null | null | 31.180556 | 290 | 0.620045 |
[
[
[
"<img alt=\"QuantRocket logo\" src=\"https://www.quantrocket.com/assets/img/notebook-header-logo.png\">\n\n<a href=\"https://www.quantrocket.com/disclaimer/\">Disclaimer</a>",
"_____no_output_____"
],
[
"# Introduction\n\nUpgrading from version 1 to version 2 has two main parts: migrating your databases, and updating your code to reflect version 2 API changes. This tutorial guides you through the process, providing helper scripts and instructions for reverting back to version 1 if anything goes wrong.",
"_____no_output_____"
],
[
"To use the migration guide, you should have started with your version 1 deployment and updated it in place to version 2. The following code double checks:",
"_____no_output_____"
]
],
[
[
"![ ! -f /var/lib/quantrocket/quantrocket.master.main.sqlite ] && echo 'oops, no version 1 database found, did you start with version 1 and update in place to version 2?'\n![ ! -f /var/lib/quantrocket/quantrocket.v2.master.main.sqlite ] && echo 'oops, no version 2 database found, please update to version 2'\n![ -f /var/lib/quantrocket/quantrocket.master.main.sqlite ] && [ -f /var/lib/quantrocket/quantrocket.v2.master.main.sqlite ] && echo 'looks like you are all set!'",
"_____no_output_____"
]
],
[
[
"* Part 1: [Migrate databases](Part1-Migrate-Databases.ipynb)\n* Part 2: [Make code changes](Part2-Code-Changes.ipynb)\n\n[How to revert to version 1](Part3-How-To-Revert.ipynb)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a83b312a266ff04bf20312dc0171b7b6d9bc61f
| 617 |
ipynb
|
Jupyter Notebook
|
examples/reference/solvers/available_matrix_solvers.ipynb
|
xu-kai-xu/OpenPNM
|
61d5fc4729a0a29291cf6c53c07c4246e7a13714
|
[
"MIT"
] | 2 |
2019-08-24T09:17:40.000Z
|
2020-07-05T07:21:21.000Z
|
examples/reference/solvers/available_matrix_solvers.ipynb
|
xu-kai-xu/OpenPNM
|
61d5fc4729a0a29291cf6c53c07c4246e7a13714
|
[
"MIT"
] | null | null | null |
examples/reference/solvers/available_matrix_solvers.ipynb
|
xu-kai-xu/OpenPNM
|
61d5fc4729a0a29291cf6c53c07c4246e7a13714
|
[
"MIT"
] | null | null | null | 16.675676 | 34 | 0.531605 |
[
[
[
"# Available Matrix Solvers",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown"
]
] |
4a83b9f6182251234e0e64421bbd5dd9a42583ab
| 35,113 |
ipynb
|
Jupyter Notebook
|
colab/chap09.ipynb
|
ferrysany/ModSimPy
|
4b68634ee847102ad3b0f1816ac0b1c3125018d3
|
[
"MIT"
] | null | null | null |
colab/chap09.ipynb
|
ferrysany/ModSimPy
|
4b68634ee847102ad3b0f1816ac0b1c3125018d3
|
[
"MIT"
] | null | null | null |
colab/chap09.ipynb
|
ferrysany/ModSimPy
|
4b68634ee847102ad3b0f1816ac0b1c3125018d3
|
[
"MIT"
] | null | null | null | 25.062812 | 283 | 0.416911 |
[
[
[
"# Chapter 9",
"_____no_output_____"
],
[
"*Modeling and Simulation in Python*\n\nCopyright 2021 Allen Downey\n\nLicense: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)",
"_____no_output_____"
]
],
[
[
"# check if the libraries we need are installed\n\ntry:\n import pint\nexcept ImportError:\n !pip install pint\n import pint\n \ntry:\n from modsim import *\nexcept ImportError:\n !pip install modsimpy\n from modsim import *",
"_____no_output_____"
]
],
[
[
"The following displays SymPy expressions and provides the option of showing results in LaTeX format.",
"_____no_output_____"
]
],
[
[
"from sympy.printing import latex\n\ndef show(expr, show_latex=False):\n \"\"\"Display a SymPy expression.\n \n expr: SymPy expression\n show_latex: boolean\n \"\"\"\n if show_latex:\n print(latex(expr))\n return expr",
"_____no_output_____"
]
],
[
[
"### Analysis with SymPy",
"_____no_output_____"
],
[
"Create a symbol for time.",
"_____no_output_____"
]
],
[
[
"import sympy as sp\n\nt = sp.symbols('t')\nt",
"_____no_output_____"
]
],
[
[
"If you combine symbols and numbers, you get symbolic expressions.",
"_____no_output_____"
]
],
[
[
"expr = t + 1\nexpr",
"_____no_output_____"
]
],
[
[
"The result is an `Add` object, which just represents the sum without trying to compute it.",
"_____no_output_____"
]
],
[
[
"type(expr)",
"_____no_output_____"
]
],
[
[
"`subs` can be used to replace a symbol with a number, which allows the addition to proceed.",
"_____no_output_____"
]
],
[
[
"expr.subs(t, 2)",
"_____no_output_____"
]
],
[
[
"`f` is a special class of symbol that represents a function.",
"_____no_output_____"
]
],
[
[
"f = sp.Function('f')\nf",
"_____no_output_____"
]
],
[
[
"The type of `f` is `UndefinedFunction`",
"_____no_output_____"
]
],
[
[
"type(f)",
"_____no_output_____"
]
],
[
[
"SymPy understands that `f(t)` means `f` evaluated at `t`, but it doesn't try to evaluate it yet.",
"_____no_output_____"
]
],
[
[
"f(t)",
"_____no_output_____"
]
],
[
[
"`diff` returns a `Derivative` object that represents the time derivative of `f`",
"_____no_output_____"
]
],
[
[
"dfdt = sp.diff(f(t), t)\ndfdt",
"_____no_output_____"
],
[
"type(dfdt)",
"_____no_output_____"
]
],
[
[
"We need a symbol for `alpha`",
"_____no_output_____"
]
],
[
[
"alpha = sp.symbols('alpha')\nalpha",
"_____no_output_____"
]
],
[
[
"Now we can write the differential equation for proportional growth.",
"_____no_output_____"
]
],
[
[
"eq1 = sp.Eq(dfdt, alpha*f(t))\neq1",
"_____no_output_____"
]
],
[
[
"And use `dsolve` to solve it. The result is the general solution.",
"_____no_output_____"
]
],
[
[
"solution_eq = sp.dsolve(eq1)\nsolution_eq",
"_____no_output_____"
]
],
[
[
"We can tell it's a general solution because it contains an unspecified constant, `C1`.\n\nIn this example, finding the particular solution is easy: we just replace `C1` with `p_0`",
"_____no_output_____"
]
],
[
[
"C1, p_0 = sp.symbols('C1 p_0')",
"_____no_output_____"
],
[
"particular = solution_eq.subs(C1, p_0)\nparticular",
"_____no_output_____"
]
],
[
[
"In the next example, we have to work a little harder to find the particular solution.",
"_____no_output_____"
],
[
"### Solving the quadratic growth equation \n\nWe'll use the (r, K) parameterization, so we'll need two more symbols:",
"_____no_output_____"
]
],
[
[
"r, K = sp.symbols('r K')",
"_____no_output_____"
]
],
[
[
"Now we can write the differential equation.",
"_____no_output_____"
]
],
[
[
"eq2 = sp.Eq(sp.diff(f(t), t), r * f(t) * (1 - f(t)/K))\neq2",
"_____no_output_____"
]
],
[
[
"And solve it.",
"_____no_output_____"
]
],
[
[
"solution_eq = sp.dsolve(eq2)\nsolution_eq",
"_____no_output_____"
]
],
[
[
"The result, `solution_eq`, contains `rhs`, which is the right-hand side of the solution.",
"_____no_output_____"
]
],
[
[
"general = solution_eq.rhs\ngeneral",
"_____no_output_____"
]
],
[
[
"We can evaluate the right-hand side at $t=0$",
"_____no_output_____"
]
],
[
[
"at_0 = general.subs(t, 0)\nat_0",
"_____no_output_____"
]
],
[
[
"Now we want to find the value of `C1` that makes `f(0) = p_0`.\n\nSo we'll create the equation `at_0 = p_0` and solve for `C1`. Because this is just an algebraic identity, not a differential equation, we use `solve`, not `dsolve`.\n\nThe result from `solve` is a list of solutions. In this case, [we have reason to expect only one solution](https://en.wikipedia.org/wiki/Picard%E2%80%93Lindel%C3%B6f_theorem), but we still get a list, so we have to use the bracket operator, `[0]`, to select the first one.",
"_____no_output_____"
]
],
[
[
"solutions = sp.solve(sp.Eq(at_0, p_0), C1)\ntype(solutions), len(solutions)",
"_____no_output_____"
],
[
"value_of_C1 = solutions[0]\nvalue_of_C1",
"_____no_output_____"
]
],
[
[
"Now in the general solution, we want to replace `C1` with the value of `C1` we just figured out.",
"_____no_output_____"
]
],
[
[
"particular = general.subs(C1, value_of_C1)\nparticular",
"_____no_output_____"
]
],
[
[
"The result is complicated, but SymPy provides a method that tries to simplify it.",
"_____no_output_____"
]
],
[
[
"particular = sp.simplify(particular)\nparticular",
"_____no_output_____"
]
],
[
[
"Often simplicity is in the eye of the beholder, but that's about as simple as this expression gets.\n\nJust to double-check, we can evaluate it at `t=0` and confirm that we get `p_0`",
"_____no_output_____"
]
],
[
[
"particular.subs(t, 0)",
"_____no_output_____"
]
],
[
[
"This solution is called the [logistic function](https://en.wikipedia.org/wiki/Population_growth#Logistic_equation).\n\nIn some places you'll see it written in a different form:\n\n$f(t) = \\frac{K}{1 + A e^{-rt}}$\n\nwhere $A = (K - p_0) / p_0$.\n\nWe can use SymPy to confirm that these two forms are equivalent. First we represent the alternative version of the logistic function:",
"_____no_output_____"
]
],
[
[
"A = (K - p_0) / p_0\nA",
"_____no_output_____"
],
[
"logistic = K / (1 + A * sp.exp(-r*t))\nlogistic",
"_____no_output_____"
]
],
[
[
"To see whether two expressions are equivalent, we can check whether their difference simplifies to 0.",
"_____no_output_____"
]
],
[
[
"sp.simplify(particular - logistic)",
"_____no_output_____"
]
],
[
[
"This test only works one way: if SymPy says the difference reduces to 0, the expressions are definitely equivalent (and not just numerically close).\n\nBut if SymPy can't find a way to simplify the result to 0, that doesn't necessarily mean there isn't one. Testing whether two expressions are equivalent is a surprisingly hard problem; in fact, there is no algorithm that can solve it in general.",
"_____no_output_____"
],
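When `simplify` cannot reduce a difference to zero, a practical fallback is SymPy's `Expr.equals`, which compares the two expressions numerically at sample points — a minimal sketch using the `particular` and `logistic` expressions defined above:

```python
# True here is strong numerical evidence of equivalence, though not a symbolic proof.
particular.equals(logistic)
```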
[
"### Exercises\n\n**Exercise:** Solve the quadratic growth equation using the alternative parameterization\n\n$\\frac{df(t)}{dt} = \\alpha f(t) + \\beta f^2(t) $",
"_____no_output_____"
]
],
[
[
"beta = sp.symbols('beta')\nbeta",
"_____no_output_____"
],
[
"eq3 = sp.Eq(sp.diff(f(t), t), alpha * f(t) + beta * f(t)**2)\neq3",
"_____no_output_____"
],
[
"solution_eq3 = sp.dsolve(eq3)\nsolution_eq3",
"_____no_output_____"
],
[
"general3 = solution_eq3.rhs\ngeneral3",
"_____no_output_____"
],
[
"at_03 = general3.subs(t, 0)\nat_03",
"_____no_output_____"
],
[
"solutions3 = sp.solve(sp.Eq(at_03, p_0), C1)\nsolutions3[0]",
"_____no_output_____"
],
[
"particular3 = sp.simplify(general3.subs(C1, solutions3[0]))\nparticular3",
"_____no_output_____"
]
],
[
[
"**Exercise:** Use [WolframAlpha](https://www.wolframalpha.com/) to solve the quadratic growth model, using either or both forms of parameterization:\n\n df(t) / dt = alpha f(t) + beta f(t)^2\n\nor\n\n df(t) / dt = r f(t) (1 - f(t)/K)\n\nFind the general solution and also the particular solution where `f(0) = p_0`.",
"_____no_output_____"
]
],
[
[
"Please see the solution above",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a83c640fbdbb657e0652e0211d0a79b21fd2c45
| 248,967 |
ipynb
|
Jupyter Notebook
|
evaluate/previous_works/HoHoNet/infer_layout.ipynb
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | 11 |
2021-11-01T05:40:19.000Z
|
2022-03-28T17:59:44.000Z
|
evaluate/previous_works/HoHoNet/infer_layout.ipynb
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | null | null | null |
evaluate/previous_works/HoHoNet/infer_layout.ipynb
|
Syniez/Joint_360depth
|
4f28c3b5b7f648173480052e205e898c6c7a5151
|
[
"MIT"
] | 1 |
2022-03-29T10:13:47.000Z
|
2022-03-29T10:13:47.000Z
| 560.736486 | 128,720 | 0.944559 |
[
[
[
"!pip install yacs\n!pip install gdown",
"Requirement already satisfied: yacs in /Users/suncheng/miniconda3/lib/python3.8/site-packages (0.1.8)\nRequirement already satisfied: PyYAML in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from yacs) (5.4.1)\nRequirement already satisfied: gdown in /Users/suncheng/miniconda3/lib/python3.8/site-packages (3.12.2)\nRequirement already satisfied: filelock in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from gdown) (3.0.12)\nRequirement already satisfied: tqdm in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from gdown) (4.46.0)\nRequirement already satisfied: six in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from gdown) (1.14.0)\nRequirement already satisfied: requests[socks] in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from gdown) (2.23.0)\nRequirement already satisfied: chardet<4,>=3.0.2 in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from requests[socks]->gdown) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from requests[socks]->gdown) (2.9)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from requests[socks]->gdown) (1.25.8)\nRequirement already satisfied: certifi>=2017.4.17 in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from requests[socks]->gdown) (2020.6.20)\nRequirement already satisfied: PySocks!=1.5.7,>=1.5.6; extra == \"socks\" in /Users/suncheng/miniconda3/lib/python3.8/site-packages (from requests[socks]->gdown) (1.7.1)\n"
],
[
"import os, sys, time\nimport argparse\nimport importlib\nfrom tqdm.notebook import tqdm\nfrom imageio import imread\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"### Download pretrained\n- We use HoHoNet w/ hardnet encoder in this demo\n- Download other version [here](https://drive.google.com/drive/folders/1raT3vRXnQXRAQuYq36dE-93xFc_hgkTQ?usp=sharing)",
"_____no_output_____"
]
],
[
[
"PRETRAINED_PTH = 'ckpt/mp3d_layout_HOHO_layout_aug_efficienthc_Transen1_resnet34/ep300.pth'\n\nif not os.path.exists(PRETRAINED_PTH):\n os.makedirs(os.path.split(PRETRAINED_PTH)[0], exist_ok=True)\n !gdown 'https://drive.google.com/uc?id=1OU9uyuNiswkPovJuvG3sevm3LqHJgazJ' -O $PRETRAINED_PTH",
"_____no_output_____"
]
],
[
[
"### Download image\n- We use a out-of-distribution image from PanoContext",
"_____no_output_____"
]
],
[
[
"if not os.path.exists('assets/pano_asmasuxybohhcj.png'):\n !gdown 'https://drive.google.com/uc?id=1CXl6RPK6yPRFXxsa5OisHV9KwyRcejHu' -O 'assets/pano_asmasuxybohhcj.png'\n\nrgb = imread('assets/pano_asmasuxybohhcj.png')\n\nplt.imshow(rgb)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Load model config\n- We use HoHoNet w/ hardnet encoder in this demo\n- Find out other version in `mp3d_depth/` and `s2d3d_depth`",
"_____no_output_____"
]
],
[
[
"from lib.config import config\n\nconfig.defrost()\nconfig.merge_from_file('config/mp3d_layout/HOHO_layout_aug_efficienthc_Transen1_resnet34.yaml')\nconfig.freeze()",
"_____no_output_____"
]
],
[
[
"### Load model",
"_____no_output_____"
]
],
[
[
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('devcie:', device)\n\nmodel_file = importlib.import_module(config.model.file)\nmodel_class = getattr(model_file, config.model.modelclass)\nnet = model_class(**config.model.kwargs)\nnet.load_state_dict(torch.load(PRETRAINED_PTH, map_location=device))\nnet = net.eval().to(device)",
"devcie: cpu\n"
]
],
[
[
"### Move image into tensor, normzlie to [0, 255], resize to 512x1024",
"_____no_output_____"
]
],
[
[
"x = torch.from_numpy(rgb).permute(2,0,1)[None].float() / 255.\nif x.shape[2:] != (512, 1024):\n x = torch.nn.functional.interpolate(x, self.hw, mode='area')\nx = x.to(device)",
"_____no_output_____"
]
],
[
[
"### Model feedforward",
"_____no_output_____"
]
],
[
[
"with torch.no_grad():\n ts = time.time()\n layout = net.infer(x)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n print(f'Eps time: {time.time() - ts:.2f} sec.')\n\ncor_id = layout['cor_id']\ny_bon_ = layout['y_bon_']\ny_cor_ = layout['y_cor_']",
"Eps time: 1.92 sec.\n"
]
],
[
[
"### Visualize result in 2d",
"_____no_output_____"
]
],
[
[
"from eval_layout import layout_2_depth",
"_____no_output_____"
],
[
"plt.figure(figsize=(12,6))\n\nplt.subplot(121)\nplt.imshow(np.concatenate([\n (y_cor_ * 255).reshape(1,-1,1).repeat(30, 0).repeat(3, 2).astype(np.uint8),\n rgb[30:]\n], 0))\nplt.plot(np.arange(y_bon_.shape[1]), y_bon_[0], 'r-')\nplt.plot(np.arange(y_bon_.shape[1]), y_bon_[1], 'r-')\nplt.scatter(cor_id[:, 0], cor_id[:, 1], marker='x', c='b')\nplt.axis('off')\nplt.title('y_bon_ (red) / y_cor_ (up-most bar) / cor_id (blue x)')\n\nplt.subplot(122)\nplt.imshow(layout_2_depth(cor_id, *rgb.shape[:2]), cmap='inferno_r')\nplt.axis('off')\nplt.title('rendered depth from the estimated layout (cor_id)')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Visualize result as 3d mesh",
"_____no_output_____"
]
],
[
[
"!pip install open3d\n!pip install plotly",
"_____no_output_____"
],
[
"import open3d as o3d\nimport plotly.graph_objects as go\nfrom scipy.signal import correlate2d\nfrom scipy.ndimage import shift\nfrom skimage.transform import resize\n\nfrom lib.misc.post_proc import np_coor2xy, np_coorx2u, np_coory2v",
"_____no_output_____"
],
[
"H, W = 256, 512\nignore_floor = False\nignore_ceiling = True\nignore_wall = False",
"_____no_output_____"
],
[
"# Convert corners to layout\ndepth, floor_mask, ceil_mask, wall_mask = [\n resize(v, [H, W], order=0, preserve_range=True).astype(v.dtype)\n for v in layout_2_depth(cor_id, *rgb.shape[:2], return_mask=True)]\ncoorx, coory = np.meshgrid(np.arange(W), np.arange(H))\nus = np_coorx2u(coorx, W)\nvs = np_coory2v(coory, H)\nzs = depth * np.sin(vs)\ncs = depth * np.cos(vs)\nxs = cs * np.sin(us)\nys = -cs * np.cos(us)\n\n# Aggregate mask\nmask = np.ones_like(floor_mask)\nif ignore_floor:\n mask &= ~floor_mask\nif ignore_ceiling:\n mask &= ~ceil_mask\nif ignore_wall:\n mask &= ~wall_mask\n\n# Prepare ply's points and faces\nxyzrgb = np.concatenate([\n xs[...,None], ys[...,None], zs[...,None],\n resize(rgb, [H, W])], -1)\nxyzrgb = np.concatenate([xyzrgb, xyzrgb[:,[0]]], 1)\nmask = np.concatenate([mask, mask[:,[0]]], 1)\nlo_tri_template = np.array([\n [0, 0, 0],\n [0, 1, 0],\n [0, 1, 1]])\nup_tri_template = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [0, 0, 1]])\nma_tri_template = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [0, 1, 0]])\nlo_mask = (correlate2d(mask, lo_tri_template, mode='same') == 3)\nup_mask = (correlate2d(mask, up_tri_template, mode='same') == 3)\nma_mask = (correlate2d(mask, ma_tri_template, mode='same') == 3) & (~lo_mask) & (~up_mask)\nref_mask = (\n lo_mask | (correlate2d(lo_mask, np.flip(lo_tri_template, (0,1)), mode='same') > 0) |\\\n up_mask | (correlate2d(up_mask, np.flip(up_tri_template, (0,1)), mode='same') > 0) |\\\n ma_mask | (correlate2d(ma_mask, np.flip(ma_tri_template, (0,1)), mode='same') > 0)\n)\npoints = xyzrgb[ref_mask]\n\nref_id = np.full(ref_mask.shape, -1, np.int32)\nref_id[ref_mask] = np.arange(ref_mask.sum())\nfaces_lo_tri = np.stack([\n ref_id[lo_mask],\n ref_id[shift(lo_mask, [1, 0], cval=False, order=0)],\n ref_id[shift(lo_mask, [1, 1], cval=False, order=0)],\n], 1)\nfaces_up_tri = np.stack([\n ref_id[up_mask],\n ref_id[shift(up_mask, [1, 1], cval=False, order=0)],\n ref_id[shift(up_mask, [0, 1], cval=False, order=0)],\n], 1)\nfaces_ma_tri = np.stack([\n ref_id[ma_mask],\n ref_id[shift(ma_mask, [1, 0], cval=False, order=0)],\n ref_id[shift(ma_mask, [0, 1], cval=False, order=0)],\n], 1)\nfaces = np.concatenate([faces_lo_tri, faces_up_tri, faces_ma_tri])",
"_____no_output_____"
],
[
"fig = go.Figure(\n data=[\n go.Mesh3d(\n x=points[:,0],\n y=points[:,1],\n z=points[:,2],\n i=faces[:,0],\n j=faces[:,1],\n k=faces[:,2],\n facecolor=points[:,3:][faces[:,0]])\n ],\n layout=dict(\n scene=dict(\n xaxis=dict(visible=False),\n yaxis=dict(visible=False),\n zaxis=dict(visible=False)\n )\n )\n)\nfig.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a83cff068edd45af83c1ddb10f0ef681331fe81
| 5,454 |
ipynb
|
Jupyter Notebook
|
Advanced_Natural_Language_Processing/FindingSimilarityBetweenText.ipynb
|
shubhamchouksey/NLP
|
ab81e45ebe1dc9a5d683a3adc0c5e545129d7175
|
[
"MIT"
] | 4 |
2020-04-01T11:51:09.000Z
|
2021-11-01T08:52:03.000Z
|
Advanced_Natural_Language_Processing/FindingSimilarityBetweenText.ipynb
|
shubhamchouksey/NLP
|
ab81e45ebe1dc9a5d683a3adc0c5e545129d7175
|
[
"MIT"
] | null | null | null |
Advanced_Natural_Language_Processing/FindingSimilarityBetweenText.ipynb
|
shubhamchouksey/NLP
|
ab81e45ebe1dc9a5d683a3adc0c5e545129d7175
|
[
"MIT"
] | null | null | null | 47.017241 | 917 | 0.597726 |
[
[
[
"# Finding Similarity Between Texts\n\nIn this recipee, we are going to discuss how to find the similarity between two documents or text, there are many similarity metrics like Euclidean, cosine, Jaccard etc. Applications of text similarity can be found in areas like spelling correction and data deduplication.\n\n## Here are few of the similarity metrics:\n**Cosine Similarity**: Calculates the cosine of the angles between 2 vectors. \n**Jaccard similarity**: The score is calculated using intersection or union of words. \n**Jaccard index**: (the number in both sets)/(the number in either sets)*100 \n**Levenshtein distance**: Minimal number of insertions, deletions, and replacement required for transforming string 'a' to string 'b' \n**Hamming distance**: Number of positions with the same symbol in both strings. But it can be defined only for strings with equal length",
"_____no_output_____"
]
],
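A quick, self-contained sketch of the set- and string-based measures listed above (pure Python, no extra libraries; the example strings are arbitrary):

```python
def jaccard_similarity(a, b):
    # Shared words divided by all distinct words
    set_a, set_b = set(a.lower().split()), set(b.lower().split())
    return len(set_a & set_b) / len(set_a | set_b)

def hamming_distance(a, b):
    # Number of differing positions; defined only for equal-length strings
    if len(a) != len(b):
        raise ValueError("Hamming distance needs strings of equal length")
    return sum(x != y for x, y in zip(a, b))

print(jaccard_similarity("I like NLP", "I am exploring NLP"))  # 0.4
print(hamming_distance("karolin", "kathrin"))                  # 3
```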
[
[
"documents = (\n'I like NLP',\n'I am exploring NLP',\n'I am a beginner in NLP',\n'I want to learn NLP',\n'I like advanced NLP')\n",
"_____no_output_____"
],
[
"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ntf_idf_vectorizer = TfidfVectorizer()\ntfidf_matrix = tf_idf_vectorizer.fit_transform(documents)\nprint(tfidf_matrix)\nprint(tf_idf_vectorizer.vocabulary_)\nvoc_sorted = sorted(tf_idf_vectorizer.vocabulary_, key = tf_idf_vectorizer.vocabulary_.__getitem__)\ntfidf = pd.DataFrame(tf_idf_vectorizer.idf_,index=tf_idf_vectorizer.vocabulary_,columns=['tfidf_weights'])\ntfidf\n\ncosine_similarity(tfidf_matrix[0:1],tfidf_matrix)",
" (0, 6)\t0.8610369959439764\n (0, 7)\t0.5085423203783267\n (1, 7)\t0.3477147117091919\n (1, 1)\t0.5887321837696324\n (1, 3)\t0.7297183669435993\n (2, 7)\t0.2808823162882302\n (2, 1)\t0.47557510189256375\n (2, 2)\t0.5894630806320427\n (2, 4)\t0.5894630806320427\n (3, 7)\t0.26525552965220073\n (3, 9)\t0.5566685141652766\n (3, 8)\t0.5566685141652766\n (3, 5)\t0.5566685141652766\n (4, 6)\t0.5887321837696324\n (4, 7)\t0.3477147117091919\n (4, 0)\t0.7297183669435993\n{'exploring': 3, 'in': 4, 'like': 6, 'beginner': 2, 'learn': 5, 'want': 9, 'to': 8, 'nlp': 7, 'am': 1, 'advanced': 0}\n"
]
],
[
[
"let us take, \nvec1: tfidf_matrix[0] and vec2 = tfidf_matrix[0] \n\n$$\\vec{vec1} = 0.861017*u6 + 0.50854*u7$$\n$$\\vec{vec2} = 0.347714*u7 + 0.58873*u1 + 0.73*u3$$\n$$cos(\\vec{vec1},\\vec{vec2}) = \\vec{vec1}\\cdot\\vec{vec2}$$\n$$ = 0.861017*0 + 0.50854*0.347714 + 0.58873*0 + 0.73*0$$\n$$ = 0.17682765$$\n",
"_____no_output_____"
],
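To double-check the arithmetic above in code, a small sketch (it assumes the `tfidf_matrix` computed earlier in this notebook):

```python
import numpy as np

v1 = tfidf_matrix[0].toarray().ravel()
v2 = tfidf_matrix[1].toarray().ravel()
# TfidfVectorizer rows are L2-normalized, so the plain dot product
# already equals the cosine similarity (~0.1768).
print(np.dot(v1, v2))
```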
[
"### Phonetic Matching\n\nThe next vesion of similarity checking is phonetic matching, which roughly matches the two words or sentences and also creates an alphanumeric string as encoded vesion of the text or word. It is very useful for searching large text corpora, correcting spellins errors, and matcing relavant names. **Soundex** and **Metaphone** are two main phonetic algorithms used for this purpose. The simplest way to do this is by using Fuzzy library. ",
"_____no_output_____"
]
],
[
[
"!pip install Fuzzy\nimport Fuzzy",
"Requirement already satisfied: Fuzzy in /home/nbuser/anaconda3_501/lib/python3.6/site-packages (1.2.2)\r\n"
],
[
"import fuzzy\nsoundex = fuzzy.Soundex(4)\nsoundex('natural')",
"_____no_output_____"
]
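To see the "rough matching" in action, words that sound alike map to the same Soundex code — a short sketch (the word pairs are just illustrative; the exact codes depend on the fuzzy library's implementation):

```python
import fuzzy

soundex = fuzzy.Soundex(4)
# Different spellings, same pronunciation -> identical codes
print(soundex('color'), soundex('colour'))
print(soundex('Robert'), soundex('Rupert'))
```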
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
4a83d8b3063206f6e00da3562b62c5b6fe03471c
| 3,016 |
ipynb
|
Jupyter Notebook
|
Tutorials/SystemCommands.ipynb
|
mbaas2/APLcourse
|
3acdbef4a1f7c06be049e8677b71ce8536815a72
|
[
"MIT"
] | 1 |
2020-11-08T18:16:22.000Z
|
2020-11-08T18:16:22.000Z
|
Tutorials/SystemCommands.ipynb
|
mbaas2/APLcourse
|
3acdbef4a1f7c06be049e8677b71ce8536815a72
|
[
"MIT"
] | 1 |
2019-10-29T16:57:47.000Z
|
2019-10-29T16:57:47.000Z
|
Tutorials/SystemCommands.ipynb
|
mbaas2/APLcourse
|
3acdbef4a1f7c06be049e8677b71ce8536815a72
|
[
"MIT"
] | null | null | null | 19.584416 | 206 | 0.48508 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a83de63294092626886ec8d239abf5daca218e0
| 41,180 |
ipynb
|
Jupyter Notebook
|
PBH-DM/Example_FI.ipynb
|
earlyuniverse/ulysses
|
9b8bade6d2d6badf2f17bac37306ea7ebceeeb4a
|
[
"MIT"
] | 9 |
2020-07-21T17:08:18.000Z
|
2022-02-05T18:52:56.000Z
|
PBH-DM/Example_FI.ipynb
|
earlyuniverse/ulysses
|
9b8bade6d2d6badf2f17bac37306ea7ebceeeb4a
|
[
"MIT"
] | 2 |
2020-07-23T03:24:57.000Z
|
2021-05-17T23:17:12.000Z
|
PBH-DM/Example_FI.ipynb
|
earlyuniverse/ulysses
|
9b8bade6d2d6badf2f17bac37306ea7ebceeeb4a
|
[
"MIT"
] | 1 |
2021-09-16T16:20:58.000Z
|
2021-09-16T16:20:58.000Z
| 294.142857 | 36,564 | 0.907819 |
[
[
[
"###################################################################################################\n# #\n# Primordial Black Hole Evaporation + DM Production #\n# Interplay with Freeze-In #\n# #\n# Authors: Andrew Cheek, Lucien Heurtier, Yuber F. Perez-Gonzalez, Jessica Turner #\n# Based on: arXiv:2107.xxxxx #\n# #\n###################################################################################################\n\n\nimport BHProp as bh\nimport ulysses\nimport math\nfrom odeintw import odeintw\nimport pandas as pd\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\nfrom scipy.integrate import quad, ode, solve_ivp, odeint\nfrom scipy.optimize import root\nfrom scipy.special import zeta, kn\nfrom scipy.interpolate import interp1d, RectBivariateSpline\nfrom numpy import ma\nfrom matplotlib import ticker, cm\nfrom matplotlib import cm\n\nfrom collections import OrderedDict\n\nimport BHProp as bh #Schwarzschild and Kerr BHs library\n\nfrom Omega_h2_onlyDM import FrInPBH as FrIn1 # Only DM contribution\n\n#----- Package for LateX plotting -----\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text', usetex=True)\nrc('text.latex', preamble=r'\\usepackage{amsmath,amssymb,bm}')\n#-----\n\n\n# Import solving functions\nfrom Omega_h2_FI import FrInPBH as FrInFull # Freeze-In + PBH\n\n\n\n# ----------- Input Parameters --------------\nMi = 7.47 # Log10@ Initial BH mass in g\nai = 0. # Initial a* value, a* = 0. -> Schwarzschild, a* > 0. -> Kerr.\nbi = -10. # Log10@beta^\\prime\nmDM = -3. # Log10 @ DM Mass in GeV\nmX = 1. # Log10 @ Mediaton Mass in GeV\nmf = -10. # Log10 @ SM mass in GeV\nsv = -43. # Log10 @ averaged cross section <sv>\nBR = 0.5 # Branching ratio to DM\ng_DM = 2 # DM degrees of freedom\nmodel = 1 # Type of model --> fixed here to be one\n\n\nZ=FrInFull(Mi, ai, bi, mDM, mX, mf, sv, BR, g_DM, model)\n\nrelic=Z.Omegah2()\n\nrelic_analytic=Z.Omegah2_analytics_FI()\n\nprint('relic numerics : ',relic)\nprint('relic analytics : ',relic_analytic[0])\nif(relic_analytic[1]>=1):\n print('Thermalization of X -> True')\nelse:\n print('Thermalization of X -> False')",
"fraction evaporated into SM = 97.08434558030845 %\ng_V = 7.486648777789885e-09\ng_D = 7.486648777789888e-09\nOh2 = 0.12741875299222602\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a83e374b8f02d1f6eb5a06367f112a6814d6a8b
| 332,159 |
ipynb
|
Jupyter Notebook
|
manual/GPyOpt_cost_functions.ipynb
|
epistra/GPyOpt
|
cd4cbaebd20b73e05246beea20d76c4c4a7361a1
|
[
"BSD-3-Clause"
] | null | null | null |
manual/GPyOpt_cost_functions.ipynb
|
epistra/GPyOpt
|
cd4cbaebd20b73e05246beea20d76c4c4a7361a1
|
[
"BSD-3-Clause"
] | null | null | null |
manual/GPyOpt_cost_functions.ipynb
|
epistra/GPyOpt
|
cd4cbaebd20b73e05246beea20d76c4c4a7361a1
|
[
"BSD-3-Clause"
] | null | null | null | 1,207.850909 | 96,408 | 0.95662 |
[
[
[
"# GPyOpt: dealing with cost fuctions\n\n### Written by Javier Gonzalez, University of Sheffield.\n\n## Reference Manual index\n\n*Last updated Friday, 11 March 2016.*",
"_____no_output_____"
],
[
"GPyOpt allows to consider function evaluation costs in the optimization.",
"_____no_output_____"
]
],
[
[
"%pylab inline \nimport GPyOpt",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"# --- Objective function\nobjective_true = GPyOpt.objective_examples.experiments2d.branin() # true function\nobjective_noisy = GPyOpt.objective_examples.experiments2d.branin(sd = 0.1) # noisy version\nbounds = objective_noisy.bounds \nobjective_true.plot()",
"_____no_output_____"
],
[
"domain = [{'name': 'var_1', 'type': 'continuous', 'domain': bounds[0]}, ## use default bounds\n {'name': 'var_2', 'type': 'continuous', 'domain': bounds[1]}]",
"_____no_output_____"
],
[
"def mycost(x):\n cost_f = np.atleast_2d(.1*x[:,0]**2 +.1*x[:,1]**2).T\n cost_df = np.array([0.2*x[:,0],0.2*x[:,1]]).T\n return cost_f, cost_df",
"_____no_output_____"
],
[
"# plot the cost fucntion\ngrid = 400\nbounds = objective_true.bounds\nX1 = np.linspace(bounds[0][0], bounds[0][1], grid)\nX2 = np.linspace(bounds[1][0], bounds[1][1], grid)\nx1, x2 = np.meshgrid(X1, X2)\nX = np.hstack((x1.reshape(grid*grid,1),x2.reshape(grid*grid,1)))\n\ncost_X, _ = mycost(X)",
"_____no_output_____"
],
[
"# Feasible region\nplt.contourf(X1, X2, cost_X.reshape(grid,grid),100, alpha=1,origin ='lower')\nplt.title('Cost function')\nplt.colorbar()",
"_____no_output_____"
],
[
"GPyOpt.methods.BayesianOptimization?",
"_____no_output_____"
],
[
"from numpy.random import seed\nseed(123)\nBO = GPyOpt.methods.BayesianOptimization(f=objective_noisy.f, \n domain = domain, \n initial_design_numdata = 5,\n acquisition_type = 'EI', \n normalize_Y = True,\n exact_feval = False,\n acquisition_jitter = 0.05) ",
"_____no_output_____"
],
[
"seed(123)\nBO_cost = GPyOpt.methods.BayesianOptimization(f=objective_noisy.f, \n cost_withGradients = mycost,\n initial_design_numdata =5,\n domain = domain, \n acquisition_type = 'EI', \n normalize_Y = True,\n exact_feval = False,\n acquisition_jitter = 0.05) ",
"_____no_output_____"
],
[
"BO.plot_acquisition()",
"_____no_output_____"
],
[
"BO_cost.run_optimization(15)\nBO_cost.plot_acquisition()",
"_____no_output_____"
],
[
"BO.run_optimization(15)\nBO.plot_acquisition()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a83ea75199ec7db6a06f9595447ac28618eec06
| 97,594 |
ipynb
|
Jupyter Notebook
|
stratification/stratification.ipynb
|
SanyHe/geo_ml
|
e2fec125782d9189ee0b83ce96f18cefb49d5e6d
|
[
"MIT"
] | 1 |
2020-10-07T11:33:42.000Z
|
2020-10-07T11:33:42.000Z
|
stratification/stratification.ipynb
|
SanyHe/Machine_Learning_on_Geological_Project
|
e2fec125782d9189ee0b83ce96f18cefb49d5e6d
|
[
"MIT"
] | null | null | null |
stratification/stratification.ipynb
|
SanyHe/Machine_Learning_on_Geological_Project
|
e2fec125782d9189ee0b83ce96f18cefb49d5e6d
|
[
"MIT"
] | null | null | null | 35.579293 | 7,008 | 0.375853 |
[
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"test2_result.csv\")",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df2 = pd.read_excel(\"Test_2.xlsx\")\n# 只含特征值的完整数据集\ndata = df2.drop(\"TRUE VALUE\", axis=1)\n# 只含真实分类信息的完整数据集\nlabels = df2[\"TRUE VALUE\"]",
"_____no_output_____"
],
[
"# data2是去掉真实分类信息的数据集(含有聚类后的结果)\ndata2 = df.drop(\"TRUE VALUE\", axis=1)\ndata2",
"_____no_output_____"
],
[
"# 查看使用kmeans聚类后的分类标签值,两类\ndata2['km_clustering_label'].hist()",
"_____no_output_____"
],
[
"from sklearn.model_selection import StratifiedShuffleSplit\n\n# 基于kmeans聚类结果的分层抽样\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(data2, data2[\"km_clustering_label\"]):\n strat_train_set = data2.loc[train_index]\n strat_test_set = data2.loc[test_index]",
"_____no_output_____"
],
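As a side note, the same stratified 80/20 split can be obtained in one call, since `train_test_split` accepts a `stratify` argument — a minimal sketch using the variables defined above:

```python
from sklearn.model_selection import train_test_split

# Equivalent stratified split in a single call
strat_train_set, strat_test_set = train_test_split(
    data2, test_size=0.2, stratify=data2["km_clustering_label"], random_state=42)
```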
[
"def clustering_result_propotions(data):\n \"\"\"\n 分层抽样后,训练集或测试集里不同分类标签的数量比\n :param data: 训练集或测试集,纯随机取样或分层取样\n \"\"\"\n return data[\"km_clustering_label\"].value_counts() / len(data)",
"_____no_output_____"
],
[
"# 经过分层抽样的测试集中,不同分类标签的数量比\nclustering_result_propotions(strat_test_set)",
"_____no_output_____"
],
[
"# 经过分层抽样的训练集中,不同分类标签的数量比\nclustering_result_propotions(strat_train_set)",
"_____no_output_____"
],
[
"# 完整的数据集中,不同分类标签的数量比\nclustering_result_propotions(data2)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\n# 纯随机取样\nrandom_train_set, random_test_set = train_test_split(data2, test_size=0.2, random_state=42)\n\n# 完整的数据集、分层抽样后的测试集、纯随机抽样后的测试集中,不同分类标签的数量比\ncompare_props = pd.DataFrame({\n \"Overall\": clustering_result_propotions(data2),\n \"Stratified\": clustering_result_propotions(strat_test_set),\n \"Random\": clustering_result_propotions(random_test_set),\n}).sort_index()\n\n# 计算分层抽样和纯随机抽样后的测试集中不同分类标签的数量比,和完整的数据集中不同分类标签的数量比的误差\ncompare_props[\"Rand. %error\"] = 100 * compare_props[\"Random\"] / compare_props[\"Overall\"] - 100\ncompare_props[\"Start. %error\"] = 100 * compare_props[\"Stratified\"] / compare_props[\"Overall\"] - 100\n\ncompare_props",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score\n\ndef get_classification_marks(model, data, labels, train_index, test_index):\n \"\"\"\n 获取分类模型(二元或多元分类器)的评分:F1值\n :param data: 只含有特征值的数据集\n :param labels: 只含有标签值的数据集\n :param train_index: 分层抽样获取的训练集中数据的索引\n :param test_index: 分层抽样获取的测试集中数据的索引\n :return: F1评分值\n \"\"\"\n m = model(random_state=42)\n m.fit(data.loc[train_index], labels.loc[train_index])\n test_labels_predict = m.predict(data.loc[test_index])\n score = f1_score(labels.loc[test_index], test_labels_predict, average=\"weighted\")\n return score",
"_____no_output_____"
],
[
"# 用分层抽样后的训练集训练分类模型后的评分值\nstart_marks = get_classification_marks(LogisticRegression, data, labels, strat_train_set.index, strat_test_set.index)\nstart_marks",
"/usr/local/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/usr/local/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n"
],
[
"# 用纯随机抽样后的训练集训练分类模型后的评分值\nrandom_marks = get_classification_marks(LogisticRegression, data, labels, random_train_set.index, random_test_set.index)\nrandom_marks",
"/usr/local/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\n/usr/local/lib/python3.7/site-packages/sklearn/linear_model/logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n"
],
[
"from sklearn.metrics import f1_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.base import clone, BaseEstimator, TransformerMixin\n\nclass stratified_cross_val_score(BaseEstimator, TransformerMixin):\n \"\"\"实现基于分层抽样的k折交叉验证\"\"\"\n \n def __init__(self, model, data, labels, random_state=0, cv=5):\n \"\"\"\n :model: 训练的模型(回归或分类)\n :data: 只含特征值的完整数据集\n :labels: 只含标签值的完整数据集\n :random_state: 模型的随机种子值\n :cv: 交叉验证的次数\n \"\"\"\n self.model = model\n self.data = data\n self.labels = labels\n self.random_state = random_state\n self.cv = cv\n self.score = [] # 储存每折测试集的模型评分\n self.i = 0 \n \n def fit(self, X, y):\n \"\"\"\n :param X: 含有特征值和聚类结果的完整数据集\n :param y: 含有聚类结果的完整数据集\n \"\"\"\n skfolds = StratifiedKFold(n_splits=self.cv, random_state=self.random_state)\n\n for train_index, test_index in skfolds.split(X, y):\n # 复制要训练的模型(分类或回归)\n clone_model = clone(self.model)\n strat_X_train_folds = self.data.loc[train_index]\n strat_y_train_folds = self.labels.loc[train_index]\n strat_X_test_fold = self.data.loc[test_index]\n strat_y_test_fold = self.labels.loc[test_index]\n \n # 训练模型\n clone_model.fit(strat_X_train_folds, strat_y_train_folds)\n # 预测值(这里是分类模型的分类结果)\n test_labels_pred = clone_model.predict(strat_X_test_fold)\n \n # 这里使用的是分类模型用的F1值,如果是回归模型可以换成相应的模型\n score_fold = f1_score(labels.loc[test_index], test_labels_pred, average=\"weighted\")\n \n # 避免重复向列表里重复添加值\n if self.i < self.cv:\n self.score.append(score_fold)\n else:\n None\n \n self.i += 1\n \n def transform(self, X, y=None):\n return self\n \n def mean(self):\n \"\"\"返回交叉验证评分的平均值\"\"\"\n return np.array(self.score).mean()\n \n def std(self):\n \"\"\"返回交叉验证评分的标准差\"\"\"\n return np.array(self.score).std()",
"_____no_output_____"
],
[
"from sklearn.linear_model import SGDClassifier\n\n# 分类模型\nclf_model = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)\n# 基于分层抽样的交叉验证,data是只含特征值的完整数据集,labels是只含标签值的完整数据集\nclf_cross_val = stratified_cross_val_score(clf_model, data, labels, cv=5, random_state=42)\n# data2是含有特征值和聚类结果的完整数据集\nclf_cross_val.fit(data2, data2[\"km_clustering_label\"])",
"/usr/local/lib/python3.7/site-packages/sklearn/metrics/classification.py:1439: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no true samples.\n 'recall', 'true', average, warn_for)\n/usr/local/lib/python3.7/site-packages/sklearn/metrics/classification.py:1437: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n/usr/local/lib/python3.7/site-packages/sklearn/metrics/classification.py:1439: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no true samples.\n 'recall', 'true', average, warn_for)\n/usr/local/lib/python3.7/site-packages/sklearn/metrics/classification.py:1437: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n"
],
[
"# 每折交叉验证的评分\nclf_cross_val.score",
"_____no_output_____"
],
[
"# 交叉验证评分的平均值\nclf_cross_val.mean()",
"_____no_output_____"
],
[
"# 交叉验证评分的标准差\nclf_cross_val.std()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a83fa1c84e13a61ec094aa6be201803af7de850
| 8,085 |
ipynb
|
Jupyter Notebook
|
Datasets/Water/usgs_watersheds.ipynb
|
jdgomezmo/gee
|
7016c47ee902dbf60b1aeb6319424c61c1107345
|
[
"MIT"
] | 1 |
2020-11-16T22:07:42.000Z
|
2020-11-16T22:07:42.000Z
|
Datasets/Water/usgs_watersheds.ipynb
|
tingli3/earthengine-py-notebooks
|
7016c47ee902dbf60b1aeb6319424c61c1107345
|
[
"MIT"
] | null | null | null |
Datasets/Water/usgs_watersheds.ipynb
|
tingli3/earthengine-py-notebooks
|
7016c47ee902dbf60b1aeb6319424c61c1107345
|
[
"MIT"
] | null | null | null | 41.461538 | 1,031 | 0.560668 |
[
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Water/usgs_watersheds.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Water/usgs_watersheds.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Water/usgs_watersheds.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).",
"_____no_output_____"
]
],
[
[
"# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as geemap\nexcept:\n import geemap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThe default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. ",
"_____no_output_____"
]
],
[
[
"Map = geemap.Map(center=[40,-100], zoom=4)\nMap",
"_____no_output_____"
]
],
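As a small illustration of the `Map.add_basemap()` call mentioned above — the basemap key used here is an assumption, so check `geemap.basemaps` for the keys available in your geemap version:

```python
# Add an alternative basemap layer to the map created above
Map.add_basemap('HYBRID')
Map
```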
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
]
],
[
[
"# Add Earth Engine dataset\ndataset = ee.FeatureCollection('USGS/WBD/2017/HUC02')\nstyleParams = {\n 'fillColor': '000070',\n 'color': '0000be',\n 'width': 3.0,\n}\nregions = dataset.style(**styleParams)\nMap.setCenter(-96.8, 40.43, 4)\nMap.addLayer(regions, {}, 'USGS/WBD/2017/HUC02')\n\n\ndataset = ee.FeatureCollection('USGS/WBD/2017/HUC04')\nstyleParams = {\n 'fillColor': '5885E3',\n 'color': '0000be',\n 'width': 3.0,\n}\nsubregions = dataset.style(**styleParams)\nMap.setCenter(-110.904, 36.677, 7)\nMap.addLayer(subregions, {}, 'USGS/WBD/2017/HUC04')\n\n\ndataset = ee.FeatureCollection('USGS/WBD/2017/HUC06')\nstyleParams = {\n 'fillColor': '588593',\n 'color': '587193',\n 'width': 3.0,\n}\nbasins = dataset.style(**styleParams)\nMap.setCenter(-96.8, 40.43, 7)\nMap.addLayer(basins, {}, 'USGS/WBD/2017/HUC06')\n\n\ndataset = ee.FeatureCollection('USGS/WBD/2017/HUC08')\nstyleParams = {\n 'fillColor': '2E8593',\n 'color': '587193',\n 'width': 2.0,\n}\nsubbasins = dataset.style(**styleParams)\nMap.setCenter(-96.8, 40.43, 8)\nMap.addLayer(subbasins, {}, 'USGS/WBD/2017/HUC08')\n\n\ndataset = ee.FeatureCollection('USGS/WBD/2017/HUC10')\nstyleParams = {\n 'fillColor': '2E85BB',\n 'color': '2E5D7E',\n 'width': 1.0,\n}\nwatersheds = dataset.style(**styleParams)\nMap.setCenter(-96.8, 40.43, 9)\nMap.addLayer(watersheds, {}, 'USGS/WBD/2017/HUC10')\n\n\ndataset = ee.FeatureCollection('USGS/WBD/2017/HUC12')\nstyleParams = {\n 'fillColor': '2E85BB',\n 'color': '2E5D7E',\n 'width': 0.1,\n}\nsubwatersheds = dataset.style(**styleParams)\nMap.setCenter(-96.8, 40.43, 10)\nMap.addLayer(subwatersheds, {}, 'USGS/WBD/2017/HUC12')\n",
"_____no_output_____"
]
],
[
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a8421673fb5e848d64700c24496ae920b184ab5
| 169,932 |
ipynb
|
Jupyter Notebook
|
uruk/start.ipynb
|
annotation/tutorials
|
2bfb1a044f386e6b8a2118a8ff6142ec4fb0e884
|
[
"MIT"
] | 2 |
2019-07-17T18:51:26.000Z
|
2019-07-24T19:45:23.000Z
|
uruk/start.ipynb
|
annotation/tutorials
|
2bfb1a044f386e6b8a2118a8ff6142ec4fb0e884
|
[
"MIT"
] | 3 |
2019-01-16T10:56:50.000Z
|
2020-11-16T16:30:48.000Z
|
uruk/start.ipynb
|
annotation/tutorials
|
2bfb1a044f386e6b8a2118a8ff6142ec4fb0e884
|
[
"MIT"
] | 2 |
2020-12-17T15:41:33.000Z
|
2021-11-03T18:23:07.000Z
| 51.153522 | 20,171 | 0.521832 |
[
[
[
"<img align=\"right\" src=\"images/ninologo.png\" width=\"150\"/>\n<img align=\"right\" src=\"images/tf-small.png\" width=\"125\"/>\n<img align=\"right\" src=\"images/dans.png\" width=\"150\"/>\n\n# Start\n\nThis notebook gets you started with using\n[Text-Fabric](https://github.com/Nino-cunei/uruk/blob/master/docs/textfabric.md) for coding in cuneiform tablet transcriptions.\n\nFamiliarity with the underlying\n[data model](https://annotation.github.io/text-fabric/tf/about/datamodel.html)\nis recommended.\n\nFor provenance, see the documentation:\n[about](https://github.com/Nino-cunei/uruk/blob/master/docs/about.md).",
"_____no_output_____"
],
[
"## Overview\n\n* we tell you how to get Text-Fabric on your system;\n* we tell you how to get the Uruk IV-III corpus on your system.",
"_____no_output_____"
],
[
"## Installing Text-Fabric\n\nSee [here](https://annotation.github.io/text-fabric/tf/about/install.html)",
"_____no_output_____"
],
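In practice the installation is a single pip command — shown here as a notebook cell, on the assumption that the PyPI package name is `text-fabric`; the linked instructions remain the authoritative reference:

```python
# Install Text-Fabric (in a terminal, drop the leading '!')
!pip install text-fabric
```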
[
"### Get the data\n\nText-Fabric will get the data for you and store it on your system.\n\nIf you have cloned the github repo with the data,\n[Nino-cunei/uruk](https://github.com/Nino-cunei/uruk),\nyour data is already in place, and nothing will be downloaded.\n\nOtherwise, on first run, Text-Fabric will load the data and store it in the folder\n`text-fabric-data` in your home directory.\nThis only happens if the data is not already there.\n\nNot only transcription data will be downloaded, also linearts and photos.\nThese images are contained in a zipfile of 550 MB,\nso take care that you have a good internet connection when it comes to downloading the images.",
"_____no_output_____"
],
[
"## Start the engines\n\nNavigate to this directory in a terminal and say\n\n```\njupyter notebook\n```\n\n(just literally).\n\nYour browser opens with a directory view, and you'll see `start.ipynb`.\nClick on it. A new browser tab opens, and a Python engine has been allocated to this\nnotebook.\n\nNow we are ready to compute .\nThe next cell is a code cell that can be executed if you have downloaded this\nnotebook and have issued the `jupyter notebook` command.\n\nYou execute a code cell by standing in it and press `Shift Enter`.",
"_____no_output_____"
],
[
"### The code",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sys, os\nfrom tf.app import use",
"_____no_output_____"
]
],
[
[
"View the next cell as an *incantation*.\nYou just have to say it to get things underway.",
"_____no_output_____"
],
[
"For the very last version, use `hot`.\n\nFor the latest release, use `latest`.\n\nIf you have cloned the repos (TF app and data), use `clone`.\n\nIf you do not want/need to upgrade, leave out the checkout specifiers.",
"_____no_output_____"
]
],
[
[
"A = use(\"uruk:clone\", checkout=\"clone\", hoist=globals())\n# A = use('uruk:hot', checkout=\"hot\", hoist=globals())\n# A = use('uruk:latest', checkout=\"latest\", hoist=globals())\n# A = use('uruk', hoist=globals())",
"_____no_output_____"
]
],
[
[
"### The output\n\nThe output shows some statistics about the images found in the Uruk data.\n\nThen there are links to the documentation.\n\n**Tip:** open them, and have a quick look.\n\nEvery notebook that you set up with `Cunei` will have such links.\n\n**GitHub and NBViewer**\n\nIf you have made your own notebook, and used this incantation,\nand pushed the notebook to GitHub, links to the online version\nof *your* notebook on GitHub and NBViewer will be generated and displayed.\n\nBy the way, GitHub shows notebooks nicely.\nSometimes NBViewer does it better, although it fetches exactly the same notebook from GitHub.\n\nNBViewer is handy to navigate all the notebooks of a particular organization.\nTry the [Nino-cunei starting point](http://nbviewer.jupyter.org/github/Nino-cunei/).\n\nThese links you can share with colleagues.",
"_____no_output_____"
],
[
"## Test\n\nWe perform a quick test to see that everything works.\n\n### Count the signs\n\nWe count how many signs there are in the corpus.\nIn a next notebook we'll explain code like this.",
"_____no_output_____"
]
],
[
[
"len(F.otype.s(\"sign\"))",
"_____no_output_____"
]
],
[
[
"### Show photos and lineart\n\nWe show the photo and lineart of a tablet, to whet your appetite.",
"_____no_output_____"
]
],
[
[
"example = T.nodeFromSection((\"P005381\",))",
"_____no_output_____"
],
[
"A.photo(example)",
"_____no_output_____"
]
],
[
[
"Note that you can click on the photo to see a better version on CDLI.\n\nHere comes the lineart:",
"_____no_output_____"
]
],
[
[
"A.lineart(example)",
"_____no_output_____"
]
],
[
[
"A pretty representation of the transcription with embedded lineart for quads and signs:",
"_____no_output_____"
]
],
[
[
"A.pretty(example, withNodes=True)",
"_____no_output_____"
]
],
[
[
"We can suppress the lineart:",
"_____no_output_____"
]
],
[
[
"A.pretty(example, showGraphics=False)",
"_____no_output_____"
]
],
[
[
"The transliteration:",
"_____no_output_____"
]
],
[
[
"A.getSource(example)",
"_____no_output_____"
]
],
[
[
"Now the lines ans cases of this tablet in a table:",
"_____no_output_____"
]
],
[
[
"table = []\nfor sub in L.d(example):\n if F.otype.v(sub) in {\"line\", \"case\"}:\n table.append((sub,))",
"_____no_output_____"
],
[
"A.table(table, showGraphics=False)",
"_____no_output_____"
]
],
[
[
"We can include the lineart in plain displays:",
"_____no_output_____"
]
],
[
[
"A.table(table, showGraphics=True)",
"_____no_output_____"
]
],
[
[
"This is just the beginning.\n\nIn the next chapters we show you how to\n* fine-tune tablet displays,\n* step and jump around in the corpus,\n* search for patterns,\n* drill down to quads and signs,\n* and study frequency distributions of signs in subcases.",
"_____no_output_____"
],
[
"# Next\n\n[imagery](imagery.ipynb)\n\n*Get the big picture ...*\n\nAll chapters:\n**start**\n[imagery](imagery.ipynb)\n[steps](steps.ipynb)\n[search](search.ipynb)\n[calc](calc.ipynb)\n[signs](signs.ipynb)\n[quads](quads.ipynb)\n[jumps](jumps.ipynb)\n[cases](cases.ipynb)\n\n---\n\nCC-BY Dirk Roorda",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a842614fd7f2d3319a4644319e44afee76eae26
| 309,517 |
ipynb
|
Jupyter Notebook
|
project-bikesharing/Predicting_bike_sharing_data.ipynb
|
GabbySuwichaya/deep-learning-v2-pytorch
|
4d0d171a5ea1c2821d1b77737c6fd42232b481f0
|
[
"MIT"
] | null | null | null |
project-bikesharing/Predicting_bike_sharing_data.ipynb
|
GabbySuwichaya/deep-learning-v2-pytorch
|
4d0d171a5ea1c2821d1b77737c6fd42232b481f0
|
[
"MIT"
] | null | null | null |
project-bikesharing/Predicting_bike_sharing_data.ipynb
|
GabbySuwichaya/deep-learning-v2-pytorch
|
4d0d171a5ea1c2821d1b77737c6fd42232b481f0
|
[
"MIT"
] | null | null | null | 333.53125 | 158,708 | 0.90219 |
[
[
[
"# Your first neural network\n\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.\n\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Load and prepare the data\n\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!",
"_____no_output_____"
]
],
[
[
"data_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)",
"_____no_output_____"
],
[
"rides.head()",
"_____no_output_____"
]
],
[
[
"## Checking out the data\n\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.\n\nBelow is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.",
"_____no_output_____"
]
],
[
[
"rides[:24*10].plot(x='dteday', y='cnt')",
"_____no_output_____"
]
],
[
[
"### Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.",
"_____no_output_____"
]
],
[
[
"dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()",
"_____no_output_____"
]
],
[
[
"### Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\n\nThe scaling factors are saved so we can go backwards when we use the network for predictions.",
"_____no_output_____"
]
],
[
[
"quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std",
"_____no_output_____"
]
],
[
[
"### Splitting the data into training, testing, and validation sets\n\nWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.",
"_____no_output_____"
]
],
[
[
"# Save data for approximately the last 21 days \ntest_data = data[-21*24:]\n\n# Now remove the test data from the data set \ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]",
"_____no_output_____"
]
],
[
[
"We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).",
"_____no_output_____"
]
],
[
[
"# Hold out the last 60 days or so of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]",
"_____no_output_____"
]
],
[
[
"## Time to build the network\n\nBelow you'll build your network. We've built out the structure. You'll implement both the forward pass and backwards pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\n\n<img src=\"assets/neural_network.png\" width=300px>\n\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.\n\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.\n\n> **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.\n2. Implement the forward pass in the `train` method.\n3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.\n4. Implement the forward pass in the `run` method.\n ",
"_____no_output_____"
]
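,
[
"#############\n# my_answers.py is not shown in this notebook. Below is a minimal, assumed sketch of how\n# the pieces described above (sigmoid hidden layer, identity output, forward pass and\n# backpropagation) could fit together. It is not the actual submission and is not\n# guaranteed to reproduce the unit-test values further down.\n#############\nimport numpy as np\n\nclass NeuralNetworkSketch(object):\n    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n        self.input_nodes = input_nodes\n        self.hidden_nodes = hidden_nodes\n        self.output_nodes = output_nodes\n        self.lr = learning_rate\n        # small random initial weights\n        self.weights_input_to_hidden = np.random.normal(\n            0.0, self.input_nodes ** -0.5, (self.input_nodes, self.hidden_nodes))\n        self.weights_hidden_to_output = np.random.normal(\n            0.0, self.hidden_nodes ** -0.5, (self.hidden_nodes, self.output_nodes))\n        # task 1: sigmoid activation for the hidden layer\n        self.activation_function = lambda x: 1.0 / (1.0 + np.exp(-x))\n\n    def train(self, features, targets):\n        n_records = features.shape[0]\n        delta_w_i_h = np.zeros(self.weights_input_to_hidden.shape)\n        delta_w_h_o = np.zeros(self.weights_hidden_to_output.shape)\n        for X, y in zip(features, targets):\n            # forward pass (tasks 2 and 4)\n            hidden_outputs = self.activation_function(np.dot(X, self.weights_input_to_hidden))\n            final_outputs = np.dot(hidden_outputs, self.weights_hidden_to_output)  # f(x) = x\n            # backpropagation (task 3); the derivative of the output activation is 1\n            error = y - final_outputs\n            hidden_error = np.dot(self.weights_hidden_to_output, error)\n            hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)\n            delta_w_i_h += hidden_error_term * X[:, None]\n            delta_w_h_o += error * hidden_outputs[:, None]\n        # apply the updates averaged over the batch\n        self.weights_input_to_hidden += self.lr * delta_w_i_h / n_records\n        self.weights_hidden_to_output += self.lr * delta_w_h_o / n_records\n\n    def run(self, features):\n        hidden_outputs = self.activation_function(np.dot(features, self.weights_input_to_hidden))\n        return np.dot(hidden_outputs, self.weights_hidden_to_output)",
"_____no_output_____"
]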
],
[
[
"#############\n# In the my_answers.py file, fill out the TODO sections as specified\n#############\n\nfrom my_answers import NeuralNetwork",
"_____no_output_____"
],
[
"def MSE(y, Y):\n return np.mean((y-Y)**2)",
"_____no_output_____"
]
],
[
[
"## Unit tests\n\nRun these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project.",
"_____no_output_____"
]
],
[
[
"import unittest\n\ninputs = np.array([[0.5, -0.2, 0.1]])\ntargets = np.array([[0.4]])\ntest_w_i_h = np.array([[0.1, -0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\ntest_w_h_o = np.array([[0.3],\n [-0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328], \n [-0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, -0.20185996], \n [0.39775194, 0.50074398], \n [-0.29887597, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n\n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)",
".....\n----------------------------------------------------------------------\nRan 5 tests in 0.015s\n\nOK\n"
]
],
[
[
"## Training the network\n\nHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\n\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.\n\n### Choose the number of iterations\nThis is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, this process can have sharply diminishing returns and can waste computational resources if you use too many iterations. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. The ideal number of iterations would be a level that stops shortly after the validation loss is no longer decreasing.\n\n### Choose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. Normally a good choice to start at is 0.1; however, if you effectively divide the learning rate by n_records, try starting out with a learning rate of 1. In either case, if the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\n\n### Choose the number of hidden nodes\nIn a model where all the weights are optimized, the more hidden nodes you have, the more accurate the predictions of the model will be. (A fully optimized model could have weights of zero, after all.) However, the more hidden nodes you have, the harder it will be to optimize the weights of the model, and the more likely it will be that suboptimal weights will lead to overfitting. With overfitting, the model will memorize the training data instead of learning the true pattern, and won't generalize well to unseen data. \n\nTry a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. You'll generally find that the best number of hidden nodes to use ends up being between the number of input and output nodes.",
"_____no_output_____"
]
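,
[
"####################\n# my_answers.py is not shown in this notebook; the values below are only illustrative,\n# assumed starting points of the kind discussed above, not the values that produced the\n# recorded outputs.\n####################\niterations = 3000      # stop shortly after the validation loss stops decreasing\nlearning_rate = 0.8    # reduce this if the losses diverge or oscillate\nhidden_nodes = 14      # somewhere between the number of input and output nodes\noutput_nodes = 1       # a single regression output ('cnt')",
"_____no_output_____"
]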
],
[
[
"import sys\n\n####################\n### Set the hyperparameters in you myanswers.py file ###\n####################\n\nfrom my_answers import iterations, learning_rate, hidden_nodes, output_nodes\n\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor ii in range(iterations):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']\n \n network.train(X, y)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: {:2.1f}\".format(100 * ii/float(iterations)) \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n sys.stdout.flush()\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)",
"\rProgress: 0.0% ... Training loss: 0.952 ... Validation loss: 1.289"
],
[
"plt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\n_ = plt.ylim()",
"_____no_output_____"
]
],
[
[
"## Check out your predictions\n\nHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features).T*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)",
"C:\\Users\\Gabby\\Anaconda3\\envs\\deep-learning\\lib\\site-packages\\ipykernel_launcher.py:10: DeprecationWarning: \n.ix is deprecated. Please use\n.loc for label based indexing or\n.iloc for positional indexing\n\nSee the documentation here:\nhttp://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated\n # Remove the CWD from sys.path while we load stuff.\n"
]
],
[
[
"## OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).\n \nAnswer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?\n\n> **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter\n\n#### Your answer below\n\nSince most of the wrong prediction occures from the 23rd to the 31st of Decemeber, I think the reason is because of the fact that training data may not include information that are similar to the events that occure during the wrong prediction period. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a842749d3978cfb450a84f8c6d0dc7b80ece5e6
| 69,363 |
ipynb
|
Jupyter Notebook
|
sandbox/ENV/.ipynb_checkpoints/Client-checkpoint.ipynb
|
uah-cao1/CEO
|
40dbf7db365d9cd14268cd36b1c789b22750d552
|
[
"Zlib"
] | 18 |
2016-02-29T12:41:52.000Z
|
2021-12-03T15:10:34.000Z
|
sandbox/ENV/.ipynb_checkpoints/Client-checkpoint.ipynb
|
uah-cao1/CEO
|
40dbf7db365d9cd14268cd36b1c789b22750d552
|
[
"Zlib"
] | 23 |
2015-04-27T14:17:19.000Z
|
2021-11-29T22:19:12.000Z
|
sandbox/ENV/.ipynb_checkpoints/Client-checkpoint.ipynb
|
uah-cao1/CEO
|
40dbf7db365d9cd14268cd36b1c789b22750d552
|
[
"Zlib"
] | 17 |
2015-04-09T14:13:16.000Z
|
2022-02-17T10:03:00.000Z
| 265.758621 | 47,110 | 0.914666 |
[
[
[
"import zmq\nimport msgpack\nimport sys\nfrom pprint import pprint\nimport json\nimport numpy as np\nimport ceo\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nport = \"5556\"",
"_____no_output_____"
]
],
[
[
"# SETUP",
"_____no_output_____"
]
],
[
[
"context = zmq.Context()\nprint \"Connecting to server...\"\nsocket = context.socket(zmq.REQ)\nsocket.connect (\"tcp://localhost:%s\" % port)\n\nprint \"Sending request \", \"ubuntu_cuda70\",\"...\"\nsocket.send (\"ubuntu_cuda70\")\n\nmessage = socket.recv_json()\npprint(message)\n\noptical_path = {}\nfor kk, vv in message.iteritems():\n print kk, ' is ', vv\n socket.send_string (vv)\n message = socket.recv_json()\n pprint(message)\n if kk==\"Source\":\n optical_path[vv] = ceo.Source(message[\"band\"],\n zenith=message[\"zenith\"],\n azimuth=message[\"azimuth\"],\n height=np.float(message[\"height\"]),\n magnitude = message[\"magnitude\"],\n rays_box_size=message[\"pupil size\"],\n rays_box_sampling=message[\"pupil sampling\"],\n rays_origin=[0.0,0.0,25])\n N_SRC = optical_path[vv].N_SRC\n elif kk==\"GMT_MX\":\n D_px = message[\"pupil sampling\"]\n optical_path[vv] = ceo.GMT_MX(message[\"pupil size\"],\n message[\"pupil sampling\"],\n M1_radial_order=message[\"M1\"][\"Zernike radial order\"],\n M2_radial_order=message[\"M2\"][\"Zernike radial order\"])\n elif kk==\"Imaging\":\n optical_path[vv] = ceo.Imaging(1, D_px-1,\n DFT_osf=2*message[\"nyquist oversampling\"],\n N_PX_IMAGE=message[\"resolution\"],\n N_SOURCE=N_SRC)\n",
"Connecting to server...\nSending request ubuntu_cuda70 ...\n{u'GMT_MX': u'GMT', u'Imaging': u'imager', u'Source': u'star'}\nSource is star\n{u'N': 1,\n u'azimuth': [0.0],\n u'band': u'K',\n u'height': u'Inf',\n u'magnitude': 3,\n u'pupil sampling': 521,\n u'pupil size': 26.0,\n u'zenith': [0.0]}\nGMT_MX is GMT\n{u'M1': {u'Zernike radial order': 3},\n u'M2': {u'Zernike radial order': 0},\n u'pupil sampling': 521,\n u'pupil size': 26.0}\nImaging is imager\n{u'nyquist oversampling': 2, u'resolution': 51}\n"
],
[
"optical_path[\"star\"].reset()\noptical_path[\"GMT\"].propagate(optical_path[\"star\"])\noptical_path[\"imager\"].propagate(optical_path[\"star\"])\nplt.imshow(optical_path[\"star\"].phase.host(),interpolation='None')",
"_____no_output_____"
],
[
"plt.imshow(optical_path[\"imager\"].frame.host())",
"_____no_output_____"
]
],
[
[
"# DATA SERVER",
"_____no_output_____"
]
],
[
[
"port = \"5557\"\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind(\"tcp://*:%s\" % port)",
"_____no_output_____"
],
[
"port = \"5558\"\nsub_context = zmq.Context()\nsub_socket = sub_context.socket(zmq.SUB)\nsub_socket.connect (\"tcp://localhost:%s\" % port)",
"_____no_output_____"
],
[
"message = socket.recv()\nprint \"Received request: \", message\n\noptical_path[\"star\"].reset()\noptical_path[\"GMT\"].propagate(optical_path[\"star\"])\noptical_path[\"imager\"].propagate(optical_path[\"star\"])\n\ndata = optical_path[\"star\"].phase.host()\nmsg = msgpack.packb(data.tolist())\nsocket.send(msg)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a8465eb04f326b0d1737de5cdd82ada97e45d27
| 5,480 |
ipynb
|
Jupyter Notebook
|
Wednesday/binary_search.ipynb
|
JSJeong-me/KOSA-Python_Algorithm
|
f680e93c0976fd0c28b1d7aeacb7896e1b4a2bd3
|
[
"MIT"
] | null | null | null |
Wednesday/binary_search.ipynb
|
JSJeong-me/KOSA-Python_Algorithm
|
f680e93c0976fd0c28b1d7aeacb7896e1b4a2bd3
|
[
"MIT"
] | null | null | null |
Wednesday/binary_search.ipynb
|
JSJeong-me/KOSA-Python_Algorithm
|
f680e93c0976fd0c28b1d7aeacb7896e1b4a2bd3
|
[
"MIT"
] | 3 |
2021-12-16T07:19:05.000Z
|
2021-12-16T08:43:58.000Z
| 25.971564 | 252 | 0.422263 |
[
[
[
"<a href=\"https://colab.research.google.com/github/JSJeong-me/KOSA-Python_Algorithm/blob/main/Wednesday/binary_search.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"이진 탐색이란?\n이진 탐색이란 데이터가 정렬돼 있는 배열에서 특정한 값을 찾아내는 알고리즘이다. 배열의 중간에 있는 임의의 값을 선택하여 찾고자 하는 값 X와 비교한다. X가 중간 값보다 작으면 중간 값을 기준으로 좌측의 데이터들을 대상으로, X가 중간값보다 크면 배열의 우측을 대상으로 다시 탐색한다. 동일한 방법으로 다시 중간의 값을 임의로 선택하고 비교한다. 해당 값을 찾을 때까지 이 과정을 반복한다.",
"_____no_output_____"
]
],
[
[
"data = [17, 28, 43, 67, 88, 92, 100] # { 17, 28, 43, 67, 88, 92, 100 }",
"_____no_output_____"
]
],
[
[
"첫 번째 시도\n우선 가운데에 위치한 임의의 값 67을 선택한다.\n\n선택한 값 67과 찾고자 하는 값 43를 비교한다.\n\n43 < 67 이므로 43은 67의 좌측에 존재한다는 것을 알 수 있다.\n\n\n\n두 번째 시도\n67을 기준으로 좌측에 있는 배열 값들을 대상으로 다시 탐색을 진행한다.\n\n{ 17, 28, 43 }\n\n마찬가지로 가운데의 임의의 값 28을 선택한다.\n\n28 < 43 이번에는 28이 43보다 작으므로 28 우측에 위치하는 것을 알 수 있다.\n\n\n\n세 번째 시도\n28의 우측을 기준으로 배열을 다시 설정해보면\n\n{ 43 }\n\n배열에 값이 하나만 남게 되고 값을 확인해보면,\n43 == 43 원하는 값을 찾았다.\n\n\n\n종료 조건\n탐색의 종료 조건은 원하는 값을 찾으면 종료된다.\n운이 좋게 한 번에 찾을 수도 있고 위의 예제와 같이 마지막에 찾을 수도 있다.\n\n만약 원하는 값이 배열이 존재하지 않는다면 어떻게 종료될까?\n방금 살펴본 예제를 그대로 이용하여 40 을 찾는다고 가정해보자.\n\n두 번째 시도까지는 동일하게 진행된다.\n\n세 번째 시도에서 값을 비교해보면, 40 < 43 으로 원하는 값 40 보다 작다. 배열의 좌측을 탐색해야 하는데 더 이상 남은 배열이 존재하지 않는다.\n이렇게 탐색하고자 하는 배열이 더이상 존재하지 않으면 찾고자 하는 값이 배열에 존재하지 않는다는 것으로 판단할 수 있고 탐색을 종료한다.\n\n",
"_____no_output_____"
]
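,
[
"# An iterative sketch of the search walked through above, added here only as an\n# illustration; the notebook's own recursive implementation follows below.\ndef binary_search_iterative(arr, x):\n    low, high = 0, len(arr) - 1\n    while low <= high:\n        mid = (low + high) // 2\n        if arr[mid] == x:\n            return mid        # found: return the index\n        elif arr[mid] < x:\n            low = mid + 1     # keep searching the right half\n        else:\n            high = mid - 1    # keep searching the left half\n    return -1                 # the value is not in the array\n\nprint(binary_search_iterative(data, 43))   # expected: 2 (index of 43)\nprint(binary_search_iterative(data, 40))   # expected: -1 (not present)",
"_____no_output_____"
]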
],
[
[
"def BinarySearch(arr, low, high, x): \n # Binary Search:\n # arr --> sorted array\n # low --> lowest index\n # high --> highest index\n # x --> searched value\n\n if high >= low: \n mid = (high + low) // 2\n \n if arr[mid] == x: \n return mid \n elif arr[mid] > x: \n return BinarySearch(arr, low, mid - 1, x) \n else: \n return BinarySearch(arr, mid + 1, high, x) \n \n else: \n # Element is not present in the array \n return -1\n \ndef binary_search(arr, x):\n # initiation of BinarySearch recursion\n # arr --> unsorted array\n return BinarySearch(arr, 0, len(arr) - 1, x)",
"_____no_output_____"
],
[
"found_index = binary_search(data, 92)",
"_____no_output_____"
],
[
"found_index",
"_____no_output_____"
],
[
"data[found_index]",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a846f0a1ba23e1dec6d2b2abd0080a6ce77e4bf
| 20,239 |
ipynb
|
Jupyter Notebook
|
notebooks/scores_analysis.ipynb
|
karsti11/caffe_bar_sales_analysis
|
f7001bbf2d09c1ceeb8aef35322652a8495949ed
|
[
"MIT"
] | null | null | null |
notebooks/scores_analysis.ipynb
|
karsti11/caffe_bar_sales_analysis
|
f7001bbf2d09c1ceeb8aef35322652a8495949ed
|
[
"MIT"
] | null | null | null |
notebooks/scores_analysis.ipynb
|
karsti11/caffe_bar_sales_analysis
|
f7001bbf2d09c1ceeb8aef35322652a8495949ed
|
[
"MIT"
] | null | null | null | 93.699074 | 13,260 | 0.82514 |
[
[
[
"import os\nimport ast\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom src.utils import get_project_root\n",
"_____no_output_____"
],
[
"PROJECT_ROOT_PATH = get_project_root()\nSCORES_DIR = os.path.join(PROJECT_ROOT_PATH,'reports/scores/')",
"_____no_output_____"
],
[
"PROJECT_ROOT_PATH",
"_____no_output_____"
],
[
"scores_dict = {}\nfor filename in os.listdir(SCORES_DIR):\n if filename.endswith('.csv'):\n scores_dict[filename] = pd.read_csv(SCORES_DIR+filename)\n \n ",
"_____no_output_____"
],
[
"for key, val in scores_dict.items():\n print(f\"Features from {key}:\\n{list(ast.literal_eval(val.features_coefs.values[0]).keys())}\")\n print(f\"Train wmape distribution:\\n{val.train_wmape.describe()}\")\n print(f\"Val wmape distribution:\\n{val.val_wmape.describe()}\")",
"Features from 2021_06_06scores.csv:\n['item_price', 'day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6', 'month_of_year_1', 'month_of_year_2', 'month_of_year_3', 'month_of_year_4', 'month_of_year_5', 'month_of_year_6', 'month_of_year_7', 'month_of_year_8', 'month_of_year_9', 'month_of_year_10', 'month_of_year_11', 'month_of_year_12', 'year', 'first_third_of_month', 'second_third_of_month', 'last_third_of_month', 'easter', 'easter_monday', 'christmas', 'new_years_day', 'new_years_eve']\nTrain wmape distribution:\ncount 35.000000\nmean 54.598000\nstd 15.052693\nmin 22.750000\n25% 47.940000\n50% 52.540000\n75% 60.210000\nmax 97.510000\nName: train_wmape, dtype: float64\nVal wmape distribution:\ncount 35.000000\nmean 59.954000\nstd 19.716281\nmin 24.880000\n25% 49.925000\n50% 59.980000\n75% 71.295000\nmax 121.440000\nName: val_wmape, dtype: float64\nFeatures from scaled_and_times_10_100.csv:\n['item_price', 'day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6', 'month_of_year_1', 'month_of_year_2', 'month_of_year_3', 'month_of_year_4', 'month_of_year_5', 'month_of_year_6', 'month_of_year_7', 'month_of_year_8', 'month_of_year_9', 'month_of_year_10', 'month_of_year_11', 'month_of_year_12', 'year', 'first_third_of_month', 'second_third_of_month', 'last_third_of_month']\nTrain wmape distribution:\ncount 35.000000\nmean 54.708571\nstd 15.084558\nmin 23.270000\n25% 47.850000\n50% 52.580000\n75% 60.155000\nmax 97.650000\nName: train_wmape, dtype: float64\nVal wmape distribution:\ncount 35.000000\nmean 59.931429\nstd 19.617268\nmin 24.940000\n25% 49.940000\n50% 60.090000\n75% 70.895000\nmax 120.110000\nName: val_wmape, dtype: float64\nFeatures from score_v1.csv:\n['item_price', 'day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6', 'month_of_year_1', 'month_of_year_2', 'month_of_year_3', 'month_of_year_4', 'month_of_year_5', 'month_of_year_6', 'month_of_year_7', 'month_of_year_8', 'month_of_year_9', 'month_of_year_10', 'month_of_year_11', 'month_of_year_12', 'year', 'first_third_of_month', 'second_third_of_month', 'last_third_of_month']\nTrain wmape distribution:\ncount 35.000000\nmean 54.717714\nstd 15.111217\nmin 22.780000\n25% 47.940000\n50% 52.550000\n75% 60.220000\nmax 97.650000\nName: train_wmape, dtype: float64\nVal wmape distribution:\ncount 35.000000\nmean 60.119143\nstd 19.735531\nmin 24.900000\n25% 49.920000\n50% 59.980000\n75% 71.275000\nmax 121.430000\nName: val_wmape, dtype: float64\n"
],
[
"labels = list(scores_dict.keys())\ntrain_data = [val.train_wmape.values for val in scores_dict.values()]\ntest_data = [val.val_wmape.values for val in scores_dict.values()]\n\nfig = plt.figure(figsize =(10, 7)) \n# Creating axes instance\nax = fig.add_axes([0, 0, 1, 1])\n \n# Creating plot\nbp = ax.boxplot(train_data)\nax.set_xticklabels(labels)\n# show plot\nplt.show()\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a8476390f98fbc467c96246513de2206f72de6a
| 35,010 |
ipynb
|
Jupyter Notebook
|
ipynb/T11-Files-in-Python-Part-2.ipynb
|
bbashari/analysis3_lab
|
912722624d7c35dad4cfb34960af022d0f83aeed
|
[
"MIT"
] | null | null | null |
ipynb/T11-Files-in-Python-Part-2.ipynb
|
bbashari/analysis3_lab
|
912722624d7c35dad4cfb34960af022d0f83aeed
|
[
"MIT"
] | null | null | null |
ipynb/T11-Files-in-Python-Part-2.ipynb
|
bbashari/analysis3_lab
|
912722624d7c35dad4cfb34960af022d0f83aeed
|
[
"MIT"
] | null | null | null | 24.989293 | 281 | 0.499257 |
[
[
[
"# **Tutorial 11: Working with Files (Part 02)** 👀",
"_____no_output_____"
],
[
"<a id='t11toc'></a>\n#### Contents: ####\n- **[Parsing](#t11parsing)**\n - [`strip()`](#t11strip)\n - [Exercise 1](#t11ex1)\n - [`split()`](#t11split)\n - [Exercise 2](#t11ex2)\n- **[JSON](#t11json)**\n - [Reading JSON from a String](#t11loads)\n - [Reading a JSON File](#t11load)\n - [Converting to a JSON String](#t11dumps)\n - [Writing to a JSON File](#t11dump)\n- **[CSV](#t11csv)**\n - [Reading CSV Files in Python](#t11readcsv)\n - [Writing into CSV Files (Row by Row)](#t11writecsvrbr)\n - [Writing into CSV Files (Multiple Rows)](#t11writecsvmultiple)\n - [Writing into CSV Files (Custom Delimiter)](#t11delimiter)\n- [Exercise 3](#t11ex3)\n- [Exercises Solutions](#t11sol)",
"_____no_output_____"
],
[
"💡 <b>TIP</b><br>\n> <i>In Exercises, when time permits, try to write the codes yourself, and do not copy it from the other cells.</i>",
"_____no_output_____"
],
[
"<a id='t11parsing'></a>\n## ▙▂ **🄿ARSING ▂▂**\n\nParsing means splitting up a text into meaningful components (meaningful for a *given purpose*). \nPython has some built-in methods that you can be used for some basic parsing tasks on strings. We will practise with few of them.",
"_____no_output_____"
],
[
"<a id='t11strip'></a>\n#### **▇▂ `strip()` ▂▂**",
"_____no_output_____"
],
[
"The `strip()` method removes characters from both left and right sides of a string, based on the argument (a string specifying the set of characters to be removed). \n\nThe syntax of the `strip()` method is: `string.strip([chars])`\n\n**`strip()` Parameters** \n- `chars` (optional) - a string specifying the set of characters to be removed from the left and right sides of the string.\n - If the chars argument is not provided, all **leading and trailing whitespaces** are removed from the string.",
"_____no_output_____"
]
],
[
[
"Str = ' Analysis 3: Object Oriented Programming '",
"_____no_output_____"
]
],
[
[
"The code below removes all white spaces (blank) from the left and right side of the string:",
"_____no_output_____"
]
],
[
[
"CleanStr1 = Str.strip()",
"_____no_output_____"
],
[
"print(f'Original String is = \"{Str}\" --- (length={len(Str)})')\nprint(f'Removing Leading and Trailing White spaces = \"{CleanStr1}\" --- (length={len(CleanStr1)})')",
"_____no_output_____"
]
],
[
[
"The method can also be diectly applied to a string:",
"_____no_output_____"
]
],
[
[
"CleanStr2 = 'OOOOOOOOAnalysis 3: Object Oriented ProgrammingOOOOO'.strip('O')\nprint(f'Removing O\\'s = \"{CleanStr2}\"')",
"_____no_output_____"
]
],
[
[
"<br>⚠ <b>NOTE</b><br>\n>It removes only leading and trailing `'O'`s, but not those in between.<br>",
"_____no_output_____"
],
[
"##### **Multiple Characters**\n\nThe `chars` parameter is not a prefix or suffix; rather, all combinations of its values are stripped. \n\nIn below example, `strip()` would strip all the characters provided in the argument i.e. `'+'`, `'*'`.",
"_____no_output_____"
]
],
[
[
"CleanStr3 = '+*++*++Analysis 3: Object Oriented Programming**++**'.strip('+*')\nprint(f'Stripping + and * on both sides = {CleanStr3}')",
"_____no_output_____"
]
],
[
[
"##### **Only One Side**\n\n- `lstrip()` trims leading characters and return the trimmed string.\n- `rstrip()` strips any trailing white-spaces, tabs, and newlines, and returns the trimmed string.\n",
"_____no_output_____"
]
],
[
[
"CleanStr4 = '***********Analysis 3: Object Oriented Programming***********'.lstrip('*')\nprint('Removing Left Side using lstrip() is = \"%s\"' %CleanStr4)",
"_____no_output_____"
]
],
[
[
"<i>Try to do this first without checking the solution in the next cell. After typing your code, compare it with the solution. </i>",
"_____no_output_____"
]
],
[
[
"CleanStr5 = '***********Analysis 3: Object Oriented Programming***********'.rstrip('*')\nprint('Removing Right Side using rstrip() is = \"%s\"' %CleanStr5)",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<br><br><a id='t11ex1'></a>\n◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾",
"_____no_output_____"
],
[
"**✎ Exercise 𝟙**<br> <br> ▙ ⏰ ~ 2+2 min. ▟ <br>",
"_____no_output_____"
],
[
"❶ We have a file `studentsgrades.txt` in the current folder which contains the students first name and last names and their grades for Analysis 2.\n\nthe information are not properly formatted in the file and there are some extra characters on each row. (Open the file to see the records.) \n\n**CMI-Inf St. Num 1002121 Andrew Bren 8.4 \nCMI-Inf St. Num 1002121 Peter Cole 7.0 \nCMI-Inf St. Num 1002121 Chris Charles 9.1 \nCMI-Inf St. Num 1002121 Andy Frankline 6.9 \nCMI-Inf St. Num 1002121 Robert Ford 5.6 \nCMI-Inf St. Num 1002121 Charley Walton 7.7**\n\nWrite a short piece of code to read the file and remove the extra leading characters and students numbers from each row, and Create a new file `studentsgrades-m.txt` containing the new records. The new file should be like: \n\n**Andrew Bren 8.4 \nPeter Cole 7.0 \nChris Charles 9.1 \nAndy Frankline 6.9 \nRobert Ford 5.6 \nCharley Walton 7.7**",
"_____no_output_____"
]
],
[
[
"# Exercise 1.1\n",
"_____no_output_____"
]
],
[
[
"❷ Modify your code to work on the original file `studentsgrades.txt` and create another new file `studentsnames.txt` containing only the students names, without grades.",
"_____no_output_____"
]
],
[
[
"# Exercise 1.2\n",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11split'></a>\n#### **▇▂ `split()` ▂▂**",
"_____no_output_____"
],
[
"The `split()` method breaks up a string at the specified separator and returns a *list of strings*. \n\n\nThe syntax of the `split()` method is: `str.split([separator [, maxsplit]])`\n\n\n\n**`split()` Parameters** \n`split()` method takes a maximum of 2 parameters:\n- `separator` (optional)- It is a delimiter. The string splits at the specified separator.\n - If the separator is not specified, any whitespace (space, newline etc.) string is a separator.\n- `maxsplit` (optional) - The maxsplit defines the maximum number of splits.\n - The default value of maxsplit is `-1`, meaning, no limit on the number of splits.\n",
"_____no_output_____"
]
],
[
[
"text = 'Never regret anything that made you smile'",
"_____no_output_____"
],
[
"print(text.split())",
"_____no_output_____"
]
],
[
[
"##### **with `seperator`**",
"_____no_output_____"
]
],
[
[
"grocery = 'Milk, Chicken, Bread, Butter'",
"_____no_output_____"
],
[
"print(grocery.split(', '))",
"_____no_output_____"
]
],
[
[
"Try the following code:",
"_____no_output_____"
]
],
[
[
"print(grocery.split(':'))",
"_____no_output_____"
]
],
[
[
"🔴 How many elements are in the python list above? Why? Discuss it.",
"_____no_output_____"
],
[
"##### **with `maxsplit`**",
"_____no_output_____"
]
],
[
[
"print(grocery.split(', ', 1))",
"_____no_output_____"
],
[
"print(grocery.split(', ', 2))",
"_____no_output_____"
],
[
"print(grocery.split(', ', 5))",
"_____no_output_____"
]
],
[
[
"<br>⚠ <b>NOTE</b><br>\n>If `maxsplit` is specified, the list will have at most `maxsplit+1` items.<br>",
"_____no_output_____"
],
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<br><br><a id='t11ex2'></a>\n◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾",
"_____no_output_____"
],
[
"**✎ Exercise 𝟚**<br> <br> ▙ ⏰ ~ 3+3 min. ▟ <br>",
"_____no_output_____"
],
[
"❶ We want to further process `studentsgrades.txt` file discussed in Exercise 1. Now, we would like to make a **list** of students. Each item in the list must be a student. A student should be created as an object with three attributes: `first name`, `last name`, and `grade`.",
"_____no_output_____"
]
],
[
[
"# Exercise 2.1\n",
"_____no_output_____"
]
],
[
[
"❷ Define a class to represent a group of students. Create an object to contain the students in our list, and add a method to the class for a simple linear search based on the last name of a student. ",
"_____no_output_____"
]
],
[
[
"# Exercise 2.2\n",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11json'></a>\n## ▙▂ **🄹SON ▂▂**",
"_____no_output_____"
],
[
"JSON is a syntax for storing and exchanging data.\n- JSON is text, written with JavaScript object notation.\n- JSON is language independent.\n- JSON uses JavaScript syntax, but the JSON format is text only.\n- Text can be read and used as a data format by any programming language.",
"_____no_output_____"
],
[
"<a id='t11loads'></a>\n#### **▇▂ Reading JSON from a String ▂▂**",
"_____no_output_____"
],
[
"`json.loads()` reads JSON from a string and converts it to a Python dictionary.",
"_____no_output_____"
]
],
[
[
"import json\n\nbook = \"\"\" \n {\n \"author\": \"Chinua Achebe\",\n \"editor\": null,\n \"country\": \"Nigeria\",\n \"imageLink\": \"images/things-fall-apart.jpg\",\n \"language\": \"English\",\n \"link\": \"https://en.wikipedia.org/wiki/Things_Fall_Apart\",\n \"pages\": 209,\n \"title\": \"Things Fall Apart\",\n \"year\": 1958,\n \"available\": true\n }\n \"\"\"\n\nbook_dict = json.loads(book)\n\nprint(book_dict)\nprint(\"\\n\")\nprint(\"The title of the book is:\", book_dict['title'])\nprint(f'book_json is: {type(book_dict)}')",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11load'></a>\n#### **▇▂ Reading a JSON File ▂▂**",
"_____no_output_____"
],
[
"Now let's load a JSON file into a JSON object in Python. For this, we use the file `book.json`, located in the current directory.",
"_____no_output_____"
],
[
"First, let's take a look into the file contents.",
"_____no_output_____"
]
],
[
[
"f = open(\"book.json\")\ntext = f.read()\nf.close()\nprint(f\"The full text in the file is:\\n\\n{text}\")",
"_____no_output_____"
]
],
[
[
"you could also open the `book.json` in the Jupyter or a text editor.",
"_____no_output_____"
],
[
"The `json.load()` method reads a file containing JSON object:",
"_____no_output_____"
]
],
[
[
"import json\n\nwith open('book.json') as f:\n data = json.load(f)\n\nprint(data)\nprint('\\n')\nprint(type(data))",
"_____no_output_____"
]
],
[
[
"<br>⚠ <b>NOTE</b><br>\n>Note the difference between JSON object and Python object.<br>",
"_____no_output_____"
],
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11dumps'></a>\n#### **▇▂ Converting to a JSON String ▂▂**",
"_____no_output_____"
],
[
"`json.dumps()` converts a python dictionary to a JSON string.",
"_____no_output_____"
]
],
[
[
"import json\n\nbook = {\n \"author\": \"Chinua Achebe\",\n \"editor\": None,\n \"country\": \"Nigeria\",\n \"imageLink\": \"images/things-fall-apart.jpg\",\n \"language\": \"English\",\n \"link\": \"https://en.wikipedia.org/wiki/Things_Fall_Apart\",\n \"pages\": 209,\n \"title\": \"Things Fall Apart\",\n \"year\": 1958,\n \"available\": True\n }\n \nbook_json = json.dumps(book)\nprint(book_json)\nprint('\\n')\nprint(f'book is: {type(book)}')\nprint(f'book_json is: {type(book_json)}')",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11dump'></a>\n#### **▇▂ Writing to a JSON File ▂▂**",
"_____no_output_____"
],
[
"`json.dump()` converts and writes a dictionary to a JSON file.",
"_____no_output_____"
]
],
[
[
"import json\n\nbook_dict = {\n \"author\": \"Chinua Achebe\",\n \"editor\": None,\n \"country\": \"Nigeria\",\n \"imageLink\": \"images/things-fall-apart.jpg\",\n \"language\": \"English\",\n \"link\": \"https://en.wikipedia.org/wiki/Things_Fall_Apart\",\n \"pages\": 209,\n \"title\": \"Things Fall Apart\",\n \"year\": 1958,\n \"available\": True\n }\n\nwith open('book-new.json', 'w') as json_file:\n json.dump(book_dict, json_file)",
"_____no_output_____"
]
],
[
[
"Now let's take a look into the contents of the file we have just written:",
"_____no_output_____"
]
],
[
[
"f = open(\"book-new.json\")\ntext = f.read()\nf.close()\nprint(f\"The full text in the file is:\\n\\n{text}\")",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11csv'></a>\n## ▙▂ **🄲SV ▂▂**",
"_____no_output_____"
],
[
"While we could use the built-in `open()` function to work with `CSV` files in Python, there is a dedicated `csv` module that makes working with `CSV` files much easier. \n\nBefore we can use the methods to the `csv` module, we need to import the module first, using:",
"_____no_output_____"
]
],
[
[
"import csv",
"_____no_output_____"
]
],
[
[
"<a id='t11readcsv'></a>\n#### **▇▂ Reading CSV Files in Python ▂▂**",
"_____no_output_____"
],
[
"To read a `CSV` file in Python, we can use the `csv.reader()` function. \n\nThe `csv.reader()` returns an iterable `reader` object.\n\nThe `reader` object is then iterated using a for loop to print the contents of each row.",
"_____no_output_____"
],
[
"##### **Using comma (`,`) as delimiter**\n\nComma is the default delimiter for `csv.reader()`.",
"_____no_output_____"
]
],
[
[
"import csv\nwith open('grades.csv', 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n print(row)",
"_____no_output_____"
]
],
[
[
"##### **Using tab (`\\t`) as delimiter**",
"_____no_output_____"
]
],
[
[
"import csv\nwith open('gradesTab.csv', 'r',) as file:\n reader = csv.reader(file, delimiter = '\\t')\n for row in reader:\n print(row)",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11writecsvrbr'></a>\n#### **▇▂ Writing into CSV Files (Row by Row) ▂▂**",
"_____no_output_____"
],
[
"To write to a CSV file in Python, we can use the `csv.writer()` function. \n\nThe `csv.writer()` function returns a `writer` object that converts the user's data into a delimited string. This string can later be used to write into `CSV` files using the `writerow()` function. \n\n`csv.writer` class provides two methods for writing to `CSV`. They are `writerow()` and `writerows()`:\n- `writerow()`: This method writes a single row at a time. Fields row can be written using this method.\n- `writerows()`: This method is used to write multiple rows at a time. This can be used to write rows list.",
"_____no_output_____"
]
],
[
[
"import csv\nwith open('gradesW1.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n \n writer.writerow([\"Lastname\",\"Firstname\",\"SSN\",\"Test1\",\"Test2\",\"Test3\",\"Test4\",\"Final\",\"Grade\"])\n writer.writerow([\"George\",\"Boy\",\"345-67-3901\",40.0,1.0,11.0,-1.0,4.0,\"B\"])\n writer.writerow([\"Heffalump\",\"Harvey\",\"632-79-9439\",30.0,1.0,20.0,30.0,40.0,\"C\"])",
"_____no_output_____"
]
],
[
[
"Now let's take a look into the contents of the file we have just written: ",
"_____no_output_____"
]
],
[
[
"f = open(\"gradesW1.csv\")\ntext = f.read()\nf.close()\nprint(f\"The full text in the file is:\\n\\n{text}\")",
"_____no_output_____"
]
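,
[
"# A small additional sketch, not covered above: csv.DictReader reads each row as a\n# dictionary keyed by the header row. It reuses the gradesW1.csv file written above.\nimport csv\nwith open('gradesW1.csv', 'r', newline='') as file:\n    reader = csv.DictReader(file)\n    for row in reader:\n        print(row['Firstname'], row['Lastname'], row['Grade'])",
"_____no_output_____"
]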
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11writecsvmultiple'></a>\n#### **▇▂ Writing into CSV Files (Multiple Rows) ▂▂**",
"_____no_output_____"
],
[
"If we need to write the contents of the 2-dimensional list to a `CSV file`, here's how we can do it:",
"_____no_output_____"
]
],
[
[
"import csv\ncsv_rowlist = [ [\"Lastname\",\"Firstname\",\"SSN\",\"Test1\",\"Test2\",\"Test3\",\"Test4\",\"Final\",\"Grade\"],\n [\"Dandy\",\"Jim\",\"087-75-4321\",47.0,1.0,23.0,36.0,45.0,\"C+\"],\n [\"Elephant\",\"Ima\",\"456-71-9012\",45.0,1.0,78.0,88.0,77.0,\"B-\"],\n [\"Franklin\",\"Benny\",\"234-56-2890\",50.0,1.0,90.0,80.0,90.0,\"B-\"]]\n\nwith open('gradesW2.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(csv_rowlist)",
"_____no_output_____"
]
],
[
[
"Now let's take a look into the contents of the file we have just written:",
"_____no_output_____"
]
],
[
[
"f = open(\"gradesW2.csv\")\ntext = f.read()\nf.close()\nprint(f\"The full text in the file is:\\n\\n{text}\")",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<a id='t11delimiter'></a>\n#### **▇▂ Writing into CSV Files (Custom Delimiter) ▂▂**",
"_____no_output_____"
],
[
"As mentioned before, by default, a comma `,` is used as a delimiter in a `CSV` file.",
"_____no_output_____"
],
[
"However, we can pass a different delimiter parameter as argument to the `csv.writer()` function:",
"_____no_output_____"
]
],
[
[
"import csv\ncsv_rowlist = [ [\"Lastname\",\"Firstname\",\"SSN\",\"Test1\",\"Test2\",\"Test3\",\"Test4\",\"Final\",\"Grade\"],\n [\"Dandy\",\"Jim\",\"087-75-4321\",47.0,1.0,23.0,36.0,45.0,\"C+\"],\n [\"Elephant\",\"Ima\",\"456-71-9012\",45.0,1.0,78.0,88.0,77.0,\"B-\"],\n [\"Franklin\",\"Benny\",\"234-56-2890\",50.0,1.0,90.0,80.0,90.0,\"B-\"]]\n\nwith open('gradesW3.csv', 'w', newline='') as file:\n writer = csv.writer(file, delimiter='|')\n writer.writerows(csv_rowlist)",
"_____no_output_____"
]
],
[
[
"Now let's take a look into the contents of the file we have just written:",
"_____no_output_____"
]
],
[
[
"f = open(\"gradesW3.csv\")\ntext = f.read()\nf.close()\nprint(f\"The full text in the file is:\\n\\n{text}\")",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<br><br><a id='t11ex3'></a>\n◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾◾",
"_____no_output_____"
],
[
"**✎ Exercise 𝟛**<br> <br> ▙ ⏰ ~ 3+3 min. ▟ <br>",
"_____no_output_____"
],
[
"❶ Write a code to write the result of the Exercise 2.1 into a JSON file. ",
"_____no_output_____"
]
],
[
[
"# Exercise 3.1\n",
"_____no_output_____"
]
],
[
[
"❷ Write a code to write the result of the Exercise 2.1 into a CSV file.",
"_____no_output_____"
]
],
[
[
"# Exercise 3.2\n",
"_____no_output_____"
]
],
[
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"<br><br><a id='t11sol'></a>\n◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼<br>\n◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼◼",
"_____no_output_____"
],
[
"#### 🔑 **Exercises Solutions** ####",
"_____no_output_____"
],
[
"**Exercise 1.1:**",
"_____no_output_____"
]
],
[
[
"with open(\"studentsgrades.txt\") as f:\n t = f.readlines()\n\nt = [student.lstrip('CMI-InfSt.Num ') for student in t]\nt = [student.lstrip(' 0123456789') for student in t]\n\n\nwith open(\"studentsgrades-m.txt\", \"w\") as f:\n f.writelines(t)",
"_____no_output_____"
]
],
[
[
"**Exercise 1.2:**",
"_____no_output_____"
]
],
[
[
"with open(\"studentsgrades.txt\") as f:\n t = f.readlines()\n\nt = [student.lstrip('CMI-InfSt.Num ') for student in t]\nt = [student.strip(' .0123456789\\n') + '\\n' for student in t]\n\n\nwith open(\"studentsnames.txt\", \"w\") as f:\n f.writelines(t)",
"_____no_output_____"
]
],
[
[
"<br>[back to Exercise 1 ↥](#t11ex1)",
"_____no_output_____"
],
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"**Exercise 2.1:**",
"_____no_output_____"
]
],
[
[
"class student:\n \n def __init__(self, fn, ln, gr):\n self.firstName = fn\n self.lastName = ln\n self.grade = gr\n\n def __str__(self):\n return f'{self.firstName} {self.lastName} ({self.grade})'\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.firstName}, {self.lastName}, {self.grade})'\n\n\nwith open(\"studentsgrades.txt\") as f:\n t = f.readlines()\n\nt = [st.lstrip('CMI-InfSt.Num ') for st in t]\nt = [st.lstrip(' 0123456789') for st in t]\nt = [st.rstrip('\\n') for st in t]\n\nt = [st.split(' ') for st in t]\n\nstudentsList= [student(st[0],st[1],st[2]) for st in t]\n\nfor st in studentsList:\n print(st)",
"_____no_output_____"
]
],
[
[
"**Exercise 2.2:**",
"_____no_output_____"
]
],
[
[
"class student:\n def __init__(self, fn, ln, gr):\n self.firstName = fn\n self.lastName = ln\n self.grade = gr\n\n def __str__(self):\n return f'{self.firstName} {self.lastName} ({self.grade})'\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.firstName}, {self.lastName}, {self.grade})'\n\nclass group:\n def __init__(self, sl):\n self.studentsList = sl\n\n def search(self, key_lastname):\n for st in self.studentsList:\n if st.lastName == key_lastname:\n return st\n return None\n\nwith open(\"studentsgrades.txt\") as f:\n t = f.readlines()\n\nt = [st.lstrip('CMI-InfSt.Num ') for st in t]\nt = [st.lstrip(' 0123456789') for st in t]\nt = [st.rstrip('\\n') for st in t]\n\nt = [st.split(' ') for st in t]\n\nstudentsList= [student(st[0],st[1],st[2]) for st in t]\n\nanalysis2_cmiinf1M = group(studentsList)\n\nprint(analysis2_cmiinf1M.search('Charles'))\nprint(analysis2_cmiinf1M.search('Andy'))",
"_____no_output_____"
]
],
[
[
"<br>[back to Exercise 2 ↥](#t11ex2)",
"_____no_output_____"
],
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
],
[
"**Exercise 3.1:**",
"_____no_output_____"
]
],
[
[
"import json\n\nclass student:\n \n def __init__(self, fn, ln, gr):\n self.firstName = fn\n self.lastName = ln\n self.grade = gr\n\n def __str__(self):\n return f'{self.firstName} {self.lastName} ({self.grade})'\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.firstName}, {self.lastName}, {self.grade})'\n\n\nwith open(\"studentsgrades.txt\") as f:\n t = f.readlines()\n\nt = [st.lstrip('CMI-InfSt.Num ') for st in t]\nt = [st.lstrip(' 0123456789') for st in t]\nt = [st.rstrip('\\n') for st in t]\n\nt = [st.split(' ') for st in t]\n\nstudentsList= [student(st[0], st[1], st[2]) for st in t]\n\nstudentsDict = []\n\nfor st in studentsList:\n dictItem ={\"First Name\": st.firstName, \"Last Name\": st.lastName, \"Grade\": st.grade}\n studentsDict.append(dictItem)\n\nwith open('students-grades.json', 'w') as f:\n json.dump(studentsDict , f, indent = 1)",
"_____no_output_____"
]
],
[
[
"**Exercise 3.2:**",
"_____no_output_____"
]
],
[
[
"import csv\n\nclass student:\n \n def __init__(self, fn, ln, gr):\n self.firstName = fn\n self.lastName = ln\n self.grade = gr\n\n def __str__(self):\n return f'{self.firstName} {self.lastName} ({self.grade})'\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.firstName}, {self.lastName}, {self.grade})'\n\n\nwith open(\"studentsgrades.txt\") as f:\n t = f.readlines()\n\nt = [st.lstrip('CMI-InfSt.Num ') for st in t]\nt = [st.lstrip(' 0123456789') for st in t]\nt = [st.rstrip('\\n') for st in t]\n\nt = [st.split(' ') for st in t]\n\nstudentsList= [student(st[0], st[1], st[2]) for st in t]\n\nheader = [\"FName\", \"LName\", \"Grade\"] \nrows = [header]\nfor st in studentsList:\n new_row =[st.firstName, st.lastName, st.grade]\n rows.append(new_row)\n\nwith open('students-grades.csv', 'w', newline='') as f:\n writer = csv.writer(f, delimiter='\\t')\n writer.writerows(rows) ",
"_____no_output_____"
]
],
[
[
"<br>[back to Exercise 3 ↥](#t11ex3)",
"_____no_output_____"
],
[
"<br>[back to top ↥](#t11toc)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a847d4ac3f09a7c29ad897634929d3648cea35d
| 14,228 |
ipynb
|
Jupyter Notebook
|
0_teoria_taller_v1_2018.ipynb
|
BioinfoALaEscuela/talleres
|
e16e303ef18395c19b6098d22213a201882dda73
|
[
"MIT"
] | null | null | null |
0_teoria_taller_v1_2018.ipynb
|
BioinfoALaEscuela/talleres
|
e16e303ef18395c19b6098d22213a201882dda73
|
[
"MIT"
] | null | null | null |
0_teoria_taller_v1_2018.ipynb
|
BioinfoALaEscuela/talleres
|
e16e303ef18395c19b6098d22213a201882dda73
|
[
"MIT"
] | null | null | null | 43.778462 | 866 | 0.668752 |
[
[
[
"<div align=\"center\"><img src='http://ufq.unq.edu.ar/sbg/images/top.jpg' alt=\"SGB logo\"> </div>\n\n<h1 align='center'> TALLER “PROGRAMACIÓN ORIENTADA A LA BIOLOGÍA”</h1>\n<h3 align='center'>(En el marco del II CONCURSO “BIOINFORMÁTICA EN EL AULA”)</h3>\n\nLa bioinformática es una disciplina científica destinada a la aplicación de métodos computacionales al análisis de datos biológicos, para poder contestar numerosas preguntas. Las tecnologías computacionales permiten, entre otras cosas, el análisis en plazos cortos de gran cantidad de datos (provenientes de experimentos, bibliografía, bases de datos públicas, etc), así como la predicción de la forma o la función de las distintas moléculas, o la simulación del comportamiento de sistemas biológicos complejos como células y organismos.\n\nLa bioinformática puede ser pensada como una herramienta en el aprendizaje de la biología: su objeto de trabajo son entidades biológicas (ADN, proteínas, organismos completos, sus poblaciones, etc.) que se analizan con métodos que permiten “visualizar” distintos procesos de la naturaleza. Así, la bioinformática aporta una manera clara y precisa de percibir los procesos biológicos, y acerca a los estudiantes herramientas que integran múltiples conocimientos (lógico-matemático, biológico, físico, estadístico) generando un aprendizaje significativo y envolvente.",
"_____no_output_____"
],
[
"# ¡Bienvenido! ¿Estás listo?\nPodemos comenzar con algunas definiciones.\n\n### ¿En qué consiste una computadora?\nUna computadora está formada por el `Hardware` (que son todas las partes o elementos físicos que la componen) y el `Software` (que son todas las instrucciones para el funcionamiento del Hardware). El sistema operativo es el principal Software de la computadora, pues proporciona una interfaz con el usuario y permite al resto de los programas una interacción correcta con el Hardware.\n\n### ¿Qué hacemos entonces cuando programamos?\nUna computadora está constituida, básicamente, por un gran número de circuitos eléctricos que pueden ser activados **(1)** o desactivados **(0)**. Al establecer diferentes combinaciones de prendido y apagado de los circuitos, los usuarios de computadoras podemos lograr que el equipo realice alguna acción (por ejemplo, que muestre algo en la pantalla). ¡Esto es programar!\n\nLos lenguajes de programación actúan como traductores entre el usuario y el equipo. En lugar de aprender el difícil lenguaje de la máquina, con sus combinaciones de ceros y unos, se puede utilizar un `lenguaje de programación` para dar instrucciones al equipo de un modo que sea más fácil de aprender y entender. Para que la computadora entienda nuestras órdenes, un programa intermedio, denominado `compilador`, convierte las instrucciones dadas por el usuario en un determinado lenguaje de programación, al `lenguaje máquina` de ceros y unos. Esto significa que, como programadores de Python (o cualquier otro lenguaje), no necesitamos entender lo que el equipo hace o cómo lo hace, basta con que entendamos a “hablar y escribir” en el lenguaje de programación.\n\n### ¿Por qué es útil aprender a programar?\nTu smartphone, tu Playstation o Smart TV no serían muy útiles sin programas (aplicaciones, juegos, etc) para hacerlas funcionar. Cada vez que abrimos un documento para hacer un trabajo práctico para la escuela, o usamos el WhatsApp para chatear con nuestros amigos, estamos usando programas que interpretan lo que deseamos, como cambiar un color de fuente, aumentar el tamaño de letra o enviar un mensaje. Estos programas le comunican nuestras órdenes a la PC o teléfono para que las ejecuten. 1Aprendiendo a programar podrías hacer una gran diversidad de cosas: desde escribir tus propios juegos y aplicaciones para celular, combinar el uso de varios programas en forma secuencial o leer millones de textos sin abrir un solo libro… hasta analizar el genoma de un organismo o miles de estructuras de proteínas y así sacar conclusiones de relevancia biológica.",
"_____no_output_____"
],
[
"# Entonces: ¿Qué es Python? \nEs un lenguaje de programación con una forma de escritura (sintaxis) sencilla y poderosa. Es lo que se conoce como lenguaje de scripting, que puede ser ejecutado por partes y no necesita ser compilado en un paso aparte. Python tiene muchas características, ventajas y usos que vamos a pasar por alto en este taller, pero que podes leer en las páginas oficiales de [Python](https://www.python.org/) y [Python Argentina](https://www.python.org.ar/). Para nosotros, la razón más importante para elegir Python es que podés comenzar a hacer tus propios programas realmente rápido.",
"_____no_output_____"
],
[
"### ¿Cómo se puede usar Python?\n\nDepende del dispositivo que uses. En las computadoras, suele venir instalado. Si tenés un teléfono inteligente existen varias aplicaciones que instalan todo lo necesario para que Python funcione. Solo debés buscar ‘Python’ en tu tienda y descargarte alguna de las apps disponibles. Recomendamos las siguientes opciones gratuitas:\n- Para teléfonos Android: QPython 3 (o Pydroid 3).\n- Para teléfonos Windows: Python 3.\n- Para iOS: Python 2.5 for iOS",
"_____no_output_____"
],
[
"# ¿Cómo escribimos código en Python dentro de las apps?\n¡Es muy fácil! Sólo tenés que abrir la app y ejecutar la `consola` o terminal de Python. Te aparecerá una pantalla con un pequeño símbolo, usualmente `>>>`, donde podrás ingresar las distintas órdenes `(o\ncomandos)` que le darás a la computadora, siempre en sintaxis de Python. Cada vez que des `Enter` se ejecutará esa orden y podrás escribir un nuevo comando. ¡Ojo! Al salir de la consola se borrarán los comandos, a menos que los guardemos en un archivo o `script` para volver a ejecutarlos más adelante.",
"_____no_output_____"
],
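[
"For instance, an interactive session in the console might look like the sketch below (the `>>>` prompt is printed by Python itself; you only type what follows it, and the values here are just an arbitrary illustration):\n\n```python\n>>> 3 + 4\n7\n>>> print('Hello!')\nHello!\n```",
"_____no_output_____"
],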
[
"### ¿Se pueden correr scripts en las apps de Python?\n¡Si, es posible! Pero para eso, primero hay que crearlos. Veremos cómo hacerlo con QPython3. Abrimos el editor de la aplicación desde la pantalla principal, luego escribimos el script que queremos ejecutar y lo guardamos en una carpeta (por ejemplo, “scripts3”) usando el botón correspondiente.\n\n¡Atención! Siempre al final del nombre que demos a nuestro script debemos usar la extensión **“.py”**, como en el ejemplo de la foto: **“ejemplo.py”**.\n\nNuestro script se ejecuta desde la página principal; allí apretamos el botón **“Programs”** y navegamos en los archivos para encontrar nuestro script. Al seleccionarlo aparecerán las opciones que se muestran en la imagen, de las cuales debemos elegir y pulsar **“Run”**. ¡Tachan! ¡El script se ejecuta!",
"_____no_output_____"
],
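[
"A minimal sketch of what **“ejemplo.py”** might contain is shown here (the file name comes from the screenshot above; the message text is only an illustrative assumption):\n\n```python\n# ejemplo.py: a tiny script saved in a folder such as scripts3\nprint('My first script is running!')\n```",
"_____no_output_____"
],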
[
"### ¿Qué otras formas tenemos para correr Python?\nExisten consolas en línea que te permiten correr Python en internet como si estuviese instalado en tu PC o teléfono, que son completamente gratis (bueno, ¡siempre que tengas internet!). Te recomendamos dos, pero siempre podés buscar otras:\n- [repl.it](https://repl.it/languages/python3)\n- [Tutorials Point](https://www.tutorialspoint.com/execute_python_online.php)\n\n### “Aún el camino más largo siempre comienza con el primer paso” - Lao Tse\nEl primer paso para poder hacer tu primer programa es abrir la consola de Python, tu app del teléfono o consola en línea, ¡lo que tengas a mano para arrancar!",
"_____no_output_____"
],
[
"### El principio de un comienzo\nEn todo proceso de aprendizaje los ‘errores’ tienen un rol muy importante: nos plantean nuevos interrogantes, nos acercan a nuevas hipótesis y sobre todo nos dan oportunidades \npara seguir aprendiendo. En la programación los ‘errores’ también importan, ¡y mucho! Son una suerte de comunicación con la máquina, que nos advierte cuando no funciona algo\nde lo que intentamos hacer.\nExisten distintos tipos de errores en Python y con cada tipo de error la máquina nos marca qué es lo que puede estar fallando de nuestro código. Por eso te pedimos que anotes\ntodos los errores que puedan ir apareciendo durante tu trabajo en el taller o en casa y que lo compartas con nosotros, para charlar entre todos acerca de los conocimientos \nque se ponen en juego durante la resolución de estos problemas. Así que, como diría la señorita Ricitos en su Autobús Mágico, **a cometer errores, tomar oportunidades y rockear con Python, que donde termina la carretera comienza la aventura!**",
"_____no_output_____"
],
[
"### Tu primer programa\nUna forma no muy original de a aprender escribir tu programa es simplemente abrir la consola, escribir lo siguiente y darle `Enter`:",
"_____no_output_____"
]
],
[
[
"print('A rockear con Python!')",
"A rockear con Python!\n"
]
],
[
[
"#### ¿Qué pasó? \n**print** es una función que te permite imprimir o mostrar en la consola todo lo que se encuentre dentro de los paréntesis y entre comillas, como en nuestro ejemplo. Entre otras cosas, esta función nos permite interactuar con nuestro programa o con el futuro usuario de nuestro programa. Felicitaciones, ¡ese fue tu primer programa en Python!",
"_____no_output_____"
],
[
"#### Una calculadora super-archi-genial \nCon Python podemos hacer todo tipo de cálculos matemáticos. Aunque suene medio bodrio, aprender a hacer estos cálculos nos va a ayudar después a trabajar sobre otros tipos de datos. Vamos a probar algunos cálculos. Escribí en tu consola:",
"_____no_output_____"
]
],
[
[
"3*5",
"_____no_output_____"
]
],
[
[
"#### ¿Cuál es el resultado? \nSi, como ves, el asterisco es el símbolo que se utiliza en Python para multiplicar. Probemos ahora:",
"_____no_output_____"
]
],
[
[
"8/4",
"_____no_output_____"
]
],
[
[
"#### ¿Qué resultado nos da? ¿Para qué se usa la barra hacia adelante?\n<p>En Python se puede usar los siguientes sı́mbolos básicos de matemáticas, que en programación se llaman operadores:</p>\n\nOperador | Descripción\n------------ | -------------\n+ | Suma\n- | Resta\n* | Multiplicación\n/ | División",
"_____no_output_____"
],
[
"Si por ejemplo tomamos la siguiente operación:",
"_____no_output_____"
]
],
[
[
"5+30*20",
"_____no_output_____"
]
],
[
[
"¿Qué da? ¿Por qué? Y si ahora hacemos:",
"_____no_output_____"
]
],
[
[
"(5+30)*20",
"_____no_output_____"
]
],
[
[
"#### ¿Nos da el mismo resultado? ¿Por qué pensás que ocurre eso?",
"_____no_output_____"
],
[
"De las dos operaciones anteriores podemos concluir dos cosas importantes: en este lenguaje, al igual que en la matemática, los operadores no tienen la misma prioridad de lectura o ejecución. La multiplicación y la división tienen mayor orden de prioridad que la suma y la resta. Esto significa que en el ejemplo 5+30*20, Python primero realiza la operación 30*20 y luego le suma 5 al resultado de la multiplicación. Los paréntesis nos sirven para reordenar las prioridades: al hacer (5+30) obligamos la ejecución de esta operación antes que la multiplicación",
"_____no_output_____"
],
[
"#### ¿Qué tal si probamos algo más complejo? Escribamos lo siguiente:",
"_____no_output_____"
]
],
[
[
"((4+5)*2)/5",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
4a847f007074bc6451d204d71a16e22a47aa30ea
| 189,360 |
ipynb
|
Jupyter Notebook
|
App/Untitled.ipynb
|
McGlash/Toronto_Rental_Market_Project
|
9c0af4ac28609f99b50bb740c12d34c7707ac2fa
|
[
"MIT"
] | null | null | null |
App/Untitled.ipynb
|
McGlash/Toronto_Rental_Market_Project
|
9c0af4ac28609f99b50bb740c12d34c7707ac2fa
|
[
"MIT"
] | null | null | null |
App/Untitled.ipynb
|
McGlash/Toronto_Rental_Market_Project
|
9c0af4ac28609f99b50bb740c12d34c7707ac2fa
|
[
"MIT"
] | 2 |
2020-08-16T20:18:25.000Z
|
2020-08-28T00:30:02.000Z
| 53.749645 | 640 | 0.523231 |
[
[
[
"from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = 'all'\n\nfrom flask import Flask, jsonify, render_template\nimport sqlalchemy\nfrom sqlalchemy import create_engine, func, inspect\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nimport pandas as pd",
"_____no_output_____"
],
[
"connection_string = \"postgres:postgres@localhost:5432/ETL_Rental_DB\"",
"_____no_output_____"
],
[
"def postgres_create_session(connection_string):\n #####This functions create all the background functions for a successful connections to the db\n #####and returns a session class, mapped classes\n #Create an engine to the hawaii.sqlite database\n engine = create_engine(f'postgresql://{connection_string}', echo=True)\n # reflect an existing database into a new model; reflect the tables\n Base = automap_base()\n Base.prepare(engine, reflect=True)\n\n # Save references to each table\n Rental = Base.classes.Rental\n Income = Base.classes.Income\n Crime = Base.classes.Crime\n Community_Assets = Base.classes.Community_Assets\n Bridge_Rental_Crime = Base.classes.Bridge_Rental_Crime\n\n # Create our session (link) from Python to the DB\n session = Session(bind=engine)\n return session, Rental, Income, Crime, Community_Assets, Bridge_Rental_Crime",
"_____no_output_____"
],
[
"session, Rental, Income, Crime, Community_Assets, Bridge_Rental_Crime = postgres_create_session(connection_string)",
"2020-08-15 20:44:56,239 INFO sqlalchemy.engine.base.Engine select version()\n2020-08-15 20:44:56,240 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,243 INFO sqlalchemy.engine.base.Engine select current_schema()\n2020-08-15 20:44:56,245 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,255 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1\n2020-08-15 20:44:56,256 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,262 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1\n2020-08-15 20:44:56,264 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,273 INFO sqlalchemy.engine.base.Engine show standard_conforming_strings\n2020-08-15 20:44:56,275 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,284 INFO sqlalchemy.engine.base.Engine SELECT c.relname FROM pg_class c JOIN pg_namespace n ON n.oid = c.relnamespace WHERE n.nspname = %(schema)s AND c.relkind in ('r', 'p')\n2020-08-15 20:44:56,285 INFO sqlalchemy.engine.base.Engine {'schema': 'public'}\n2020-08-15 20:44:56,306 INFO sqlalchemy.engine.base.Engine \n SELECT c.oid\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE (pg_catalog.pg_table_is_visible(c.oid))\n AND c.relname = %(table_name)s AND c.relkind in\n ('r', 'v', 'm', 'f', 'p')\n \n2020-08-15 20:44:56,310 INFO sqlalchemy.engine.base.Engine {'table_name': 'Income'}\n2020-08-15 20:44:56,324 INFO sqlalchemy.engine.base.Engine \n SELECT a.attname,\n pg_catalog.format_type(a.atttypid, a.atttypmod),\n (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)\n FROM pg_catalog.pg_attrdef d\n WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum\n AND a.atthasdef)\n AS DEFAULT,\n a.attnotnull, a.attnum, a.attrelid as table_oid,\n pgd.description as comment,\n a.attgenerated as generated\n FROM pg_catalog.pg_attribute a\n LEFT JOIN pg_catalog.pg_description pgd ON (\n pgd.objoid = a.attrelid AND pgd.objsubid = a.attnum)\n WHERE a.attrelid = %(table_oid)s\n AND a.attnum > 0 AND NOT a.attisdropped\n ORDER BY a.attnum\n \n2020-08-15 20:44:56,325 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,336 INFO sqlalchemy.engine.base.Engine \n SELECT t.typname as \"name\",\n pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"attype\",\n not t.typnotnull as \"nullable\",\n t.typdefault as \"default\",\n pg_catalog.pg_type_is_visible(t.oid) as \"visible\",\n n.nspname as \"schema\"\n FROM pg_catalog.pg_type t\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n WHERE t.typtype = 'd'\n \n2020-08-15 20:44:56,337 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,360 INFO sqlalchemy.engine.base.Engine \n SELECT t.typname as \"name\",\n -- no enum defaults in 8.4 at least\n -- t.typdefault as \"default\",\n pg_catalog.pg_type_is_visible(t.oid) as \"visible\",\n n.nspname as \"schema\",\n e.enumlabel as \"label\"\n FROM pg_catalog.pg_type t\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid\n WHERE t.typtype = 'e'\n ORDER BY \"schema\", \"name\", e.oid\n2020-08-15 20:44:56,361 INFO sqlalchemy.engine.base.Engine {}\n2020-08-15 20:44:56,370 INFO sqlalchemy.engine.base.Engine \n SELECT a.attname\n FROM pg_attribute a JOIN (\n SELECT unnest(ix.indkey) attnum,\n generate_subscripts(ix.indkey, 1) ord\n FROM pg_index ix\n WHERE ix.indrelid = %(table_oid)s AND ix.indisprimary\n ) k ON a.attnum=k.attnum\n WHERE a.attrelid = 
%(table_oid)s\n ORDER BY k.ord\n \n2020-08-15 20:44:56,371 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,375 INFO sqlalchemy.engine.base.Engine \n SELECT conname\n FROM pg_catalog.pg_constraint r\n WHERE r.conrelid = %(table_oid)s AND r.contype = 'p'\n ORDER BY 1\n \n2020-08-15 20:44:56,376 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,384 INFO sqlalchemy.engine.base.Engine \n SELECT r.conname,\n pg_catalog.pg_get_constraintdef(r.oid, true) as condef,\n n.nspname as conschema\n FROM pg_catalog.pg_constraint r,\n pg_namespace n,\n pg_class c\n\n WHERE r.conrelid = %(table)s AND\n r.contype = 'f' AND\n c.oid = confrelid AND\n n.oid = c.relnamespace\n ORDER BY 1\n \n2020-08-15 20:44:56,385 INFO sqlalchemy.engine.base.Engine {'table': 27436}\n2020-08-15 20:44:56,390 INFO sqlalchemy.engine.base.Engine \n SELECT\n i.relname as relname,\n ix.indisunique, ix.indexprs, ix.indpred,\n a.attname, a.attnum, c.conrelid, ix.indkey::varchar,\n ix.indoption::varchar, i.reloptions, am.amname,\n ix.indnkeyatts as indnkeyatts\n FROM\n pg_class t\n join pg_index ix on t.oid = ix.indrelid\n join pg_class i on i.oid = ix.indexrelid\n left outer join\n pg_attribute a\n on t.oid = a.attrelid and a.attnum = ANY(ix.indkey)\n left outer join\n pg_constraint c\n on (ix.indrelid = c.conrelid and\n ix.indexrelid = c.conindid and\n c.contype in ('p', 'u', 'x'))\n left outer join\n pg_am am\n on i.relam = am.oid\n WHERE\n t.relkind IN ('r', 'v', 'f', 'm', 'p')\n and t.oid = %(table_oid)s\n and ix.indisprimary = 'f'\n ORDER BY\n t.relname,\n i.relname\n \n2020-08-15 20:44:56,392 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,410 INFO sqlalchemy.engine.base.Engine \n SELECT\n cons.conname as name,\n cons.conkey as key,\n a.attnum as col_num,\n a.attname as col_name\n FROM\n pg_catalog.pg_constraint cons\n join pg_attribute a\n on cons.conrelid = a.attrelid AND\n a.attnum = ANY(cons.conkey)\n WHERE\n cons.conrelid = %(table_oid)s AND\n cons.contype = 'u'\n \n2020-08-15 20:44:56,414 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,425 INFO sqlalchemy.engine.base.Engine \n SELECT\n cons.conname as name,\n pg_get_constraintdef(cons.oid) as src\n FROM\n pg_catalog.pg_constraint cons\n WHERE\n cons.conrelid = %(table_oid)s AND\n cons.contype = 'c'\n \n2020-08-15 20:44:56,425 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,433 INFO sqlalchemy.engine.base.Engine \n SELECT\n pgd.description as table_comment\n FROM\n pg_catalog.pg_description pgd\n WHERE\n pgd.objsubid = 0 AND\n pgd.objoid = %(table_oid)s\n \n2020-08-15 20:44:56,435 INFO sqlalchemy.engine.base.Engine {'table_oid': 27436}\n2020-08-15 20:44:56,451 INFO sqlalchemy.engine.base.Engine \n SELECT c.oid\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE (pg_catalog.pg_table_is_visible(c.oid))\n AND c.relname = %(table_name)s AND c.relkind in\n ('r', 'v', 'm', 'f', 'p')\n \n"
],
[
"def listings(Rental, count=100):\n ####This function retrieves the \"count\" of listings upto 100 of data from the Rental class\n \n \n #limit count to 100\n count = 100 if count>100 else max(count,1)\n ### Design a query to retrieve the \"count\" no of listings\n rental_listing = session.query(Rental.id, Rental.title, Rental.price, Rental.image, Rental.url, Rental.bedrooms, Rental.rental_type, Rental.source, Rental.sqft).filter().order_by(Rental.post_published_date).limit(count)\n rental_listing_DF = pd.DataFrame(rental_listing)\n\n #Convert the DF to a dictionary\n rental_listing_dict = rental_listing_DF.T.to_dict()\n return rental_listing_dict",
"_____no_output_____"
],
[
"def comm_services(Community_Assets, count=100):\n ####This function retrieves the \"count\" of comm_services upto 100 of data from the Community_Assets class\n \n \n #limit count to 100\n count = 100 if count>100 else max(count,1)\n ### Design a query to retrieve the \"count\" no of services\n service_listing = session.query(Community_Assets.id, Community_Assets.agency_name, Community_Assets.e_mail, Community_Assets.fees, Community_Assets.hours, Community_Assets.application, Community_Assets.category, Community_Assets.address, Community_Assets.crisis_phone).limit(count)\n servicel_listing_DF = pd.DataFrame(service_listing)\n\n #Convert the DF to a dictionary\n servicel_listing_dict = servicel_listing_DF.T.to_dict()\n return servicel_listing_dict",
"_____no_output_____"
],
[
"def crime_details(Crime, Type=\"Assault\"):\n ####This function retrieves all the crime data in the last year based on type\n #[\"assault\", \"auto theft\", \"break and enter\", \"robbery\" ,'Homicide', and \"theft over\"]\n \n Type = \"Assault\" if Type not in ['Assault', 'Auto Theft', 'Break and Enter', 'Theft Over',\n 'Robbery', 'Homicide'] else Type\n \n\n ### Design a query to retrieve all the crime data based on type\n crime_listing = session.query(Crime.MCI, Crime.occurrencedate, Crime.reporteddate, Crime.offence, Crime.neighbourhood).filter(Crime.MCI==Type).order_by(Crime.occurrencedate)\n crime_listing_DF = pd.DataFrame(crime_listing)\n\n #Convert the DF to a dictionary\n crime_listing_dict = crime_listing_DF.T.to_dict()\n return crime_listing_dict",
"_____no_output_____"
],
[
"def income_details(Income):\n ####This function retrieves the income details for all FSA in Toronto\n \n\n ### Design a query to retrieve all the income data for all FSA in Toronto\n fsa_income = session.query(Income.FSA, Income.avg_income)\n fsa_income_DF = pd.DataFrame(fsa_income)\n\n #Convert the DF to a dictionary\n fsa_income_dict = fsa_income_DF.T.to_dict()\n return fsa_income_dict",
"_____no_output_____"
],
[
"listings(Rental, count=100)",
"2020-08-15 20:58:18,035 INFO sqlalchemy.engine.base.Engine SELECT \"Rental\".id AS \"Rental_id\", \"Rental\".title AS \"Rental_title\", \"Rental\".price AS \"Rental_price\", \"Rental\".image AS \"Rental_image\", \"Rental\".url AS \"Rental_url\", \"Rental\".bedrooms AS \"Rental_bedrooms\", \"Rental\".rental_type AS \"Rental_rental_type\", \"Rental\".source AS \"Rental_source\", \"Rental\".sqft AS \"Rental_sqft\" \nFROM \"Rental\" ORDER BY \"Rental\".post_published_date \n LIMIT %(param_1)s\n2020-08-15 20:58:18,037 INFO sqlalchemy.engine.base.Engine {'param_1': 100}\n"
],
[
"comm_services(Community_Assets, count=100)",
"2020-08-15 21:13:06,686 INFO sqlalchemy.engine.base.Engine SELECT \"Community_Assets\".id AS \"Community_Assets_id\", \"Community_Assets\".agency_name AS \"Community_Assets_agency_name\", \"Community_Assets\".e_mail AS \"Community_Assets_e_mail\", \"Community_Assets\".fees AS \"Community_Assets_fees\", \"Community_Assets\".hours AS \"Community_Assets_hours\", \"Community_Assets\".application AS \"Community_Assets_application\", \"Community_Assets\".category AS \"Community_Assets_category\", \"Community_Assets\".address AS \"Community_Assets_address\", \"Community_Assets\".crisis_phone AS \"Community_Assets_crisis_phone\" \nFROM \"Community_Assets\" \n LIMIT %(param_1)s\n2020-08-15 21:13:06,688 INFO sqlalchemy.engine.base.Engine {'param_1': 100}\n"
],
[
"crime_details(Crime, \"Homicide\")",
"2020-08-15 21:24:40,833 INFO sqlalchemy.engine.base.Engine SELECT \"Crime\".\"MCI\" AS \"Crime_MCI\", \"Crime\".occurrencedate AS \"Crime_occurrencedate\", \"Crime\".reporteddate AS \"Crime_reporteddate\", \"Crime\".offence AS \"Crime_offence\", \"Crime\".neighbourhood AS \"Crime_neighbourhood\" \nFROM \"Crime\" \nWHERE \"Crime\".\"MCI\" = %(MCI_1)s ORDER BY \"Crime\".occurrencedate\n2020-08-15 21:24:40,837 INFO sqlalchemy.engine.base.Engine {'MCI_1': 'Homicide'}\n"
],
[
"#limit count to 100\ncount = 100\ncount = 100 if count>100 else max(count,1)\nrental_listing = session.query(Rental.id, Rental.title, Rental.price, Rental.image, Rental.url, Rental.bedrooms, Rental.rental_type, Rental.source, Rental.sqft).filter().order_by(Rental.post_published_date).limit(count)\nrental_listing_DF = pd.DataFrame(rental_listing)\n",
"2020-08-15 20:54:02,989 INFO sqlalchemy.engine.base.Engine SELECT \"Rental\".id AS \"Rental_id\", \"Rental\".title AS \"Rental_title\", \"Rental\".price AS \"Rental_price\", \"Rental\".image AS \"Rental_image\", \"Rental\".url AS \"Rental_url\", \"Rental\".bedrooms AS \"Rental_bedrooms\", \"Rental\".rental_type AS \"Rental_rental_type\", \"Rental\".source AS \"Rental_source\", \"Rental\".sqft AS \"Rental_sqft\" \nFROM \"Rental\" ORDER BY \"Rental\".post_published_date \n LIMIT %(param_1)s\n2020-08-15 20:54:02,990 INFO sqlalchemy.engine.base.Engine {'param_1': 100}\n"
],
[
"income_details(Income)",
"2020-08-15 22:02:13,573 INFO sqlalchemy.engine.base.Engine SELECT \"Income\".\"FSA\" AS \"Income_FSA\", \"Income\".avg_income AS \"Income_avg_income\" \nFROM \"Income\"\n2020-08-15 22:02:13,576 INFO sqlalchemy.engine.base.Engine {}\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a8482c469ca3e2ebbfd9794716d5cd6624ddb15
| 3,903 |
ipynb
|
Jupyter Notebook
|
zh_CN_Jupyter/doc/src/devdocs/sanitizers.md.ipynb
|
aifact/JuliaZH.jl
|
73a5eb3a7823bb95a37e8529be074a28d41cd78f
|
[
"MIT"
] | null | null | null |
zh_CN_Jupyter/doc/src/devdocs/sanitizers.md.ipynb
|
aifact/JuliaZH.jl
|
73a5eb3a7823bb95a37e8529be074a28d41cd78f
|
[
"MIT"
] | null | null | null |
zh_CN_Jupyter/doc/src/devdocs/sanitizers.md.ipynb
|
aifact/JuliaZH.jl
|
73a5eb3a7823bb95a37e8529be074a28d41cd78f
|
[
"MIT"
] | null | null | null | 47.024096 | 164 | 0.66641 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a848871d9c58c7860d25a02ff6f42c61921b941
| 3,806 |
ipynb
|
Jupyter Notebook
|
scripts/data/process_era5.ipynb
|
tobifinn/ensemble_transformer
|
9b31f193048a31efd6aacb759e8a8b4a28734e6c
|
[
"MIT"
] | 7 |
2021-01-03T11:37:31.000Z
|
2022-01-28T09:13:19.000Z
|
scripts/data/process_era5.ipynb
|
tobifinn/ensemble_transformer
|
9b31f193048a31efd6aacb759e8a8b4a28734e6c
|
[
"MIT"
] | 3 |
2021-06-21T16:44:45.000Z
|
2021-10-15T11:47:15.000Z
|
scripts/data/process_era5.ipynb
|
tobifinn/ensemble_transformer
|
9b31f193048a31efd6aacb759e8a8b4a28734e6c
|
[
"MIT"
] | null | null | null | 25.205298 | 128 | 0.504204 |
[
[
[
"import xarray as xr\nimport distributed",
"_____no_output_____"
],
[
"cluster = distributed.LocalCluster(n_workers=4, threads_per_worker=1, local_directory='/tmp')\nclient = distributed.Client(cluster)\nclient",
"_____no_output_____"
]
],
[
[
"# Split ERA5 into training / test",
"_____no_output_____"
]
],
[
[
"ds_era5 = xr.open_dataset('../data/raw/era5/regridded_merged.nc').chunk({'time': 2})",
"_____no_output_____"
],
[
"ds_era5 = ds_era5.rename({'lat': 'latitude', 'lon': 'longitude'})",
"_____no_output_____"
],
[
"ds_era5_train = ds_era5.sel(time=slice('2017-01-01 00:00', '2018-12-31 12:00'))[['t2m']]\nds_era5_test = ds_era5.sel(time=slice('2019-01-01 00:00', '2019-12-31 12:00'))[['t2m']]",
"_____no_output_____"
],
[
"ds_era5_train.to_zarr(\n '../data/processed/era5/ds_train',\n encoding={\n 't2m': {'dtype': 'float32', 'scale_factor': 1.0, 'add_offset': 0.0},\n }\n)\nds_era5_test.to_zarr(\n '../data/processed/era5/ds_test',\n encoding={\n 't2m': {'dtype': 'float32', 'scale_factor': 1.0, 'add_offset': 0.0},\n }\n)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a849a04fffa98fe7743430768f30f1d9dd2d2ac
| 5,964 |
ipynb
|
Jupyter Notebook
|
online_judges/license_key/format_license_key_challenge.ipynb
|
sophomore99/PythonInterective
|
f2ff4d798274218e8543e071141b60c35e86a3eb
|
[
"Apache-2.0"
] | 10 |
2017-08-11T15:53:02.000Z
|
2021-11-06T12:23:06.000Z
|
online_judges/license_key/format_license_key_challenge.ipynb
|
andy1729/interactive-coding-challenges
|
ba3ec6f492b2b9e1a7e266b5f3db91cffa42b7ec
|
[
"Apache-2.0"
] | 1 |
2020-02-07T18:25:36.000Z
|
2020-02-07T18:25:36.000Z
|
online_judges/license_key/format_license_key_challenge.ipynb
|
andy1729/interactive-coding-challenges
|
ba3ec6f492b2b9e1a7e266b5f3db91cffa42b7ec
|
[
"Apache-2.0"
] | 7 |
2017-09-18T09:19:02.000Z
|
2019-11-22T06:15:50.000Z
| 29.37931 | 375 | 0.563213 |
[
[
[
"This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).",
"_____no_output_____"
],
[
"# Challenge Notebook",
"_____no_output_____"
],
[
"## Problem: Format license keys.\n\nSee the [LeetCode](https://leetcode.com/problems/license-key-formatting/) problem page.\n\n<pre>\nNow you are given a string S, which represents a software license key which we would like to format. The string S is composed of alphanumerical characters and dashes. The dashes split the alphanumerical characters within the string into groups. (i.e. if there are M dashes, the string is split into M+1 groups). The dashes in the given string are possibly misplaced.\n\nWe want each group of characters to be of length K (except for possibly the first group, which could be shorter, but still must contain at least one character). To satisfy this requirement, we will reinsert dashes. Additionally, all the lower case letters in the string must be converted to upper case.\n\nSo, you are given a non-empty string S, representing a license key to format, and an integer K. And you need to return the license key formatted according to the description above.\n\nExample 1:\nInput: S = \"2-4A0r7-4k\", K = 4\n\nOutput: \"24A0-R74K\"\n\nExplanation: The string S has been split into two parts, each part has 4 characters.\nExample 2:\nInput: S = \"2-4A0r7-4k\", K = 3\n\nOutput: \"24-A0R-74K\"\n\nExplanation: The string S has been split into three parts, each part has 3 characters except the first part as it could be shorter as said above.\nNote:\nThe length of string S will not exceed 12,000, and K is a positive integer.\nString S consists only of alphanumerical characters (a-z and/or A-Z and/or 0-9) and dashes(-).\nString S is non-empty.\n</pre>\n\n* [Constraints](#Constraints)\n* [Test Cases](#Test-Cases)\n* [Algorithm](#Algorithm)\n* [Code](#Code)\n* [Unit Test](#Unit-Test)\n* [Solution Notebook](#Solution-Notebook)",
"_____no_output_____"
],
[
"## Constraints\n\n* Is the output a string?\n * Yes\n* Can we change the input string?\n * No, you can't modify the input string\n* Can we assume the inputs are valid?\n * No\n* Can we assume this fits memory?\n * Yes",
"_____no_output_____"
],
[
"## Test Cases\n\n* None -> TypeError\n* '---', k=3 -> ''\n* '2-4A0r7-4k', k=3 -> '24-A0R-74K'\n* '2-4A0r7-4k', k=4 -> '24A0-R74K'",
"_____no_output_____"
],
[
"## Algorithm\n\nRefer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.",
"_____no_output_____"
],
[
"## Code",
"_____no_output_____"
]
],
[
[
"class Solution(object):\n\n def format_license_key(self, license_key, k):\n # TODO: Implement me\n pass",
"_____no_output_____"
]
],
[
[
"## Unit Test",
"_____no_output_____"
],
[
"**The following unit test is expected to fail until you solve the challenge.**",
"_____no_output_____"
]
],
[
[
"# %load test_format_license_key.py\nfrom nose.tools import assert_equal, assert_raises\n\n\nclass TestSolution(object):\n\n def test_format_license_key(self):\n solution = Solution()\n assert_raises(TypeError, solution.format_license_key, None, None)\n license_key = '---'\n k = 3\n expected = ''\n assert_equal(solution.format_license_key(license_key, k), expected)\n license_key = '2-4A0r7-4k'\n k = 3\n expected = '24-A0R-74K'\n assert_equal(solution.format_license_key(license_key, k), expected)\n license_key = '2-4A0r7-4k'\n k = 4\n expected = '24A0-R74K'\n assert_equal(solution.format_license_key(license_key, k), expected)\n print('Success: test_format_license_key')\n\ndef main():\n test = TestSolution()\n test.test_format_license_key()\n\n\nif __name__ == '__main__':\n main()",
"_____no_output_____"
]
],
[
[
"## Solution Notebook\n\nReview the [Solution Notebook]() for a discussion on algorithms and code solutions.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a84a843c2ae75ef063f097a2987a0537bd8ab0b
| 524,322 |
ipynb
|
Jupyter Notebook
|
tensorflow_probability/examples/jupyter_notebooks/Fitting_DPMM_Using_pSGLD.ipynb
|
wataruhashimoto52/probability
|
12e3f256544eadea6e863868da825614f4423eb0
|
[
"Apache-2.0"
] | 2 |
2020-12-17T20:43:24.000Z
|
2021-06-11T22:09:16.000Z
|
tensorflow_probability/examples/jupyter_notebooks/Fitting_DPMM_Using_pSGLD.ipynb
|
wataruhashimoto52/probability
|
12e3f256544eadea6e863868da825614f4423eb0
|
[
"Apache-2.0"
] | 2 |
2021-08-25T16:14:51.000Z
|
2022-02-10T04:47:11.000Z
|
tensorflow_probability/examples/jupyter_notebooks/Fitting_DPMM_Using_pSGLD.ipynb
|
wataruhashimoto52/probability
|
12e3f256544eadea6e863868da825614f4423eb0
|
[
"Apache-2.0"
] | 1 |
2020-10-22T21:09:22.000Z
|
2020-10-22T21:09:22.000Z
| 357.411043 | 110,190 | 0.91028 |
[
[
[
"##### Copyright 2018 The TensorFlow Probability Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\n",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Fitting Dirichlet Process Mixture Model Using Preconditioned Stochastic Gradient Langevin Dynamics\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/probability/examples/Fitting_DPMM_Using_pSGLD\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Fitting_DPMM_Using_pSGLD.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Fitting_DPMM_Using_pSGLD.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Fitting_DPMM_Using_pSGLD.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"In this notebook, we will demonstrate how to cluster a large number of samples and infer the number of clusters simultaneously by fitting a Dirichlet Process Mixture of Gaussian distribution. We use Preconditioned Stochastic Gradient Langevin Dynamics (pSGLD) for inference. ",
"_____no_output_____"
],
[
"## Table of contents",
"_____no_output_____"
],
[
"1. Samples\n\n1. Model\n\n1. Optimization\n\n1. Visualize the result\n\n 4.1. Clustered result\n\n 4.2. Visualize uncertainty\n \n 4.3. Mean and scale of selected mixture component\n \n 4.4. Mixture weight of each mixture component\n \n 4.5. Convergence of $\\alpha$\n \n 4.6. Inferred number of clusters over iterations\n \n 4.7. Fitting the model using RMSProp\n\n1. Conclusion",
"_____no_output_____"
],
[
"---\n",
"_____no_output_____"
],
[
"## 1. Samples",
"_____no_output_____"
],
[
"First, we set up a toy dataset. We generate 50,000 random samples from three bivariate Gaussian distributions.",
"_____no_output_____"
]
],
[
[
"import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp",
"_____no_output_____"
],
[
"plt.style.use('ggplot')\ntfd = tfp.distributions",
"_____no_output_____"
],
[
"def session_options(enable_gpu_ram_resizing=True):\n \"\"\"Convenience function which sets common `tf.Session` options.\"\"\"\n config = tf.ConfigProto()\n config.log_device_placement = True\n if enable_gpu_ram_resizing:\n # `allow_growth=True` makes it possible to connect multiple colabs to your\n # GPU. Otherwise the colab malloc's all GPU ram.\n config.gpu_options.allow_growth = True\n return config\n\ndef reset_sess(config=None):\n \"\"\"Convenience function to create the TF graph and session, or reset them.\"\"\"\n if config is None:\n config = session_options()\n tf.reset_default_graph()\n global sess\n try:\n sess.close()\n except:\n pass\n sess = tf.InteractiveSession(config=config)",
"_____no_output_____"
],
[
"# For reproducibility\nrng = np.random.RandomState(seed=45)\ntf.set_random_seed(76)\n\n# Precision\ndtype = np.float64\n\n# Number of training samples\nnum_samples = 50000\n\n# Ground truth loc values which we will infer later on. The scale is 1.\ntrue_loc = np.array([[-4, -4],\n [0, 0],\n [4, 4]], dtype)\n\ntrue_components_num, dims = true_loc.shape\n\n# Generate training samples from ground truth loc\ntrue_hidden_component = rng.randint(0, true_components_num, num_samples)\nobservations = (true_loc[true_hidden_component]\n + rng.randn(num_samples, dims).astype(dtype))",
"_____no_output_____"
],
[
"# Visualize samples\nplt.scatter(observations[:, 0], observations[:, 1], 1)\nplt.axis([-10, 10, -10, 10])\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 2. Model",
"_____no_output_____"
],
[
"Here, we define a Dirichlet Process Mixture of Gaussian distribution with Symmetric Dirichlet Prior. Throughout the notebook, vector quantities are written in bold. Over $i\\in\\{1,\\ldots,N\\}$ samples, the model with a mixture of $j \\in\\{1,\\ldots,K\\}$ Gaussian distributions is formulated as follow:\n\n$$\\begin{align*}\np(\\boldsymbol{x}_1,\\cdots, \\boldsymbol{x}_N) &=\\prod_{i=1}^N \\text{GMM}(x_i), \\\\\n&\\,\\quad \\text{with}\\;\\text{GMM}(x_i)=\\sum_{j=1}^K\\pi_j\\text{Normal}(x_i\\,|\\,\\text{loc}=\\boldsymbol{\\mu_{j}},\\,\\text{scale}=\\boldsymbol{\\sigma_{j}})\\\\ \n\\end{align*}$$\nwhere:\n\n$$\\begin{align*}\nx_i&\\sim \\text{Normal}(\\text{loc}=\\boldsymbol{\\mu}_{z_i},\\,\\text{scale}=\\boldsymbol{\\sigma}_{z_i}) \\\\\nz_i &= \\text{Categorical}(\\text{prob}=\\boldsymbol{\\pi}),\\\\\n&\\,\\quad \\text{with}\\;\\boldsymbol{\\pi}=\\{\\pi_1,\\cdots,\\pi_K\\}\\\\\n\\boldsymbol{\\pi}&\\sim\\text{Dirichlet}(\\text{concentration}=\\{\\frac{\\alpha}{K},\\cdots,\\frac{\\alpha}{K}\\})\\\\\n\\alpha&\\sim \\text{InverseGamma}(\\text{concentration}=1,\\,\\text{rate}=1)\\\\\n\\boldsymbol{\\mu_j} &\\sim \\text{Normal}(\\text{loc}=\\boldsymbol{0}, \\,\\text{scale}=\\boldsymbol{1})\\\\\n\\boldsymbol{\\sigma_j} &\\sim \\text{InverseGamma}(\\text{concentration}=\\boldsymbol{1},\\,\\text{rate}=\\boldsymbol{1})\\\\\n\\end{align*}$$\n\nOur goal is to assign each $x_i$ to the $j$th cluster through $z_i$ which represents the inferred index of a cluster.\n\nFor an ideal Dirichlet Mixture Model, $K$ is set to $\\infty$. However, it is known that one can approximate a Dirichlet Mixture Model with a sufficiently large $K$. Note that although we arbitrarily set an initial value of $K$, an optimal number of clusters is also inferred through optimization, unlike a simple Gaussian Mixture Model.\n\nIn this notebook, we use a bivariate Gaussian distribution as a mixture component and set $K$ to 30.",
"_____no_output_____"
]
],
[
[
"reset_sess()\n\n# Upperbound on K\nmax_cluster_num = 30\n\n# Define trainable variables.\nmix_probs = tf.nn.softmax(\n tf.Variable(\n name='mix_probs',\n initial_value=np.ones([max_cluster_num], dtype) / max_cluster_num))\n\nloc = tf.Variable(\n name='loc',\n initial_value=np.random.uniform(\n low=-9, #set around minimum value of sample value\n high=9, #set around maximum value of sample value\n size=[max_cluster_num, dims]))\n\nprecision = tf.nn.softplus(tf.Variable(\n name='precision',\n initial_value=\n np.ones([max_cluster_num, dims], dtype=dtype)))\n\nalpha = tf.nn.softplus(tf.Variable(\n name='alpha',\n initial_value=\n np.ones([1], dtype=dtype)))\n\ntraining_vals = [mix_probs, alpha, loc, precision]\n\n\n# Prior distributions of the training variables\n\n#Use symmetric Dirichlet prior as finite approximation of Dirichlet process.\nrv_symmetric_dirichlet_process = tfd.Dirichlet(\n concentration=np.ones(max_cluster_num, dtype) * alpha / max_cluster_num,\n name='rv_sdp')\n\nrv_loc = tfd.Independent(\n tfd.Normal(\n loc=tf.zeros([max_cluster_num, dims], dtype=dtype),\n scale=tf.ones([max_cluster_num, dims], dtype=dtype)),\n reinterpreted_batch_ndims=1,\n name='rv_loc')\n\n\nrv_precision = tfd.Independent(\n tfd.InverseGamma(\n concentration=np.ones([max_cluster_num, dims], dtype),\n rate=np.ones([max_cluster_num, dims], dtype)),\n reinterpreted_batch_ndims=1,\n name='rv_precision')\n\nrv_alpha = tfd.InverseGamma(\n concentration=np.ones([1], dtype=dtype),\n rate=np.ones([1]),\n name='rv_alpha')\n\n# Define mixture model\nrv_observations = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=mix_probs),\n components_distribution=tfd.MultivariateNormalDiag(\n loc=loc,\n scale_diag=precision))",
"_____no_output_____"
]
],
[
[
"## 3. Optimization\n",
"_____no_output_____"
],
[
"We optimize the model with Preconditioned Stochastic Gradient Langevin Dynamics (pSGLD), which enables us to optimize a model over a large number of samples in a mini-batch gradient descent manner. \n\nTo update parameters $\\boldsymbol{\\theta}\\equiv\\{\\boldsymbol{\\pi},\\,\\alpha,\\, \\boldsymbol{\\mu_j},\\,\\boldsymbol{\\sigma_j}\\}$ in $t\\,$th iteration with mini-batch size $M$, the update is sampled as:\n\n$$\\begin{align*}\n\\Delta \\boldsymbol { \\theta } _ { t } & \\sim \\frac { \\epsilon _ { t } } { 2 } \\bigl[ G \\left( \\boldsymbol { \\theta } _ { t } \\right) \\bigl( \\nabla _ { \\boldsymbol { \\theta } } \\log p \\left( \\boldsymbol { \\theta } _ { t } \\right) \n + \\frac { N } { M } \\sum _ { k = 1 } ^ { M } \\nabla _ \\boldsymbol { \\theta } \\log \\text{GMM}(x_{t_k})\\bigr) + \\sum_\\boldsymbol{\\theta}\\nabla_\\theta G \\left( \\boldsymbol { \\theta } _ { t } \\right) \\bigr]\\\\\n&+ G ^ { \\frac { 1 } { 2 } } \\left( \\boldsymbol { \\theta } _ { t } \\right) \\text { Normal } \\left( \\text{loc}=\\boldsymbol{0} ,\\, \\text{scale}=\\epsilon _ { t }\\boldsymbol{1} \\right)\\\\\n\\end{align*}$$\n\nIn the above equation, $\\epsilon _ { t }$ is learning rate at $t\\,$th iteration and $\\log p(\\theta_t)$ is a sum of log prior distributions of $\\theta$. $G ( \\boldsymbol { \\theta } _ { t })$ is a preconditioner which adjusts the scale of the gradient of each parameter. \n",
"_____no_output_____"
]
],
[
[
"# Learning rates and decay\nstarter_learning_rate = 1e-6\nend_learning_rate = 1e-10\ndecay_steps = 1e4\n\n# Number of training steps\ntraining_steps = 10000\n\n# Mini-batch size\nbatch_size = 20\n\n# Sample size for parameter posteriors\nsample_size = 100",
"_____no_output_____"
]
],
[
[
"We will use the joint log probability of the likelihood $\\text{GMM}(x_{t_k})$ and the prior probabilities $p(\\theta_t)$ as the loss function for pSGLD.\n\nNote that as specified in the [API of pSGLD](https://www.tensorflow.org/probability/api_docs/python/tfp/optimizer/StochasticGradientLangevinDynamics), we need to divide the sum of the prior probabilities by sample size $N$. ",
"_____no_output_____"
]
],
[
[
"# Placeholder for mini-batch\nobservations_tensor = tf.compat.v1.placeholder(dtype, shape=[batch_size, dims])\n\n# Define joint log probabilities\n# Notice that each prior probability should be divided by num_samples and\n# likelihood is divided by batch_size for pSGLD optimization.\nlog_prob_parts = [\n rv_loc.log_prob(loc) / num_samples,\n rv_precision.log_prob(precision) / num_samples,\n rv_alpha.log_prob(alpha) / num_samples,\n rv_symmetric_dirichlet_process.log_prob(mix_probs)[..., tf.newaxis]\n / num_samples,\n rv_observations.log_prob(observations_tensor) / batch_size\n]\njoint_log_prob = tf.reduce_sum(tf.concat(log_prob_parts, axis=-1), axis=-1)",
"_____no_output_____"
],
[
"# Make mini-batch generator\ndx = tf.compat.v1.data.Dataset.from_tensor_slices(observations)\\\n .shuffle(500).repeat().batch(batch_size)\niterator = tf.compat.v1.data.make_one_shot_iterator(dx)\nnext_batch = iterator.get_next()\n\n# Define learning rate scheduling\nglobal_step = tf.Variable(0, trainable=False)\nlearning_rate = tf.train.polynomial_decay(\n starter_learning_rate,\n global_step, decay_steps,\n end_learning_rate, power=1.)\n\n# Set up the optimizer. Don't forget to set data_size=num_samples.\noptimizer_kernel = tfp.optimizer.StochasticGradientLangevinDynamics(\n learning_rate=learning_rate,\n preconditioner_decay_rate=0.99,\n burnin=1500,\n data_size=num_samples)\n\ntrain_op = optimizer_kernel.minimize(-joint_log_prob)\n\n# Arrays to store samples\nmean_mix_probs_mtx = np.zeros([training_steps, max_cluster_num])\nmean_alpha_mtx = np.zeros([training_steps, 1])\nmean_loc_mtx = np.zeros([training_steps, max_cluster_num, dims])\nmean_precision_mtx = np.zeros([training_steps, max_cluster_num, dims])\n\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nstart = time.time()\nfor it in range(training_steps):\n [\n mean_mix_probs_mtx[it, :],\n mean_alpha_mtx[it, 0],\n mean_loc_mtx[it, :, :],\n mean_precision_mtx[it, :, :],\n _\n ] = sess.run([\n *training_vals,\n train_op\n ], feed_dict={\n observations_tensor: sess.run(next_batch)})\n\nelapsed_time_psgld = time.time() - start\nprint(\"Elapsed time: {} seconds\".format(elapsed_time_psgld))\n\n# Take mean over the last sample_size iterations\nmean_mix_probs_ = mean_mix_probs_mtx[-sample_size:, :].mean(axis=0)\nmean_alpha_ = mean_alpha_mtx[-sample_size:, :].mean(axis=0)\nmean_loc_ = mean_loc_mtx[-sample_size:, :].mean(axis=0)\nmean_precision_ = mean_precision_mtx[-sample_size:, :].mean(axis=0)",
"Elapsed time: 309.8013095855713 seconds\n"
]
],
[
[
"## 4. Visualize the result",
"_____no_output_____"
],
[
"### 4.1. Clustered result",
"_____no_output_____"
],
[
"First, we visualize the result of clustering.\n\n\nFor assigning each sample $x_i$ to a cluster $j$, we calculate the posterior of $z_i$ as:\n\n$$\\begin{align*}\nj = \\underset{z_i}{\\arg\\max}\\,p(z_i\\,|\\,x_i,\\,\\boldsymbol{\\theta})\n\\end{align*}$$",
"_____no_output_____"
]
],
[
[
"loc_for_posterior = tf.compat.v1.placeholder(\n dtype, [None, max_cluster_num, dims], name='loc_for_posterior')\nprecision_for_posterior = tf.compat.v1.placeholder(\n dtype, [None, max_cluster_num, dims], name='precision_for_posterior')\nmix_probs_for_posterior = tf.compat.v1.placeholder(\n dtype, [None, max_cluster_num], name='mix_probs_for_posterior')\n\n# Posterior of z (unnormalized)\nunnomarlized_posterior = tfd.MultivariateNormalDiag(\n loc=loc_for_posterior, scale_diag=precision_for_posterior)\\\n .log_prob(tf.expand_dims(tf.expand_dims(observations, axis=1), axis=1))\\\n + tf.log(mix_probs_for_posterior[tf.newaxis, ...])\n\n# Posterior of z (normarizad over latent states)\nposterior = unnomarlized_posterior\\\n - tf.reduce_logsumexp(unnomarlized_posterior, axis=-1)[..., tf.newaxis]\n\ncluster_asgmt = sess.run(tf.argmax(\n tf.reduce_mean(posterior, axis=1), axis=1), feed_dict={\n loc_for_posterior: mean_loc_mtx[-sample_size:, :],\n precision_for_posterior: mean_precision_mtx[-sample_size:, :],\n mix_probs_for_posterior: mean_mix_probs_mtx[-sample_size:, :]})\n\nidxs, count = np.unique(cluster_asgmt, return_counts=True)\n\nprint('Number of inferred clusters = {}\\n'.format(len(count)))\nnp.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n\nprint('Number of elements in each cluster = {}\\n'.format(count))\n\ndef convert_int_elements_to_consecutive_numbers_in(array):\n unique_int_elements = np.unique(array)\n for consecutive_number, unique_int_element in enumerate(unique_int_elements):\n array[array == unique_int_element] = consecutive_number\n return array\n\ncmap = plt.get_cmap('tab10')\nplt.scatter(\n observations[:, 0], observations[:, 1],\n 1,\n c=cmap(convert_int_elements_to_consecutive_numbers_in(cluster_asgmt)))\nplt.axis([-10, 10, -10, 10])\nplt.show()",
"Number of inferred clusters = 3\n\nNumber of elements in each cluster = [16911 16645 16444]\n\n"
]
],
[
[
"We can see an almost equal number of samples are assigned to appropriate clusters and the model has successfully inferred the correct number of clusters as well.\n",
"_____no_output_____"
],
[
"### 4.2. Visualize uncertainty",
"_____no_output_____"
],
[
"Here, we look at the uncertainty of the clustering result by visualizing it for each sample.\n\nWe calculate uncertainty by using entropy:\n\n$$\\begin{align*}\n\\text{Uncertainty}_\\text{entropy} = -\\frac{1}{K}\\sum^{K}_{z_i=1}\\sum^{O}_{l=1}p(z_i\\,|\\,x_i,\\,\\boldsymbol{\\theta}_l)\\log p(z_i\\,|\\,x_i,\\,\\boldsymbol{\\theta}_l)\n\\end{align*}$$\n\nIn pSGLD, we treat the value of a training parameter at each iteration as a sample from its posterior distribution. Thus, we calculate entropy over values from $O$ iterations for each parameter. The final entropy value is calculated by averaging entropies of all the cluster assignments.",
"_____no_output_____"
]
],
[
[
"# Calculate entropy\nposterior_in_exponential = tf.exp(posterior)\nuncertainty_in_entropy = tf.reduce_mean(-tf.reduce_sum(\n posterior_in_exponential\n * posterior,\n axis=1), axis=1)\n\nuncertainty_in_entropy_ = sess.run(uncertainty_in_entropy, feed_dict={\n loc_for_posterior: mean_loc_mtx[-sample_size:, :],\n precision_for_posterior: mean_precision_mtx[-sample_size:, :],\n mix_probs_for_posterior: mean_mix_probs_mtx[-sample_size:, :]\n})",
"_____no_output_____"
],
[
"plt.title('Entropy')\nsc = plt.scatter(observations[:, 0],\n observations[:, 1],\n 1,\n c=uncertainty_in_entropy_,\n cmap=plt.cm.viridis_r)\ncbar = plt.colorbar(sc,\n fraction=0.046,\n pad=0.04,\n ticks=[uncertainty_in_entropy_.min(),\n uncertainty_in_entropy_.max()])\ncbar.ax.set_yticklabels(['low', 'high'])\ncbar.set_label('Uncertainty', rotation=270)\nplt.show()",
"_____no_output_____"
]
],
[
[
"In the above graph, less luminance represents more uncertainty. \nWe can see the samples near the boundaries of the clusters have especially higher uncertainty. This is intuitively true, that those samples are difficult to cluster.",
"_____no_output_____"
],
[
"### 4.3. Mean and scale of selected mixture component",
"_____no_output_____"
],
[
"Next, we look at selected clusters' $\\mu_j$ and $\\sigma_j$.",
"_____no_output_____"
]
],
[
[
"for idx, numbe_of_samples in zip(idxs, count):\n print(\n 'Component id = {}, Number of elements = {}'\n .format(idx, numbe_of_samples))\n print(\n 'Mean loc = {}, Mean scale = {}\\n'\n .format(mean_loc_[idx, :], mean_precision_[idx, :]))",
"Component id = 0, Number of elements = 16911\nMean loc = [-4.030 -4.113], Mean scale = [ 0.994 0.972]\n\nComponent id = 4, Number of elements = 16645\nMean loc = [ 3.999 4.069], Mean scale = [ 1.038 1.046]\n\nComponent id = 5, Number of elements = 16444\nMean loc = [-0.005 -0.023], Mean scale = [ 0.967 1.025]\n\n"
]
],
[
[
"Again, the $\\boldsymbol{\\mu_j}$ and $\\boldsymbol{\\sigma_j}$ close to the ground truth.",
"_____no_output_____"
],
[
"### 4.4 Mixture weight of each mixture component",
"_____no_output_____"
],
[
"We also look at inferred mixture weights.",
"_____no_output_____"
]
],
[
[
"plt.ylabel('Mean posterior of mixture weight')\nplt.xlabel('Component')\nplt.bar(range(0, max_cluster_num), mean_mix_probs_)\nplt.show()",
"_____no_output_____"
]
],
[
[
"We see only a few (three) mixture component have significant weights and the rest of the weights have values close to zero. This also shows the model successfully inferred the correct number of mixture components which constitutes the distribution of the samples.",
"_____no_output_____"
],
[
"### 4.5. Convergence of $\\alpha$",
"_____no_output_____"
],
[
"We look at convergence of Dirichlet distribution's concentration parameter $\\alpha$.",
"_____no_output_____"
]
],
[
[
"print('Value of inferred alpha = {0:.3f}\\n'.format(mean_alpha_[0]))\nplt.ylabel('Sample value of alpha')\nplt.xlabel('Iteration')\nplt.plot(mean_alpha_mtx)\nplt.show()",
"Value of inferred alpha = 0.679\n\n"
]
],
[
[
"Considering the fact that smaller $\\alpha$ results in less expected number of clusters in a Dirichlet mixture model, the model seems to be learning the optimal number of clusters over iterations.",
"_____no_output_____"
],
[
"### 4.6. Inferred number of clusters over iterations",
"_____no_output_____"
],
[
"We visualize how the inferred number of clusters changes over iterations.\n\nTo do so, we infer the number of clusters over the iterations.",
"_____no_output_____"
]
],
[
[
"step = sample_size\nnum_of_iterations = 50\nestimated_num_of_clusters = []\ninterval = (training_steps - step) // (num_of_iterations - 1)\niterations = np.asarray(range(step, training_steps+1, interval))\nfor iteration in iterations:\n start_position = iteration-step\n end_position = iteration\n\n result = sess.run(tf.argmax(\n tf.reduce_mean(posterior, axis=1), axis=1), feed_dict={\n loc_for_posterior:\n mean_loc_mtx[start_position:end_position, :],\n precision_for_posterior:\n mean_precision_mtx[start_position:end_position, :],\n mix_probs_for_posterior:\n mean_mix_probs_mtx[start_position:end_position, :]})\n\n idxs, count = np.unique(result, return_counts=True)\n estimated_num_of_clusters.append(len(count))",
"_____no_output_____"
],
[
"plt.ylabel('Number of inferred clusters')\nplt.xlabel('Iteration')\nplt.yticks(np.arange(1, max(estimated_num_of_clusters) + 1, 1))\nplt.plot(iterations - 1, estimated_num_of_clusters)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Over the iterations, the number of clusters is getting closer to three. With the result of convergence of $\\alpha$ to smaller value over iterations, we can see the model is successfully learning the parameters to infer an optimal number of clusters.\n\nInterestingly, we can see the inference has already converged to the correct number of clusters in the early iterations, unlike $\\alpha$ converged in much later iterations. \n",
"_____no_output_____"
],
[
"### 4.7. Fitting the model using RMSProp ",
"_____no_output_____"
],
[
"In this section, to see the effectiveness of Monte Carlo sampling scheme of pSGLD, we use RMSProp to fit the model. We choose RMSProp for comparison because it comes without the sampling scheme and pSGLD is based on RMSProp.\n",
"_____no_output_____"
]
],
[
[
"# Learning rates and decay\nstarter_learning_rate_rmsprop = 1e-2\nend_learning_rate_rmsprop = 1e-4\ndecay_steps_rmsprop = 1e4\n\n# Number of training steps\ntraining_steps_rmsprop = 50000\n\n# Mini-batch size\nbatch_size_rmsprop = 20",
"_____no_output_____"
],
[
"# Define trainable variables.\nmix_probs_rmsprop = tf.nn.softmax(\n tf.Variable(\n name='mix_probs_rmsprop',\n initial_value=np.ones([max_cluster_num], dtype) / max_cluster_num))\n\nloc_rmsprop = tf.Variable(\n name='loc_rmsprop',\n initial_value=np.zeros([max_cluster_num, dims], dtype)\n + np.random.uniform(\n low=-9, #set around minimum value of sample value\n high=9, #set around maximum value of sample value\n size=[max_cluster_num, dims]))\n\nprecision_rmsprop = tf.nn.softplus(tf.Variable(\n name='precision_rmsprop',\n initial_value=\n np.ones([max_cluster_num, dims], dtype=dtype)))\n\nalpha_rmsprop = tf.nn.softplus(tf.Variable(\n name='alpha_rmsprop',\n initial_value=\n np.ones([1], dtype=dtype)))\n\ntraining_vals_rmsprop =\\\n [mix_probs_rmsprop, alpha_rmsprop, loc_rmsprop, precision_rmsprop]\n\n# Prior distributions of the training variables\n\n#Use symmetric Dirichlet prior as finite approximation of Dirichlet process.\nrv_symmetric_dirichlet_process_rmsprop = tfd.Dirichlet(\n concentration=np.ones(max_cluster_num, dtype)\n * alpha_rmsprop / max_cluster_num,\n name='rv_sdp_rmsprop')\n\nrv_loc_rmsprop = tfd.Independent(\n tfd.Normal(\n loc=tf.zeros([max_cluster_num, dims], dtype=dtype),\n scale=tf.ones([max_cluster_num, dims], dtype=dtype)),\n reinterpreted_batch_ndims=1,\n name='rv_loc_rmsprop')\n\n\nrv_precision_rmsprop = tfd.Independent(\n tfd.InverseGamma(\n concentration=np.ones([max_cluster_num, dims], dtype),\n rate=np.ones([max_cluster_num, dims], dtype)),\n reinterpreted_batch_ndims=1,\n name='rv_precision_rmsprop')\n\nrv_alpha_rmsprop = tfd.InverseGamma(\n concentration=np.ones([1], dtype=dtype),\n rate=np.ones([1]),\n name='rv_alpha_rmsprop')\n\n# Define mixture model\nrv_observations_rmsprop = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=mix_probs_rmsprop),\n components_distribution=tfd.MultivariateNormalDiag(\n loc=loc_rmsprop,\n scale_diag=precision_rmsprop))",
"_____no_output_____"
],
[
"og_prob_parts_rmsprop = [\n rv_loc_rmsprop.log_prob(loc_rmsprop),\n rv_precision_rmsprop.log_prob(precision_rmsprop),\n rv_alpha_rmsprop.log_prob(alpha_rmsprop),\n rv_symmetric_dirichlet_process_rmsprop\n .log_prob(mix_probs_rmsprop)[..., tf.newaxis],\n rv_observations_rmsprop.log_prob(observations_tensor)\n * num_samples / batch_size\n]\njoint_log_prob_rmsprop = tf.reduce_sum(\n tf.concat(log_prob_parts_rmsprop, axis=-1), axis=-1)",
"_____no_output_____"
],
[
"# Define learning rate scheduling\nglobal_step_rmsprop = tf.Variable(0, trainable=False)\nlearning_rate = tf.train.polynomial_decay(\n starter_learning_rate_rmsprop,\n global_step_rmsprop, decay_steps_rmsprop,\n end_learning_rate_rmsprop, power=1.)\n\n# Set up the optimizer. Don't forget to set data_size=num_samples.\noptimizer_kernel_rmsprop = tf.train.RMSPropOptimizer(\n learning_rate=learning_rate,\n decay=0.99)\n\ntrain_op_rmsprop = optimizer_kernel_rmsprop.minimize(-joint_log_prob_rmsprop)\n\ninit_rmsprop = tf.global_variables_initializer()\nsess.run(init_rmsprop)\n\nstart = time.time()\nfor it in range(training_steps_rmsprop):\n [\n _\n ] = sess.run([\n train_op_rmsprop\n ], feed_dict={\n observations_tensor: sess.run(next_batch)})\n\nelapsed_time_rmsprop = time.time() - start\nprint(\"RMSProp elapsed_time: {} seconds ({} iterations)\"\n .format(elapsed_time_rmsprop, training_steps_rmsprop))\nprint(\"pSGLD elapsed_time: {} seconds ({} iterations)\"\n .format(elapsed_time_psgld, training_steps))\n\nmix_probs_rmsprop_, alpha_rmsprop_, loc_rmsprop_, precision_rmsprop_ =\\\n sess.run(training_vals_rmsprop)",
"RMSProp elapsed_time: 53.7574200630188 seconds (50000 iterations)\npSGLD elapsed_time: 309.8013095855713 seconds (10000 iterations)\n"
]
],
[
[
"Compare to pSGLD, although the number of iterations for RMSProp is longer, optimization by RMSProp is much faster.\n\nNext, we look at the clustering result.",
"_____no_output_____"
]
],
[
[
"cluster_asgmt_rmsprop = sess.run(tf.argmax(\n tf.reduce_mean(posterior, axis=1), axis=1), feed_dict={\n loc_for_posterior: loc_rmsprop_[tf.newaxis, :],\n precision_for_posterior: precision_rmsprop_[tf.newaxis, :],\n mix_probs_for_posterior: mix_probs_rmsprop_[tf.newaxis, :]})\n\nidxs, count = np.unique(cluster_asgmt_rmsprop, return_counts=True)\n\nprint('Number of inferred clusters = {}\\n'.format(len(count)))\nnp.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n\nprint('Number of elements in each cluster = {}\\n'.format(count))\n\ncmap = plt.get_cmap('tab10')\nplt.scatter(\n observations[:, 0], observations[:, 1],\n 1,\n c=cmap(convert_int_elements_to_consecutive_numbers_in(\n cluster_asgmt_rmsprop)))\nplt.axis([-10, 10, -10, 10])\nplt.show()",
"Number of inferred clusters = 4\n\nNumber of elements in each cluster = [ 1644 15267 16647 16442]\n\n"
]
],
[
[
"The number of clusters was not correctly inferred by RMSProp optimization in our experiment. We also look at the mixture weight.",
"_____no_output_____"
]
],
[
[
"plt.ylabel('MAP inferece of mixture weight')\nplt.xlabel('Component')\nplt.bar(range(0, max_cluster_num), mix_probs_rmsprop_)\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can see the incorrect number of components have significant mixture weights.\n\nAlthough the optimization takes longer time, pSGLD, which has Monte Carlo sampling scheme, performed better in our experiment.",
"_____no_output_____"
],
[
"## 5. Conclusion",
"_____no_output_____"
],
[
"In this notebook, we have described how to cluster a large number of samples as well as to infer the number of clusters simultaneously by fitting a Dirichlet Process Mixture of Gaussian distribution using pSGLD.\n\nThe experiment has shown the model successfully clustered samples and inferred the correct number of clusters. Also, we have shown the Monte Carlo sampling scheme of pSGLD allows us to visualize uncertainty in the result. Not only clustering the samples but also we have seen the model could infer the correct parameters of mixture components. On the relationship between the parameters and the number of inferred clusters, we have investigated how the model learns the parameter to control the number of effective clusters by visualizing the correlation between convergence of 𝛼 and the number of inferred clusters. Lastly, we have looked at the results of fitting the model using RMSProp. We have seen RMSProp, which is the optimizer without Monte Carlo sampling scheme, works considerably faster than pSGLD but has produced less accuracy in clustering.\n\nAlthough the toy dataset only had 50,000 samples with only two dimensions, the mini-batch manner optimization used here is scalable for much larger datasets.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
4a84a96e50905f63804f703474d8a80baf197cca
| 677,429 |
ipynb
|
Jupyter Notebook
|
Project 1.ipynb
|
omkarmayekar2598/Project-1-I.T-Vedant-
|
c94d0c04e27e5c2f882ce1854a60a6ff4aa47087
|
[
"MIT"
] | null | null | null |
Project 1.ipynb
|
omkarmayekar2598/Project-1-I.T-Vedant-
|
c94d0c04e27e5c2f882ce1854a60a6ff4aa47087
|
[
"MIT"
] | null | null | null |
Project 1.ipynb
|
omkarmayekar2598/Project-1-I.T-Vedant-
|
c94d0c04e27e5c2f882ce1854a60a6ff4aa47087
|
[
"MIT"
] | null | null | null | 138.420311 | 282,812 | 0.830367 |
[
[
[
"# Problem Statement",
"_____no_output_____"
],
[
"The Indian Premier League (IPL) is a professional Twenty20 cricket league in India contested during March or April and May of every year by eight teams representing eight different cities in India.The league was founded by the Board of Control for Cricket in India (BCCI) in 2008. The IPL has an exclusive window in ICC Future Tours Programme.In this we have to perform few data analysis using pandas,numpy and visualization libraries. In this few task has been completed like which team has won more number of times,at which venue more number of matches has been played and few other relevant task has been done.The Dataset is download from kaggle and contains around 6 csv's but For the current analysis we will be using only matches Played Data i.e matches.csv.",
"_____no_output_____"
],
[
"# Import Relevant Libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as p\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"df=pd.read_csv('matches.csv')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 756 entries, 0 to 755\nData columns (total 18 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 756 non-null int64 \n 1 Season 756 non-null object\n 2 city 749 non-null object\n 3 date 756 non-null object\n 4 team1 756 non-null object\n 5 team2 756 non-null object\n 6 toss_winner 756 non-null object\n 7 toss_decision 756 non-null object\n 8 result 756 non-null object\n 9 dl_applied 756 non-null int64 \n 10 winner 752 non-null object\n 11 win_by_runs 756 non-null int64 \n 12 win_by_wickets 756 non-null int64 \n 13 player_of_match 752 non-null object\n 14 venue 756 non-null object\n 15 umpire1 754 non-null object\n 16 umpire2 754 non-null object\n 17 umpire3 119 non-null object\ndtypes: int64(4), object(14)\nmemory usage: 106.4+ KB\n"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df['city'].nunique()",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
],
[
"# Cleaning unnecessary data",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"We wont be using the Umpires Columns ('umpire1', 'umpire2', 'umpire3') in this analysis so we will remove those fields using .drop() method",
"_____no_output_____"
]
],
[
[
"df.drop(['umpire1','umpire2','umpire3'],axis=1,inplace=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df['winner'].nunique()",
"_____no_output_____"
],
[
"df['team1'].unique()",
"_____no_output_____"
],
[
"df['city'].unique()",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
],
[
"As the teams names get changed as per the season we need to replace those names for all required teams.",
"_____no_output_____"
]
],
[
[
"df.team1.replace({'Rising Pune Supergiants' : 'Rising Pune Supergiant', 'Delhi Daredevils':'Delhi Capitals','Pune Warriors' : 'Rising Pune Supergiant'},inplace=True)\ndf.team2.replace({'Rising Pune Supergiants' : 'Rising Pune Supergiant', 'Delhi Daredevils':'Delhi Capitals','Pune Warriors' : 'Rising Pune Supergiant'},inplace=True)\ndf.toss_winner.replace({'Rising Pune Supergiants' : 'Rising Pune Supergiant', 'Delhi Daredevils':'Delhi Capitals','Pune Warriors' : 'Rising Pune Supergiant'},inplace=True)\ndf.winner.replace({'Rising Pune Supergiants' : 'Rising Pune Supergiant', 'Delhi Daredevils':'Delhi Capitals','Pune Warriors' : 'Rising Pune Supergiant'},inplace=True)\ndf.city.replace({'Bangalore' : 'Bengaluru'},inplace=True)",
"_____no_output_____"
],
[
"df['team1'].unique()",
"_____no_output_____"
],
[
"df['team2'].unique()",
"_____no_output_____"
],
[
"df['city'].unique()",
"_____no_output_____"
],
[
"df.isnull().sum()",
"_____no_output_____"
]
],
[
[
"Checking the position of null values",
"_____no_output_____"
]
],
[
[
"null_df = df[df.isna().any(axis=1)]",
"_____no_output_____"
],
[
"null_df",
"_____no_output_____"
],
[
"df.loc[460:470]\n",
"_____no_output_____"
],
[
"df.loc[[461,462,466,468,469,474,476],'city'] = \"Dubai\"",
"_____no_output_____"
],
[
"df.loc[461:480]",
"_____no_output_____"
],
[
"df.isnull().sum()",
"_____no_output_____"
]
],
[
[
"Now we will analyse different types of data\n",
"_____no_output_____"
]
],
[
[
"df['id'].count()",
"_____no_output_____"
]
],
[
[
"# Few relevant task",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"# Checking for how many matches the result is normal or tie.",
"_____no_output_____"
],
[
"So now we can see that there are around 756 matches has been played. Now we have to find for how many matches the result is normal or tie.\n",
"_____no_output_____"
]
],
[
[
"regular_matches= df[df['result']== 'normal'].count()",
"_____no_output_____"
],
[
"regular_matches",
"_____no_output_____"
]
],
[
[
"So we can see there are around 13 matches whose result is tie or not played. Rest all matches are played normal i.e 743.",
"_____no_output_____"
]
],
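[
[
"A quicker way to verify this breakdown is to count each outcome of the `result` column directly (an added illustrative check on the same dataframe):",
"_____no_output_____"
]
],
[
[
"# Count how many matches ended with each result type (e.g. normal, tie, no result).\ndf['result'].value_counts()",
"_____no_output_____"
]
],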
[
[
"df['city'].unique()",
"_____no_output_____"
]
],
[
[
"Now let see how many matches has been played in each cities",
"_____no_output_____"
]
],
[
[
"cities=df.groupby('city')[['id']].count()",
"_____no_output_____"
],
[
"cities",
"_____no_output_____"
]
],
[
[
"Arranging data in organised manner",
"_____no_output_____"
]
],
[
[
"cities.rename(columns={'id':'matches'},inplace=True)\ncities = cities.sort_values('matches',ascending=True).reset_index()\ncities",
"_____no_output_____"
]
],
[
[
"# Importing Visualization Library",
"_____no_output_____"
],
[
"Performing visualization on number of matches played in each city",
"_____no_output_____"
]
],
[
[
"import seaborn as sns",
"_____no_output_____"
],
[
"plt.figure(figsize=(20,10))\nplt.title('Number Of Matches Played In Each City')\nsns.barplot(x='matches',y='city',data=cities)",
"_____no_output_____"
]
],
[
[
"# Now we will see total matches won by each team.",
"_____no_output_____"
]
],
[
[
"df.winner.unique()",
"_____no_output_____"
],
[
"winner_df = df.groupby('winner')[['id']].count()\nwinner_df = winner_df.sort_values('id', ascending=False).reset_index()\n\nwinner_df.rename(columns = {'id':'wins','winner':'Teams'},inplace=True)\nwinner_df",
"_____no_output_____"
],
[
"plt.figure(figsize=(30,20))\nplt.xlabel('Teams')\nplt.ylabel('Wins')\nplt.title('Matches Won By Each Team')\nsns.barplot(x='Teams',y='wins',data=winner_df)",
"_____no_output_____"
]
],
[
[
"# Now we will see the season with most number of matches",
"_____no_output_____"
]
],
[
[
"season_df = df.groupby('Season')[['id']].count()\nseason_df = season_df.sort_values('Season', ascending=False).reset_index()\nseason_df.rename(columns = {'id':'Matches','Season':'Year'},inplace = True)",
"_____no_output_____"
],
[
"season_df",
"_____no_output_____"
],
[
"plt.figure(figsize=(20,10))\nplt.title(\"Mathes Played In Each Season\",fontsize=30)\nplt.xlabel('Season',fontsize=30)\nplt.ylabel('Total Matches',fontsize=30)\nplt.xticks(rotation='60')\nplt.tick_params(labelsize=20)\nsns.barplot(x='Year', y='Matches', data=season_df)",
"_____no_output_____"
]
],
[
[
"# We will find out now most preferred decision on winning toss",
"_____no_output_____"
]
],
[
[
"df.toss_decision.unique()",
"_____no_output_____"
],
[
"decision_df = df.groupby('toss_decision')[['id']].count()\ndecision_df = decision_df.sort_values('id').reset_index()\ndecision_df.rename(columns={'id':'Total','toss_decision':'Decision'},inplace=True)",
"_____no_output_____"
],
[
"decision_df",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\nplt.title(\"Preferred Decision\",fontsize=30)\nplt.xlabel('Decision',fontsize=30)\nplt.ylabel('Total',fontsize=30)\nplt.tick_params(labelsize=20)\nsns.barplot(x='Decision', y= 'Total', data=decision_df)",
"_____no_output_____"
]
],
[
[
"So fielding is the most preferable decision after winning a toss",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"# So now we will check which decision is more beneficial",
"_____no_output_____"
]
],
[
[
"field_df = df.loc[(df['toss_winner'] == df['winner']) & (df['toss_decision'] == 'field'), ['id', 'winner','toss_decision']]",
"_____no_output_____"
],
[
"field_df.count()",
"_____no_output_____"
],
[
"bat_df = df.loc[(df['toss_winner'] == df['winner']) & (df['toss_decision'] == 'bat'), ['id', 'winner','toss_decision']]\n",
"_____no_output_____"
],
[
"bat_df.count()",
"_____no_output_____"
],
[
"frames = [bat_df, field_df]\nresult_df = pd.concat(frames)\nresult_df = result_df.groupby('toss_decision')[['id']].count()\nresult_df",
"_____no_output_____"
]
],
[
[
"So we can conclude here that the team who chooses fielding have more chances of winning.",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
]
],
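[
[
"(Added sketch) Raw counts favour fielding partly because fielding is chosen far more often. A fairer check is the success rate per decision, i.e. how often the toss winner went on to win the match for each choice; the cell below computes this from `df`.",
"_____no_output_____"
]
],
[
[
"# Success rate per toss decision: share of matches where the toss winner also won the match.\ntoss_winner_won = df[df['toss_winner'] == df['winner']]\nsuccess_rate = (toss_winner_won['toss_decision'].value_counts()\n                / df['toss_decision'].value_counts() * 100).round(1)\nprint(success_rate)",
"_____no_output_____"
]
],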
[
[
"result_df = result_df.sort_values('id').reset_index()\nresult_df.rename(columns={'id':'Total','toss_decision':'Decision'},inplace=True)\nresult_df",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\nplt.title(\"Decision Success\",fontsize=30)\nplt.xlabel('Decision',fontsize=30)\nplt.ylabel('Total',fontsize=30)\nplt.tick_params(labelsize=20)\nsns.barplot(x='Decision', y='Total',data=result_df)",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\nplt.title(\"Decision Success\",fontsize=30)\nplt.xlabel('Decision',fontsize=30)\nplt.ylabel('Total',fontsize=30)\nplt.tick_params(labelsize=20)\nsns.barplot(x='Decision', y='Total', data=decision_df,palette='rainbow')\nsns.barplot(x='Decision', y='Total', data=result_df, palette='coolwarm')\nplt.legend(['Decision Taken','Decision Proved Right'])",
"_____no_output_____"
]
],
[
[
"# Venue which has hosted most number of matches",
"_____no_output_____"
]
],
[
[
"df['venue'].unique()",
"_____no_output_____"
],
[
"len(df['venue'].unique())",
"_____no_output_____"
],
[
"venue_df = df.groupby('venue')[['id']].count()\nvenue_df = venue_df.sort_values('id',ascending=False).reset_index()\nvenue_df.rename(columns={'id':'Total','venue':'Stadium'},inplace=True)",
"_____no_output_____"
],
[
"plt.figure(figsize=(20,20))\nplt.title(\"Venues\",fontweight='bold',fontsize=30)\nplt.tick_params(labelsize=40)\nplt.pie(venue_df['Total'],labels=venue_df['Stadium'],textprops={'fontsize': 10});",
"_____no_output_____"
]
],
[
[
"So we can conclude here that the most of the matches are played in Eden Gardens.",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"# So now we will find the player with maximum number of Man of the match award",
"_____no_output_____"
]
],
[
[
"len(df['player_of_match'].unique())",
"_____no_output_____"
],
[
"player_df= df.groupby('player_of_match')[['id']].count()",
"_____no_output_____"
],
[
"player_df",
"_____no_output_____"
],
[
"player_df=player_df.sort_values('id',ascending=False).reset_index()",
"_____no_output_____"
],
[
"player_df = player_df.head(15).copy()\nplayer_df.rename(columns={'id':'Total_Awards','player_of_match':'Man_Of_the_Match'},inplace=True)\nplayer_df",
"_____no_output_____"
],
[
"player_df.head(10)",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"plt.figure(figsize=(15,10))\nplt.title(\"Top 15 Players with Highest Man Of the Match Titles\",fontweight='bold' )\nplt.xticks(rotation=90)\nplt.yticks(ticks=np.arange(0,25,5))\nplt.ylabel('No. of Awards')\nplt.xlabel('Players')\nsns.barplot(x=player_df['Man_Of_the_Match'],y=player_df['Total_Awards'], alpha=0.6)",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"# Team who has won IPL Trophy most of times.",
"_____no_output_____"
]
],
[
[
"final_df=df.groupby('Season').tail(1).copy()\nfinal_df",
"_____no_output_____"
],
[
"final_df = final_df.sort_values('Season')\nfinal_df",
"_____no_output_____"
],
[
"final_df.winner.unique()",
"_____no_output_____"
],
[
"final_df['winner'].value_counts()",
"_____no_output_____"
]
],
[
[
"As now we can conclude here that Mumbai Indians have won maximum number of trophies",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(20,10))\nplt.title(\"Season Champions\",fontweight='bold',fontsize=20)\nplt.xlabel('Teams',fontweight='bold',fontsize=30)\nplt.ylabel('Total Seasons',fontweight='bold',fontsize=20)\nplt.xticks(rotation='60')\nplt.tick_params(labelsize=10)\nsns.countplot(x=final_df['winner'],palette='rainbow')",
"_____no_output_____"
]
],
[
[
"# Conclusion",
"_____no_output_____"
],
[
"This project simply implies that we have made this IPL data more readble and by using few visualization libraries it helped us to solve different problems which are difficult to handle. SO basically we have analysed data by cleaning and manipulating the required data which helped us to perform few task.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a84ae1b5a392e67396b619ce875efc9dd8eb49d
| 21,101 |
ipynb
|
Jupyter Notebook
|
DBMS Project.ipynb
|
fattahsamit/Blood-Bank-Management-System
|
4348279f01b8b6c09d35dd31095d3ba6ef7022d2
|
[
"MIT"
] | null | null | null |
DBMS Project.ipynb
|
fattahsamit/Blood-Bank-Management-System
|
4348279f01b8b6c09d35dd31095d3ba6ef7022d2
|
[
"MIT"
] | null | null | null |
DBMS Project.ipynb
|
fattahsamit/Blood-Bank-Management-System
|
4348279f01b8b6c09d35dd31095d3ba6ef7022d2
|
[
"MIT"
] | 1 |
2021-12-19T18:52:44.000Z
|
2021-12-19T18:52:44.000Z
| 57.96978 | 216 | 0.484953 |
[
[
[
"## Blood Donor Management System",
"_____no_output_____"
]
],
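[
[
"The GUI code below assumes a local MySQL database named `bdms` containing a `donors` table with ten columns that match its INSERT and UPDATE queries. The notebook does not include that schema, so the next cell is only a hypothetical setup sketch: the column names mirror the queries in the code, while the data types are assumptions.",
"_____no_output_____"
]
],
[
[
"# Hypothetical setup sketch (not part of the original project):\n# create a `donors` table compatible with the GUI code below.\n# Column names mirror the INSERT/UPDATE queries; the data types are assumptions.\nimport pymysql\n\ncon = pymysql.connect(host='localhost', user='root', password='', database='bdms')\ncur = con.cursor()\ncur.execute('''\nCREATE TABLE IF NOT EXISTS donors (\n    id VARCHAR(20) PRIMARY KEY,\n    name VARCHAR(100),\n    gender VARCHAR(10),\n    blood_group VARCHAR(5),\n    number VARCHAR(20),\n    email VARCHAR(100),\n    dob VARCHAR(20),\n    ailment VARCHAR(200),\n    last_donation VARCHAR(20),\n    address VARCHAR(200)\n)''')\ncon.commit()\ncon.close()",
"_____no_output_____"
]
],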
[
[
"#Tkinter is the standard GUI library for Python\nfrom tkinter import *\nfrom tkinter import ttk\nimport pymysql\n\n#class creation\nclass Donor:\n def __init__(self,root):\n self.root=root\n #Title of the application\n self.root.title(\"Blood Donor Management System\")\n #Setting the Resolution\n self.root.geometry(\"1350x700+0+0\")\n \n \n #Title of the Upper Label. Can change the values of border,relief, fonts, background and foreground colour. \n #relief = RAISED/ GROOVE/ RIDGE/ FLAT/ SUNKEN\n title=Label(self.root,text=\"🩸 Blood Donor Management System\",bd=10,relief=RAISED,font=(\"arial\",35,\"bold\"),bg=\"crimson\",fg=\"white\")\n #packed, choosing location, fill=X to fillup the X axis area\n title.pack(side=TOP,fill=X)\n \n\n#-------VARIABLES--------- \n #using String variable because we don't want to use any calculations with these\n self.id_var=StringVar()\n self.name_var=StringVar()\n self.gender_var=StringVar()\n self.bg_var=StringVar()\n self.num_var=StringVar()\n self.email_var=StringVar()\n self.dob_var=StringVar()\n self.ail_var=StringVar()\n self.lastdn_var=StringVar()\n self.address_var=StringVar()\n \n self.search_by=StringVar()\n self.search_txt=StringVar()\n \n \n#-------MANAGE FRAME--------- \n #create frame\n #border size, style\n Manage_Frame=Frame(self.root,bd=4,relief=RIDGE,bg=\"crimson\")\n #placement and resolution of the frame\n Manage_Frame.place(x=10,y=82,height=610,width=472)\n #title for Manage_Frame\n m_title=Label(Manage_Frame,text=\"Manage Donors\",font=(\"arial\",25,\"bold\"),bg=\"crimson\",fg=\"white\")\n #grid method makes table-like structure, How many Rows and Column will be there. padx/pady gives space between the x/y axis \n m_title.grid(row=0,columnspan=2,pady=20)\n \n #ID\n #label field\n lbl_id=Label(Manage_Frame,text=\"ID No.\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_id.grid(row=1,column=0,pady=5,padx=10,sticky=\"w\")\n #text field, using entry method\n #textvariable is used to access the variables\n txt_id=Entry(Manage_Frame,textvariable=self.id_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_id.grid(row=1,column=1,pady=5,padx=10,sticky=\"w\")\n \n #Name\n lbl_name=Label(Manage_Frame,text=\"Name\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_name.grid(row=2,column=0,pady=5,padx=10,sticky=\"w\")\n \n txt_name=Entry(Manage_Frame,textvariable=self.name_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_name.grid(row=2,column=1,pady=5,padx=10,sticky=\"w\")\n \n #Gender (combobox) - kinda like a option system\n lbl_gender=Label(Manage_Frame,text=\"Gender\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_gender.grid(row=3,column=0,pady=5,padx=10,sticky=\"w\")\n #using combobox\n combo_gender=ttk.Combobox(Manage_Frame,textvariable=self.gender_var,font=(\"arial\",14,\"bold\"),state=\"readonly\")\n combo_gender['values']=(\"Male\",\"Female\",\"Other\")\n combo_gender.grid(row=3,column=1,pady=5,padx=10)\n \n #Blood Group (combobox)\n lbl_bg=Label(Manage_Frame,text=\"Blood Group\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_bg.grid(row=4,column=0,pady=5,padx=10,sticky=\"w\")\n \n combo_bg=ttk.Combobox(Manage_Frame,textvariable=self.bg_var,font=(\"arial\",14,\"bold\"),state=\"readonly\")\n combo_bg['values']=(\"A+\",\"A-\",\"B+\",\"B-\",\"AB+\",\"AB-\",\"O+\",\"O-\")\n combo_bg.grid(row=4,column=1,pady=5,padx=10)\n \n #Phone Number\n lbl_num=Label(Manage_Frame,text=\"Phone Number\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n 
lbl_num.grid(row=5,column=0,pady=5,padx=10,sticky=\"w\")\n \n txt_num=Entry(Manage_Frame,textvariable=self.num_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_num.grid(row=5,column=1,pady=5,padx=10,sticky=\"w\")\n \n #Email\n lbl_email=Label(Manage_Frame,text=\"E-mail\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_email.grid(row=6,column=0,pady=5,padx=10,sticky=\"w\")\n \n txt_email=Entry(Manage_Frame,textvariable=self.email_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_email.grid(row=6,column=1,pady=5,padx=10,sticky=\"w\") \n \n #Date of Birth\n lbl_dob=Label(Manage_Frame,text=\"Date of Birth\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_dob.grid(row=7,column=0,pady=5,padx=10,sticky=\"w\")\n \n txt_dob=Entry(Manage_Frame,textvariable=self.dob_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_dob.grid(row=7,column=1,pady=5,padx=10,sticky=\"w\") \n \n #Known Ailments\n lbl_ail=Label(Manage_Frame,text=\"Known Ailments\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_ail.grid(row=8,column=0,pady=5,padx=10,sticky=\"w\")\n \n txt_ail=Entry(Manage_Frame,textvariable=self.ail_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_ail.grid(row=8,column=1,pady=5,padx=10,sticky=\"w\")\n \n #Last Donation\n lbl_lastdn=Label(Manage_Frame,text=\"Last Donation Date\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_lastdn.grid(row=9,column=0,pady=5,padx=10,sticky=\"w\")\n \n txt_lastdn=Entry(Manage_Frame,textvariable=self.lastdn_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_lastdn.grid(row=9,column=1,pady=5,padx=10,sticky=\"w\")\n \n #Address\n lbl_address=Label(Manage_Frame,text=\"Address\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_address.grid(row=10,column=0,pady=5,padx=10,sticky=\"w\")\n\n txt_address=Entry(Manage_Frame,textvariable=self.address_var,font=(\"arial\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_address.grid(row=10,column=1,pady=5,padx=10,sticky=\"w\")\n \n #using text method (we are not using it)\n #use the help of self to access Text data\n #self.txt_address=Text(Manage_Frame,height=3, width=29)\n #self.txt_address.grid(row=10,column=1,pady=5,padx=10,sticky=\"w\") \n \n#-------BUTTON FRAME--------- \n btn_Frame=Frame(Manage_Frame,bd=4,relief=RIDGE,bg=\"crimson\")\n btn_Frame.place(x=12,y=555,width=433)\n \n #command is used to call function\n Addbtn=Button(btn_Frame,text=\"Add\",width=11,command=self.add_donors).grid(row=0,column=0,padx=10,pady=5)\n upbtn=Button(btn_Frame,text=\"Update\",width=11,command=self.update_data).grid(row=0,column=1,padx=10,pady=5)\n delbtn=Button(btn_Frame,text=\"Delete\",width=11,command=self.delete_data).grid(row=0,column=2,padx=10,pady=5)\n clrbtn=Button(btn_Frame,text=\"Clear\",width=11,command=self.clear).grid(row=0,column=3,padx=10,pady=5)\n \n \n#-------DETAIL FRAME--------- \n Detail_Frame=Frame(self.root,bd=4,relief=RIDGE,bg=\"crimson\")\n Detail_Frame.place(x=487,y=82,height=610,width=857)\n \n lbl_search=Label(Detail_Frame,text=\"Search By\",font=(\"arial\",15,\"bold\"),bg=\"crimson\",fg=\"white\")\n lbl_search.grid(row=0,column=0,pady=5,padx=10,sticky=\"w\")\n \n combo_search=ttk.Combobox(Detail_Frame,textvariable=self.search_by,width=13,font=(\"arial\",14,\"bold\"),state=\"readonly\")\n #name must be same as the database\n combo_search['values']=(\"Blood_Group\",\"Last_Donation\",\"Address\",\"Number\")\n combo_search.grid(row=0,column=1,pady=5,padx=10)\n \n 
txt_search=Entry(Detail_Frame,textvariable=self.search_txt,width=25,font=(\"arial\",13,\"bold\"),bd=5,relief=GROOVE)\n txt_search.grid(row=0,column=2,pady=5,padx=10,sticky=\"w\")\n \n searchbtn=Button(Detail_Frame,text=\"Search\",width=13,pady=5,command=self.search_data).grid(row=0,column=3,padx=10,pady=5)\n showallbtn=Button(Detail_Frame,text=\"Show All\",width=13,pady=5,command=self.fetch_data).grid(row=0,column=4,padx=10,pady=5)\n \n#-------TABLE FRAME--------- \n Table_Frame=Frame(Detail_Frame,bd=4,relief=RIDGE,bg=\"crimson\")\n Table_Frame.place(x=10,y=50,height=545,width=830)\n #scrolling method to add scrollbars for x and y axis\n scroll_x=Scrollbar(Table_Frame,orient=HORIZONTAL)\n scroll_y=Scrollbar(Table_Frame,orient=VERTICAL)\n #TreeView allows us to do is to build a tree-like structure and insert items accordingly\n self.Donor_table=ttk.Treeview(Table_Frame,columns=(\"id\",\"name\",\"gender\",\"bg\",\"num\",\"email\",\"dob\",\"ail\",\"lastdn\",\"address\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\n scroll_x.pack(side=BOTTOM,fill=X)\n scroll_y.pack(side=RIGHT,fill=Y)\n scroll_x.config(command=self.Donor_table.xview)\n scroll_y.config(command=self.Donor_table.yview)\n self.Donor_table.heading(\"id\",text=\"ID No.\")\n self.Donor_table.heading(\"name\",text=\"Name\")\n self.Donor_table.heading(\"gender\",text=\"Gender\")\n self.Donor_table.heading(\"bg\",text=\"Blood Group\")\n self.Donor_table.heading(\"num\",text=\"Phone No.\")\n self.Donor_table.heading(\"email\",text=\"E-mail\")\n self.Donor_table.heading(\"dob\",text=\"Date of Birth\")\n self.Donor_table.heading(\"ail\",text=\"Ailments\")\n self.Donor_table.heading(\"lastdn\",text=\"Last Donation\")\n self.Donor_table.heading(\"address\",text=\"Address\")\n #only show the ones with headings\n self.Donor_table[\"show\"]=\"headings\"\n #setting the column\n self.Donor_table.column(\"id\",width=45)\n self.Donor_table.column(\"name\",width=100)\n self.Donor_table.column(\"gender\",width=60)\n self.Donor_table.column(\"bg\",width=75)\n self.Donor_table.column(\"num\",width=75)\n self.Donor_table.column(\"email\",width=130)\n self.Donor_table.column(\"dob\",width=73)\n self.Donor_table.column(\"ail\",width=85)\n self.Donor_table.column(\"lastdn\",width=80)\n self.Donor_table.column(\"address\",width=130)\n #filled the table and expanded it for it cover the whole table\n self.Donor_table.pack(fill=BOTH,expand=1)\n #button event\n self.Donor_table.bind(\"<ButtonRelease-1>\",self.get_cursor)\n #to show the table from the database\n self.fetch_data()\n \n def add_donors(self):\n #connection with database #database name=bdms\n con=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdms\")\n #cursor function is used to execute queries\n cur=con.cursor()\n #sql queries, Table name=donors, Used a tuple to store into variables, get() for accessing\n cur.execute(\"insert into donors values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",(self.id_var.get(),\n self.name_var.get(),\n self.gender_var.get(),\n self.bg_var.get(),\n self.num_var.get(),\n self.email_var.get(),\n self.dob_var.get(),\n self.ail_var.get(),\n self.lastdn_var.get(),\n self.address_var.get(),\n ))\n #get('1.0',END) will show the first and the last line.....in the middle. 
(we are not using it btw)\n con.commit()\n #to show the table after inserting into the database \n self.fetch_data()\n #clears the manage donors tab\n self.clear()\n con.close()\n \n def fetch_data(self):\n con=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdms\")\n cur=con.cursor()\n cur.execute(\"select * from donors\")\n #save all data into a variabble that will be fetched\n rows=cur.fetchall()\n #delete empty rows and their children\n if len(rows)!=0:\n self.Donor_table.delete(*self.Donor_table.get_children())\n for row in rows:\n self.Donor_table.insert('',END,values=row) #passing the values\n con.commit()\n con.close()\n \n def clear(self): \n #will show empty values\n self.id_var.set(\"\"),\n self.name_var.set(\"\"),\n self.gender_var.set(\"\"),\n self.bg_var.set(\"\"),\n self.num_var.set(\"\"),\n self.email_var.set(\"\"),\n self.dob_var.set(\"\"),\n self.ail_var.set(\"\"),\n self.lastdn_var.set(\"\"),\n self.address_var.set(\"\")\n \n def get_cursor(self,ev):\n cursor_row=self.Donor_table.focus() #focus brings up the row selected by the cursor\n contents=self.Donor_table.item(cursor_row) #brings selected the data into the function\n row=contents['values'] #fetches the values\n \n #saved into a list and will show in the management tab\n self.id_var.set(row[0])\n self.name_var.set(row[1]),\n self.gender_var.set(row[2]),\n self.bg_var.set(row[3]),\n #concatenation\n self.num_var.set(\"0\"+str(row[4])),\n self.email_var.set(row[5]),\n self.dob_var.set(row[6]),\n self.ail_var.set(row[7]),\n self.lastdn_var.set(row[8]),\n self.address_var.set(row[9])\n \n def update_data(self):\n con=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdms\")\n cur=con.cursor()\n #name must be same as the database\n cur.execute(\"update donors set name=%s,gender=%s,blood_group=%s,number=%s,email=%s,dob=%s,ailment=%s,last_donation=%s,address=%s where id=%s\",(\n self.name_var.get(),\n self.gender_var.get(),\n self.bg_var.get(),\n self.num_var.get(),\n self.email_var.get(),\n self.dob_var.get(),\n self.ail_var.get(),\n self.lastdn_var.get(),\n self.address_var.get(),\n self.id_var.get()\n ))\n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n \n def delete_data(self):\n con=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdms\")\n cur=con.cursor()\n cur.execute(\"delete from donors where id=%s\",self.id_var.get())\n con.commit()\n con.close()\n self.fetch_data()\n self.clear()\n \n def search_data(self):\n con=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"bdms\")\n cur=con.cursor()\n #cur.execute(\"select * from donors where \" +str(self.search_by.get())+\" LIKE '\"+str(self.search_txt.get())+\"%'\")\n #cur.execute(\"select * from donors where \" +str(self.search_by.get())+\" LIKE '%\"+str(self.search_txt.get())+\"%'\") \n if str(self.search_by.get())==\"Blood_Group\":\n cur.execute(\"select * from donors where \" +str(self.search_by.get())+\" LIKE '\"+str(self.search_txt.get())+\"%'\")\n else:\n cur.execute(\"select * from donors where \" +str(self.search_by.get())+\" LIKE '%\"+str(self.search_txt.get())+\"%'\")\n\n rows=cur.fetchall()\n if len(rows)!=0:\n self.Donor_table.delete(*self.Donor_table.get_children())\n for row in rows:\n self.Donor_table.insert('',END,values=row)\n con.commit()\n con.close() \n \nroot=Tk()\nob=Donor(root)\n#just remove the comment and change the filepath of the image file on your 
pc\n#root.iconbitmap('D:/Blood-Bank-Management-System/blood_drop_no_shadow_icon-icons.com_76229.ico')\nroot.mainloop()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a84aea7bca82857162c3b8c3698a30b7309b8b4
| 9,927 |
ipynb
|
Jupyter Notebook
|
Data Compression Methods/Huffman Code/Huffman_code.ipynb
|
ElizaLo/Practice-Python
|
81cc82b4fbe68c13647f18ea659c7ef1025ec951
|
[
"MIT"
] | 5 |
2020-07-20T10:57:28.000Z
|
2021-12-09T01:54:59.000Z
|
Data Compression Methods/Huffman Code/Huffman_code.ipynb
|
ElizaLo/Practice
|
81cc82b4fbe68c13647f18ea659c7ef1025ec951
|
[
"MIT"
] | 1 |
2020-10-02T15:26:57.000Z
|
2020-10-02T15:26:57.000Z
|
Data Compression Methods/Huffman Code/Huffman_code.ipynb
|
ElizaLo/Practice
|
81cc82b4fbe68c13647f18ea659c7ef1025ec951
|
[
"MIT"
] | 5 |
2020-06-06T14:16:01.000Z
|
2021-01-27T17:38:32.000Z
| 27.575 | 278 | 0.425103 |
[
[
[
"<a href=\"https://colab.research.google.com/github/ElizaLo/Practice-Python/blob/master/Data%20Compression%20Methods/Huffman%20Code/Huffman_code.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Huffman Coding",
"_____no_output_____"
],
[
"## **Solution**",
"_____no_output_____"
]
],
[
[
"import heapq\nfrom collections import Counter, namedtuple",
"_____no_output_____"
],
[
"class Node(namedtuple(\"Node\", [\"left\", \"right\"])):\n def walk(self, code, acc): # code - префикс кода, который мы накопили спускаясь от корня к узлу/литу\n self.left.walk(code, acc + \"0\")\n self.right.walk(code, acc + \"1\")",
"_____no_output_____"
],
[
"class Leaf(namedtuple(\"Leaf\", [\"char\"])):\n def walk(self, code, acc):\n code[self.char] = acc or \"0\"",
"_____no_output_____"
]
],
[
[
" **Encoding**",
"_____no_output_____"
]
],
[
[
"def huffman_encode(s):\n h = []\n for ch, freq in Counter(s).items():\n h.append((freq, len(h), Leaf(ch)))\n \n # h = [(freq, Leaf(ch)) for ch, freq in Counter(s).items()]\n heapq.heapify(h)\n \n count = len(h)\n while len(h) > 1: # пока в очереди есть элем\n freq1, _count1, left = heapq.heappop(h) # достаём элем с минимальной частотой\n freq2, _count2, right = heapq.heappop(h)\n heapq.heappush(h, (freq1 + freq2, count, Node(left, right)))\n count += 1\n \n code = {} \n if h: \n [(_freq, _count, root)] = h # корень дерева\n root.walk(code,\"\")\n \n return code",
"_____no_output_____"
]
],
[
[
"**Decoding**\n\n",
"_____no_output_____"
]
],
[
[
"def huffman_decode(encoded, code):\n sx = []\n enc_ch = \"\"\n for ch in encoded:\n enc_ch += ch\n for dec_ch in code:\n if code.get(dec_ch) == enc_ch:\n sx.append(dec_ch)\n enc_ch = \"\"\n break\n return \"\".join(sx)",
"_____no_output_____"
],
[
"def main():\n s = input()\n code = huffman_encode(s)\n \"\"\"\n закодированная версия строки s \n отображает каждый симвом в соответствующий ему код\n \"\"\"\n encoded = \"\".join(code[ch] for ch in s)\n \"\"\"\n len(code) - количество различных символов в строке s, словарь\n len(encoded) - длина закодированной строки\n \"\"\"\n print(\"\\nDictionary =\", len(code), \"\\nLength of string =\", len(encoded))\n # описываем как мы кодируем каждый символ\n print(\"\\n\")\n for ch in sorted(code):\n print(\"{}: {}\".format(ch, code[ch]))\n print(\"\\nEncoded string: \",encoded) # закодированная строка\n print(\"\\nDecoded string:\",huffman_decode(encoded, code))\n \n \nif __name__ == \"__main__\":\n main()",
"I saw you in restaurant yesterday\n\nDictionary = 14 \nLength of string = 121\n\n\n : 101\nI: 10010\na: 011\nd: 0100\ne: 000\ni: 11001\nn: 1000\no: 11000\nr: 1111\ns: 1101\nt: 001\nu: 0101\nw: 10011\ny: 1110\n\nEncoded string: 1001010111010111001110111101100001011011100110001011111000110100101101011111011100000110111100001101001000111101000111110\n\nDecoded string: I saw you in restaurant yesterday\n"
]
],
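[
[
"The decoder above rescans the whole code table for every bit, which is O(len(encoded) × alphabet size). Because a Huffman code is prefix-free, an equivalent approach (added here only as an illustrative alternative, not part of the original solution) is to invert the table once and look growing bit prefixes up in a dictionary:",
"_____no_output_____"
]
],
[
[
"# Alternative decoder sketch: invert the code table once, then match growing bit prefixes.\n# This works because Huffman codes are prefix-free, so the first dictionary hit is always correct.\ndef huffman_decode_fast(encoded, code):\n    reverse_code = {bits: ch for ch, bits in code.items()}\n    result = []\n    buffer = \"\"\n    for bit in encoded:\n        buffer += bit\n        if buffer in reverse_code:\n            result.append(reverse_code[buffer])\n            buffer = \"\"\n    return \"\".join(result)",
"_____no_output_____"
]
],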
[
[
"## Testing ",
"_____no_output_____"
]
],
[
[
"import random\nimport string",
"_____no_output_____"
],
[
"def test(n_iter=100):\n for i in range(n_iter):\n length = random.randint(0, 32)\n s = \"\".join(random.choice(string.ascii_letters) for _ in range(length))\n code = huffman_encode(s)\n encoded = \"\".join(code[ch] for ch in s)\n assert huffman_decode(encoded, code) == s",
"_____no_output_____"
]
],
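[
[
"The helper above only defines the randomized round-trip test; the notebook never calls it. A minimal invocation (added for completeness) is shown below; it raises an AssertionError if any round trip fails.",
"_____no_output_____"
]
],
[
[
"# Run the randomized round-trip test defined above (raises AssertionError on failure).\ntest()\nprint('All round-trip tests passed')",
"_____no_output_____"
]
],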
[
[
"## Simple code",
"_____no_output_____"
]
],
[
[
"def huffman_encode(s):\n return {ch: ch for ch in s} # кодирует сам в себя (отображает каждый символ в соответствующий ему код)",
"_____no_output_____"
],
[
"def main():\n s = input()\n code = huffman_encode(s)\n # закодированная версия строки s \n # отображает каждый симвом в соответствующий ему код\n encoded = \"\".join(code[ch] for ch in s)\n # len(code) - количество различных символов в строке s, словарь\n # len(encoded) - длина закодированной строки\n print(\"\\nDictionary =\", len(code), \"\\nLength of string =\", len(encoded))\n # описываем как мы кодируем каждый символ\n print(\"\\n\")\n for ch in sorted(code):\n print(\"{}: {}\".format(ch, code[ch]))\n print(\"\\n\", encoded) # закодированная строка\n \n \nif __name__ == \"__main__\":\n main()",
"abcdabcd\n\nDictionary = 4 \nLength of string = 8\n\n\na: a\nb: b\nc: c\nd: d\nabcdabcd\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a84b3e2835a15b9e0c911a6062e0c59ab6fe0cc
| 72,877 |
ipynb
|
Jupyter Notebook
|
.ipynb_checkpoints/Basic Plot-checkpoint.ipynb
|
rifathcsedu/Montana_Crash_Data
|
f6e9d0283f2ac9d600fe959720281ea2658f9dce
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/Basic Plot-checkpoint.ipynb
|
rifathcsedu/Montana_Crash_Data
|
f6e9d0283f2ac9d600fe959720281ea2658f9dce
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/Basic Plot-checkpoint.ipynb
|
rifathcsedu/Montana_Crash_Data
|
f6e9d0283f2ac9d600fe959720281ea2658f9dce
|
[
"MIT"
] | null | null | null | 441.678788 | 68,216 | 0.937401 |
[
[
[
"\nimport csv\nimport numpy as np\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\ndef draw_map_background(m, ax):\n ax.set_facecolor('#729FCF')\n m.fillcontinents(color='#FFEFDB', ax=ax, zorder=0)\n m.drawcounties(ax=ax)\n m.drawstates(ax=ax)\n m.drawcountries(ax=ax)\n m.drawcoastlines(ax=ax)\nKM = 1000.\nclat = 39.3\nclon = -94.7333\nwid = 5500 * KM\nhgt = 3500 * KM\n\n#m= Basemap(llcrnrlon=-129.098907,llcrnrlat=22.700324,urcrnrlon=-65.553985,urcrnrlat=52.177390,\n# resolution='i', projection='lcc', lat_0 = 37.697948, lon_0 = -97.314835)\n#m = Basemap(width=wid, height=hgt, rsphere=(6378137.00,6356752.3142),\n# resolution='i', area_thresh=2500., projection='lcc',\n# lon_0=-110.428794,lat_0=46.998846)\n#m = Basemap(projection='lcc',lon_0=-110.428794,lat_0=46.998846,resolution='i',\\\n# llcrnrx=-800*600,llcrnry=-800*400,\n# urcrnrx=+600*900,urcrnry=+450*600)\n\n\nlats, lons = [], []\ncounty='Park_notownsKDE_notowns'\nwith open('/Users/usmp/Google Drive/Saidur_Matt_Term_Project/Data_Without_Towns/'+county+'_alldata.csv') as f:\n reader = csv.reader(f)\n next(reader) # Ignore the header row.\n for row in reader:\n lat = float(row[15])\n lon = float(row[16])\n # filter lat,lons to (approximate) map view:\n lats.append( lat )\n lons.append( lon )\n'''\n#For Gallatin\nmin_lat = 44.06338\nmax_lat = 47.200085\nmin_lon = -111.891\nmax_lon = -109.5396\n#For Montana\nmin_lat = 44.36338-1\nmax_lat = 49.00085+1\nmin_lon = -116.0491-1\nmax_lon = -104.0396+1\n#For Park\nmin_lat = 44.96338\nmax_lat = 46.800085\nmin_lon = -111.291\nmax_lon = -109.8396\n#For Madison\nmin_lat = 44.06338\nmax_lat = 46.200085\nmin_lon = -112.891\nmax_lon = -110.9396\n'''\nmin_lat = 44.96338\nmax_lat = 46.800085\nmin_lon = -111.291\nmax_lon = -109.8396\nm = Basemap(lon_0=-111.428794,lat_0=44.998846,llcrnrlat = min_lat, urcrnrlat = max_lat, llcrnrlon = min_lon, urcrnrlon=max_lon, resolution='l', fix_aspect = False)\nfig = plt.figure()\nax = fig.add_subplot(111)\n#print(lats)\n#print (lons)\n# define custom colormap, white -> nicered, #E6072A = RGB(0.9,0.03,0.16)\n#plt.clim([0,100])\n# translucent blue scatter plot of epicenters above histogram: \nx,y = m(lons, lats)\nm.plot(x, y, 'o', markersize=5,zorder=6, markerfacecolor='Red',markeredgecolor=\"none\", alpha=0.33)\ndraw_map_background(m, ax)\nplt.title(county+' Basic Plot')\nplt.gcf().set_size_inches(15,15)\n\nplt.show()\n#plt.savefig('/Users/usmp/Google Drive/Saidur_Matt_Term_Project/'+county+'CrashData(Basic).jpg')\n#plt.close()\n\nprint(\"Done\")\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a84bf409a48c91641de511ba8b1a4e91d959676
| 19,375 |
ipynb
|
Jupyter Notebook
|
notebooks/tacotron.ipynb
|
aidiary/tacotron-pytorch
|
8ea9b1bb61bf753a64ff611b441326ea8c001d20
|
[
"MIT"
] | null | null | null |
notebooks/tacotron.ipynb
|
aidiary/tacotron-pytorch
|
8ea9b1bb61bf753a64ff611b441326ea8c001d20
|
[
"MIT"
] | 2 |
2019-10-04T05:54:20.000Z
|
2019-10-07T01:19:21.000Z
|
notebooks/tacotron.ipynb
|
aidiary/tacotron-pytorch
|
8ea9b1bb61bf753a64ff611b441326ea8c001d20
|
[
"MIT"
] | null | null | null | 40.197095 | 109 | 0.475252 |
[
[
[
"%reload_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sys\nsys.path.append('..')",
"_____no_output_____"
],
[
"import torch\nfrom model import Tacotron",
"_____no_output_____"
],
[
"tacotron = Tacotron(num_chars=71, r=7, linear_dim=1025, mel_dim=80)",
"_____no_output_____"
],
[
"tacotron",
"_____no_output_____"
],
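[
"# Added sketch (illustrative): rough size check of the model constructed above,\n# assuming it is a standard PyTorch nn.Module.\nnum_params = sum(p.numel() for p in tacotron.parameters())\nprint('Total parameters: {:,}'.format(num_params))",
"_____no_output_____"
],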
[
"characters = torch.ones([32, 71], dtype=torch.long)\ntext_lengths = torch.ones(32, dtype=torch.long)\nmel_specs = torch.rand(32, 231, 80)",
"_____no_output_____"
],
[
"mel_outputs, linear_outputs, alignments, stop_tokens = tacotron(characters, text_lengths, mel_specs)",
"_____no_output_____"
],
[
"mel_outputs.shape",
"_____no_output_____"
],
[
"linear_outputs.shape",
"_____no_output_____"
],
[
"alignments.shape",
"_____no_output_____"
],
[
"stop_tokens.shape",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a84bfd2569503c17f49f0930a78f1adf6d45dcc
| 177,660 |
ipynb
|
Jupyter Notebook
|
s_analysis/stock price prediction and analysis.ipynb
|
Anova07/prediction
|
19316f7ad2c92eefafdd5e6223a15c6d88599ac9
|
[
"MIT"
] | 3 |
2021-03-28T06:52:56.000Z
|
2022-01-11T20:17:08.000Z
|
s_analysis/stock price prediction and analysis.ipynb
|
Anova07/prediction
|
19316f7ad2c92eefafdd5e6223a15c6d88599ac9
|
[
"MIT"
] | null | null | null |
s_analysis/stock price prediction and analysis.ipynb
|
Anova07/prediction
|
19316f7ad2c92eefafdd5e6223a15c6d88599ac9
|
[
"MIT"
] | null | null | null | 136.661538 | 55,772 | 0.823933 |
[
[
[
"# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport datetime",
"_____no_output_____"
],
[
"dataset = pd.read_csv(r'C:\\Users\\ANOVA AJAY PANDEY\\Desktop\\SEM4\\CSE 3021 SIN\\proj\\stock analysis\\Google_Stock_Price_Train.csv',index_col=\"Date\",parse_dates=True)",
"_____no_output_____"
],
[
"dataset = pd.read_csv(r'C:\\Users\\ANOVA AJAY PANDEY\\Desktop\\SEM4\\CSE 3021 SIN\\proj\\stock analysis\\Google_Stock_Price_Train.csv',index_col=\"Date\",parse_dates=True)",
"_____no_output_____"
],
[
"dataset.tail()",
"_____no_output_____"
],
[
"dataset.isna().any()",
"_____no_output_____"
],
[
"dataset.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 1258 entries, 2012-01-03 to 2019-01-01\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Open 1258 non-null float64\n 1 High 1258 non-null float64\n 2 Low 1258 non-null float64\n 3 Close 1258 non-null object \n 4 Volume 1258 non-null object \ndtypes: float64(3), object(2)\nmemory usage: 59.0+ KB\n"
],
[
"dataset['Open'].plot(figsize=(16,6))",
"_____no_output_____"
],
[
"# convert column \"a\" of a DataFrame\ndataset[\"Close\"] = dataset[\"Close\"].str.replace(',', '').astype(float)",
"_____no_output_____"
],
[
"\ndataset[\"Volume\"] = dataset[\"Volume\"].str.replace(',', '').astype(float)",
"_____no_output_____"
],
[
"# 7 day rolling mean\ndataset.rolling(7).mean().tail(20)",
"_____no_output_____"
],
[
"dataset['Open'].plot(figsize=(16,6))\ndataset.rolling(window=30).mean()['Close'].plot()",
"_____no_output_____"
],
[
"dataset['Close: 30 Day Mean'] = dataset['Close'].rolling(window=30).mean()\ndataset[['Close','Close: 30 Day Mean']].plot(figsize=(16,6))",
"_____no_output_____"
],
[
"# Optional specify a minimum number of periods\ndataset['Close'].expanding(min_periods=1).mean().plot(figsize=(16,6))",
"_____no_output_____"
],
[
"training_set=dataset['Open']\ntraining_set=pd.DataFrame(training_set)",
"_____no_output_____"
],
[
"# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\ntraining_set_scaled = sc.fit_transform(training_set)",
"_____no_output_____"
],
[
"# Creating a data structure with 60 timesteps and 1 output\nX_train = []\ny_train = []\nfor i in range(60, 1258):\n X_train.append(training_set_scaled[i-60:i, 0])\n y_train.append(training_set_scaled[i, 0])\nX_train, y_train = np.array(X_train), np.array(y_train)\n\n# Reshaping\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))",
"_____no_output_____"
],
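[
"# Added sanity check (illustrative): with 1258 training rows and a 60-step window,\n# X_train is expected to have shape (1198, 60, 1) and y_train shape (1198,).\nprint(X_train.shape, y_train.shape)",
"_____no_output_____"
],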
[
"from tensorflow.keras.models import Sequential",
"_____no_output_____"
],
[
"# Part 2 - Building the RNN\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout",
"_____no_output_____"
],
[
"# Initialising the RNN\nregressor = Sequential()",
"_____no_output_____"
],
[
"# Adding the first LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))\nregressor.add(Dropout(0.2))",
"_____no_output_____"
],
[
"# Adding a second LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))",
"_____no_output_____"
],
[
"# Adding a third LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\nregressor.add(Dropout(0.2))",
"_____no_output_____"
],
[
"# Adding a fourth LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50))\nregressor.add(Dropout(0.2))",
"_____no_output_____"
],
[
"# Adding the output layer\nregressor.add(Dense(units = 1))",
"_____no_output_____"
],
[
"# Compiling the RNN\nregressor.compile(optimizer = 'adam', loss = 'mean_squared_error')",
"_____no_output_____"
],
[
"# Fitting the RNN to the Training set\nregressor.fit(X_train, y_train, epochs = 100, batch_size = 32)",
"Epoch 1/100\n1198/1198 [==============================] - 8s 7ms/step - loss: 0.0366\nEpoch 2/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0060\nEpoch 3/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0055\nEpoch 4/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0061\nEpoch 5/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0053\nEpoch 6/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0060\nEpoch 7/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0051\nEpoch 8/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0052\nEpoch 9/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0053\nEpoch 10/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0039\nEpoch 11/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0043\nEpoch 12/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0046\nEpoch 13/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0040\nEpoch 14/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0040\nEpoch 15/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0038\nEpoch 16/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0039\nEpoch 17/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0039\nEpoch 18/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0036\nEpoch 19/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0035\nEpoch 20/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0033\nEpoch 21/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0034\nEpoch 22/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0034\nEpoch 23/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0033\nEpoch 24/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0034\nEpoch 25/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0033\nEpoch 26/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0035\nEpoch 27/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0033\nEpoch 28/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0033\nEpoch 29/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0029\nEpoch 30/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0029\nEpoch 31/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0036\nEpoch 32/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0030\nEpoch 33/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0030\nEpoch 34/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0030\nEpoch 35/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0030\nEpoch 36/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0025\nEpoch 37/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0028\nEpoch 38/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0026\nEpoch 39/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0024\nEpoch 40/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0027\nEpoch 41/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 
0.0025\nEpoch 42/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0029\nEpoch 43/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0026\nEpoch 44/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0024\nEpoch 45/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0031\nEpoch 46/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0026\nEpoch 47/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0022\nEpoch 48/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0024\nEpoch 49/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0025\nEpoch 50/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0024\nEpoch 51/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0022\nEpoch 52/100\n1198/1198 [==============================] - 7s 5ms/step - loss: 0.0021\nEpoch 53/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0024\nEpoch 54/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0023\nEpoch 55/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0021\nEpoch 56/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0020\nEpoch 57/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0022\nEpoch 58/100\n1198/1198 [==============================] - 7s 5ms/step - loss: 0.0020\nEpoch 59/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0020\nEpoch 60/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0020\nEpoch 61/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0021\nEpoch 62/100\n1198/1198 [==============================] - 7s 5ms/step - loss: 0.0019\nEpoch 63/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0019\nEpoch 64/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0019\nEpoch 65/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0020\nEpoch 66/100\n1198/1198 [==============================] - 7s 5ms/step - loss: 0.0017A: 2s -\nEpoch 67/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0019\nEpoch 68/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0018\nEpoch 69/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0018\nEpoch 70/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0018\nEpoch 71/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0017\nEpoch 72/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0017\nEpoch 73/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0020\nEpoch 74/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0017\nEpoch 75/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0018\nEpoch 76/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0018\nEpoch 77/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0015\nEpoch 78/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0017\nEpoch 79/100\n1198/1198 [==============================] - 8s 7ms/step - loss: 0.0016\nEpoch 80/100\n1198/1198 [==============================] - 8s 7ms/step - loss: 0.0015\nEpoch 81/100\n1198/1198 [==============================] - 8s 7ms/step - loss: 0.0017\nEpoch 82/100\n1198/1198 [==============================] - 
7s 6ms/step - loss: 0.0017A: 3\nEpoch 83/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0018\nEpoch 84/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0015\nEpoch 85/100\n1198/1198 [==============================] - 7s 6ms/step - loss: 0.0015\nEpoch 86/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0018\nEpoch 87/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0017\nEpoch 88/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0016\nEpoch 89/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0015\nEpoch 90/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0014\nEpoch 91/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0015\nEpoch 92/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0015\nEpoch 93/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0015\nEpoch 94/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0014\nEpoch 95/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0015\nEpoch 96/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0013\nEpoch 97/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0016\nEpoch 98/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0015\nEpoch 99/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0014\nEpoch 100/100\n1198/1198 [==============================] - 6s 5ms/step - loss: 0.0014\n"
],
[
"# Part 3 - Making the predictions and visualising the results\n\n# Getting the real stock price of 2017\ndataset_test = pd.read_csv(r'C:\\Users\\ANOVA AJAY PANDEY\\Desktop\\SEM4\\CSE 3021 SIN\\proj\\stock analysis\\Google_Stock_Price_Test.csv',index_col=\"Date\",parse_dates=True)",
"_____no_output_____"
],
[
"real_stock_price = dataset_test.iloc[:, 1:2].values",
"_____no_output_____"
],
[
"dataset_test.head()",
"_____no_output_____"
],
[
"dataset_test.info()",
"_____no_output_____"
],
[
"dataset_test[\"Volume\"] = dataset_test[\"Volume\"].str.replace(',', '').astype(float)",
"_____no_output_____"
],
[
"test_set=dataset_test['Open']\ntest_set=pd.DataFrame(test_set)",
"_____no_output_____"
],
[
"test_set.info()",
"_____no_output_____"
],
[
"# Getting the predicted stock price of 2017\ndataset_total = pd.concat((dataset['Open'], dataset_test['Open']), axis = 0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values\ninputs = inputs.reshape(-1,1)\ninputs = sc.transform(inputs)\nX_test = []\nfor i in range(60, 80):\n X_test.append(inputs[i-60:i, 0])\nX_test = np.array(X_test)\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)",
"_____no_output_____"
],
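[
"# Added sketch: quantify the fit with a simple RMSE between the real and predicted\n# prices from the cells above (both are NumPy arrays of shape (20, 1) at this point).\nrmse = np.sqrt(np.mean((real_stock_price - predicted_stock_price) ** 2))\nprint('RMSE:', rmse)",
"_____no_output_____"
],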
[
"predicted_stock_price=pd.DataFrame(predicted_stock_price)\npredicted_stock_price.info()",
"_____no_output_____"
],
[
"# Visualising the results\nplt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')\nplt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')\nplt.title('Google Stock Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Google Stock Price')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a84c355684775f8d8abe9624917ad61b3a2e603
| 2,215 |
ipynb
|
Jupyter Notebook
|
tutorials/InfraPyTutorial.ipynb
|
LANL-Seismoacoustics/infrapy
|
132c1f5f9c074eca7300ab35d23109d8423a9912
|
[
"MIT"
] | 23 |
2020-03-17T18:43:19.000Z
|
2022-03-22T17:47:14.000Z
|
tutorials/InfraPyTutorial.ipynb
|
LANL-Seismoacoustics/infrapy
|
132c1f5f9c074eca7300ab35d23109d8423a9912
|
[
"MIT"
] | 10 |
2020-04-28T01:09:35.000Z
|
2022-02-28T06:06:20.000Z
|
tutorials/InfraPyTutorial.ipynb
|
LANL-Seismoacoustics/infrapy
|
132c1f5f9c074eca7300ab35d23109d8423a9912
|
[
"MIT"
] | 2 |
2021-03-08T20:29:27.000Z
|
2021-03-28T18:03:39.000Z
| 20.136364 | 180 | 0.554853 |
[
[
[
"# Getting Started with Infrapy",
"_____no_output_____"
],
[
"This tutorial assumes you have followed directions for installation of Infrapy outlined in the manual, and have tested to make sure your installation works.",
"_____no_output_____"
],
[
"The tutorial is broken up into stand-alone sections, each linked within this master tutorial notebook, that will walk a user through the basic processing workflows in Infrapy",
"_____no_output_____"
],
[
"## Author Information",
"_____no_output_____"
],
[
"This tutorial was authored by Fransiska Dannemann Dugick (fransiska at lanl dot gov) and was last updated 12/19",
"_____no_output_____"
],
[
"## Sections",
"_____no_output_____"
],
[
"### [Quick Start](QuickStart.ipynb)",
"_____no_output_____"
],
[
"### [Establishing a Database Connection (Pisces Tutorial)](DBConnect.ipynb)",
"_____no_output_____"
],
[
"### [FK Processing (CLI)](CLIFK.ipynb)",
"_____no_output_____"
],
[
"### [Detection Processing (CLI)](CLIFD.ipynb)",
"_____no_output_____"
],
[
"### [Association Processing (CLI)](CLIASSOC.ipynb)",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a84c4ef30d0a34075bbd101a91c0192c02d9aa4
| 54,379 |
ipynb
|
Jupyter Notebook
|
nlu/colab/healthcare/de_identification/DeIdentification_model_overview.ipynb
|
iamvarol/spark-nlp-workshop
|
73a9064bd47d4dc0692f0297748eb43cd094aabd
|
[
"Apache-2.0"
] | null | null | null |
nlu/colab/healthcare/de_identification/DeIdentification_model_overview.ipynb
|
iamvarol/spark-nlp-workshop
|
73a9064bd47d4dc0692f0297748eb43cd094aabd
|
[
"Apache-2.0"
] | null | null | null |
nlu/colab/healthcare/de_identification/DeIdentification_model_overview.ipynb
|
iamvarol/spark-nlp-workshop
|
73a9064bd47d4dc0692f0297748eb43cd094aabd
|
[
"Apache-2.0"
] | null | null | null | 54,379 | 54,379 | 0.635448 |
[
[
[
"\n\n[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/healthcare/de_identification/DeIdentification_model_overview.ipynb)\n\nAll the models avaiable are :\n\n| Language | nlu.load() reference | Spark NLP Model reference |\n| -------- | ------------------------------------------------------------ | ------------------------------------------------------------ |\n| English | med_ner.deid | nerdl_deid |\n| English | [en.de_identify](https://nlp.johnsnowlabs.com/2019/06/04/deidentify_rb_en.html) | [deidentify_rb](https://nlp.johnsnowlabs.com/2019/06/04/deidentify_rb_en.html) |\n| English | de_identify.rules | deid_rules |\n| English | [de_identify.clinical](https://nlp.johnsnowlabs.com/2021/01/29/deidentify_enriched_clinical_en.html) | [deidentify_enriched_clinical](https://nlp.johnsnowlabs.com/2021/01/29/deidentify_enriched_clinical_en.html) |\n| English | [de_identify.large](https://nlp.johnsnowlabs.com/2020/08/04/deidentify_large_en.html) | [deidentify_large](https://nlp.johnsnowlabs.com/2020/08/04/deidentify_large_en.html) |\n| English | [de_identify.rb](https://nlp.johnsnowlabs.com/2019/06/04/deidentify_rb_en.html) | [deidentify_rb](https://nlp.johnsnowlabs.com/2019/06/04/deidentify_rb_en.html) |\n| English | de_identify.rb_no_regex | deidentify_rb_no_regex |\n| English | [resolve_chunk.athena_conditions](https://nlp.johnsnowlabs.com/2020/09/16/chunkresolve_athena_conditions_healthcare_en.html) | [chunkresolve_athena_conditions_healthcare](https://nlp.johnsnowlabs.com/2020/09/16/chunkresolve_athena_conditions_healthcare_en.html) |\n| English | [resolve_chunk.cpt_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_cpt_clinical_en.html) | [chunkresolve_cpt_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_cpt_clinical_en.html) |\n| English | [resolve_chunk.icd10cm.clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_clinical_en.html) | [chunkresolve_icd10cm_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_clinical_en.html) |\n| English | [resolve_chunk.icd10cm.diseases_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_diseases_clinical_en.html) | [chunkresolve_icd10cm_diseases_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_diseases_clinical_en.html) |\n| English | resolve_chunk.icd10cm.hcc_clinical | chunkresolve_icd10cm_hcc_clinical |\n| English | resolve_chunk.icd10cm.hcc_healthcare | chunkresolve_icd10cm_hcc_healthcare |\n| English | [resolve_chunk.icd10cm.injuries](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_injuries_clinical_en.html) | [chunkresolve_icd10cm_injuries_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_injuries_clinical_en.html) |\n| English | [resolve_chunk.icd10cm.musculoskeletal](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_musculoskeletal_clinical_en.html) | [chunkresolve_icd10cm_musculoskeletal_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_musculoskeletal_clinical_en.html) |\n| English | [resolve_chunk.icd10cm.neoplasms](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_neoplasms_clinical_en.html) | [chunkresolve_icd10cm_neoplasms_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10cm_neoplasms_clinical_en.html) |\n| English | [resolve_chunk.icd10cm.poison](https://nlp.johnsnowlabs.com/2020/04/28/chunkresolve_icd10cm_poison_ext_clinical_en.html) | 
[chunkresolve_icd10cm_poison_ext_clinical](https://nlp.johnsnowlabs.com/2020/04/28/chunkresolve_icd10cm_poison_ext_clinical_en.html) |\n| English | [resolve_chunk.icd10cm.puerile](https://nlp.johnsnowlabs.com/2020/04/28/chunkresolve_icd10cm_puerile_clinical_en.html) | [chunkresolve_icd10cm_puerile_clinical](https://nlp.johnsnowlabs.com/2020/04/28/chunkresolve_icd10cm_puerile_clinical_en.html) |\n| English | resolve_chunk.icd10pcs.clinical | chunkresolve_icd10pcs_clinical |\n| English | [resolve_chunk.icdo.clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10pcs_clinical_en.html) | [chunkresolve_icdo_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_icd10pcs_clinical_en.html) |\n| English | [resolve_chunk.loinc](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_loinc_clinical_en.html) | [chunkresolve_loinc_clinical](https://nlp.johnsnowlabs.com/2021/04/02/chunkresolve_loinc_clinical_en.html) |\n| English | [resolve_chunk.rxnorm.cd](https://nlp.johnsnowlabs.com/2020/07/27/chunkresolve_rxnorm_cd_clinical_en.html) | [chunkresolve_rxnorm_cd_clinical](https://nlp.johnsnowlabs.com/2020/07/27/chunkresolve_rxnorm_cd_clinical_en.html) |\n| English | resolve_chunk.rxnorm.in | chunkresolve_rxnorm_in_clinical |\n| English | resolve_chunk.rxnorm.in_healthcare | chunkresolve_rxnorm_in_healthcare |\n| English | [resolve_chunk.rxnorm.sbd](https://nlp.johnsnowlabs.com/2020/07/27/chunkresolve_rxnorm_sbd_clinical_en.html) | [chunkresolve_rxnorm_sbd_clinical](https://nlp.johnsnowlabs.com/2020/07/27/chunkresolve_rxnorm_sbd_clinical_en.html) |\n| English | [resolve_chunk.rxnorm.scd](https://nlp.johnsnowlabs.com/2020/07/27/chunkresolve_rxnorm_scd_clinical_en.html) | [chunkresolve_rxnorm_scd_clinical](https://nlp.johnsnowlabs.com/2020/07/27/chunkresolve_rxnorm_scd_clinical_en.html) |\n| English | resolve_chunk.rxnorm.scdc | chunkresolve_rxnorm_scdc_clinical |\n| English | resolve_chunk.rxnorm.scdc_healthcare | chunkresolve_rxnorm_scdc_healthcare |\n| English | [resolve_chunk.rxnorm.xsmall.clinical](https://nlp.johnsnowlabs.com/2020/06/24/chunkresolve_rxnorm_xsmall_clinical_en.html) | [chunkresolve_rxnorm_xsmall_clinical](https://nlp.johnsnowlabs.com/2020/06/24/chunkresolve_rxnorm_xsmall_clinical_en.html) |\n| English | [resolve_chunk.snomed.findings](https://nlp.johnsnowlabs.com/2020/06/20/chunkresolve_snomed_findings_clinical_en.html) | [chunkresolve_snomed_findings_clinical](https://nlp.johnsnowlabs.com/2020/06/20/chunkresolve_snomed_findings_clinical_en.html) |\n| English | classify.icd10.clinical | classifier_icd10cm_hcc_clinical |\n| English | classify.icd10.healthcare | classifier_icd10cm_hcc_healthcare |\n| English | [classify.ade.biobert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_ade_biobert_en.html) | [classifierdl_ade_biobert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_ade_biobert_en.html) |\n| English | [classify.ade.clinical](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_ade_clinicalbert_en.html) | [classifierdl_ade_clinicalbert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_ade_clinicalbert_en.html) |\n| English | [classify.ade.conversational](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_ade_conversational_biobert_en.html) | [classifierdl_ade_conversational_biobert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_ade_conversational_biobert_en.html) |\n| English | [classify.gender.biobert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_gender_biobert_en.html) | 
[classifierdl_gender_biobert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_gender_biobert_en.html) |\n| English | [classify.gender.sbert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_gender_sbert_en.html) | [classifierdl_gender_sbert](https://nlp.johnsnowlabs.com/2021/01/21/classifierdl_gender_sbert_en.html) |\n| English | classify.pico | classifierdl_pico_biobert |",
"_____no_output_____"
]
],
[
[
"# Install NLU\n!wget https://setup.johnsnowlabs.com/nlu/colab.sh | bash\n\n# Upload add your spark_nlp_for_healthcare.json",
"--2022-04-15 03:44:39-- https://setup.johnsnowlabs.com/nlu/colab.sh\nResolving setup.johnsnowlabs.com (setup.johnsnowlabs.com)... 51.158.130.125\nConnecting to setup.johnsnowlabs.com (setup.johnsnowlabs.com)|51.158.130.125|:443... connected.\nHTTP request sent, awaiting response... 302 Moved Temporarily\nLocation: https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh [following]\n--2022-04-15 03:44:39-- https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.108.133, 185.199.111.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 1665 (1.6K) [text/plain]\nSaving to: ‘STDOUT’\n\n- 100%[===================>] 1.63K --.-KB/s in 0s \n\n2022-04-15 03:44:39 (32.3 MB/s) - written to stdout [1665/1665]\n\nInstalling NLU 3.4.3rc2 with PySpark 3.0.3 and Spark NLP 3.4.2 for Google Colab ...\nGet:1 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease [3,626 B]\nIgn:2 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nGet:3 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease [15.9 kB]\nGet:4 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\nHit:5 http://archive.ubuntu.com/ubuntu bionic InRelease\nIgn:6 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nGet:7 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\nGet:8 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release [696 B]\nHit:9 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nGet:10 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release.gpg [836 B]\nHit:11 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease\nGet:12 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\nGet:13 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease [15.9 kB]\nHit:14 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nGet:15 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main Sources [1,947 kB]\nGet:17 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Packages [953 kB]\nGet:18 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1,490 kB]\nGet:19 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main amd64 Packages [996 kB]\nGet:20 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [2,695 kB]\nGet:21 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [3,134 kB]\nGet:22 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2,268 kB]\nGet:23 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic/main amd64 Packages [45.3 kB]\nFetched 13.8 MB in 3s (4,029 kB/s)\nReading package lists... Done\ntar: spark-3.0.2-bin-hadoop2.7.tgz: Cannot open: No such file or directory\ntar: Error is not recoverable: exiting now\n\u001b[K |████████████████████████████████| 209.1 MB 54 kB/s \n\u001b[K |████████████████████████████████| 142 kB 50.8 MB/s \n\u001b[K |████████████████████████████████| 505 kB 53.6 MB/s \n\u001b[K |████████████████████████████████| 198 kB 55.6 MB/s \n\u001b[?25h Building wheel for pyspark (setup.py) ... 
\u001b[?25l\u001b[?25hdone\nCollecting nlu_tmp==3.4.3rc10\n Downloading nlu_tmp-3.4.3rc10-py3-none-any.whl (510 kB)\n\u001b[K |████████████████████████████████| 510 kB 27.4 MB/s \n\u001b[?25hRequirement already satisfied: dataclasses in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (0.6)\nRequirement already satisfied: pandas>=1.3.5 in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (1.3.5)\nRequirement already satisfied: spark-nlp<3.5.0,>=3.4.2 in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (3.4.2)\nRequirement already satisfied: pyarrow>=0.16.0 in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (6.0.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from nlu_tmp==3.4.3rc10) (1.21.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.3.5->nlu_tmp==3.4.3rc10) (2.8.2)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=1.3.5->nlu_tmp==3.4.3rc10) (2018.9)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=1.3.5->nlu_tmp==3.4.3rc10) (1.15.0)\nInstalling collected packages: nlu-tmp\nSuccessfully installed nlu-tmp-3.4.3rc10\n Spark NLP for Healthcare could not be imported. Installing latest spark-nlp-jsl PyPI package via pip...\n"
]
],
[
[
"#### [Deidentify RB](https://nlp.johnsnowlabs.com/2019/06/04/deidentify_rb_en.html)",
"_____no_output_____"
]
],
[
[
"import nlu\n\ndata = '''A . Record date : 2093-01-13 , David Hale , M.D . , Name : Hendrickson , Ora MR . # 7194334 Date : 01/13/93 PCP : Oliveira , 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital . 0295 Keats Street'''\nnlu.load(\"med_ner.jsl.wip.clinical en.de_identify\").predict(data,output_level = 'sentence')",
"ner_wikiner_glove_840B_300 download started this may take some time.\nApproximate size to download 14.8 MB\n[OK!]\ndeidentify_rb download started this may take some time.\nApproximate size to download 3.8 KB\n[OK!]\nglove_840B_300 download started this may take some time.\nApproximate size to download 2.3 GB\n[OK!]\nsentence_detector_dl download started this may take some time.\nApproximate size to download 354.6 KB\n[OK!]\n"
]
],
[
[
"#### [Deidentify (Enriched)](https://nlp.johnsnowlabs.com/2021/01/29/deidentify_enriched_clinical_en.html)",
"_____no_output_____"
]
],
[
[
"data = '''A . Record date : 2093-01-13 , David Hale , M.D . , Name : Hendrickson , Ora MR . # 7194334 Date : 01/13/93 PCP : Oliveira , 25 years-old , Record date : 2079-11-09 . Cocke County Baptist Hospital . 0295 Keats Street'''\nnlu.load(\"med_ner.jsl.wip.clinical en.de_identify.clinical\").predict(data,output_level = 'sentence')",
"ner_wikiner_glove_840B_300 download started this may take some time.\nApproximate size to download 14.8 MB\n[OK!]\ndeidentify_enriched_clinical download started this may take some time.\nApproximate size to download 73.6 KB\n[OK!]\nglove_840B_300 download started this may take some time.\nApproximate size to download 2.3 GB\n[OK!]\nsentence_detector_dl download started this may take some time.\nApproximate size to download 354.6 KB\n[OK!]\n"
]
],
[
[
"#### [Deidentify PHI (Large)](https://nlp.johnsnowlabs.com/2020/08/04/deidentify_large_en.html)",
"_____no_output_____"
]
],
[
[
"data = '''Patient AIQING, 25 month years-old , born in Beijing, was transfered to the The Johns Hopkins Hospital.\n Phone number: (541) 754-3010. MSW 100009632582 for his colonic polyps. He wants to know the results from them.\n He is not taking hydrochlorothiazide and is curious about his blood pressure. He said he has cut his alcohol back to 6 pack once a week. \n He has cut back his cigarettes to one time per week. P: Follow up with Dr. Hobbs in 3 months. Gilbert P. Perez, M.D.'''\nnlu.load(\"med_ner.jsl.wip.clinical en.de_identify.large\").predict(data,output_level = 'sentence')",
"ner_wikiner_glove_840B_300 download started this may take some time.\nApproximate size to download 14.8 MB\n[OK!]\ndeidentify_large download started this may take some time.\nApproximate size to download 188.1 KB\n[OK!]\nglove_840B_300 download started this may take some time.\nApproximate size to download 2.3 GB\n[OK!]\nsentence_detector_dl download started this may take some time.\nApproximate size to download 354.6 KB\n[OK!]\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a84c765dbd44a853c5050fe5e329fb4ab8960c4
| 148,816 |
ipynb
|
Jupyter Notebook
|
notebooks/RESULT-effect-of-adaptation-1.ipynb
|
caglarcakan/sleeping_brain
|
12272f49bee1a8ad9eb30f47067abc1f252cded6
|
[
"MIT"
] | null | null | null |
notebooks/RESULT-effect-of-adaptation-1.ipynb
|
caglarcakan/sleeping_brain
|
12272f49bee1a8ad9eb30f47067abc1f252cded6
|
[
"MIT"
] | null | null | null |
notebooks/RESULT-effect-of-adaptation-1.ipynb
|
caglarcakan/sleeping_brain
|
12272f49bee1a8ad9eb30f47067abc1f252cded6
|
[
"MIT"
] | 1 |
2021-03-23T10:19:43.000Z
|
2021-03-23T10:19:43.000Z
| 373.909548 | 42,644 | 0.933273 |
[
[
[
"# change into the root directory of the project\nimport os\nif os.getcwd().split(\"/\")[-1] == \"notebooks\":\n os.chdir('..')",
"_____no_output_____"
],
[
"import logging\nlogger = logging.getLogger()\n#import warnings\n#warnings.filterwarnings(\"ignore\")\n\nlogger.setLevel(logging.INFO)\n#logging.disable(logging.WARNING)\n#logging.disable(logging.WARN)\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['image.cmap'] = 'plasma'\nimport scipy\nimport copy \n\nimport tqdm\n\nfrom neurolib.models.aln import ALNModel\nfrom neurolib.utils.parameterSpace import ParameterSpace\nfrom neurolib.optimize.exploration import BoxSearch\nimport neurolib.utils.functions as func\nimport neurolib.utils.devutils as du\n\nimport neurolib.utils.brainplot as bp\nimport neurolib.optimize.exploration.explorationUtils as eu\n\nfrom neurolib.utils.loadData import Dataset",
"_____no_output_____"
],
[
"from neurolib.utils import atlases\natlas = atlases.AutomatedAnatomicalParcellation2()",
"WARNING:root:Atlas doesn't start at 0, reindexing...\n"
],
[
"#plt.style.use(\"dark\")",
"_____no_output_____"
],
[
"plt.style.use(\"paper\")",
"_____no_output_____"
],
[
"# import matplotlib as mpl\n\n# mpl.rcParams['axes.spines.left'] = True\n# mpl.rcParams['axes.spines.right'] = True\n# mpl.rcParams['axes.spines.top'] = True\n# mpl.rcParams['axes.spines.bottom'] = True",
"_____no_output_____"
],
[
"ds = Dataset(\"gw\", fcd=False)\nmodel = ALNModel(Cmat = ds.Cmat, Dmat = ds.Dmat)\nmodel.params['dt'] = 0.1\nmodel.params['duration'] = 1.0 * 60 * 1000 #ms",
"INFO:root:Loading dataset gw_big from /Users/caglar/Documents/PhD/projects/neurolib/neurolib/utils/../data/datasets/gw_big.\nINFO:root:Dataset gw_big loaded.\nINFO:root:aln: Model initialized.\n"
],
[
"models = []",
"_____no_output_____"
],
[
"model.params[\"mue_ext_mean\"] = 3.3202829454334535\nmodel.params[\"mui_ext_mean\"] = 3.682451894176651\nmodel.params[\"b\"] = 3.2021806735984186\nmodel.params[\"tauA\"] = 4765.3385276559875\nmodel.params[\"sigma_ou\"] = 0.36802952978628106\nmodel.params[\"Ke_gl\"] = 265.48075753153\n\nmodels.append(copy.deepcopy(model))\ncontrol_params = copy.deepcopy(model.params)",
"_____no_output_____"
],
[
"def add_to_models(models, change_par, change_by = 0.5):\n model.params = copy.deepcopy(control_params)\n model.params[change_par] -= model.params[change_par] * change_by\n logging.info(f\"Adding {change_par} = {model.params[change_par]}\")\n models.append(copy.deepcopy(model))\n\n model.params = copy.deepcopy(control_params)\n model.params[change_par] += model.params[change_par] * change_by\n logging.info(f\"Adding {change_par} = {model.params[change_par]}\")\n models.append(copy.deepcopy(model))\n return models",
"_____no_output_____"
],
[
"#changepars = [\"b\", \"Ke_gl\", \"sigma_ou\", \"signalV\"]\nchangepars = [\"b\"]\nfor changepar in changepars:\n models = add_to_models(models, changepar)",
"INFO:root:Adding b = 1.6010903367992093\nINFO:root:Adding b = 4.803271010397628\n"
],
[
"#labels = [\"control\", \"$-b$\", \"$+b$\", \"$-K_{gl}$\", \"$+K_{gl}$\", \"$-\\\\sigma_{ou}$\", \"$+\\\\sigma_{ou}$\" , \"$-v_s$\", \"$+v_s$\"]\nlabels = [\"control\", \"$-b$\", \"$+b$\"]",
"_____no_output_____"
]
],
[
[
"# Run",
"_____no_output_____"
]
],
[
[
"for model in tqdm.tqdm(models, total=len(models)):\n model.run()",
"100%|██████████| 3/3 [01:07<00:00, 22.55s/it]\n"
],
[
"involvements = []\nall_states = []\nall_durations = []\nfor i in tqdm.tqdm(range(len(models)), total=len(models)):\n model = models[i]\n states = bp.detectSWs(model)\n all_states.append(states)\n durations = bp.get_state_lengths(states)\n all_durations.append(durations)\n involvement = bp.get_involvement(states)\n involvements.append(involvement)",
"100%|██████████| 3/3 [00:58<00:00, 19.35s/it]\n"
],
[
"#bp.plot_involvement_timeseries(models[0], involvements[0])",
"_____no_output_____"
],
[
"# Make a multiple-histogram of data-sets with different length.\n\n#import matplotlib as mpl\n#mpl.rc('text', usetex=False)\nindices = [1, 0, 2]\ncolors = ['C1', 'lightgray', 'C0']\nplt.figure(figsize=(2.5, 2))\nplt.hist([involvements[n]*100 for n in indices], 10, histtype='bar', density=True, rwidth=0.8, edgecolor='k', color=colors, label=[labels[n] for n in indices])\n#plt.title('Adaptation')\nplt.legend(fontsize=8, loc=1, frameon=False)\nplt.xticks([0, 50, 100])\nplt.yticks([])\nplt.ylabel(\"Density\")\nplt.xlabel(\"Involvement [%]\")\nplt.xlim([0, 100])\nplt.tight_layout()\n#plt.savefig(\"/Users/caglar/Documents/PhD/papers/2020-1-evolutionary-fitting/figures/assets/adaptation/assets/involvement-adaptation.pdf\", transparent=True)\nplt.show() ",
"_____no_output_____"
],
[
"for i, model in enumerate(models):\n states = bp.detectSWs(model, filter_long=True)\n bp.plot_states_timeseries(model, states, title=None, labels=False) \n #plt.savefig(f\"/Users/caglar/Documents/PhD/papers/2020-1-evolutionary-fitting/figures/assets/adaptation/assets/states-{labels[i]}.pdf\", transparent=True)\n plt.show()\n #bp.plot_state_durations(model, states)\n #plt.show()",
"_____no_output_____"
],
[
"import dill\nfor i, model in enumerate(models):\n fname = f\"data/models/effect-of-adaptation-{labels[i]}.dill\"\n print(fname)\n dill.dump(model, open(fname, \"wb+\"))",
"data/models/effect-of-adaptation-control.dill\ndata/models/effect-of-adaptation-$-b$.dill\ndata/models/effect-of-adaptation-$+b$.dill\n"
]
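For completeness, the dumped models can be restored later with `dill.load`; a minimal sketch assuming the same file-name pattern and `labels` list used in the cell above.

```python
import dill

reloaded_models = []
for label in labels:
    fname = f"data/models/effect-of-adaptation-{label}.dill"
    with open(fname, "rb") as f:
        reloaded_models.append(dill.load(f))
```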
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a85035121ffc125df0cef74e6b332fbc5a3971b
| 11,053 |
ipynb
|
Jupyter Notebook
|
src/notebooks/web_scrapper.ipynb
|
Evaaaaaaa/Learn2Relax-Stress-Detection-on-Social-Media
|
a2b4dd59b4fc9b832f51abce5c369cab2905ad87
|
[
"MIT"
] | 3 |
2020-06-26T22:25:31.000Z
|
2020-11-01T06:21:58.000Z
|
src/notebooks/web_scrapper.ipynb
|
Evaaaaaaa/Learn2Relax-Stress-Detection-on-Social-Media
|
a2b4dd59b4fc9b832f51abce5c369cab2905ad87
|
[
"MIT"
] | null | null | null |
src/notebooks/web_scrapper.ipynb
|
Evaaaaaaa/Learn2Relax-Stress-Detection-on-Social-Media
|
a2b4dd59b4fc9b832f51abce5c369cab2905ad87
|
[
"MIT"
] | 1 |
2020-06-24T00:21:35.000Z
|
2020-06-24T00:21:35.000Z
| 38.378472 | 242 | 0.387859 |
[
[
[
"<a href=\"https://colab.research.google.com/github/Evaaaaaaa/Learn2Relax/blob/master/notebooks/web_scrapper.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"%pip install praw",
"_____no_output_____"
]
],
[
[
"get posts that contain the word 'stress'",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport praw\n\nreddit = praw.Reddit(client_id='P-Y2W0ZbjYE7lA', client_secret='smd0GrdCwMc0DIcDKyviqw1bC2I', user_agent='Reddit Scrapper')\n\n\nsubs = ['PTSD', 'Assistance', 'relationships', 'survivorsofabuse',\n 'domesticviolence', 'Anxiety', 'homeless', 'Stress',\n 'COVID19','Coronavirus','domesticviolence', 'almosthomeless', 'food_pantry']\n\ncorpus = pd.DataFrame(posts,columns=['title', 'id', 'subreddit', 'num_comments', 'body', 'created'])\n\nfor sub in subs:\n posts = []\n subreddit = reddit.subreddit(sub)\n for post in subreddit.search(\"stress\"):\n a_series = pd.Series([post.title, post.id, post.subreddit, post.num_comments, post.selftext, post.created], index = corpus.columns)\n corpus = corpus.append(a_series, ignore_index=True)\n",
"_____no_output_____"
],
[
"corpus1",
"_____no_output_____"
],
[
"import pandas as pd\nimport praw\n\nreddit = praw.Reddit(client_id='P-Y2W0ZbjYE7lA', client_secret='smd0GrdCwMc0DIcDKyviqw1bC2I', user_agent='Reddit Scrapper')\n\n\nsubs = ['PTSD', 'Assistance', 'relationships', 'survivorsofabuse',\n 'domesticviolence', 'Anxiety', 'homeless', 'Stress',\n 'COVID19','Coronavirus','domesticviolence', 'almosthomeless', 'food_pantry']\n\ncorpus = pd.DataFrame(posts,columns=['id', 'body', 'subreddit', 'created'])\n\nfor sub in subs:\n posts = []\n subreddit = reddit.subreddit(sub)\n for post in subreddit.hot(limit=1e+10): \n comments = post.comments.list()[:-1]\n for comment in comments:\n a_series = pd.Series([comment.id, comment.body, comment.subreddit, comment.created], index = corpus.columns)\n corpus = corpus.append(a_series, ignore_index=True)\n",
"_____no_output_____"
]
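A note on the scraping cells above: `DataFrame.append` is deprecated in recent pandas releases (and removed in pandas 2.0). The same scrape is more future-proof if rows are collected in a plain list and the DataFrame is built once at the end; a minimal sketch under the same assumptions as above (an authenticated `reddit` client and the `subs` list).

```python
rows = []
for sub in subs:
    for post in reddit.subreddit(sub).search("stress"):
        rows.append({
            "title": post.title,
            "id": post.id,
            "subreddit": str(post.subreddit),
            "num_comments": post.num_comments,
            "body": post.selftext,
            "created": post.created,
        })

# Build the DataFrame in one step instead of appending row by row
corpus = pd.DataFrame(rows, columns=["title", "id", "subreddit", "num_comments", "body", "created"])
```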
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a85095076048a8550bfa9f315c8c6bdaa585939
| 4,480 |
ipynb
|
Jupyter Notebook
|
1_extracting_IDs_from_zip.ipynb
|
nathaliagg/pmi
|
e4c06bf65e26cd9654ed8dfafc9b9e263278d694
|
[
"MIT"
] | null | null | null |
1_extracting_IDs_from_zip.ipynb
|
nathaliagg/pmi
|
e4c06bf65e26cd9654ed8dfafc9b9e263278d694
|
[
"MIT"
] | null | null | null |
1_extracting_IDs_from_zip.ipynb
|
nathaliagg/pmi
|
e4c06bf65e26cd9654ed8dfafc9b9e263278d694
|
[
"MIT"
] | null | null | null | 23.829787 | 623 | 0.530357 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Get-the-directories-within-the-zip-files-to-extract\" data-toc-modified-id=\"Get-the-directories-within-the-zip-files-to-extract-1\"><span class=\"toc-item-num\">1 </span>Get the directories within the zip files to extract</a></span></li><li><span><a href=\"#Extracting-sample-information-(aka-metadata)-and-BIOM-files\" data-toc-modified-id=\"Extracting-sample-information-(aka-metadata)-and-BIOM-files-2\"><span class=\"toc-item-num\">2 </span>Extracting sample information (aka metadata) and BIOM files</a></span></li></ul></div>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os\nimport subprocess",
"_____no_output_____"
]
],
[
[
"# Get the directories within the zip files to extract",
"_____no_output_____"
]
],
[
[
"pwd",
"_____no_output_____"
],
[
"df = pd.read_csv('Literature_Sources-Sheet1.csv')\ndf = df[(~df['StudyID'].isna()) & (df['ID Final table'] != '-')]\ndf['StudyID'] = df['StudyID'].astype(int).astype(str)\nlist_dirs_to_extract = list(zip(df['StudyID'], df['ID Final table']))\nprint(len(list_dirs_to_extract))",
"7\n"
]
],
[
[
"# Extracting sample information (aka metadata) and BIOM files",
"_____no_output_____"
]
],
[
[
"for d in list_dirs_to_extract:\n print(d)\n os.chdir(d[0])\n cmd_1 = f\"unzip sample_information*\"\n subprocess.call(cmd_1, shell=True)\n cmd_2 = f'unzip biom_{d[0]}.zip \"BIOM/{d[1]}/*\"'\n subprocess.call(cmd_2, shell=True)\n # print(cmd_1)\n # print(cmd_2)\n os.chdir(\"../\")",
"('1609', '45905')\n('714', '46682')\n('10141', '44906')\n('10141', '44878')\n('10143', '45990')\n('10143', '46002')\n('10321', '47677')\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a8512e19dbeeaf7ac06a0bdfa1ffbf69079eb35
| 83,794 |
ipynb
|
Jupyter Notebook
|
connectivity_analysis/signal_extraction/dn_sa_08_signal_exploration.ipynb
|
kbonna/decidenet
|
e0a1c90b17e96351f5a2f4e677bb6f5193fc6977
|
[
"MIT"
] | 7 |
2019-11-14T14:27:13.000Z
|
2020-07-06T16:26:44.000Z
|
connectivity_analysis/signal_extraction/dn_sa_08_signal_exploration.ipynb
|
kbonna/DecideNet
|
e0a1c90b17e96351f5a2f4e677bb6f5193fc6977
|
[
"MIT"
] | 1 |
2020-12-23T15:40:01.000Z
|
2020-12-23T15:40:01.000Z
|
connectivity_analysis/signal_extraction/dn_sa_08_signal_exploration.ipynb
|
kbonna/DecideNet
|
e0a1c90b17e96351f5a2f4e677bb6f5193fc6977
|
[
"MIT"
] | 1 |
2021-04-08T07:07:12.000Z
|
2021-04-08T07:07:12.000Z
| 585.972028 | 79,320 | 0.948803 |
[
[
[
"import os\nimport sys\nimport itertools\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport statsmodels.regression.linear_model as sm\nfrom scipy import io\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\npath_root = os.environ.get('DECIDENET_PATH')\npath_code = os.path.join(path_root, 'code')\nif path_code not in sys.path:\n sys.path.append(path_code)\nfrom dn_utils.behavioral_models import load_behavioral_data\n\n%matplotlib inline",
"_____no_output_____"
],
[
"# Directory for PPI analysis\npath_out = os.path.join(path_root, 'data/main_fmri_study/derivatives/ppi')\npath_timeries = os.path.join(path_out, 'timeseries')\n\n# Load behavioral data\npath_beh = os.path.join(path_root, 'data/main_fmri_study/sourcedata/behavioral')\nbeh, meta = load_behavioral_data(path=path_beh, verbose=False)\nn_subjects, n_conditions, n_trials, _ = beh.shape\n\n# Load neural & BOLD timeseries\ndata = io.loadmat(os.path.join(\n path_timeries, \n 'timeseries_pipeline-24HMPCSFWM_atlas-metaROI_neural.mat'))\ntimeseries_neural_aggregated = data['timeseries_neural_aggregated']\ntimeseries_denoised_aggregated = np.load(os.path.join(\n path_timeries, \n 'timeseries_pipeline-24HMPCSFWM_atlas-metaROI_bold.npy'))\ndownsamples = data['k'].flatten()\n\n# Acquisition parameters\n_, _, n_volumes, n_rois = timeseries_denoised_aggregated.shape\n\n# Input data shape\nprint('timeseries_neural_aggregated.shape', timeseries_neural_aggregated.shape)\nprint('timeseries_denoised_aggregated.shape', timeseries_denoised_aggregated.shape)",
"timeseries_neural_aggregated.shape (32, 2, 11680, 30)\ntimeseries_denoised_aggregated.shape (32, 2, 730, 30)\n"
],
[
"mpl.rcParams.update({\"font.size\": 15})\n\nfc_rest = np.zeros((n_subjects, n_conditions, n_rois, n_rois))\nfor i in range(n_subjects):\n for j in range(n_conditions):\n fc_rest[i, j] = np.corrcoef(timeseries_denoised_aggregated[i, j].T)\n \n\nfig, ax = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))\n\nim = [[None, None], [None, None]]\nim[0][0] = ax[0][0].imshow(fc_rest[:, 0, :, :].mean(axis=0), clim=[-1, 1], cmap='RdBu_r')\nim[0][1] = ax[0][1].imshow(fc_rest[:, 1, :, :].mean(axis=0), clim=[-1, 1], cmap='RdBu_r')\nim[1][0] = ax[1][0].imshow(fc_rest[:, 0, :, :].std(axis=0), clim=[0, .2], cmap='RdBu_r')\nim[1][1] = ax[1][1].imshow(fc_rest[:, 1, :, :].std(axis=0), clim=[0, .2], cmap='RdBu_r')\n\nfor i, j in itertools.product([0, 1], repeat=2):\n divider = make_axes_locatable(ax[i][j])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im[i][j], cax=cax)\n \nax[0][0].set_title(\"Reward-seeking\")\nax[0][1].set_title(\"Punishment-avoiding\")\nax[0][0].set_ylabel(\"Mean connectivity\")\nax[1][0].set_ylabel(\"Variability of connectivity\")\n\nplt.tight_layout()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a85153cfeb0f4988f78734885d8e5052bf850e7
| 57,272 |
ipynb
|
Jupyter Notebook
|
interactivecontent/compare-two-samples-by-bootstrapping/deflategate.ipynb
|
garath/inferentialthinking
|
fd27c1aa0b813de273160cbe682e28cc9da0dae3
|
[
"MIT"
] | null | null | null |
interactivecontent/compare-two-samples-by-bootstrapping/deflategate.ipynb
|
garath/inferentialthinking
|
fd27c1aa0b813de273160cbe682e28cc9da0dae3
|
[
"MIT"
] | null | null | null |
interactivecontent/compare-two-samples-by-bootstrapping/deflategate.ipynb
|
garath/inferentialthinking
|
fd27c1aa0b813de273160cbe682e28cc9da0dae3
|
[
"MIT"
] | null | null | null | 72.957962 | 29,004 | 0.738476 |
[
[
[
"from datascience import *\n%matplotlib inline\npath_data = '../../../../data/'\nimport matplotlib.pyplot as plots\nplots.style.use('fivethirtyeight')\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Deflategate ###\nOn January 18, 2015, the Indianapolis Colts and the New England Patriots played the American Football Conference (AFC) championship game to determine which of those teams would play in the Super Bowl. After the game, there were allegations that the Patriots' footballs had not been inflated as much as the regulations required; they were softer. This could be an advantage, as softer balls might be easier to catch.\n\nFor several weeks, the world of American football was consumed by accusations, denials, theories, and suspicions: the press labeled the topic Deflategate, after the Watergate political scandal of the 1970's. The National Football League (NFL) commissioned an independent analysis. In this example, we will perform our own analysis of the data.\n\nPressure is often measured in pounds per square inch (psi). NFL rules stipulate that game balls must be inflated to have pressures in the range 12.5 psi and 13.5 psi. Each team plays with 12 balls. Teams have the responsibility of maintaining the pressure in their own footballs, but game officials inspect the balls. Before the start of the AFC game, all the Patriots' balls were at about 12.5 psi. Most of the Colts' balls were at about 13.0 psi. However, these pre-game data were not recorded.\n\nDuring the second quarter, the Colts intercepted a Patriots ball. On the sidelines, they measured the pressure of the ball and determined that it was below the 12.5 psi threshold. Promptly, they informed officials. \n\nAt half-time, all the game balls were collected for inspection. Two officials, Clete Blakeman and Dyrol Prioleau, measured the pressure in each of the balls. \n\nHere are the data. Each row corresponds to one football. Pressure is measured in psi. The Patriots ball that had been intercepted by the Colts was not inspected at half-time. Nor were most of the Colts' balls – the officials simply ran out of time and had to relinquish the balls for the start of second half play.",
"_____no_output_____"
]
],
[
[
"football = Table.read_table(path_data + 'deflategate.csv')\nfootball.show()",
"_____no_output_____"
]
],
[
[
"For each of the 15 balls that were inspected, the two officials got different results. It is not uncommon that repeated measurements on the same object yield different results, especially when the measurements are performed by different people. So we will assign to each the ball the average of the two measurements made on that ball.",
"_____no_output_____"
]
],
[
[
"football = football.with_column(\n 'Combined', (football.column(1)+football.column(2))/2\n ).drop(1, 2)\nfootball.show()",
"_____no_output_____"
]
],
[
[
"At a glance, it seems apparent that the Patriots' footballs were at a lower pressure than the Colts' balls. Because some deflation is normal during the course of a game, the independent analysts decided to calculate the drop in pressure from the start of the game. Recall that the Patriots' balls had all started out at about 12.5 psi, and the Colts' balls at about 13.0 psi. Therefore the drop in pressure for the Patriots' balls was computed as 12.5 minus the pressure at half-time, and the drop in pressure for the Colts' balls was 13.0 minus the pressure at half-time.\n\nWe can calculate the drop in pressure for each football, by first setting up an array of the starting values. For this we will need an array consisting of 11 values each of which is 12.5, and another consisting of four values each of which is all 13. We will use the NumPy function `np.ones`, which takes a count as its argument and returns an array of that many elements, each of which is 1.",
"_____no_output_____"
]
],
[
[
"np.ones(11)",
"_____no_output_____"
],
[
"patriots_start = 12.5 * np.ones(11)\ncolts_start = 13 * np.ones(4)\nstart = np.append(patriots_start, colts_start)\nstart",
"_____no_output_____"
]
],
[
[
"The drop in pressure for each football is the difference between the starting pressure and the combined pressure measurement.",
"_____no_output_____"
]
],
[
[
"drop = start - football.column('Combined')\nfootball = football.with_column('Pressure Drop', drop)\nfootball.show()",
"_____no_output_____"
]
],
[
[
"It looks as though the Patriots' drops were larger than the Colts'. Let's look at the average drop in each of the two groups. We no longer need the combined scores.",
"_____no_output_____"
]
],
[
[
"football = football.drop('Combined')\nfootball.group('Team', np.average)",
"_____no_output_____"
]
],
[
[
"The average drop for the Patriots was about 1.2 psi compared to about 0.47 psi for the Colts. \n\nThe question now is why the Patriots' footballs had a larger drop in pressure, on average, than the Colts footballs. Could it be due to chance?\n\n### The Hypotheses ###\nHow does chance come in here? Nothing was being selected at random. But we can make a chance model by hypothesizing that the 11 Patriots' drops look like a random sample of 11 out of all the 15 drops, with the Colts' drops being the remaining four. That's a completely specified chance model under which we can simulate data. So it's the **null hypothesis**.\n\nFor the alternative, we can take the position that the Patriots' drops are too large, on average, to resemble a random sample drawn from all the drops. \n\n### Test Statistic ###\nA natural statistic is the difference between the two average drops, which we will compute as \"average drop for Patriots - average drop for Colts\". Large values of this statistic will favor the alternative hypothesis.",
"_____no_output_____"
]
],
[
[
"observed_means = football.group('Team', np.average).column(1)\n\nobserved_difference = observed_means.item(1) - observed_means.item(0)\nobserved_difference",
"_____no_output_____"
]
],
[
[
"This positive difference reflects the fact that the average drop in pressure of the Patriots' footballs was greater than that of the Colts.",
"_____no_output_____"
],
[
"The function `difference_of_means` takes three arguments:\n\n- the name of the table of data\n- the label of the column containing the numerical variable whose average is of interest\n- the label of the column containing the two group labels\n\nIt returns the difference between the means of the two groups. \n\nWe have defined this function in an earlier section. The definition is repeated here for ease of reference.",
"_____no_output_____"
]
],
[
[
"def difference_of_means(table, label, group_label):\n reduced = table.select(label, group_label)\n means_table = reduced.group(group_label, np.average)\n means = means_table.column(1)\n return means.item(1) - means.item(0)",
"_____no_output_____"
],
[
"difference_of_means(football, 'Pressure Drop', 'Team')",
"_____no_output_____"
]
],
[
[
"Notice that the difference has been calculated as Patriots' drops minus Colts' drops as before.",
"_____no_output_____"
],
[
"### Predicting the Statistic Under the Null Hypothesis ###\nIf the null hypothesis were true, then it shouldn't matter which footballs are labeled Patriots and which are labeled Colts. The distributions of the two sets of drops would be the same. We can simulate this by randomly shuffling the team labels.",
"_____no_output_____"
]
],
[
[
"shuffled_labels = football.sample(with_replacement=False).column(0)\noriginal_and_shuffled = football.with_column('Shuffled Label', shuffled_labels)\noriginal_and_shuffled.show()",
"_____no_output_____"
]
],
[
[
"How do all the group averages compare?",
"_____no_output_____"
]
],
[
[
"difference_of_means(original_and_shuffled, 'Pressure Drop', 'Shuffled Label')",
"_____no_output_____"
],
[
"difference_of_means(original_and_shuffled, 'Pressure Drop', 'Team')",
"_____no_output_____"
]
],
[
[
"The two teams' average drop values are closer when the team labels are randomly assigned to the footballs than they were for the two groups actually used in the game.\n\n### Permutation Test ###\nIt's time for a step that is now familiar. We will do repeated simulations of the test statistic under the null hypothesis, by repeatedly permuting the footballs and assigning random sets to the two teams.\n\nOnce again, we will use the function `one_simulated_difference` defined in an earlier section as follows.",
"_____no_output_____"
]
],
[
[
"def one_simulated_difference(table, label, group_label):\n shuffled_labels = table.sample(with_replacement = False\n ).column(group_label)\n shuffled_table = table.select(label).with_column(\n 'Shuffled Label', shuffled_labels)\n return difference_of_means(shuffled_table, label, 'Shuffled Label') ",
"_____no_output_____"
]
],
[
[
"We can now use this function to create an array `differences` that contains 10,000 values of the test statistic simulated under the null hypothesis.",
"_____no_output_____"
]
],
[
[
"differences = make_array()\n\nrepetitions = 10000\nfor i in np.arange(repetitions):\n new_difference = one_simulated_difference(football, 'Pressure Drop', 'Team')\n differences = np.append(differences, new_difference)",
"_____no_output_____"
]
],
[
[
"### Conclusion of the Test ###\nTo calculate the empirical P-value, it's important to recall the alternative hypothesis, which is that the Patriots' drops are too large to be the result of chance variation alone.\n\nLarger drops for the Patriots favor the alternative hypothesis. So the P-value is the chance (computed under the null hypothesis) of getting a test statistic equal to our observed value of 0.733522727272728 or larger.",
"_____no_output_____"
]
],
[
[
"empirical_P = np.count_nonzero(differences >= observed_difference) / 10000\nempirical_P",
"_____no_output_____"
]
],
[
[
"That's a pretty small P-value. To visualize this, here is the empirical distribution of the test statistic under the null hypothesis, with the observed statistic marked on the horizontal axis.",
"_____no_output_____"
]
],
[
[
"Table().with_column('Difference Between Group Averages', differences).hist()\nplots.scatter(observed_difference, 0, color='red', s=30)\nplots.title('Prediction Under the Null Hypothesis')\nprint('Observed Difference:', observed_difference)\nprint('Empirical P-value:', empirical_P)",
"Observed Difference: 0.733522727272728\nEmpirical P-value: 0.0043\n"
]
],
[
[
"As in previous examples of this test, the bulk of the distribution is centered around 0. Under the null hypothesis, the Patriots' drops are a random sample of all 15 drops, and therefore so are the Colts'. Therefore the two sets of drops should be about equal on average, and therefore their difference should be around 0.\n\nBut the observed value of the test statistic is quite far away from the heart of the distribution. By any reasonable cutoff for what is \"small\", the empirical P-value is small. So we end up rejecting the null hypothesis of randomness, and conclude that the Patriots drops were too large to reflect chance variation alone.\n\nThe independent investigative team analyzed the data in several different ways, taking into account the laws of physics. The final report said, \n\n> \"[T]he average pressure drop of the Patriots game balls exceeded the average pressure drop of the Colts balls by 0.45 to 1.02 psi, depending on various possible assumptions regarding the gauges used, and assuming an initial pressure of 12.5 psi for the Patriots balls and 13.0 for the Colts balls.\"\n> \n> -- *Investigative report commissioned by the NFL regarding the AFC Championship game on January 18, 2015*\n\nOur analysis shows an average pressure drop of about 0.73 psi, which is close to the center of the interval \"0.45 to 1.02 psi\" and therefore consistent with the official analysis.",
"_____no_output_____"
],
[
"Remember that our test of hypotheses does not establish the reason *why* the difference is not due to chance. Establishing causality is usually more complex than running a test of hypotheses.\n\nBut the all-important question in the football world was about causation: the question was whether the excess drop of pressure in the Patriots' footballs was deliberate. If you are curious about the answer given by the investigators, here is the [full report](https://nfllabor.files.wordpress.com/2015/05/investigative-and-expert-reports-re-footballs-used-during-afc-championsh.pdf).",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a853465e0ca0db494f08e9eadfe46681e0be610
| 2,226 |
ipynb
|
Jupyter Notebook
|
guide/tutorial/02-step-01-data-download.ipynb
|
jdvelasq/tech-miner
|
85735b3b94b9d56784eafce73c7f9bee37d8c6ed
|
[
"MIT"
] | 1 |
2021-12-03T11:10:35.000Z
|
2021-12-03T11:10:35.000Z
|
guide/tutorial/02-step-01-data-download.ipynb
|
jdvelasq/tech-miner
|
85735b3b94b9d56784eafce73c7f9bee37d8c6ed
|
[
"MIT"
] | null | null | null |
guide/tutorial/02-step-01-data-download.ipynb
|
jdvelasq/tech-miner
|
85735b3b94b9d56784eafce73c7f9bee37d8c6ed
|
[
"MIT"
] | null | null | null | 24.733333 | 340 | 0.609164 |
[
[
[
"Step 1: Data download by the user\n====",
"_____no_output_____"
],
[
"The information is usually captured by the user directly in a biblographical database as Scopus using a complex search string. The refinement of the search string is iterative and preliminary analysis of the keywords can help debug the search string. In this first part of the tutorial it is assumed that the search string is refined.",
"_____no_output_____"
],
[
"We recommend downloading the bibliographic information in three files to facilitate the loading of data in Python. The first file, named `demo-citations.csv`, contains the fields showed in the following figure:",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"The second file, named `demo-keywords.csv`, contains the information showed in the next figure.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"Finally, the third file, `demo-refs.csv`, contains the information about references cited by docuemnts selected in the search.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"The selected fields maximize the potential of information for obtain insights of the data.",
"_____no_output_____"
]
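Since the stated goal of the three-file download is to ease loading in Python, a minimal loading sketch may help. It assumes the files sit in the working directory and that pandas is available; the variable names are illustrative and not part of the original tutorial.

```python
import pandas as pd

# The three files described above, as downloaded from the bibliographic database
citations = pd.read_csv("demo-citations.csv")
keywords = pd.read_csv("demo-keywords.csv")
refs = pd.read_csv("demo-refs.csv")

# Quick sanity check of what was downloaded
for name, df in [("citations", citations), ("keywords", keywords), ("refs", refs)]:
    print(name, df.shape)
```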
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a853c8c432ee12cf7e29370b4a86b51ae940da3
| 1,516 |
ipynb
|
Jupyter Notebook
|
docs/scala/basics/find_largest_key_or_value_in_a_map.ipynb
|
revgizmo-forks/ds_notes
|
ffc73d06b07fb2b137e7e679d3c99dab53580afa
|
[
"CC0-1.0"
] | 1 |
2018-01-09T19:06:03.000Z
|
2018-01-09T19:06:03.000Z
|
docs/scala/basics/find_largest_key_or_value_in_a_map.ipynb
|
revgizmo-forks/ds_notes
|
ffc73d06b07fb2b137e7e679d3c99dab53580afa
|
[
"CC0-1.0"
] | null | null | null |
docs/scala/basics/find_largest_key_or_value_in_a_map.ipynb
|
revgizmo-forks/ds_notes
|
ffc73d06b07fb2b137e7e679d3c99dab53580afa
|
[
"CC0-1.0"
] | 1 |
2020-10-17T22:00:42.000Z
|
2020-10-17T22:00:42.000Z
| 1,516 | 1,516 | 0.590369 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a8549b22b241b91ca7ca4e93ec418c9eff3d2d3
| 3,910 |
ipynb
|
Jupyter Notebook
|
examples/notebooks/12_split_map.ipynb
|
aplitest/leafmap
|
17dde7dc8aca6553633608e5c6135799baf27176
|
[
"MIT"
] | 1,187 |
2021-03-10T02:50:29.000Z
|
2022-03-29T19:53:40.000Z
|
examples/notebooks/12_split_map.ipynb
|
aplitest/leafmap
|
17dde7dc8aca6553633608e5c6135799baf27176
|
[
"MIT"
] | 118 |
2021-05-25T18:17:20.000Z
|
2022-03-20T20:58:32.000Z
|
examples/notebooks/12_split_map.ipynb
|
aplitest/leafmap
|
17dde7dc8aca6553633608e5c6135799baf27176
|
[
"MIT"
] | 137 |
2021-03-23T09:49:13.000Z
|
2022-03-27T21:39:22.000Z
| 21.843575 | 154 | 0.557289 |
[
[
[
"[](https://gishub.org/leafmap-binder)\n\n**Creating a split-panel map with only one line of code**\n\nUncomment the following line to install [leafmap](https://leafmap.org) if needed.",
"_____no_output_____"
]
],
[
[
"# !pip install leafmap",
"_____no_output_____"
]
],
[
[
"This notebook example requires the ipyleaflet plotting backend. Folium is not supported.",
"_____no_output_____"
]
],
[
[
"import leafmap.leafmap as leafmap",
"_____no_output_____"
]
],
[
[
"Print out the list of available basemaps.",
"_____no_output_____"
]
],
[
[
"print(leafmap.leafmap_basemaps.keys())",
"_____no_output_____"
]
],
[
[
"Create a split-panel map by specifying the `left_layer` and `right_layer`, which can be chosen from the basemap names, or any custom XYZ tile layer.",
"_____no_output_____"
]
],
[
[
"leafmap.split_map(left_layer=\"ROADMAP\", right_layer=\"HYBRID\")",
"_____no_output_____"
]
],
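The text above also allows any custom XYZ tile layer, but the notebook only demonstrates named basemaps. A minimal sketch of the XYZ case, assuming (per that statement) that `split_map` accepts an `ipyleaflet.TileLayer` for either side in the installed leafmap version; the OpenStreetMap tile URL is only an example.

```python
from ipyleaflet import TileLayer

# Custom XYZ tile layer built from a raw tile URL template
osm_layer = TileLayer(
    url="https://tile.openstreetmap.org/{z}/{x}/{y}.png",
    name="OpenStreetMap (custom XYZ)",
    attribution="OpenStreetMap contributors",
)

leafmap.split_map(left_layer=osm_layer, right_layer="HYBRID")
```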
[
[
"Hide the zoom control from the map.",
"_____no_output_____"
]
],
[
[
"leafmap.split_map(left_layer=\"Esri.WorldTopoMap\", right_layer=\"OpenTopoMap\", zoom_control=False)",
"_____no_output_____"
]
],
[
[
"Add labels to the map and change the default map center and zoom level.",
"_____no_output_____"
]
],
[
[
"leafmap.split_map(left_layer=\"NLCD 2001 CONUS Land Cover\", right_layer=\"NLCD 2016 CONUS Land Cover\", \n left_label = \"2001\", right_label=\"2016\", label_position=\"bottom\", center=[36.1, -114.9], zoom=10)",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a855ecd1253e4902404591480f5a3e12ce86c59
| 62,833 |
ipynb
|
Jupyter Notebook
|
examples/demo.ipynb
|
MartinKocour/MarkovModels
|
c488c697f6b87420e3222fd1e83d04390e0ec92f
|
[
"MIT"
] | null | null | null |
examples/demo.ipynb
|
MartinKocour/MarkovModels
|
c488c697f6b87420e3222fd1e83d04390e0ec92f
|
[
"MIT"
] | null | null | null |
examples/demo.ipynb
|
MartinKocour/MarkovModels
|
c488c697f6b87420e3222fd1e83d04390e0ec92f
|
[
"MIT"
] | null | null | null | 85.024357 | 12,922 | 0.613849 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a85617e8e77c7313268783103d2bed1f9704c5c
| 42,045 |
ipynb
|
Jupyter Notebook
|
Main.ipynb
|
UdayShankar04/Twitter_sentiment_analysis
|
ed0327ec7c1bf750a38d5e6251b15eb854ed3e93
|
[
"MIT"
] | null | null | null |
Main.ipynb
|
UdayShankar04/Twitter_sentiment_analysis
|
ed0327ec7c1bf750a38d5e6251b15eb854ed3e93
|
[
"MIT"
] | null | null | null |
Main.ipynb
|
UdayShankar04/Twitter_sentiment_analysis
|
ed0327ec7c1bf750a38d5e6251b15eb854ed3e93
|
[
"MIT"
] | null | null | null | 85.981595 | 1,636 | 0.657082 |
[
[
[
"# This is Main function.\n# Extracting streaming data from Twitter, pre-processing, and loading into MySQL\nimport credentials # Import api/access_token keys from credentials.py\nimport setting # Import related setting constants from settings.py \n\nimport re\nimport tweepy\nimport mysql.connector\nimport pandas as pd\nfrom textblob import TextBlob\n# Streaming With Tweepy \n# http://docs.tweepy.org/en/v3.4.0/streaming_how_to.html#streaming-with-tweepy\n\n\n# Override tweepy.StreamListener to add logic to on_status\nclass MyStreamListener(tweepy.StreamListener):\n '''\n Tweets are known as “status updates”. So the Status class in tweepy has properties describing the tweet.\n https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/tweet-object.html\n '''\n \n def on_status(self, status):\n '''\n Extract info from tweets\n '''\n \n if status.retweeted:\n # Avoid retweeted info, and only original tweets will be received\n return True\n # Extract attributes from each tweet\n id_str = status.id_str\n created_at = status.created_at\n text = deEmojify(status.text) # Pre-processing the text \n sentiment = TextBlob(text).sentiment\n polarity = sentiment.polarity\n subjectivity = sentiment.subjectivity\n \n user_created_at = status.user.created_at\n user_location = deEmojify(status.user.location)\n user_description = deEmojify(status.user.description)\n user_followers_count =status.user.followers_count\n longitude = None\n latitude = None\n if status.coordinates:\n longitude = status.coordinates['coordinates'][0]\n latitude = status.coordinates['coordinates'][1]\n \n retweet_count = status.retweet_count\n favorite_count = status.favorite_count\n \n print(status.text)\n print(\"Long: {}, Lati: {}\".format(longitude, latitude))\n \n # Store all data in MySQL\n if mydb.is_connected():\n mycursor = mydb.cursor()\n sql = \"INSERT INTO {} (id_str, created_at, text, polarity, subjectivity, user_created_at, user_location, user_description, user_followers_count, longitude, latitude, retweet_count, favorite_count) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\".format(setting.TABLE_NAME)\n val = (id_str, created_at, text, polarity, subjectivity, user_created_at, user_location, \\\n user_description, user_followers_count, longitude, latitude, retweet_count, favorite_count)\n mycursor.execute(sql, val)\n mydb.commit()\n mycursor.close()\n \n \n def on_error(self, status_code):\n '''\n Since Twitter API has rate limits, stop srcraping data as it exceed to the thresold.\n '''\n if status_code == 420:\n # return False to disconnect the stream\n return ",
"_____no_output_____"
],
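The cell above imports a local `setting` module (`setting.TRACK_WORDS`, `setting.TABLE_NAME`, `setting.TABLE_ATTRIBUTES`) that is not shown in this notebook. Below is a hypothetical sketch of what it could contain, inferred from the INSERT statement above and from the streamed output (which tracks "Louis Vuitton"); the author's actual values may differ.

```python
# setting.py -- illustrative values only; the original module is not included here
TRACK_WORDS = ["Louis Vuitton"]   # keyword(s) passed to Stream.filter(track=...)
TABLE_NAME = "LouisVuitton"       # hypothetical MySQL table name
TABLE_ATTRIBUTES = (
    "id_str VARCHAR(255), created_at DATETIME, text VARCHAR(255), "
    "polarity FLOAT, subjectivity FLOAT, user_created_at VARCHAR(255), "
    "user_location VARCHAR(255), user_description VARCHAR(255), "
    "user_followers_count INT, longitude DOUBLE, latitude DOUBLE, "
    "retweet_count INT, favorite_count INT"
)
```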
[
"def clean_tweet(self, tweet): \n ''' \n Use sumple regex statemnents to clean tweet text by removing links and special characters\n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t]) \\\n |(\\w+:\\/\\/\\S+)\", \" \", tweet).split()) \ndef deEmojify(text):\n '''\n Strip all non-ASCII characters to remove emoji characters\n '''\n if text:\n return text.encode('ascii', 'ignore').decode('ascii')\n else:\n return None",
"_____no_output_____"
],
[
"\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"Twitterdb\",\n charset = 'utf8'\n)\nif mydb.is_connected():\n '''\n Check if this table exits. If not, then create a new one.\n '''\n mycursor = mydb.cursor()\n mycursor.execute(\"\"\"\n SELECT COUNT(*)\n FROM information_schema.tables\n WHERE table_name = '{0}'\n \"\"\".format(setting.TABLE_NAME))\n if mycursor.fetchone()[0] != 1:\n mycursor.execute(\"CREATE TABLE {} ({})\".format(setting.TABLE_NAME, setting.TABLE_ATTRIBUTES))\n mydb.commit()\n mycursor.close()",
"_____no_output_____"
],
[
"auth = tweepy.OAuthHandler(credentials.API_KEYS, credentials.API_SECRET_KEYS)\nauth.set_access_token(credentials.ACCESS_TOKEN, credentials.ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth)",
"_____no_output_____"
],
[
"myStreamListener = MyStreamListener()\nmyStream = tweepy.Stream(auth = api.auth, listener = myStreamListener)\nmyStream.filter(languages=[\"en\"], track = setting.TRACK_WORDS)\n# Close the MySQL connection as it finished\n# However, this won't be reached as the stream listener won't stop automatically\n# Press STOP button to finish the process.\nmydb.close()",
"Check out what I just added to my closet on Poshmark: Louis Vuitton Noe Petit Bag. https://t.co/H4ZbbKpbjX via @poshmarkapp #shopmycloset\nLong: None, Lati: None\nRT @sunglassesemoji: Middle aged white women love to come in wearing Louis Vuitton and tipping their servers $0. Who’re you flexing on baby…\nLong: None, Lati: None\ni was singing lyrics while on my hike earlier today, \"gucci gucci, loui loui, fenti fenti, prada.\" \n\nnow i get Loui… https://t.co/hiVSk0EIGk\nLong: None, Lati: None\nRT @Reveblinkland: I just peeped Seulgi and Jeongyeon got the same Louis Vuitton green suits 🤨🤔. Am I just slow or did y'all notice too htt…\nLong: None, Lati: None\n@LouisVuitton Hope the whole of Louis Vuitton is well and safe in these difficult times https://t.co/qegGYCRzjS\nLong: None, Lati: None\nwith a louis vuitton speedy bag\nLong: None, Lati: None\nRT @tearsofjisoo: streets saying jisoo x louis vuitton but wtf is bitton https://t.co/GOrpWGrmRL\nLong: None, Lati: None\nRT @BoF: The answer has a lot to do with Louis Vuitton, Dior and the Chinese market. BoF breaks down the key factors behind the rebound. ht…\nLong: None, Lati: None\nRT @DailyCpop: #OuyangNana updates Weibo with a shoutout to Louis Vuitton's Summer 2021 female fashion wear. https://t.co/er5MGgPzW0\nLong: None, Lati: None\nRT @tearsofjisoo: streets saying jisoo x louis vuitton but wtf is bitton https://t.co/GOrpWGrmRL\nLong: None, Lati: None\nI just saw a tweet and a girl have grass craved out in the Louis Vuitton logo all over her living room walls and I’… https://t.co/XGrNEXIfBu\nLong: None, Lati: None\nRT @2000sphase: Kim & Paris carrying their iconic Louis Vuitton metallic Alma bags 13 years later. https://t.co/N89BO0jmuw\nLong: None, Lati: None\nRT @jisoograce: WHAAAT?! OMG JISOO LOUIS VUITTON?!!!\nLong: None, Lati: None\nRT @thanksic: 201017 TWICE x FBE \"REACT\" on IGTV\n\nLOUIS VUITTON\nsignature print & embroidery t-shirt\n\napprox. $ 694\n\n#MINA #TWICE #미나 #트와이스…\nLong: None, Lati: None\nRT @tearsofjisoo: streets saying jisoo x louis vuitton but wtf is bitton https://t.co/GOrpWGrmRL\nLong: None, Lati: None\nAlicia Vikander in a White Striped Leggings Arrives at 2021 Louis Vuitton Fashion Show During Paris Fashion Week in… https://t.co/UprluW2zM4\nLong: None, Lati: None\n@fuckmemayb The Chanel or Balenciaga, Louis and Vuitton\nLong: None, Lati: None\nRT @tearsofjisoo: streets saying jisoo x louis vuitton but wtf is bitton https://t.co/GOrpWGrmRL\nLong: None, Lati: None\nRT @2000sphase: Kim & Paris carrying their iconic Louis Vuitton metallic Alma bags 13 years later. https://t.co/N89BO0jmuw\nLong: None, Lati: None\nRT @sooyaswhore: WAIT JISOO AND LOUIS VUITTON CONFIRMED????? FUCK YES\nLong: None, Lati: None\nNot blinks being illiterate 😩😩\nLong: None, Lati: None\nnicekicks: Lucien Clarke’s Louis Vuitton “A View” skate sneakers are around the corner. 💥\nLong: None, Lati: None\nRT @tearsofjisoo: streets saying jisoo x louis vuitton but wtf is bitton https://t.co/GOrpWGrmRL\nLong: None, Lati: None\nRT @tearsofjisoo: streets saying jisoo x louis vuitton but wtf is bitton https://t.co/GOrpWGrmRL\nLong: None, Lati: None\nY’all Louis Vuitton print be real light 🥴\nLong: None, Lati: None\nthe Louis Vuitton bag I’ve been waiting for the last 8 months to restock is bacckkkkkk 😭🙏🏾\nLong: None, Lati: None\nIt’s like rain on your wedding day... Or insurgents shooting up & robbing your wedding day. 
I wanted to love this,… https://t.co/lu3mluNVih\nLong: None, Lati: None\nRT @ClementJ64: Lightning looks so pissed that she has to model for Louis Vuitton. https://t.co/s39s8napaQ\nLong: None, Lati: None\nRT @nicekicks: Lucien Clarke’s Louis Vuitton “A View” skate sneakers are around the corner. 💥\nLong: None, Lati: None\nRT @chanbaekhyuned: Believe or not but these 4 dorks are booked by the top fashion luxury & cosmetic brands\n\n🐶BAEKHYUN: Burberry, Privè All…\nLong: None, Lati: None\nRT @jisoograce: WHAAAT?! OMG JISOO LOUIS VUITTON?!!!\nLong: None, Lati: None\nI'm dead at my mom pronouncing Louis Vuitton as Louis Vintoni, samthandi😭🤭\nLong: None, Lati: None\nRT @thinglygrammar: “If 2015 was the year unisex became a trend in fashion, 2016 may be the year the question of gender and dress enters an…\nLong: None, Lati: None\nLouis Vuitton is killing my people.\nLong: None, Lati: None\nRT @chanbaekhyuned: Believe or not but these 4 dorks are booked by the top fashion luxury & cosmetic brands\n\n🐶BAEKHYUN: Burberry, Privè All…\nLong: None, Lati: None\nCan safely say I’ll never buy a Louis Vuitton bag wow\nLong: None, Lati: None\nRT @Fashion_Critic_: Millie Bobby Brown In Louis Vuitton – 2020 SAG Awards https://t.co/E92w6JKe5O https://t.co/1lhT4h0tau\nLong: None, Lati: None\nRT @badestoutfit: mini monogram cream louis vuitton puffer jacket https://t.co/ydWAyi14io\nLong: None, Lati: None\n#hashtag2 Louis Vuitton Shoes https://t.co/81b3lOUKH4 https://t.co/Zq2yh1V8K0\nLong: None, Lati: None\nRT @thanksic: 201017 TWICE x FBE \"REACT\" on IGTV\n\nLOUIS VUITTON\nsignature print & embroidery t-shirt\n\napprox. $ 694\n\n#MINA #TWICE #미나 #트와이스…\nLong: None, Lati: None\nwtf is people's obsession with putting logos on things particular Louis Vuitton and Chanel it's tacky as fuck\nLong: None, Lati: None\nRT @NEWSJISOO: Benjamin Cercio, Louis Vuitton Director of Press & Entertainment Relations liked JISOO latest post on Instagram. \n\n블랙핑크 지수 #…\nLong: None, Lati: None\nLOUIS VUITTON LOVERS!! Check out our store for NEW Louis Vuitton Bags🌹💟News Letter From Dct Vintage Japan - https://t.co/DEmEGLqzkT\nLong: None, Lati: None\nLouis Vuitton owner LVMH’s third-quarter sales boosted by demand for US$3,000-plus Christian Dior bags… https://t.co/uJENt5yEmr\nLong: None, Lati: None\nRT @chanbaekhyuned: Believe or not but these 4 dorks are booked by the top fashion luxury & cosmetic brands\n\n🐶BAEKHYUN: Burberry, Privè All…\nLong: None, Lati: None\nRT @kenjenstyle: Kendall Jenner x Met Gala after party outfit x Yellow see-through hoodie x Glittery bra x Silver strappy heels x Black min…\nLong: None, Lati: None\nPowerful pro-leather testimony from Louis Vuitton CEO https://t.co/70g7Pr2HUr https://t.co/rYZCYTq52t\nLong: None, Lati: None\n@pumpkineaterana YES with her louis vuitton wallet\nLong: None, Lati: None\nI miss those days where we get updates almost every second minus the cyworld boy jongdae\nLong: None, Lati: None\nRT @chanbaekhyuned: Believe or not but these 4 dorks are booked by the top fashion luxury & cosmetic brands\n\n🐶BAEKHYUN: Burberry, Privè All…\nLong: None, Lati: None\nRT @sweetcreamdutch: Putting Louis Vuitton logos on random things like the wall is so tacky\nLong: None, Lati: None\ni went to this thrift store and i was looking to buy this Louis Vuitton bag they had and I’m wondering how can yo…… https://t.co/YlG3d6DlQ9\nLong: None, Lati: None\nJust discovered Woodkid & dive into his works. Oh good God. 
No wonder Nicolas Ghesquiére chose to work with him for… https://t.co/6EnJ86YhlE\nLong: None, Lati: None\nRT @angelinajeffect: In less than 24hrs, this photo off Angelina Jolie's 2011 campaign becomes one of Louis Vuitton's most liked Instagram…\nLong: None, Lati: None\nRT @kaifrancesss: God pls release me from the shackles of poverty.\nLong: None, Lati: None\nIt's a Tank Top not a Sweater vest. I know this, my uncle's factory in Manchester, made millions of them, we used t… https://t.co/z3NHs4MB6B\nLong: None, Lati: None\nRT @fieryujin: can yall believe that it has been a year since ryujin served us her iconic louis vuitton fashion show look https://t.co/W42Y…\nLong: None, Lati: None\nThese are nice\nLong: None, Lati: None\nThese boots live rent free in my head..\nLong: None, Lati: None\nLouis Vuitton Vampire makeup tutorial is now uploaded over on my youtube xx https://t.co/8GsTapRr42 via @YouTube\nLong: None, Lati: None\nRT @chanbaekhyuned: Believe or not but these 4 dorks are booked by the top fashion luxury & cosmetic brands\n\n🐶BAEKHYUN: Burberry, Privè All…\nLong: None, Lati: None\nRT @aakhtar: It's a Tank Top not a Sweater vest. I know this, my uncle's factory in Manchester, made millions of them, we used to sell on m…\nLong: None, Lati: None\nRT @chanbaekhyuned: Believe or not but these 4 dorks are booked by the top fashion luxury & cosmetic brands\n\n🐶BAEKHYUN: Burberry, Privè All…\nLong: None, Lati: None\n"
]
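The streaming cell above notes in its comments that the listener never stops on its own and the `mydb.close()` line is never reached. A minimal, hedged sketch of one way to bound the stream — the class name `BoundedStreamListener` and the `max_tweets` parameter are hypothetical, and tweepy 3.x semantics are assumed, where returning `False` from `on_status` disconnects the stream:

```python
import tweepy

class BoundedStreamListener(tweepy.StreamListener):
    """Stops the stream after max_tweets statuses instead of running forever."""

    def __init__(self, max_tweets=100):
        super().__init__()
        self.count = 0
        self.max_tweets = max_tweets

    def on_status(self, status):
        print(status.text)
        self.count += 1
        return self.count < self.max_tweets  # False -> tweepy disconnects the stream
```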
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a85684e76b83fb7c9aa0fa78acbd38eb21f9440
| 2,123 |
ipynb
|
Jupyter Notebook
|
assignments/graph/Wall Gate Algorithm.ipynb
|
Anjani100/logicmojo
|
25763e401f5cc7e874d28866c06cf39ad42b3a8d
|
[
"MIT"
] | null | null | null |
assignments/graph/Wall Gate Algorithm.ipynb
|
Anjani100/logicmojo
|
25763e401f5cc7e874d28866c06cf39ad42b3a8d
|
[
"MIT"
] | null | null | null |
assignments/graph/Wall Gate Algorithm.ipynb
|
Anjani100/logicmojo
|
25763e401f5cc7e874d28866c06cf39ad42b3a8d
|
[
"MIT"
] | 2 |
2021-09-15T19:16:18.000Z
|
2022-03-31T11:14:26.000Z
| 24.686047 | 147 | 0.419689 |
[
[
[
"# Time: O(m * n) + O(m * n)\n# Space: O(m * n)\n\nfrom collections import deque\n\ndef wallsAndGates(matrix):\n X = [-1, 0, 0, 1]\n Y = [0, -1, 1, 0]\n m, n = len(matrix), len(matrix[0])\n queue = deque()\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n queue.append([i, j, 0])\n while queue:\n i, j, dist = queue.popleft()\n for k in range(4):\n x, y = i + X[k], j + Y[k]\n if 0 <= x < m and 0 <= y < n and matrix[x][y] == 2147483647:\n matrix[x][y] = dist + 1\n queue.append([x, y, dist + 1])\n\nif __name__=='__main__':\n tc = [[[2147483647,-1,0,2147483647],[2147483647,2147483647,2147483647,-1],[2147483647,-1,2147483647,-1],[0,-1,2147483647,2147483647]],\n [[0,-1],[2147483647,2147483647]]]\n for matrix in tc:\n wallsAndGates(matrix)\n for m in matrix:\n print(m)\n print()",
"[3, -1, 0, 1]\n[2, 2, 1, -1]\n[1, -1, 2, -1]\n[0, -1, 3, 4]\n\n[0, -1]\n[1, 2]\n\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a85853551f7f9037f6a8609be2ba3b5afc482d1
| 49,499 |
ipynb
|
Jupyter Notebook
|
omSpeech.ipynb
|
ajayjg/omipynb
|
7ac6cb486c47b824f6d0db2e836eed45715cf83b
|
[
"Apache-2.0"
] | null | null | null |
omSpeech.ipynb
|
ajayjg/omipynb
|
7ac6cb486c47b824f6d0db2e836eed45715cf83b
|
[
"Apache-2.0"
] | null | null | null |
omSpeech.ipynb
|
ajayjg/omipynb
|
7ac6cb486c47b824f6d0db2e836eed45715cf83b
|
[
"Apache-2.0"
] | null | null | null | 46.69717 | 221 | 0.513344 |
[
[
[
"<a href=\"https://colab.research.google.com/github/ajayjg/omipynb/blob/master/omSpeech.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"OM **IPYNB**\n\n\n---\n\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"#Imports",
"_____no_output_____"
]
],
[
[
"!pip install Keras==2.2.0\n!pip install pandas==0.22.0\n!pip install pandas-ml==0.5.0\n!pip install tensorflow>=1.14.0\n!pip install tensorflow-gpu>=1.14.0\n!pip install scikit-learn==0.21\n!pip install wget==3.2\n",
"_____no_output_____"
]
],
[
[
"#Download Dataset",
"_____no_output_____"
]
],
[
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport wget\nimport tarfile\n\nfrom shutil import rmtree\n\nDATASET_URL = 'http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz'\nARCHIVE = os.path.basename(DATASET_URL)\n\nwget.download(DATASET_URL)\n\nif os.path.exists('data'):\n rmtree('data')\n\nos.makedirs('data/train')\n\nwith tarfile.open(ARCHIVE, 'r:gz') as tar:\n tar.extractall(path='data/train')\n\nos.remove(ARCHIVE)\n",
"_____no_output_____"
]
],
[
[
"# Training",
"_____no_output_____"
]
],
[
[
"%%file train.py\n\nimport numpy as np\n# from sklearn.preprocessing import Imputer\n# from sklearn.metrics import confusion_matrix\nfrom pandas_ml import ConfusionMatrix\nfrom sklearn.metrics import jaccard_similarity_score\n#from keras.callbacks import Callback\n\nimport hashlib\nimport math\nimport os.path\nimport random\nimport re\nimport sys\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.callbacks import Callback\n\nimport argparse\nimport os\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras.callbacks import TensorBoard\n\ntf.compat.v1.disable_eager_execution()\n\ndef log_loss(y_true, y_pred, eps=1e-12):\n y_pred = np.clip(y_pred, eps, 1. - eps)\n ce = -(np.sum(y_true * np.log(y_pred), axis=1))\n mce = ce.mean()\n return mce\n\n\nclass ConfusionMatrixCallback(Callback):\n\n def __init__(self, validation_data, validation_steps, wanted_words, all_words,\n label2int):\n self.validation_data = validation_data\n self.validation_steps = validation_steps\n self.wanted_words = wanted_words\n self.all_words = all_words\n self.label2int = label2int\n self.int2label = {v: k for k, v in label2int.items()}\n with open('confusion_matrix.txt', 'w'):\n pass\n with open('wanted_confusion_matrix.txt', 'w'):\n pass\n\n def accuracies(self, confusion_val):\n accuracies = []\n for i in range(confusion_val.shape[0]):\n num = confusion_val[i, :].sum()\n if num:\n accuracies.append(confusion_val[i, i] / num)\n else:\n accuracies.append(0.0)\n accuracies = np.float32(accuracies)\n return accuracies\n\n def accuracy(self, confusion_val):\n num_correct = 0\n for i in range(confusion_val.shape[0]):\n num_correct += confusion_val[i, i]\n accuracy = float(num_correct) / confusion_val.sum()\n return accuracy\n\n def on_epoch_end(self, epoch, logs=None):\n y_true, y_pred = [], []\n for i in range(self.validation_steps):\n X_batch, y_true_batch = next(self.validation_data)\n y_pred_batch = self.model.predict(X_batch)\n\n y_true.extend(y_true_batch)\n y_pred.extend(y_pred_batch)\n\n y_true = np.float32(y_true)\n y_pred = np.float32(y_pred)\n val_loss = log_loss(y_true, y_pred)\n # map integer labels to strings\n y_true = list(y_true.argmax(axis=-1))\n y_pred = list(y_pred.argmax(axis=-1))\n y_true = [self.int2label[y] for y in y_true]\n y_pred = [self.int2label[y] for y in y_pred]\n confusion = ConfusionMatrix(y_true, y_pred)\n accs = self.accuracies(confusion._df_confusion.values)\n acc = self.accuracy(confusion._df_confusion.values)\n # same for wanted words\n y_true = [y if y in self.wanted_words else '_unknown_' for y in y_true]\n y_pred = [y if y in self.wanted_words else '_unknown_' for y in y_pred]\n wanted_words_confusion = ConfusionMatrix(y_true, y_pred)\n wanted_accs = self.accuracies(wanted_words_confusion._df_confusion.values)\n acc_line = ('\\n[%03d]: val_categorical_accuracy: %.2f, '\n 'val_mean_categorical_accuracy_wanted: %.2f') % (\n epoch, acc, wanted_accs.mean()) # noqa\n with open('confusion_matrix.txt', 'a') as f:\n f.write('%s\\n' % acc_line)\n f.write(confusion.to_dataframe().to_string())\n\n with open('wanted_confusion_matrix.txt', 'a') as f:\n f.write('%s\\n' % acc_line)\n f.write(wanted_words_confusion.to_dataframe().to_string())\n\n logs['val_loss'] = val_loss\n 
logs['val_categorical_accuracy'] = acc\n logs['val_mean_categorical_accuracy_all'] = accs.mean()\n logs['val_mean_categorical_accuracy_wanted'] = wanted_accs.mean()\n\n# vghbjnm\n\ndef data_gen(audio_processor,\n sess,\n batch_size=128,\n background_frequency=0.3,\n background_volume_range=0.15,\n foreground_frequency=0.3,\n foreground_volume_range=0.15,\n time_shift_frequency=0.3,\n time_shift_range=[-500, 0],\n mode='validation',\n flip_frequency=0.0,\n silence_volume_range=0.3):\n ep_count = 0\n offset = 0\n if mode != 'training':\n background_frequency = 0.0\n background_volume_range = 0.0\n foreground_frequency = 0.0\n foreground_volume_range = 0.0\n time_shift_frequency = 0.0\n time_shift_range = [0, 0]\n flip_frequency = 0.0\n # silence_volume_range: stays the same for validation\n while True:\n X, y = audio_processor.get_data(\n how_many=batch_size,\n offset=0 if mode == 'training' else offset,\n background_frequency=background_frequency,\n background_volume_range=background_volume_range,\n foreground_frequency=foreground_frequency,\n foreground_volume_range=foreground_volume_range,\n time_shift_frequency=time_shift_frequency,\n time_shift_range=time_shift_range,\n mode=mode,\n sess=sess,\n flip_frequency=flip_frequency,\n silence_volume_range=silence_volume_range)\n offset += batch_size\n if offset > audio_processor.set_size(mode) - batch_size:\n offset = 0\n print('\\n[Ep:%03d: %s-mode]' % (ep_count, mode))\n ep_count += 1\n yield X, y\n\n\ndef tf_roll(a, shift, a_len=16000):\n # https://stackoverflow.com/questions/42651714/vector-shift-roll-in-tensorflow\n def roll_left(a, shift, a_len):\n shift %= a_len\n rolled = tf.concat([a[a_len - shift:, :], a[:a_len - shift, :]], axis=0)\n return rolled\n\n def roll_right(a, shift, a_len):\n shift = -shift\n shift %= a_len\n rolled = tf.concat([a[shift:, :], a[:shift, :]], axis=0)\n return rolled\n\n # https://stackoverflow.com/questions/35833011/how-to-add-if-condition-in-a-tensorflow-graph\n return tf.cond(\n tf.greater_equal(shift, 0),\n true_fn=lambda: roll_left(a, shift, a_len),\n false_fn=lambda: roll_right(a, shift, a_len))\n\n# gvhbnm\n\nMAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M\nSILENCE_LABEL = '_silence_'\nSILENCE_INDEX = 0\nUNKNOWN_WORD_LABEL = '_unknown_'\nUNKNOWN_WORD_INDEX = 1\nBACKGROUND_NOISE_DIR_NAME = '_background_noise_'\nRANDOM_SEED = 59185\n\n\ndef prepare_words_list(wanted_words):\n \"\"\"Prepends common tokens to the custom word list.\"\"\"\n return [SILENCE_LABEL, UNKNOWN_WORD_LABEL] + wanted_words\n\n\ndef which_set(filename, validation_percentage, testing_percentage):\n \"\"\"Determines which data partition the file should belong to.\"\"\"\n dir_name = os.path.basename(os.path.dirname(filename))\n if dir_name == 'unknown_unknown':\n return 'training'\n\n base_name = os.path.basename(filename)\n hash_name = re.sub(r'_nohash_.*$', '', base_name)\n\n hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) % (MAX_NUM_WAVS_PER_CLASS + 1))\n * (100.0 / MAX_NUM_WAVS_PER_CLASS))\n if percentage_hash < validation_percentage:\n result = 'validation'\n elif percentage_hash < (testing_percentage + validation_percentage):\n result = 'testing'\n else:\n result = 'training'\n return result\n\n\ndef load_wav_file(filename):\n \"\"\"Loads an audio file and returns a float PCM-encoded array of samples.\"\"\"\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = 
tf.io.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n return sess.run(\n wav_decoder, feed_dict={\n wav_filename_placeholder: filename\n }).audio.flatten()\n\n\ndef save_wav_file(filename, wav_data, sample_rate):\n \"\"\"Saves audio sample data to a .wav audio file.\"\"\"\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n sample_rate_placeholder = tf.placeholder(tf.int32, [])\n wav_data_placeholder = tf.placeholder(tf.float32, [None, 1])\n wav_encoder = tf.audio.encode_wav(wav_data_placeholder,\n sample_rate_placeholder)\n wav_saver = tf.io.write_file(wav_filename_placeholder, wav_encoder)\n sess.run(\n wav_saver,\n feed_dict={\n wav_filename_placeholder: filename,\n sample_rate_placeholder: sample_rate,\n wav_data_placeholder: np.reshape(wav_data, (-1, 1))\n })\n\n\nclass AudioProcessor(object):\n \"\"\"Handles loading, partitioning, and preparing audio training data.\"\"\"\n\n def __init__(self,\n data_dirs,\n silence_percentage,\n unknown_percentage,\n wanted_words,\n validation_percentage,\n testing_percentage,\n model_settings,\n output_representation=False):\n self.data_dirs = data_dirs\n assert output_representation in {'raw', 'spec', 'mfcc', 'mfcc_and_raw'}\n self.output_representation = output_representation\n self.model_settings = model_settings\n for data_dir in self.data_dirs:\n self.maybe_download_and_extract_dataset(data_dir)\n self.prepare_data_index(silence_percentage, unknown_percentage,\n wanted_words, validation_percentage,\n testing_percentage)\n self.prepare_background_data()\n self.prepare_processing_graph(model_settings)\n\n def maybe_download_and_extract_dataset(self, data_dir):\n if not os.path.exists(data_dir):\n print('Please download the dataset!')\n sys.exit(0)\n\n def prepare_data_index(self, silence_percentage, unknown_percentage,\n wanted_words, validation_percentage,\n testing_percentage):\n \"\"\"Prepares a list of the samples organized by set and label.\"\"\"\n random.seed(RANDOM_SEED)\n wanted_words_index = {}\n for index, wanted_word in enumerate(wanted_words):\n wanted_words_index[wanted_word] = index + 2\n self.data_index = {'validation': [], 'testing': [], 'training': []}\n unknown_index = {'validation': [], 'testing': [], 'training': []}\n all_words = {}\n # Look through all the subfolders to find audio samples\n for data_dir in self.data_dirs:\n search_path = os.path.join(data_dir, '*', '*.wav')\n for wav_path in tf.io.gfile.glob(search_path):\n word = re.search('.*/([^/]+)/.*.wav', wav_path).group(1).lower()\n # Treat the '_background_noise_' folder as a special case,\n # since we expect it to contain long audio samples we mix in\n # to improve training.\n if word == BACKGROUND_NOISE_DIR_NAME:\n continue\n all_words[word] = True\n set_index = which_set(wav_path, validation_percentage,\n testing_percentage)\n # If it's a known class, store its detail, otherwise add it to the list\n # we'll use to train the unknown label.\n if word in wanted_words_index:\n self.data_index[set_index].append({'label': word, 'file': wav_path})\n else:\n unknown_index[set_index].append({'label': word, 'file': wav_path})\n if not all_words:\n raise Exception('No .wavs found at ' + search_path)\n for index, wanted_word in enumerate(wanted_words):\n if wanted_word not in all_words:\n raise Exception('Expected to find ' + wanted_word +\n ' in labels but only found ' +\n ', '.join(all_words.keys()))\n # We need an arbitrary file to load as the input for the 
silence samples.\n # It's multiplied by zero later, so the content doesn't matter.\n silence_wav_path = self.data_index['training'][0]['file']\n for set_index in ['validation', 'testing', 'training']:\n set_size = len(self.data_index[set_index])\n silence_size = int(math.ceil(set_size * silence_percentage / 100))\n for _ in range(silence_size):\n self.data_index[set_index].append({\n 'label': SILENCE_LABEL,\n 'file': silence_wav_path\n })\n # Pick some unknowns to add to each partition of the data set.\n random.shuffle(unknown_index[set_index])\n unknown_size = int(math.ceil(set_size * unknown_percentage / 100))\n self.data_index[set_index].extend(unknown_index[set_index][:unknown_size])\n # Make sure the ordering is random.\n for set_index in ['validation', 'testing', 'training']:\n # not really needed since the indices are chosen by random\n random.shuffle(self.data_index[set_index])\n # Prepare the rest of the result data structure.\n self.words_list = prepare_words_list(wanted_words)\n self.word_to_index = {}\n for word in all_words:\n if word in wanted_words_index:\n self.word_to_index[word] = wanted_words_index[word]\n else:\n self.word_to_index[word] = UNKNOWN_WORD_INDEX\n self.word_to_index[SILENCE_LABEL] = SILENCE_INDEX\n\n def prepare_background_data(self):\n \"\"\"Searches a folder for background noise audio and loads it into memory.\"\"\"\n self.background_data = []\n background_dir = os.path.join(self.data_dirs[0], BACKGROUND_NOISE_DIR_NAME)\n if not os.path.exists(background_dir):\n return self.background_data\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = tf.io.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n search_path = os.path.join(self.data_dirs[0], BACKGROUND_NOISE_DIR_NAME,\n '*.wav')\n for wav_path in tf.io.gfile.glob(search_path):\n wav_data = sess.run(\n wav_decoder, feed_dict={\n wav_filename_placeholder: wav_path\n }).audio.flatten()\n self.background_data.append(wav_data)\n if not self.background_data:\n raise Exception('No background wav files were found in ' + search_path)\n\n def prepare_processing_graph(self, model_settings):\n \"\"\"Builds a TensorFlow graph to apply the input distortions.\"\"\"\n desired_samples = model_settings['desired_samples']\n self.wav_filename_placeholder_ = tf.placeholder(\n tf.string, [], name='filename')\n wav_loader = tf.io.read_file(self.wav_filename_placeholder_)\n wav_decoder = tf.audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n # Allow the audio sample's volume to be adjusted.\n self.foreground_volume_placeholder_ = tf.placeholder(\n tf.float32, [], name='foreground_volme')\n scaled_foreground = tf.multiply(wav_decoder.audio,\n self.foreground_volume_placeholder_)\n # Shift the sample's start position, and pad any gaps with zeros.\n self.time_shift_placeholder_ = tf.placeholder(tf.int32, name='timeshift')\n shifted_foreground = tf_roll(scaled_foreground,\n self.time_shift_placeholder_)\n # Mix in background noise.\n self.background_data_placeholder_ = tf.placeholder(\n tf.float32, [desired_samples, 1], name='background_data')\n self.background_volume_placeholder_ = tf.placeholder(\n tf.float32, [], name='background_volume')\n background_mul = tf.multiply(self.background_data_placeholder_,\n self.background_volume_placeholder_)\n background_add = tf.add(background_mul, shifted_foreground)\n # removed clipping: tf.clip_by_value(background_add, -1.0, 1.0)\n 
self.background_clamp_ = background_add\n self.background_clamp_ = tf.reshape(self.background_clamp_,\n (1, model_settings['desired_samples']))\n # Run the spectrogram and MFCC ops to get a 2D 'fingerprint' of the audio.\n stfts = tf.signal.stft(\n self.background_clamp_,\n frame_length=model_settings['window_size_samples'],\n frame_step=model_settings['window_stride_samples'],\n fft_length=None)\n self.spectrogram_ = tf.abs(stfts)\n num_spectrogram_bins = self.spectrogram_.shape[-1]\n lower_edge_hertz, upper_edge_hertz = 80.0, 7600.0\n linear_to_mel_weight_matrix = \\\n tf.signal.linear_to_mel_weight_matrix(\n model_settings['dct_coefficient_count'],\n num_spectrogram_bins, model_settings['sample_rate'],\n lower_edge_hertz, upper_edge_hertz)\n mel_spectrograms = tf.tensordot(self.spectrogram_,\n linear_to_mel_weight_matrix, 1)\n mel_spectrograms.set_shape(self.spectrogram_.shape[:-1].concatenate(\n linear_to_mel_weight_matrix.shape[-1:]))\n log_mel_spectrograms = tf.log(mel_spectrograms + 1e-6)\n self.mfcc_ = tf.signal.mfccs_from_log_mel_spectrograms(\n log_mel_spectrograms)[:, :, :\n model_settings['num_log_mel_features']] # :13\n\n def set_size(self, mode):\n \"\"\"Calculates the number of samples in the dataset partition.\"\"\"\n return len(self.data_index[mode])\n\n def get_data(self,\n how_many,\n offset,\n background_frequency,\n background_volume_range,\n foreground_frequency,\n foreground_volume_range,\n time_shift_frequency,\n time_shift_range,\n mode,\n sess,\n flip_frequency=0.0,\n silence_volume_range=0.0):\n \"\"\"Gather samples from the data set, applying transformations as needed.\"\"\"\n # Pick one of the partitions to choose samples from.\n model_settings = self.model_settings\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = max(0, min(how_many, len(candidates) - offset))\n # Data and labels will be populated and returned.\n if self.output_representation == 'raw':\n data_dim = model_settings['desired_samples']\n elif self.output_representation == 'spec':\n data_dim = model_settings['spectrogram_length'] * model_settings[\n 'spectrogram_frequencies']\n elif self.output_representation == 'mfcc':\n data_dim = model_settings['spectrogram_length'] * \\\n model_settings['num_log_mel_features']\n elif self.output_representation == 'mfcc_and_raw':\n data_dim = model_settings['spectrogram_length'] * \\\n model_settings['num_log_mel_features']\n raw_data = np.zeros((sample_count, model_settings['desired_samples']))\n\n data = np.zeros((sample_count, data_dim))\n labels = np.zeros((sample_count, model_settings['label_count']))\n desired_samples = model_settings['desired_samples']\n use_background = self.background_data and (mode == 'training')\n pick_deterministically = (mode != 'training')\n # Use the processing graph we created earlier to repeatedly to generate the\n # final output sample data we'll use in training.\n for i in xrange(offset, offset + sample_count):\n # Pick which audio sample to use.\n if how_many == -1 or pick_deterministically:\n sample_index = i\n sample = candidates[sample_index]\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n\n # If we're time shifting, set up the offset for this sample.\n if np.random.uniform(0.0, 1.0) < time_shift_frequency:\n time_shift = np.random.randint(time_shift_range[0],\n time_shift_range[1] + 1)\n else:\n time_shift = 0\n input_dict = {\n self.wav_filename_placeholder_: sample['file'],\n self.time_shift_placeholder_: 
time_shift,\n }\n # Choose a section of background noise to mix in.\n if use_background:\n background_index = np.random.randint(len(self.background_data))\n background_samples = self.background_data[background_index]\n background_offset = np.random.randint(\n 0,\n len(background_samples) - model_settings['desired_samples'])\n background_clipped = background_samples[background_offset:(\n background_offset + desired_samples)]\n background_reshaped = background_clipped.reshape([desired_samples, 1])\n if np.random.uniform(0, 1) < background_frequency:\n background_volume = np.random.uniform(0, background_volume_range)\n else:\n background_volume = 0.0\n # silence class with all zeros is boring!\n if sample['label'] == SILENCE_LABEL and \\\n np.random.uniform(0, 1) < 0.9:\n background_volume = np.random.uniform(0, silence_volume_range)\n else:\n background_reshaped = np.zeros([desired_samples, 1])\n background_volume = 0.0\n input_dict[self.background_data_placeholder_] = background_reshaped\n input_dict[self.background_volume_placeholder_] = background_volume\n # If we want silence, mute out the main sample but leave the background.\n if sample['label'] == SILENCE_LABEL:\n input_dict[self.foreground_volume_placeholder_] = 0.0\n else:\n # Turn it up or down\n foreground_volume = 1.0\n if np.random.uniform(0, 1) < foreground_frequency:\n foreground_volume = 1.0 + np.random.uniform(-foreground_volume_range,\n foreground_volume_range)\n # flip sign\n if np.random.uniform(0, 1) < flip_frequency:\n foreground_volume *= -1.0\n input_dict[self.foreground_volume_placeholder_] = foreground_volume\n\n # Run the graph to produce the output audio.\n if self.output_representation == 'raw':\n data[i - offset, :] = sess.run(\n self.background_clamp_, feed_dict=input_dict).flatten()\n elif self.output_representation == 'spec':\n data[i - offset, :] = sess.run(\n self.spectrogram_, feed_dict=input_dict).flatten()\n elif self.output_representation == 'mfcc':\n data[i - offset, :] = sess.run(\n self.mfcc_, feed_dict=input_dict).flatten()\n elif self.output_representation == 'mfcc_and_raw':\n raw_val, mfcc_val = sess.run([self.background_clamp_, self.mfcc_],\n feed_dict=input_dict)\n data[i - offset, :] = mfcc_val.flatten()\n raw_data[i - offset, :] = raw_val.flatten()\n\n label_index = self.word_to_index[sample['label']]\n labels[i - offset, label_index] = 1\n\n if self.output_representation != 'mfcc_and_raw':\n return data, labels\n else:\n return [data, raw_data], labels\n\n def get_unprocessed_data(self, how_many, model_settings, mode):\n \"\"\"Gets sample data without transformations.\"\"\"\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = how_many\n desired_samples = model_settings['desired_samples']\n words_list = self.words_list\n data = np.zeros((sample_count, desired_samples))\n labels = []\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [], name='filename')\n wav_loader = tf.io.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n foreground_volume_placeholder = tf.placeholder(\n tf.float32, [], name='foreground_volume')\n scaled_foreground = tf.multiply(wav_decoder.audio,\n foreground_volume_placeholder)\n for i in range(sample_count):\n if how_many == -1:\n sample_index = i\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n input_dict = 
{wav_filename_placeholder: sample['file']}\n if sample['label'] == SILENCE_LABEL:\n input_dict[foreground_volume_placeholder] = 0\n else:\n input_dict[foreground_volume_placeholder] = 1\n data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()\n label_index = self.word_to_index[sample['label']]\n labels.append(words_list[label_index])\n return data, labels\n\n def summary(self):\n \"\"\"Prints a summary of classes and label distributions.\"\"\"\n set_counts = {}\n print('There are %d classes.' % (len(self.word_to_index)))\n print(\"1%% <-> %d samples in 'training'\" % int(\n self.set_size('training') / 100))\n for set_index in ['training', 'validation', 'testing']:\n counts = {k: 0 for k in sorted(self.word_to_index.keys())}\n num_total = self.set_size(set_index)\n for data_point in self.data_index[set_index]:\n counts[data_point['label']] += (1.0 / num_total) * 100.0\n set_counts[set_index] = counts\n\n print('%-13s%-6s%-6s%-6s' % ('', 'Train', 'Val', 'Test'))\n for label_name in sorted(\n self.word_to_index.keys(), key=self.word_to_index.get):\n line = '%02d %-12s: ' % (self.word_to_index[label_name], label_name)\n for set_index in ['training', 'validation', 'testing']:\n line += '%.1f%% ' % (set_counts[set_index][label_name])\n print(line)\n\n\n#cevw\n\ndef preprocess(x):\n x = (x + 0.8) / 7.0\n x = K.clip(x, -5, 5)\n return x\n\n\ndef preprocess_raw(x):\n return x\n\n\nPreprocess = Lambda(preprocess)\n\nPreprocessRaw = Lambda(preprocess_raw)\n\n\ndef relu6(x):\n return K.relu(x, max_value=6)\n\n\ndef conv_1d_time_stacked_model(input_size=16000, num_classes=11):\n \"\"\" Creates a 1D model for temporal data.\n\n Note: Use only\n with compute_mfcc = False (e.g. raw waveform data).\n Args:\n input_size: How big the input vector is.\n num_classes: How many classes are to be recognized.\n\n Returns:\n Compiled keras model\n \"\"\"\n input_layer = Input(shape=[input_size])\n x = input_layer\n x = Reshape([800, 20])(x)\n x = PreprocessRaw(x)\n\n def _reduce_conv(x, num_filters, k, strides=2, padding='valid'):\n x = Conv1D(\n num_filters,\n k,\n padding=padding,\n use_bias=False,\n kernel_regularizer=l2(0.00001))(\n x)\n x = BatchNormalization()(x)\n x = Activation(relu6)(x)\n x = MaxPool1D(pool_size=3, strides=strides, padding=padding)(x)\n return x\n\n def _context_conv(x, num_filters, k, dilation_rate=1, padding='valid'):\n x = Conv1D(\n num_filters,\n k,\n padding=padding,\n dilation_rate=dilation_rate,\n kernel_regularizer=l2(0.00001),\n use_bias=False)(\n x)\n x = BatchNormalization()(x)\n x = Activation(relu6)(x)\n return x\n\n x = _context_conv(x, 32, 1)\n x = _reduce_conv(x, 48, 3)\n x = _context_conv(x, 48, 3)\n x = _reduce_conv(x, 96, 3)\n x = _context_conv(x, 96, 3)\n x = _reduce_conv(x, 128, 3)\n x = _context_conv(x, 128, 3)\n x = _reduce_conv(x, 160, 3)\n x = _context_conv(x, 160, 3)\n x = _reduce_conv(x, 192, 3)\n x = _context_conv(x, 192, 3)\n x = _reduce_conv(x, 256, 3)\n x = _context_conv(x, 256, 3)\n\n x = Dropout(0.3)(x)\n x = Conv1D(num_classes, 5, activation='softmax')(x)\n x = Reshape([-1])(x)\n\n model = Model(input_layer, x, name='conv_1d_time_stacked')\n model.compile(\n optimizer=tf.keras.optimizers.Adam(lr=3e-4),\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_accuracy])\n return model\n\n\ndef speech_model(model_type, input_size, num_classes=11, *args, **kwargs):\n if model_type == 'conv_1d_time_stacked':\n return conv_1d_time_stacked_model(input_size, num_classes)\n else:\n raise ValueError('Invalid model: 
%s' % model_type)\n\n\ndef prepare_model_settings(label_count,\n sample_rate,\n clip_duration_ms,\n window_size_ms,\n window_stride_ms,\n dct_coefficient_count,\n num_log_mel_features,\n output_representation='raw'):\n \"\"\"Calculates common settings needed for all models.\"\"\"\n desired_samples = int(sample_rate * clip_duration_ms / 1000)\n window_size_samples = int(sample_rate * window_size_ms / 1000)\n window_stride_samples = int(sample_rate * window_stride_ms / 1000)\n length_minus_window = (desired_samples - window_size_samples)\n spectrogram_frequencies = 257\n if length_minus_window < 0:\n spectrogram_length = 0\n else:\n spectrogram_length = 1 + int(length_minus_window / window_stride_samples)\n\n if output_representation == 'mfcc':\n fingerprint_size = num_log_mel_features * spectrogram_length\n elif output_representation == 'raw':\n fingerprint_size = desired_samples\n elif output_representation == 'spec':\n fingerprint_size = spectrogram_frequencies * spectrogram_length\n elif output_representation == 'mfcc_and_raw':\n fingerprint_size = num_log_mel_features * spectrogram_length\n return {\n 'desired_samples': desired_samples,\n 'window_size_samples': window_size_samples,\n 'window_stride_samples': window_stride_samples,\n 'spectrogram_length': spectrogram_length,\n 'spectrogram_frequencies': spectrogram_frequencies,\n 'dct_coefficient_count': dct_coefficient_count,\n 'fingerprint_size': fingerprint_size,\n 'label_count': label_count,\n 'sample_rate': sample_rate,\n 'num_log_mel_features': num_log_mel_features\n }\n\nfrom collections import OrderedDict\n\ndef get_classes(wanted_only=False):\n if wanted_only:\n classes = 'stop down off right up go on yes left no'\n classes = classes.split(' ')\n assert len(classes) == 10\n else:\n classes = ('sheila nine stop bed four six down bird marvin cat off right '\n 'seven eight up three happy go zero on wow dog yes five one tree'\n ' house two left no') # noqa\n classes = classes.split(' ')\n assert len(classes) == 30\n return classes\n\n\ndef get_int2label(wanted_only=False, extend_reversed=False):\n classes = get_classes(\n wanted_only=wanted_only, extend_reversed=extend_reversed)\n classes = prepare_words_list(classes)\n int2label = {i: l for i, l in enumerate(classes)}\n int2label = OrderedDict(sorted(int2label.items(), key=lambda x: x[0]))\n return int2label\n\n\ndef get_label2int(wanted_only=False, extend_reversed=False):\n classes = get_classes(\n wanted_only=wanted_only, extend_reversed=extend_reversed)\n classes = prepare_words_list(classes)\n label2int = {l: i for i, l in enumerate(classes)}\n label2int = OrderedDict(sorted(label2int.items(), key=lambda x: x[1]))\n return label2int\n\n\n#train\nparser = argparse.ArgumentParser(description='set input arguments')\n\nparser.add_argument(\n '-sample_rate',\n action='store',\n dest='sample_rate',\n type=int,\n default=16000,\n help='Sample rate of audio')\nparser.add_argument(\n '-batch_size',\n action='store',\n dest='batch_size',\n type=int,\n default=32,\n help='Size of the training batch')\nparser.add_argument(\n '-output_representation',\n action='store',\n dest='output_representation',\n type=str,\n default='raw',\n help='raw, spec, mfcc or mfcc_and_raw')\nparser.add_argument(\n '-data_dirs',\n '--list',\n dest='data_dirs',\n nargs='+',\n required=True,\n help='<Required> The list of data directories. 
e.g., data/train')\n\nargs = parser.parse_args()\nparser.print_help()\nprint('input args: ', args)\n\nif __name__ == '__main__':\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n tf.keras.backend.set_session(sess)\n data_dirs = args.data_dirs\n output_representation = args.output_representation\n sample_rate = args.sample_rate\n batch_size = args.batch_size\n classes = get_classes(wanted_only=True)\n model_settings = prepare_model_settings(\n label_count=len(prepare_words_list(classes)),\n sample_rate=sample_rate,\n clip_duration_ms=1000,\n window_size_ms=30.0,\n window_stride_ms=10.0,\n dct_coefficient_count=80,\n num_log_mel_features=60,\n output_representation=output_representation)\n\n print(model_settings)\n\n ap = AudioProcessor(\n data_dirs=data_dirs,\n wanted_words=classes,\n silence_percentage=13.0,\n unknown_percentage=60.0,\n validation_percentage=10.0,\n testing_percentage=0.0,\n model_settings=model_settings,\n output_representation=output_representation)\n train_gen = data_gen(ap, sess, batch_size=batch_size, mode='training')\n val_gen = data_gen(ap, sess, batch_size=batch_size, mode='validation')\n\n model = speech_model(\n 'conv_1d_time_stacked',\n model_settings['fingerprint_size']\n if output_representation != 'raw' else model_settings['desired_samples'],\n # noqa\n num_classes=model_settings['label_count'],\n **model_settings)\n\n # embed()\n checkpoints_path = os.path.join('checkpoints', 'conv_1d_time_stacked_model')\n if not os.path.exists(checkpoints_path):\n os.makedirs(checkpoints_path)\n\n callbacks = [\n ConfusionMatrixCallback(\n val_gen,\n ap.set_size('validation') // batch_size,\n wanted_words=prepare_words_list(get_classes(wanted_only=True)),\n all_words=prepare_words_list(classes),\n label2int=ap.word_to_index),\n ReduceLROnPlateau(\n monitor='val_categorical_accuracy',\n mode='max',\n factor=0.5,\n patience=4,\n verbose=1,\n min_lr=1e-5),\n TensorBoard(log_dir='logs'),\n ModelCheckpoint(\n os.path.join(checkpoints_path,\n 'ep-{epoch:03d}-vl-{val_loss:.4f}.hdf5'),\n save_best_only=True,\n monitor='val_categorical_accuracy',\n mode='max')\n ]\n model.fit_generator(\n train_gen,\n steps_per_epoch=ap.set_size('training') // batch_size,\n epochs=100,\n verbose=1,\n callbacks=callbacks)\n\n eval_res = model.evaluate_generator(val_gen,\n ap.set_size('validation') // batch_size)\n print(eval_res)\n",
"_____no_output_____"
]
],
[
[
"# It's show time",
"_____no_output_____"
]
],
[
[
"%run train.py -sample_rate 16000 -batch_size 64 -output_representation raw -data_dirs data/train",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a859e2ef2e846bd6a0dcdf74518a271bef4bfd0
| 20,536 |
ipynb
|
Jupyter Notebook
|
AIOpSchool/KIKS/MachineLearningClassificatie/0300_StomataZonSchaduwClassificatie.ipynb
|
dwengovzw/PythonNotebooks
|
633bea4b07efbd920349d6f1dc346522ce118b70
|
[
"CC0-1.0"
] | null | null | null |
AIOpSchool/KIKS/MachineLearningClassificatie/0300_StomataZonSchaduwClassificatie.ipynb
|
dwengovzw/PythonNotebooks
|
633bea4b07efbd920349d6f1dc346522ce118b70
|
[
"CC0-1.0"
] | 3 |
2021-09-30T11:38:24.000Z
|
2021-10-04T09:25:39.000Z
|
AIOpSchool/KIKS/MachineLearningClassificatie/0300_StomataZonSchaduwClassificatie.ipynb
|
dwengovzw/PythonNotebooks
|
633bea4b07efbd920349d6f1dc346522ce118b70
|
[
"CC0-1.0"
] | null | null | null | 33.28363 | 572 | 0.550594 |
[
[
[
"<img src=\"images/kiksmeisedwengougent.png\" alt=\"Banner\" width=\"1100\"/>",
"_____no_output_____"
],
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h1>CLASSIFICATIE STOMATA OP BEZONDE EN BESCHADUWDE BLADEREN</h1>\n </font>\n</div>",
"_____no_output_____"
],
[
"<div class=\"alert alert-box alert-success\">\nIn deze notebook zal je bezonde en beschaduwde bladeren van elkaar scheiden. De twee klassen zijn bij benadering lineair scheidbaar. \n</div>",
"_____no_output_____"
],
[
"Krappa of crabwood is een snel groeiende boomsoort die veelvuldig voorkomt in het Amazonegebied. Volwassen exemplaren kunnen een diameter hebben van meer dan een meter en kunnen meer dan 40 meter hoog zijn. Het hout van hoge kwaliteit wordt gebruikt voor het maken van meubelen, vloeren, masten... Uit de schors wordt een koorstwerend middel gehaald. Uit de zaden produceert men een olie voor medicinale toepassingen, waaronder de behandeling van huidziekten en tetanos, en als afweermiddel voor insecten. ",
"_____no_output_____"
],
[
"<table><tr>\n<td> <img src=\"images/andirobaamazonica.jpg\" alt=\"Drawing\" width=\"200\"/></td>\n<td> <img src=\"images/crabwoodtree.jpg\" alt=\"Drawing\" width=\"236\"/> </td>\n</tr></table>",
"_____no_output_____"
],
[
"<center>\nFoto's: Mauroguanandi [Public domain] [2] en P. S. Sena [CC BY-SA 4.0] [3].\n</center>",
"_____no_output_____"
],
[
"Omdat sommige klimaatmodellen een stijging van de temperatuur en een vermindering in regenval voorspellen in de komende decennia, is het belangrijk om te weten hoe deze bomen zich aanpassen aan veranderende omstandigheden. <br>\nWetenschappers Camargo en Marenco deden onderzoek in het Amazonewoud [1].<br>\nNaast de invloed van seizoensgebonden regenval, bekeken ze ook stomatale kenmerken van bladeren onder bezonde en onder beschaduwde condities.<br> Hiervoor werden een aantal planten, opgekweekt in de schaduw, verplaatst naar vol zonlicht gedurende 60 dagen. Een andere groep planten werd in de schaduw gehouden. <br>De kenmerken van de stomata werden opgemeten op afdrukken van de bladeren gemaakt met transparante nagellak. ",
"_____no_output_____"
],
[
"### Nodige modules importeren",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom matplotlib import animation\nfrom IPython.display import HTML",
"_____no_output_____"
]
],
[
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h2>1. Inlezen van de data</h2> \n </font>\n</div>",
"_____no_output_____"
],
[
"Lees met de module `pandas` de dataset in.",
"_____no_output_____"
]
],
[
[
"stomata = pd.read_csv(\".data/schaduwzon.dat\", header=\"infer\") # in te lezen tabel heeft een hoofding",
"_____no_output_____"
]
],
[
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h2>2. Tonen van de ingelezen data</h2> \n </font>\n</div>",
"_____no_output_____"
],
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h3>2.1 Tabel met de data</h3> \n </font>\n</div>",
"_____no_output_____"
],
[
"Kijk de gegevens in. ",
"_____no_output_____"
]
],
[
[
"stomata",
"_____no_output_____"
]
],
[
[
"Welke gegevens zijn kenmerken? <br> Welk gegeven is het label? <br> \nDeze gegevens kunnen worden gevisualiseerd met een puntenwolk. Welke matrices heb je daarvoor nodig? ",
"_____no_output_____"
],
[
"Antwoord:\nDe plantensoort is overal dezelfde: Carapa. <br>\nDe kenmerken zijn de stomatale dichtheid en de stomatale grootte. <br>\nHet aantal monsters is 50.<br>\nHet label is het milieu waarin het monster werd geplukt: zon of schaduw.<br>\nOm de puntenwolk weer te geven, heb je twee matrices nodig met dimensie 50x1. ",
"_____no_output_____"
],
[
"De onderzoekers zetten de stomatale dichtheid uit tegenover de stomatale lengte.<br> Ga op dezelfde manier te werk.",
"_____no_output_____"
],
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h3>2.2 De data weergeven in puntenwolk</h3> \n </font>\n</div>",
"_____no_output_____"
]
],
[
[
"x1 = stomata[\"stomatale lengte\"] # kenmerk: lengte\nx2 = stomata[\"stomatale dichtheid\"] # kenmerk: dichtheid",
"_____no_output_____"
],
[
"x1 = np.array(x1) # kenmerk: lengte\nx2 = np.array(x2) # kenmerk: dichtheid",
"_____no_output_____"
],
[
"# dichtheid t.o.v. lengte\nplt.figure()\n\nplt.scatter(x1[:25], x2[:25], color=\"lightgreen\", marker=\"o\", label=\"zon\") # zon zijn eerste 25\nplt.scatter(x1[25:], x2[25:], color=\"darkgreen\", marker=\"o\", label=\"schaduw\") # schaduw zijn de volgende 25\n \nplt.title(\"Carapa\")\nplt.xlabel(\"stomatale lengte (micron)\")\nplt.ylabel(\"stomatale densiteit (per mm²)\")\nplt.legend(loc=\"lower left\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h2>3. Standaardiseren</h2> \n </font>\n</div>",
"_____no_output_____"
],
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h3>3.1 Lineair scheidbaar?</h3> \n </font>\n</div>",
"_____no_output_____"
],
[
"Er zijn twee groepen te onderscheiden. Ze zijn op enkele punten na lineair scheidbaar.",
"_____no_output_____"
],
[
"De grootte-orde van deze gegevens is sterk verschillend. De gegevens moeten gestandaardiseerd worden. ",
"_____no_output_____"
],
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h3>3.2 Standaardiseren</h3> \n </font>\n</div>",
"_____no_output_____"
],
[
"<div class=\"alert alert-block alert-warning\">\nMeer uitleg over het belang van standaardiseren vind je in de notebook 'Standaardiseren'.\n</div>",
"_____no_output_____"
]
],
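The remark above about differing orders of magnitude is what motivates the z-scoring performed in the next code cell. As a minimal illustrative sketch — the array values here are made up, not taken from the dataset — each feature is centred on its mean and scaled by its standard deviation:

```python
import numpy as np

x = np.array([21.4, 18.9, 25.0, 23.2])          # hypothetical raw feature values
x_standardized = (x - np.mean(x)) / np.std(x)   # result has mean 0 and standard deviation 1
print(np.mean(x_standardized), np.std(x_standardized))
```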
[
[
"x1_gem = np.mean(x1)\nx1_std = np.std(x1)\nx2_gem = np.mean(x2)\nx2_std = np.std(x2)\nx1 = (x1 - x1_gem) / x1_std\nx2 = (x2 - x2_gem) / x2_std",
"_____no_output_____"
],
[
"# dichtheid t.o.v. lengte\nplt.figure()\n\nplt.scatter(x1[:25], x2[:25], color=\"lightgreen\", marker=\"o\", label=\"zon\") # zon zijn eerste 25\nplt.scatter(x1[25:], x2[25:], color=\"darkgreen\", marker=\"o\", label=\"schaduw\") # schaduw zijn de volgende 25\n \nplt.title(\"Carapa\")\nplt.xlabel(\"gestandaardiseerde stomatale lengte (micron)\")\nplt.ylabel(\"gestandaardiseerde stomatale densiteit (per mm²)\")\nplt.legend(loc=\"lower left\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h2>4. Classificatie met Perceptron</h2> \n </font>\n</div>",
"_____no_output_____"
],
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h3>4.1 Geannoteerde data</h3> \n </font>\n</div>",
"_____no_output_____"
],
[
"Het ML-systeem zal machinaal leren uit de 50 gelabelde voorbeelden.<br> \nLees de labels in.",
"_____no_output_____"
]
],
[
[
"y = stomata[\"milieu\"] # labels: tweede kolom van de oorspronkelijke tabel\ny = np.array(y)\nprint(y)",
"_____no_output_____"
],
[
"y = np.where(y == \"zon\", 1, 0) # labels numeriek maken, zon:1, schaduw:0\nprint(y)",
"_____no_output_____"
],
[
"X = np.stack((x1, x2), axis = 1) # omzetten naar gewenste formaat",
"_____no_output_____"
]
],
[
[
"<div>\n <font color=#690027 markdown=\"1\"> \n <h3>4.2 Perceptron</h3> \n </font>\n</div>",
"_____no_output_____"
],
[
"<div class=\"alert alert-box alert-info\">\nAls twee klassen lineair scheidbaar zijn, kan men een rechte vinden die beide klassen scheidt. Men kan de vergelijking van de scheidingslijn opschrijven in de vorm $ax+by+c=0$. Voor elk punt $(x_{1}, y_{1})$ in de ene klasse is dan $ax_{1}+by_{1}+c \\geq 0$ en voor elk punt $(x_{2}, y_{2})$ in de andere klasse is dan $ax_{2} +by_{2}+c < 0$. <br> \nZolang dit niet voldaan is, moeten de coëfficiënten worden aangepast.<br>\nDe trainingset met bijhorende labels wordt enkele keren doorlopen. Voor elk punt worden de coëfficiënten aangepast indien nodig.\n</div>",
"_____no_output_____"
],
[
"Er wordt een willekeurige rechte gekozen die de twee soorten bladeren zou moeten scheiden. Dit gebeurt door de coëfficiënten in de vergelijking van de rechte willekeurig te kiezen. Beide kanten van de scheidingslijn bepalen een andere klasse. <br>Met systeem wordt getraind met de trainingset en de gegeven labels. Voor elk punt van de trainingset wordt nagegaan of het punt aan de juiste kant van de scheidingslijn ligt. Bij een punt die niet aan de juiste kant van de scheidingslijn ligt, worden de coëfficiënten in de vergelijking van de rechte aangepast. <br>\nDe volledige trainingset wordt een aantal keer doorlopen. Het systeem leert gedurende deze 'pogingen' of *epochs*.",
"_____no_output_____"
]
],
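The update rule described in the cells above is what the `Perceptron` class in the following code cell implements. A stripped-down sketch of just that rule — a standalone function with hypothetical names, not part of the notebook itself:

```python
import numpy as np

def perceptron_update(w, x, label, predicted, eta=0.0001):
    """One update step: w = [bias, w1, w2]; x is a single standardized sample."""
    change = eta * (label - predicted)   # zero if the point was already classified correctly
    w[1:] += change * x                  # adjust the feature weights
    w[0] += change                       # adjust the bias term
    return w
```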
[
[
"def grafiek(coeff_x1, coeff_x2, cte):\n \"\"\"Plot scheidingsrechte ('decision boundary') en geeft vergelijking ervan.\"\"\"\n # stomatale densiteit t.o.v. lengte van stomata\n plt.figure()\n \n plt.scatter(x1[:25], x2[:25], color=\"lightgreen\", marker=\"o\", label=\"zon\") # zon zijn eerste 25 (label 1)\n plt.scatter(x1[25:], x2[25:], color=\"darkgreen\", marker=\"o\", label=\"schaduw\") # schaduw zijn de volgende 25 (label 0)\n x = np.linspace(-1.5, 1.5, 10)\n y_r = -coeff_x1/coeff_x2 * x - cte/coeff_x2\n print(\"De grens is een rechte met vgl.\", coeff_x1, \"* x1 +\", coeff_x2, \"* x2 +\", cte, \"= 0\")\n plt.plot(x, y_r, color=\"black\")\n \n plt.title(\"Classificatie Carapa\")\n plt.xlabel(\"gestandaardiseerde stomatale lengte (micron)\")\n plt.ylabel(\"gestandaardiseerde stomatale densiteit (per mm²)\")\n plt.legend(loc=\"lower left\")\n \n plt.show()\n\nclass Perceptron(object):\n \"\"\"Perceptron classifier.\"\"\" \n \n def __init__(self, eta=0.01, n_iter=50, random_state=1):\n \"\"\"self heeft drie parameters: leersnelheid, aantal pogingen, willekeurigheid.\"\"\"\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n \n def fit(self, X, y):\n \"\"\"Fit training data.\"\"\"\n rgen = np.random.RandomState(self.random_state)\n # kolommatrix van de gewichten ('weights')\n # willekeurig gegenereerd uit normale verdeling met gemiddelde 0 en standaardafwijking 0.01\n # aantal gewichten is aantal kenmerken in X plus 1 (+1 voor bias)\n self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1]+1) # gewichtenmatrix die 3 gewichten bevat \n print(\"Initiële willekeurige gewichten:\", self.w_)\n self.errors_ = [] # foutenlijst\n \n # plot grafiek met initiële scheidingsrechte\n print(\"Initiële willekeurige rechte:\")\n grafiek(self.w_[1], self.w_[2], self.w_[0])\n gewichtenlijst = np.array([self.w_])\n \n # gewichten punt per punt aanpassen, gebaseerd op feedback van de verschillende pogingen \n for _ in range(self.n_iter):\n print(\"epoch =\", _)\n errors = 0\n teller = 0\n for x, label in zip(X, y): # x is datapunt, y overeenkomstig label\n print(\"teller =\", teller) # tel punten, het zijn er acht\n print(\"punt:\", x, \"\\tlabel:\", label)\n gegiste_klasse = self.predict(x)\n print(\"gegiste klasse =\", gegiste_klasse)\n # aanpassing nagaan voor dit punt\n update = self.eta * (label - gegiste_klasse) # als update = 0, juiste klasse, geen aanpassing nodig\n print(\"update =\", update)\n # grafiek en gewichten eventueel aanpassen na dit punt\n if update !=0:\n self.w_[1:] += update *x\n self.w_[0] += update\n errors += update\n print(\"gewichten =\", self.w_) # bepalen voorlopige 'decision boundary'\n gewichtenlijst = np.append(gewichtenlijst, [self.w_], axis =0)\n teller += 1\n self.errors_.append(errors) # na alle punten, totale fout toevoegen aan foutenlijst\n print(\"foutenlijst =\", self.errors_) \n return self, gewichtenlijst # geeft lijst gewichtenmatrices terug\n \n def net_input(self, x): # punt invullen in de voorlopige scheidingsrechte\n \"\"\"Berekenen van z = lineaire combinatie van de inputs inclusief bias en de weights voor elke gegeven punt.\"\"\"\n return np.dot(x, self.w_[1:]) + self.w_[0]\n \n def predict(self, x):\n \"\"\"Gist klasse.\"\"\"\n print(\"punt ingevuld in vergelijking rechte:\", self.net_input(x))\n klasse = np.where(self.net_input(x) >=0, 1, 0)\n return klasse\n ",
"_____no_output_____"
],
[
"# perceptron, leersnelheid 0.0001 en 20 pogingen\nppn = Perceptron(eta=0.0001, n_iter=20)\ngewichtenlijst = ppn.fit(X,y)[1]\nprint(\"Gewichtenlijst =\", gewichtenlijst)",
"_____no_output_____"
],
[
"# animatie\n\nxcoord = np.linspace(-1.5, 1.5, 10)\n\nycoord = []\nfor w in gewichtenlijst:\n y_r = -w[1]/w[2] * xcoord - w[0]/w[2]\n ycoord.append(y_r)\nycoord = np.array(ycoord) # type casting\n\nfig, ax = plt.subplots()\nline, = ax.plot(xcoord, ycoord[0])\n\nplt.scatter(x1[:25], x2[:25], color=\"lightgreen\", marker=\"o\", label=\"zon\") # zon zijn eerste 25 (label 1)\nplt.scatter(x1[25:], x2[25:], color=\"darkgreen\", marker=\"o\", label=\"schaduw\") # schaduw zijn de volgende 25 (label 0)\n\nax.axis([-2,2,-2,2])\n\ndef animate(i):\n line.set_ydata(ycoord[i]) # update de vergelijking van de rechte \n return line,\n\nplt.close() # om voorlopig plot-venster te sluiten, enkel animatiescherm nodig\n\nanim = animation.FuncAnimation(fig, animate, interval=1000, repeat=False, frames=len(ycoord))\n\nHTML(anim.to_jshtml())",
"_____no_output_____"
]
],
[
[
"Mooi resultaat! Maar nog niet optimaal. \n### Opdracht 4.2\nWellicht bieden meer iteraties nog een beter resultaat. Probeer eens uit.",
"_____no_output_____"
],
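For Exercise 4.2, a minimal sketch of what "more iterations" means here — simply re-running the `Perceptron` class defined earlier with a larger `n_iter` (100 is an arbitrary example value):

```python
# Same learning rate as before, but more epochs; 100 is only an example choice.
ppn = Perceptron(eta=0.0001, n_iter=100)
_, gewichtenlijst = ppn.fit(X, y)   # fit returns (self, list of weight vectors)
```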
[
"<div class=\"alert alert-block alert-info\">\nOmdat de klassen niet lineair scheidbaar zijn, zal het Perceptron er natuurlijk niet in slagen de fout op nul te krijgen. Door de leersnelheid en het aantal epochs zo goed mogelijke te kiezen, kan je een zo goed mogelijke scheiding proberen bekomen.<br>\nBij niet-lineair scheidbare klassen zal men daarom in machinaal leren geen Perceptron gebruiken, maar de klassen optimaal proberen scheiden op een andere manier: met gradient descent voor de aanpassingen en binary cross entropy om de fout te bepalen.\n</div>",
"_____no_output_____"
],
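The alternative named above is essentially what the `LogisticRegression` import at the top of this notebook provides: it minimizes the same binary cross entropy (log loss), albeit with its own solver rather than plain gradient descent. A minimal sketch, assuming the standardized feature matrix `X` and the 0/1 labels `y` built earlier in the notebook:

```python
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()          # fits by minimizing (regularized) binary cross entropy
clf.fit(X, y)                       # X: 50x2 standardized features, y: 0/1 labels
print(clf.coef_, clf.intercept_)    # coefficients of the fitted decision boundary
```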
[
"<img src=\"images/cclic.png\" alt=\"Banner\" align=\"left\" width=\"100\"/><br><br>\nNotebook KIKS, zie <a href=\"http://www.aiopschool.be\">AI op School</a>, van F. wyffels & N. Gesquière is in licentie gegeven volgens een <a href=\"http://creativecommons.org/licenses/by-nc-sa/4.0/\">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>.",
"_____no_output_____"
],
[
"<div>\n <h2>Met steun van</h2> \n</div>",
"_____no_output_____"
],
[
"<img src=\"images/kikssteun.png\" alt=\"Banner\" width=\"1100\"/>",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a85a21ae70e455ab64542ed722a6207b4bb2cd7
| 80,127 |
ipynb
|
Jupyter Notebook
|
CNNs_Part1.ipynb
|
mok232/CIFAR-10-Image-Classification
|
cc5366cb5fda5cee2673ae2c3e08ec4733ae471c
|
[
"MIT"
] | 4 |
2019-11-24T18:12:25.000Z
|
2022-01-10T18:47:49.000Z
|
CNNs_Part1.ipynb
|
mok232/CIFAR-10-Image-Classification
|
cc5366cb5fda5cee2673ae2c3e08ec4733ae471c
|
[
"MIT"
] | null | null | null |
CNNs_Part1.ipynb
|
mok232/CIFAR-10-Image-Classification
|
cc5366cb5fda5cee2673ae2c3e08ec4733ae471c
|
[
"MIT"
] | 3 |
2020-02-27T13:07:27.000Z
|
2021-10-01T14:33:34.000Z
| 118.531065 | 22,632 | 0.857401 |
[
[
[
"# Loading and Checking Data",
"_____no_output_____"
],
[
"## Importing Libraries",
"_____no_output_____"
]
],
[
[
"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"use_cuda = torch.cuda.is_available()",
"_____no_output_____"
]
],
[
[
"## Loading Data",
"_____no_output_____"
]
],
[
[
"batch_size = 4\n\n# These are the mean and standard deviation values for all pictures in the training set.\nmean = (0.4914 , 0.48216, 0.44653)\nstd = (0.24703, 0.24349, 0.26159)\n\n# Class to denormalize images to display later.\nclass DeNormalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n for t, m, s in zip(tensor, self.mean, self.std):\n t.mul_(s).add_(m)\n return tensor\n\n# Creating instance of Functor\ndenorm = DeNormalize(mean, std)\n\n# Load data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean, std)])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True, num_workers=4)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n\n# Do NOT shuffle the test set or else the order will be messed up\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False, num_workers=4)\n\n# Classes in order\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')",
"Files already downloaded and verified\nFiles already downloaded and verified\n"
]
],
[
[
"## Sample Images and Labels",
"_____no_output_____"
]
],
[
[
"# functions to show an image\ndef imshow(img):\n img = denorm(img) # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n\n\n# get some random training images\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\n# show images\nimshow(torchvision.utils.make_grid(images))\n# print labels\nprint(' '.join('%5s' % classes[labels[j]] for j in range(4)))",
" dog truck dog bird\n"
]
],
[
[
"# Defining Model",
"_____no_output_____"
],
[
"## Fully-Connected DNN",
"_____no_output_____"
]
],
[
[
"class Net_DNN(nn.Module):\n def __init__(self, architecture):\n super().__init__()\n self.layers = nn.ModuleList([\n nn.Linear(architecture[layer], architecture[layer + 1]) \n for layer in range(len(architecture) - 1)])\n \n def forward(self, data):\n # Flatten the Tensor (i.e., dimensions 3 x 32 x 32) to a single column\n data = data.view(data.size(0), -1)\n for layer in self.layers:\n layer_data = layer(data)\n data = F.relu(layer_data)\n return F.log_softmax(layer_data, dim=-1)",
"_____no_output_____"
]
],
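[
[
"# A quick sanity-check sketch, assuming the cells above have run (torch, Variable and Net_DNN are defined).\n# It pushes one random mini-batch through the fully-connected network to confirm the output shape:\n# (batch_size, 10) log-probabilities, one per CIFAR-10 class.\nsanity_dnn = Net_DNN([32*32*3, 100, 100, 10])\ndummy_batch = torch.randn(4, 3, 32, 32) # same shape as one CIFAR-10 mini-batch\nout = sanity_dnn(Variable(dummy_batch))\nprint(out.size()) # expected: torch.Size([4, 10])",
"_____no_output_____"
]
],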
[
[
"## Fully-CNN",
"_____no_output_____"
]
],
[
[
"class Net_CNN(nn.Module):\n # Padding is set to 2 and stride to 2\n # Padding ensures all edge pixels are exposed to the filter\n # Stride = 2 is common practice\n def __init__(self, layers, c, stride=2):\n super().__init__()\n self.layers = nn.ModuleList([\n nn.Conv2d(layers[i], layers[i + 1], kernel_size=3, padding=2, stride=stride)\n for i in range(len(layers) - 1)])\n self.pool = nn.AdaptiveMaxPool2d(1) # Simply takes the maximum value from the Tensor\n self.out = nn.Linear(layers[-1], c)\n \n def forward(self, data):\n for layer in self.layers: \n data = F.relu(layer(data))\n data = self.pool(data)\n data = data.view(data.size(0), -1)\n return F.log_softmax(self.out(data), dim=-1)",
"_____no_output_____"
]
],
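[
[
"# A quick sketch, assuming torch, Variable and Net_CNN from the cells above.\n# With kernel_size=3, padding=2, stride=2 each conv layer roughly halves the spatial size\n# (32 -> 17 -> 10 -> 6 -> 4 here) before AdaptiveMaxPool2d(1) collapses it to 1x1,\n# so the final Linear layer sees one value per channel.\nsanity_cnn = Net_CNN([3, 20, 40, 80, 160], 10)\ndummy_batch = torch.randn(4, 3, 32, 32)\nprint(sanity_cnn(Variable(dummy_batch)).size()) # expected: torch.Size([4, 10])",
"_____no_output_____"
]
],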
[
[
"## Chained CNN and NN",
"_____no_output_____"
]
],
[
[
"class Net_CNN_NN(nn.Module):\n # Padding is set to 2 and stride to 2\n # Padding ensures all edge pixels are exposed to the filter\n # Stride = 2 is common practice\n def __init__(self, layers, architecture, stride=2):\n super().__init__()\n # Fully Convolutional Layers\n self.layers = nn.ModuleList([\n nn.Conv2d(layers[i], layers[i + 1], kernel_size=3, padding=2,stride=stride)\n for i in range(len(layers) - 1)])\n # Fully Connected Neural Network to map to output\n self.layers_NN = nn.ModuleList([\n nn.Linear(architecture[layer], architecture[layer + 1]) \n for layer in range(len(architecture) - 1)])\n \n self.pool = nn.AdaptiveMaxPool2d(1) # Simply takes the maximum value from the Tensor\n \n def forward(self, data):\n for layer in self.layers: \n data = F.relu(layer(data))\n data = self.pool(data)\n data = data.view(data.size(0), -1)\n for layer in self.layers_NN:\n layer_data = layer(data)\n data = F.relu(layer_data)\n \n return F.log_softmax(layer_data, dim=-1)",
"_____no_output_____"
]
],
[
[
"## Defining the NN, Loss Function and Optimizer",
"_____no_output_____"
]
],
[
[
"# ---------------------------------------------\n# Uncomment the architecture you want to use\n# ---------------------------------------------\n\n# # DNN\n# architecture = [32*32*3, 100, 100, 100, 100, 10]\n# net = Net_DNN(architecture)\n\n# # CNN\n# architecture = [3, 20, 40, 80, 160]\n# num_outputs = 10\n# net = Net_CNN(architecture, num_outputs)\n\n# # CNN with NN\n# architecture = [3, 20, 40, 80]\n# architecture_NN = [80, 40, 20, 10]\n# num_outputs = 10\n# net = Net_CNN_NN(architecture, architecture_NN)\n\nif use_cuda:\n net = net.cuda() # Training on the GPU\n\ncriterion = nn.CrossEntropyLoss()",
"_____no_output_____"
]
],
[
[
"## Loading Model",
"_____no_output_____"
]
],
[
[
"# ---------------------------------------------\n# Uncomment the architecture you want to use\n# ---------------------------------------------\n\n# # DNN\n# architecture = [32*32*3, 100, 100, 10]\n# net = Net_DNN(architecture)\n\n# # CNN\n# architecture = [3, 20, 40, 80, 160]\n# num_outputs = 10\n# net = Net_CNN(architecture, num_outputs)\n# criterion = nn.CrossEntropyLoss()\n\nif use_cuda:\n net = net.cuda() # Training on the GPU\n\n# ---------------------------------------------\n# Uetermine the path for the saved weights\n# ---------------------------------------------\nPATH = './checkpoints_CNN_v2/5'\n# Load weights\nnet.load_state_dict(torch.load(PATH))",
"_____no_output_____"
]
],
[
[
"## Recording Loss",
"_____no_output_____"
]
],
[
[
"# Initialize a list of loss_results\nloss_results = []",
"_____no_output_____"
]
],
[
[
"# Training Manual",
"_____no_output_____"
]
],
[
[
"# Set the Learning rate and epoch start and end points\nstart_epoch = 11\nend_epoch = 15\nlr = 0.0001\n# Define the optimizer\noptimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)\n\nfor epoch in range(start_epoch, end_epoch+1): # loop over the dataset multiple times\n print(\"Epoch:\", epoch)\n running_loss = 0.0\n for i, (inputs, labels) in enumerate(trainloader, 0):\n # get the inputs\n if use_cuda:\n inputs, labels = inputs.cuda(), labels.cuda()\n\n # wrap them in Variable\n inputs, labels = Variable(inputs), Variable(labels) # Inputs and Target values to GPU\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.data[0]\n if i % 2000 == 1999: # print every 2000 mini-batches\n print(running_loss / 2000)\n loss_results.append(running_loss / 2000)\n running_loss = 0.0\n PATH = './checkpoints_hybrid/' + str(epoch)\n torch.save(net.state_dict(), PATH)\n ",
"Epoch: 11\n0.7631593999336473\n0.772029997533653\n0.7622815664624796\n0.7895928663983941\n0.7818622282706201\n0.7965481701032259\nEpoch: 12\n0.7587507799463347\n0.7542067431546747\n0.7607613385347649\n0.7611085303728469\n0.7500329479556531\n0.768398270084057\nEpoch: 13\n0.7284724237506743\n0.7313052681912668\n0.7489473702453543\n0.7530480517586693\n0.7388339222935029\n0.776530001245439\nEpoch: 14\n0.7227064029038883\n0.7381190525260753\n0.7362304750531912\n0.7219595499557908\n0.7571793817970902\n0.7341004028483294\nEpoch: 15\n0.7172498370609246\n0.7022846946662757\n0.7447555431951769\n0.7256098798593739\n0.719516308715567\n0.731529594759224\n"
]
],
[
[
"## Sample of the Results",
"_____no_output_____"
]
],
[
[
"# load a min-batch of the images\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\n\n# print images\nimshow(torchvision.utils.make_grid(images))\nprint('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))",
"GroundTruth: cat ship ship plane\n"
]
],
[
[
"## Sample of Predictions",
"_____no_output_____"
]
],
[
[
"# For the images shown above, show the predictions\n# first activate GPU processing\nimages, labels = images.cuda(), labels.cuda()\n\n# Feed forward\noutputs = net(Variable(images))\n_, predicted = torch.max(outputs.data, 1)\n\nprint('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n for j in range(4)))",
"Predicted: cat ship ship ship\n"
]
],
[
[
"## Total Test Set Accuracy",
"_____no_output_____"
]
],
[
[
"# Small code snippet to determine test accuracy\ncorrect = 0\ntotal = 0\nfor data in testloader:\n # load images\n images, labels = data\n if use_cuda:\n images, labels = images.cuda(), labels.cuda()\n # feed forward\n outputs = net(Variable(images))\n # perform softmax regression\n _, predicted = torch.max(outputs.data, 1)\n # update stats\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\n# print the results\nprint('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))",
"Accuracy of the network on the 10000 test images: 66 %\n"
]
],
[
[
"## Accuracy per Class for Test Set",
"_____no_output_____"
]
],
[
[
"class_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\nfor data in testloader:\n images, labels = data\n if use_cuda:\n images, labels = images.cuda(), labels.cuda()\n outputs = net(Variable(images))\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i]\n class_total[label] += 1\n\n# Print the accuracy per class\nfor i in range(10):\n print(classes[i], 100 * class_correct[i] / class_total[i])",
"69.7\n79.4\n58.9\n55.5\n55.8\n56.8\n70.1\n69.0\n77.7\n75.4\n"
]
],
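[
[
"# Optional sketch: a full 10x10 confusion matrix gives more detail than per-class accuracy.\n# Assumes net, testloader, use_cuda and Variable from the cells above, and that scikit-learn is installed.\nfrom sklearn.metrics import confusion_matrix\nall_preds, all_labels = [], []\nfor images, labels in testloader:\n    if use_cuda:\n        images, labels = images.cuda(), labels.cuda()\n    outputs = net(Variable(images))\n    _, predicted = torch.max(outputs.data, 1)\n    all_preds.extend(predicted.cpu().numpy().ravel().tolist())\n    all_labels.extend(labels.cpu().numpy().ravel().tolist())\nprint(confusion_matrix(all_labels, all_preds))",
"_____no_output_____"
]
],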
[
[
"# Plot Loss",
"_____no_output_____"
]
],
[
[
"batch_size = 4\nloss_samples_per_epoch = 6\nnum_epochs = 15\nepochs_list = [(i/loss_samples_per_epoch) for i in range(1, num_epochs*loss_samples_per_epoch + 1)] \nplt.semilogy(epochs_list, loss_results[:-6])\nplt.ylabel('Loss')\nplt.xlabel('Epoch Number')\nplt.savefig('./DNN_v2.png', format='png', pad_inches=1, dpi=1200)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a85d156c119fd4bc28667efad96408341d864aa
| 55,250 |
ipynb
|
Jupyter Notebook
|
support vector.ipynb
|
abinarain/DMWAS
|
ea5b95c9026abe745f3a3201cd4e8d356fd163a8
|
[
"MIT"
] | 2 |
2021-03-24T14:13:39.000Z
|
2021-11-06T15:06:06.000Z
|
support vector.ipynb
|
abinarain/DMWAS
|
ea5b95c9026abe745f3a3201cd4e8d356fd163a8
|
[
"MIT"
] | null | null | null |
support vector.ipynb
|
abinarain/DMWAS
|
ea5b95c9026abe745f3a3201cd4e8d356fd163a8
|
[
"MIT"
] | null | null | null | 28.52349 | 287 | 0.261357 |
[
[
[
"import numpy as np\nimport pandas as pd\nimport os\n#importing basic library for preprocessing",
"_____no_output_____"
],
[
"data=pd.read_csv(\"EncodedVarScore.csv\") #reading data",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
],
[
"x=data.values#converting into array",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
]
],
[
[
"y=np.random.choice([0,1],40) #generating random dependent values",
"_____no_output_____"
]
],
[
[
"y=pd.read_csv(\"y_data\")",
"_____no_output_____"
],
[
"y=y.values[:,1]",
"_____no_output_____"
],
[
"y",
"_____no_output_____"
],
[
"data.isnull().any() #checking for null values",
"_____no_output_____"
],
[
"data[\"391\"].isnull().any() #individually checking null values",
"_____no_output_____"
],
[
"c=[]\nfor i in data:\n if data[i].isnull().any():\n c.append(i)\n#getting to list of column for nulll values",
"_____no_output_____"
],
[
"c",
"_____no_output_____"
],
[
"c.reverse()",
"_____no_output_____"
],
[
"for i in c:\n data=data.drop(i,axis=1)\n#dropping null columns from back direction in order to prevent column number change",
"_____no_output_____"
],
[
"data",
"_____no_output_____"
],
[
"x=data.values",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
]
],
[
[
"#### applying neural layer",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=.3,random_state=10)",
"_____no_output_____"
],
[
"xtrain",
"_____no_output_____"
],
[
"from sklearn.svm import SVC",
"_____no_output_____"
]
],
[
[
" from sklearn.model_selection import GridSearchCV",
"_____no_output_____"
],
[
"param_grid = {'kernel': ['linear','poly','rbf','sigmoid']}",
"_____no_output_____"
]
],
[
[
"clf=SVC()",
"_____no_output_____"
]
],
[
[
"search = GridSearchCV(clf, param_grid, cv=5)",
"_____no_output_____"
],
[
"search.fit(xtrain,ytrain)",
"_____no_output_____"
],
[
"search.best_params_",
"_____no_output_____"
],
[
"ypred=search.predict(xtest)",
"_____no_output_____"
],
[
"pd.crosstab(ytest,ypred)",
"_____no_output_____"
]
],
[
[
"clf.fit(xtrain,ytrain)",
"/home/satyanveshi/.local/lib/python3.6/site-packages/sklearn/svm/base.py:193: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n"
],
[
"clf.predict(xtest)",
"_____no_output_____"
],
[
"pd.crosstab(ytest,ypred)",
"_____no_output_____"
]
],
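[
[
"# A short sketch summarising the grid-searched SVC with standard metrics instead of only a crosstab.\n# Assumes xtest, ytest and the fitted `search` object from the cells above.\nfrom sklearn.metrics import accuracy_score, classification_report\ny_hat = search.predict(xtest)\nprint('accuracy:', accuracy_score(ytest, y_hat))\nprint(classification_report(ytest, y_hat))",
"_____no_output_____"
]
],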
[
[
"##### support vector is not giving suitable results",
"_____no_output_____"
]
]
] |
[
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"raw",
"raw",
"raw",
"raw",
"raw"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a85e87468beb14a19f17669b3c9544a5317c196
| 345,706 |
ipynb
|
Jupyter Notebook
|
app.ipynb
|
jackky46/tugasakhirdatascience
|
d218525893ad7b1a796e0e3d49b19d337ab30eb9
|
[
"MIT"
] | null | null | null |
app.ipynb
|
jackky46/tugasakhirdatascience
|
d218525893ad7b1a796e0e3d49b19d337ab30eb9
|
[
"MIT"
] | null | null | null |
app.ipynb
|
jackky46/tugasakhirdatascience
|
d218525893ad7b1a796e0e3d49b19d337ab30eb9
|
[
"MIT"
] | null | null | null | 131.547184 | 63,224 | 0.837119 |
[
[
[
"## <center>Mempermudah para peneliti dan dokter dalam meneliti persebaran Covid-19 di US",
"_____no_output_____"
],
[
"Kelompok-1 :\n1. Gunawan Adhiwirya\n2. Reyhan Septri Asta\n3. Muhammad Figo Mahendra",
"_____no_output_____"
],
[
"#### Langkah pertama kami me-import package yang di butuhkan",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"#import pandas\nimport pandas as pd\n#import numpy\nimport numpy as np\n#import seaborn\nimport seaborn as sns\n#import matplotlib\nimport matplotlib.pyplot as plt\n# Import Module LinearRegression digunakan untuk memanggil algoritma Linear Regression.\nfrom sklearn.linear_model import LinearRegression\n# import Module train_test_split digunakan untuk membagi data kita menjadi training dan testing set.\nfrom sklearn.model_selection import train_test_split\n# import modul mean_absolute_error dari library sklearn\nfrom sklearn.metrics import mean_absolute_error\n#import math agar program dapat menggunakan semua fungsi yang ada pada modul math.(ex:sqrt)\nimport math\n# me-non aktifkan peringatan pada python\nimport warnings \nwarnings.filterwarnings('ignore')\n\nfrom sklearn.cluster import KMeans",
"_____no_output_____"
]
],
[
[
"#### Kemudian load dataset yang akan dipakai",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"df_train = pd.read_csv('datacovid.csv')\ndf_train",
"_____no_output_____"
]
],
[
[
"#### Kemudian kita cek shape dari dataset yang di gunakan",
"_____no_output_____"
]
],
[
[
"df_train.shape",
"_____no_output_____"
]
],
[
[
"#### Lalu kita cek ringkasan dari dataset yang di pakai",
"_____no_output_____"
],
[
"count = Jumlah data\n\nmean = Rata - rata\n\nstd = Standar Deviasi\n\nmin = Nilai Minimum\n\n25% = Adalah Nilai 1/4 dari data tersebut\n\n50% = Adalah Nilai 1/2 dari data tersebut\n\n75% = Adalah Nilai 3/4 dari data tersebut\n\n100% = Adalah Nilai Maksimum",
"_____no_output_____"
]
],
[
[
"df_train.describe()",
"_____no_output_____"
]
],
[
[
"#### Lalu dilakukan pengecekan apakah data tersebut ada yang kosong atau null",
"_____no_output_____"
]
],
[
[
"df_train.isnull().sum()",
"_____no_output_____"
]
],
[
[
"#### Membersihkan data yang memiliki nilai Null",
"_____no_output_____"
]
],
[
[
"#dikarenakan datanya masih ada yang null maka data yang memiliki nilai NaN akan di drop\ndf_train.dropna(inplace = True)\ndf_train",
"_____no_output_____"
]
],
[
[
"#### Kemudian kita cek tipe-tipe data yang ada pada dataset",
"_____no_output_____"
]
],
[
[
"#me-cek tipe data \ndf_train.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 17341 entries, 1 to 39658\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 submission_date 17341 non-null object \n 1 state 17341 non-null object \n 2 tot_cases 17341 non-null int64 \n 3 conf_cases 17341 non-null float64\n 4 prob_cases 17341 non-null float64\n 5 new_case 17341 non-null int64 \n 6 pnew_case 17341 non-null float64\n 7 tot_death 17341 non-null int64 \n 8 conf_death 17341 non-null float64\n 9 prob_death 17341 non-null float64\n 10 new_death 17341 non-null int64 \n 11 pnew_death 17341 non-null float64\n 12 created_at 17341 non-null object \n 13 consent_cases 17341 non-null object \n 14 consent_deaths 17341 non-null object \ndtypes: float64(6), int64(4), object(5)\nmemory usage: 2.1+ MB\n"
]
],
[
[
"#### Kemudian melakukan pengecekan apakah data tersebut ada yang duplikat",
"_____no_output_____"
]
],
[
[
"#melihat jika ada data duplikat\ndf_train.duplicated().sum()",
"_____no_output_____"
]
],
[
[
"#### Lalu data yang sudah di proses sebelumnya di buat visualisasi dengan histogram",
"_____no_output_____"
]
],
[
[
"#membuat histogram\ndf_train.hist(figsize = (20,12))",
"_____no_output_____"
]
],
[
[
"#### Selanjutnya kita menghapus data yang tidak di perlukan",
"_____no_output_____"
]
],
[
[
"#dikarenakan kita tidak membutuhkan data pending dalam melakukan visualisasi kita, maka kolom pnew_death, pnew_case di drop\n# dan kami memutuskan bahwa data prob_cases, prob_death, dan created_at di drop karena tidak ada hubungannya untuk peneliti dalam membantu meneliti penyebaran Covid\ndft=df_train.drop(['pnew_death', 'pnew_case','prob_cases', 'prob_death','created_at'], axis = 1)\ndft",
"_____no_output_____"
]
],
[
[
"#### Kemudian dilakukan pengecekan data duplikat kembali setelah menghapus data yang tidak digunakan",
"_____no_output_____"
]
],
[
[
"#pengecekan duplikasi setelah men-drop beberapa data\ndft.duplicated().sum()",
"_____no_output_____"
]
],
[
[
"#### Selanjutnya kita mencari korelasi pada data dengan menggunakan visualisasi Heatmap",
"_____no_output_____"
]
],
[
[
"#mencari korelasi dengan menggunakan heatmap\nfig , axes = plt.subplots(figsize = (14,12))\nsns.heatmap(dft.corr(), annot=True)\ndft.columns",
"_____no_output_____"
]
],
[
[
"Pada gambar diatas data yang memiliki korelasi terbaik adalah data tot_death dengan conf_death dan tot_cases dan conf_cases",
"_____no_output_____"
],
[
"Karena kami memiliki 2 nilai korelasi yang sama dan hubungan korelasinya kuat maka dari itu kami memutuskan untuk memilih salah satu korelasi sebagai data yang akan kami uji pada data tersebut yaitu data conf_cases dengan data tot_cases",
"_____no_output_____"
],
[
"#### Setelah mendapat data yang ingin digunakan, selanjutnya kita memeriksa apakah ada outlier (pencilan) pada data yang digunakan",
"_____no_output_____"
]
],
[
[
"#mememeriksa apakah data memiliki outlier(pencilan)\nq1 = df_train.iloc[:,[2,3,]].quantile(0.25)\nq3 = df_train.iloc[:,[2,3]].quantile(0.75)\nIQR = q3 - q1\nIQR",
"_____no_output_____"
]
],
[
[
"#### Setelah itu memeriksa data apakah ada outlier (pencilan) dengan menggunakan boxplot",
"_____no_output_____"
]
],
[
[
"#memeriksa apakah data memiliki outlier(pencilan) dengan menggunakan boxplot\ndf = df_train.iloc[:,[2,3]]\ndf.columns\n\nfig, axes = plt.subplots(ncols = 2, nrows = 1, figsize = (18,8))\n\nfor i, ax in zip(df.columns, axes.flat):\n sns.boxplot(x = df[i], ax = ax)\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Setelah data outlier (pencilan) ditemukan, kita hapus data outliernya",
"_____no_output_____"
]
],
[
[
"#menghapus data ouliernya\nQ1 = (df_train[['tot_cases', 'conf_cases']]).quantile(0.25)\nQ3 = (df_train[['tot_cases', 'conf_cases']]).quantile(0.75)\nIQR = Q3 - Q1\nmax = Q3 + (1.5*IQR)\nmin = Q1 - (1.5*IQR)\n\nJlebih = (df_train > max)\nJkurang = (df_train < min)\ndf_train = df_train.mask(Jlebih, max, axis=1)\ndf_train = df_train.mask(Jkurang, min, axis=1)",
"_____no_output_____"
]
],
[
[
"#### Setelah data outlier (pencilan) nya sudah di hapus, kita cek sekali lagi untuk memastikan apakah data outlier (pencilan) masih tersisa di dalam data",
"_____no_output_____"
]
],
[
[
"#memeriksa data apakah masih ada outlier setelah dilakukan penghapusan\ndf = df_train.iloc[:,[2,3]]\ndf.columns\n\nfig, axes = plt.subplots(ncols = 2, nrows = 1, figsize = (18,8))\n\nfor i, ax in zip(df.columns, axes.flat):\n sns.boxplot(x = df[i], ax = ax)\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Setelah itu kita akan mengambil 2 data utama yang akan digunakan untuk analisis",
"_____no_output_____"
]
],
[
[
"dfu=df.head(50)\ndfu",
"_____no_output_____"
]
],
[
[
"#### Selanjutnya kita visualisasikan prediksi data yang digunakan menggunakan Scatter Plot",
"_____no_output_____"
]
],
[
[
"plt.scatter(dfu['conf_cases'],dfu['tot_cases'])\nplt.xlabel('conf_cases')\nplt.ylabel('tot_cases')\nplt.title('Scatter Plot conf_cases vs tot_cases')\nplt.show()",
"_____no_output_____"
],
[
"x = dfu['conf_cases'].values.reshape(-1,1)\ny = dfu['tot_cases'].values.reshape(-1,1)",
"_____no_output_____"
]
],
[
[
"#### Melihat nilai rata - rata dari Variabel X dan Y",
"_____no_output_____"
]
],
[
[
"x_mean = np.mean(x)\ny_mean = np.mean(y)\nprint('nilai mean var x: ', x_mean,'\\n'\n 'nilai mean var y: ', y_mean)",
"nilai mean var x: 453307.66 \nnilai mean var y: 514574.44\n"
]
],
[
[
"#### Kemudian melihat nilai korelasi koefisien pada data",
"_____no_output_____"
]
],
[
[
"atas = sum((x - x_mean)*(y - y_mean))\nbawah = math.sqrt((sum((x - x_mean)**2)) * (sum((y - y_mean)**2)))\ncorrelation = atas/bawah\nprint('Nilai Correlation Coefficient: ', correlation)",
"Nilai Correlation Coefficient: [0.99416166]\n"
]
],
[
[
"#### Melihat Slope pada data",
"_____no_output_____"
],
[
"Slope adalah tingkat kemiringan garis",
"_____no_output_____"
]
],
[
[
"# slope\n# Slope adalah tingkat kemiringan garis, intercept \n# adalah jarak titik y pada garis dari titik 0\nvariance = sum((x - x_mean)**2)\ncovariance = sum((x - x_mean) * (y - y_mean))\ntheta_1 = covariance/variance\nprint('Nilai theta_1: ',theta_1)",
"Nilai theta_1: [1.10941586]\n"
]
],
[
[
"Intercept adalah jarak titik y pada garis dari titik 0",
"_____no_output_____"
]
],
[
[
"# intercept\ntheta_0 = y_mean - (theta_1 * x_mean)\nprint('Nilai theta_0: ',theta_0)",
"Nilai theta_0: [11667.73160133]\n"
]
],
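[
[
"# A quick cross-check sketch: numpy.polyfit fits the same least-squares line directly,\n# so its coefficients should match theta_1 and theta_0 computed by hand above.\n# Assumes x and y (the reshaped column vectors) from the cells above, hence the .ravel() calls.\nslope_np, intercept_np = np.polyfit(x.ravel(), y.ravel(), 1)\nprint('numpy slope    :', slope_np)\nprint('numpy intercept:', intercept_np)",
"_____no_output_____"
]
],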
[
[
"#### Melakukan manual prediksi",
"_____no_output_____"
]
],
[
[
"# prediction manual\ny_pred = theta_0 + (theta_1 * 130)\n\nprint(y_pred)",
"[11811.95566339]\n"
]
],
[
[
"#### Memvisualisasikan prediksi dengan scatter plot",
"_____no_output_____"
]
],
[
[
"# visualisasi prediksi dengan scatter plot\ny_pred = theta_0 + (theta_1 * x)\n\nplt.scatter(x,y)\nplt.plot(x, y_pred, c='r')\nplt.xlabel('conf_cases')\nplt.ylabel('tot_cases')\nplt.title('Plot conf_cases vs tot_cases')",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, test_size = 0.2, random_state = 0)",
"_____no_output_____"
]
],
[
[
"#### Selanjutnya kita cek regresi koefisien dan intercepnya",
"_____no_output_____"
]
],
[
[
"regressor = LinearRegression()\nregressor.fit(x_train, y_train)\nprint(regressor.coef_)\nprint(regressor.intercept_)",
"[[1.09658641]]\n[15695.8934773]\n"
]
],
[
[
"#### Mencetak score regresi untuk melihat akurasi",
"_____no_output_____"
]
],
[
[
"regressor.score(x_test, y_test)",
"_____no_output_____"
]
],
[
[
"#### Mencetak nilai korelasi dari score regresi",
"_____no_output_____"
]
],
[
[
"print('Correlation: ', math.sqrt(regressor.score(x_test,y_test)))",
"Correlation: 0.9987225295509001\n"
]
],
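[
[
"# A small evaluation sketch: mean_absolute_error and math were imported at the top but never used,\n# so report the test-set error of the fitted model here. Assumes regressor, x_test and y_test from above.\ny_pred_test = regressor.predict(x_test)\nmae = mean_absolute_error(y_test, y_pred_test)\nrmse = math.sqrt(((y_test - y_pred_test) ** 2).mean())\nprint('MAE :', mae)\nprint('RMSE:', rmse)",
"_____no_output_____"
]
],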
[
[
"#### Memvisualisasikan regresi menggunakan data testing",
"_____no_output_____"
]
],
[
[
"y_prediksi = regressor.predict(x_test)\n\nplt.scatter(x_test, y_test)\nplt.plot(x_test, y_prediksi, c='r')\nplt.xlabel('conf_cases')\nplt.ylabel('tot_cases')\nplt.title('Plot conf_cases vs tot_cases')",
"_____no_output_____"
]
],
[
[
"#### Memasukan dataframe ke dalam array kemudian memvisualisasikan dengan menggunakan metode Elbow",
"_____no_output_____"
]
],
[
[
"#memasukkan dataframe ke dalam array \ndata = np.array(df_train[[\"conf_cases\", \"tot_cases\"]])\ndata\n\nwcss = []\nfor i in range(1, 11):\n kmeans = KMeans(n_clusters = i)\n kmeans.fit(data)\n wcss.append(kmeans.inertia_)\n\nplt.plot(range(1, 11), wcss)\nplt.title('Elbow Method')\nplt.xlabel('Jumlah Cluster')\nplt.ylabel('WCSS')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"Dari hasil Elbow Method diatas dapat di simpulkan bahwa jumlah cluster yang paling optimal dalam melakukan K-Means adalah 2 (dua)",
"_____no_output_____"
],
[
"#### Kemudian menentukan jumlah cluster dan centroidnya",
"_____no_output_____"
]
],
[
[
"df_cluster = df_train[[\"conf_cases\", \"tot_cases\"]]\n\n#centroid \nCentroids = (df_cluster.sample(n = 2))\nplt.figure(figsize = (10, 5))\nplt.scatter(df_cluster[\"conf_cases\"], df_cluster[\"tot_cases\"], color = 'blue')\nplt.scatter(Centroids[\"conf_cases\"], Centroids[\"tot_cases\"], color = 'coral')\nplt.xlabel('confirm cases')\nplt.ylabel('total cases') \nplt.show()",
"_____no_output_____"
]
],
[
[
"Pada visualiasi Centroid di atas, dapat diketahui bahwa titik centroid tersebut memiliki sifat yang masih acak (random). Oleh karena itu kita melakukan proses pengelompokan dengan menggunakan metode K-Means",
"_____no_output_____"
],
[
"#### Kemudian melakukan perhitungan menggunakan K-Means",
"_____no_output_____"
]
],
[
[
"#K-Means\ndiff = 1\ni = 0\n\nwhile(diff!=0):\n data_new = df_cluster\n j = 1\n for index1 ,row_c in Centroids.iterrows():\n Y=[]\n for index2,row_d in data_new.iterrows():\n nd1=(row_c['conf_cases']-row_d['conf_cases'])**2\n nd2=(row_c[\"tot_cases\"]-row_d[\"tot_cases\"])**2\n nd=np.sqrt(nd1+nd2)\n Y.append(nd)\n df_cluster[j]=Y\n j=j+1\n\n hasil=[]\n for index,row in df_cluster.iterrows():\n min_dist=row[1]\n pos=1\n for j in range(2):\n if row[j+1] < min_dist:\n min_dist = row[j+1]\n pos=j+1\n hasil.append(pos)\n df_cluster[\"Cluster\"]=hasil\n Centroids_new = df_cluster.groupby([\"Cluster\"]).mean()[[\"conf_cases\", \"tot_cases\"]]\n if i == 0:\n diff=1\n i=i+1\n else:\n diff = (Centroids_new['conf_cases'] - Centroids['conf_cases']).sum() + (Centroids_new[\"tot_cases\"] - Centroids[\"tot_cases\"]).sum()\n print(diff.sum())\n Centroids = df_cluster.groupby([\"Cluster\"]).mean()[['conf_cases',\"tot_cases\"]]",
"575919.2140289962\n271925.06803870416\n166897.27773183194\n62414.231701318844\n25435.920983209333\n8376.812338868243\n2869.894804758107\n884.4677689888485\n220.33032504811126\n221.62453045746952\n0.0\n"
]
],
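[
[
"# A sketch showing that the manual K-Means loop above can be reproduced with scikit-learn's KMeans.\n# Assumes `data` (the conf_cases / tot_cases array) and the KMeans import from the earlier cells.\nkm = KMeans(n_clusters=2, random_state=0)\ncluster_labels = km.fit_predict(data)\nprint('centroids:')\nprint(km.cluster_centers_)\nplt.figure(figsize=(10, 5))\nplt.scatter(data[:, 0], data[:, 1], c=cluster_labels)\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], color='black')\nplt.xlabel('confirm cases')\nplt.ylabel('total cases')\nplt.show()",
"_____no_output_____"
]
],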
[
[
"#### Menentukan kelompok centroid dan memvisualisasikannya",
"_____no_output_____"
]
],
[
[
"#menentukan kelompok centroid\nwarna=['red','green','blue']\n\nplt.figure(figsize=(10,5))\n\nfor i in range(3):\n df_Model=df_cluster[df_cluster[\"Cluster\"] == i + 1]\n plt.scatter(df_Model['conf_cases'], df_Model [\"tot_cases\"], color = warna[i])\n \nplt.scatter(Centroids['conf_cases'],Centroids[\"tot_cases\"],color='black')\nplt.xlabel('confirm cases')\nplt.ylabel(\"total cases\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"Dapat dilihat pada Visualisasi di atas bahwa kedua titik Centroid sudah berada pada center (titik tengah) kelompoknya masing- masing, dimana pengelempokannya diberikan visualiasi warna yang berbeda",
"_____no_output_____"
],
[
"## Kesimpulan",
"_____no_output_____"
],
[
"Pada analisa yang di lakukan pada dataset, bisa disimpulkan bahwa pada hasil preprocessing di dapatkan 2 (dua) hubungan korelasi yang kuat yaitu, conf_cases dan tot_cases. Dimana conf_cases adalah kasus covid di Amerika Serikat yang sudah terkonfirmasi dan tot_cases adalah total kasus yang terjadi di Amerika Serikat. Kemudian pada data korelasi antara conf_death dan tot_death tidak dibutuhkan karena data ini tidak relevan untuk memprediksi perkembangan dan persebaran covid yang ada di USA.\n\nPada data conf_cases dan tot_cases akan dilakukan clustering dimana clustering tersebut sangat berguna untuk memprediksi dan menganalisa persebaran covid-19 di Amerika Serikat. Pada clustering, jumlah cluster yang optimal digunakan pada K-Means adalah berjumlah 2 (dua). Setelah melakukan proses analisa dengan metode K-means, hasil pengelompokan pada 2 (dua) cluster dapat di visualisasikan.",
"_____no_output_____"
],
[
"### Referensi",
"_____no_output_____"
],
[
"https://www.analyticsvidhya.com/blog/2019/08/comprehensive-guide-k-means-clustering/\n\nhttps://towardsdatascience.com/machine-learning-algorithms-part-9-k-means-example-in-python-f2ad05ed5203\n\nTugas Mandiri dan Kelompok SPADA DIKTI",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a85fb80f4edc331f32c99ec3f13181dc11b0b25
| 191,869 |
ipynb
|
Jupyter Notebook
|
handling_outliers.ipynb
|
PacktPublishing/Python-for-Data-Analysis-step-by-step-with-projects-
|
d4303844d927f7bd61e48b71a9d272f426dfba0e
|
[
"MIT"
] | 6 |
2021-12-19T00:45:37.000Z
|
2022-03-26T05:11:59.000Z
|
handling_outliers.ipynb
|
PacktPublishing/Python-for-Data-Analysis-step-by-step-with-projects-
|
d4303844d927f7bd61e48b71a9d272f426dfba0e
|
[
"MIT"
] | null | null | null |
handling_outliers.ipynb
|
PacktPublishing/Python-for-Data-Analysis-step-by-step-with-projects-
|
d4303844d927f7bd61e48b71a9d272f426dfba0e
|
[
"MIT"
] | 10 |
2021-12-13T16:54:04.000Z
|
2022-03-30T18:12:27.000Z
| 100.088159 | 83,532 | 0.75513 |
[
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"wdi_imp = pd.read_pickle('wdi_imp.pkl')",
"_____no_output_____"
],
[
"wdi_imp.describe()",
"_____no_output_____"
],
[
"wdi_imp.describe().T",
"_____no_output_____"
],
[
"wdi_imp.hist(figsize=(20,20)) # population_density, population, atms_per_100000",
"_____no_output_____"
],
[
"wdi_imp['population'].hist(bins=50)",
"_____no_output_____"
],
[
"wdi_imp[['country_name','population']].sort_values(by='population',ascending=False)",
"_____no_output_____"
],
[
"wdi_imp.columns",
"_____no_output_____"
],
[
"wdi_imp.loc[wdi_imp['is_region']==1, 'country_name'].value_counts()",
"_____no_output_____"
],
[
"wdi_imp = wdi_imp[wdi_imp['is_region']==0]",
"_____no_output_____"
],
[
"wdi_imp.shape",
"_____no_output_____"
],
[
"wdi_imp['population'].hist(bins=50)",
"_____no_output_____"
],
[
"wdi_imp.boxplot('atms_per_100000')",
"_____no_output_____"
],
[
"Q1 = wdi_imp['atms_per_100000'].quantile(0.25)\nQ3 = wdi_imp['atms_per_100000'].quantile(0.75)",
"_____no_output_____"
],
[
"print(Q1)\nprint(Q3)",
"14.29433637383361\n66.92085678105676\n"
],
[
"wdi_imp['atms_per_100000'].describe()",
"_____no_output_____"
],
[
"IQR = Q3 - Q1\nIQR",
"_____no_output_____"
],
[
"outlier_high = Q3 + 1.5*IQR\noutlier_low = Q1 - 1.5*IQR",
"_____no_output_____"
],
[
"print(outlier_high)\nprint(outlier_low)",
"145.8606373918915\n-64.6454442370011\n"
],
[
"msk = (wdi_imp['atms_per_100000'] < outlier_low) | (wdi_imp['atms_per_100000'] > outlier_high)\nwdi_imp[msk]",
"_____no_output_____"
],
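[
"# A reusable sketch of the IQR rule applied above, so the same check can be run on any numeric column.\ndef iqr_outliers(series, k=1.5):\n    q1, q3 = series.quantile(0.25), series.quantile(0.75)\n    iqr = q3 - q1\n    low, high = q1 - k * iqr, q3 + k * iqr\n    return series[(series < low) | (series > high)]\n\niqr_outliers(wdi_imp['atms_per_100000'])",
"_____no_output_____"
],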
[
"wdi_imp['atms_per_100000'].hist(bins=50)",
"_____no_output_____"
],
[
"p99 = wdi_imp['atms_per_100000'].quantile(0.99)\np99",
"_____no_output_____"
],
[
"wdi_imp.loc[wdi_imp['atms_per_100000'] > p99, 'atms_per_100000']",
"_____no_output_____"
],
[
"wdi_imp.loc[wdi_imp['atms_per_100000'] > p99, 'atms_per_100000'] = p99",
"_____no_output_____"
],
[
"wdi_imp['atms_per_100000'].hist(bins=50)",
"_____no_output_____"
],
[
"wdi_imp['population_density'].hist(bins=50)",
"_____no_output_____"
],
[
"wdi_imp[wdi_imp['population_density'] >= 7000]",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a86048db877c471ca7b5b7d4604c13dacfee564
| 34,590 |
ipynb
|
Jupyter Notebook
|
2019-04-16 BioIT World/2019-04-16_07 BioITWorld Class Project.ipynb
|
g2nb/workshop-notebooks
|
1e22f9569438dd509f3148959ca5b87d78ea5157
|
[
"BSD-3-Clause"
] | null | null | null |
2019-04-16 BioIT World/2019-04-16_07 BioITWorld Class Project.ipynb
|
g2nb/workshop-notebooks
|
1e22f9569438dd509f3148959ca5b87d78ea5157
|
[
"BSD-3-Clause"
] | 1 |
2019-05-03T18:53:07.000Z
|
2019-05-03T18:54:59.000Z
|
Integrative_Genomics_Analysis_In_GenePattern/notebooks/2019-04-16_07 BioITWorld Class Project.ipynb
|
genepattern/tutorial-materials
|
2ab7978fd46343274de999d96e6a65e568dd129f
|
[
"BSD-3-Clause"
] | 1 |
2022-01-12T20:17:50.000Z
|
2022-01-12T20:17:50.000Z
| 38.561873 | 541 | 0.596386 |
[
[
[
"# Analyzing HTSeq Data Using Two Differential Expression Modules",
"_____no_output_____"
],
[
"<p>The main goals of this project are:</p>\n\n<ul>\n\t<li>Analyze HTSeq count data with tools that assume an underlying <a href=\"https://en.wikipedia.org/wiki/Negative_binomial_distribution\" target=\"_blank\">negative binomial distribution</a> on the data</li>\n\t<li>Analyze <a href=\"http://software.broadinstitute.org/cancer/software/genepattern/modules/docs/PreprocessReadCounts/1\" target=\"_blank\">normalized HTSeq count</a> data with tools that assume an underlying <a href=\"https://en.wikipedia.org/wiki/Normal_distribution\" target=\"_blank\">normal distribution</a> on the data.</li>\n\t<li>Compare the results of differential gene expression analysis under the two scenarios above.</li>\n</ul>\n\n<p><img alt=\"2019-04-16_07_BioITWorld_Class-Project.jpg\" src=\"https://datasets.genepattern.org/data/BioITWorld/2019-04-16_07_BioITWorld_Class-Project.jpg\" /></p>\n",
"_____no_output_____"
]
],
[
[
"# Requires GenePattern Notebook: pip install genepattern-notebook\nimport gp\nimport genepattern\n\n# Username and password removed for security reasons.\ngenepattern.display(genepattern.session.register(\"https://cloud.genepattern.org/gp\", \"\", \"\"))",
"_____no_output_____"
]
],
[
[
"## Section 1: Load and Filter the Dataset",
"_____no_output_____"
],
[
"In brief, the dataset we will use in this notebook is RNA-Seq counts downloaded from TCGA. We have selected 40 samples of Breast Invasive Carcinoma (BRCA), 20 of those samples come from tumor tissue and 20 come from their corresponding normal tissue.",
"_____no_output_____"
],
[
"### 1.1 Load the CLS file for future use by using the Python function below. ",
"_____no_output_____"
],
[
"In order to make the phenotype labels file (the CLS file) easily accessible in the GenePattern modules in this notebook, we will use a Python function wrapped in a GenePattern UIBuilder cell titled **`Load URL Into Notebook {}`** using this function is as simple as typing the url which contains the data we want to load.\n<div class=\"alert alert-info\"> \n<ul>\n<li><b>url</b>: Drag and drop the link to <a href=\"https://datasets.genepattern.org/data/TCGA_BRCA/WP_0_BRCA_cp_40_samples.cls\">this CLS file</a><br>\n <em>Note: It should display the file's url after you have done so.</em> \n</ul>\n</div>",
"_____no_output_____"
]
],
[
[
"@genepattern.build_ui(name=\"Load URL Into Notebook\",\n parameters={\n \"url\": {\"default\":\"https://datasets.genepattern.org/data/TCGA_BRCA/WP_0_BRCA_cp_40_samples.cls\"},\n \"output_var\":{\"default\":\"\", \"hide\":True}\n })\ndef load_data_from_url(url):\n \"\"\"This simple function \"\"\"\n return genepattern.GPUIOutput(files=[url])",
"_____no_output_____"
]
],
[
[
"<div class=\"well\">\n <em>Note:</em> you can use this function to load data from an URL in any of your notebooks\n</div>",
"_____no_output_____"
],
[
"### 1.2 Filter out uninformative genes",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n\n<p>In order to remove the uninformative genes from the the HTseq dataset (i.e., the rows in the GCT file with the smallest variance), create a new cell below this one and use the <strong>PreprocessDataset*</strong> GenePattern module with these parameters:</p>\n\n<ul>\n\t<li><strong>input filename</strong>: Drag and drop the link to <a href=\"https://datasets.genepattern.org/data/TCGA_BRCA/WP_0_BRCA_cp_40_samples.gct\" target=\"_blank\">this GCT file</a><br />\n\t<em>Note: It should display the file's url after you have done so.</em></li>\n\t<li><strong>output filename: workshop_BRCA_filtered.gct</strong></li>\n <li><strong>ceiling: </strong> 20000000.\n <br />\n\t<em>Note: The default value is 20,000 we are changing this value to 20,000,000.</em></li>\n\t<li>The rest of the parameters can be left as default.</li>\n</ul>\n</div>",
"_____no_output_____"
]
],
[
[
"preprocessdataset_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00020')\npreprocessdataset_job_spec = preprocessdataset_task.make_job_spec()\npreprocessdataset_job_spec.set_parameter(\"input.filename\", \"\")\npreprocessdataset_job_spec.set_parameter(\"threshold.and.filter\", \"1\")\npreprocessdataset_job_spec.set_parameter(\"floor\", \"20\")\npreprocessdataset_job_spec.set_parameter(\"ceiling\", \"20000000\")\npreprocessdataset_job_spec.set_parameter(\"min.fold.change\", \"3\")\npreprocessdataset_job_spec.set_parameter(\"min.delta\", \"100\")\npreprocessdataset_job_spec.set_parameter(\"num.outliers.to.exclude\", \"0\")\npreprocessdataset_job_spec.set_parameter(\"row.normalization\", \"0\")\npreprocessdataset_job_spec.set_parameter(\"row.sampling.rate\", \"1\")\npreprocessdataset_job_spec.set_parameter(\"threshold.for.removing.rows\", \"\")\npreprocessdataset_job_spec.set_parameter(\"number.of.columns.above.threshold\", \"\")\npreprocessdataset_job_spec.set_parameter(\"log2.transform\", \"0\")\npreprocessdataset_job_spec.set_parameter(\"output.file.format\", \"3\")\npreprocessdataset_job_spec.set_parameter(\"output.file\", \"workshop_BRCA_filtered.gct\")\npreprocessdataset_job_spec.set_parameter(\"job.memory\", \"2 Gb\")\npreprocessdataset_job_spec.set_parameter(\"job.walltime\", \"02:00:00\")\npreprocessdataset_job_spec.set_parameter(\"job.cpuCount\", \"1\")\ngenepattern.display(preprocessdataset_task)",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Section 2: Analyzing HTseq Counts Using DESeq2",
"_____no_output_____"
],
[
"The results you generate in this section will be used as the reference for comparison later in this notebook and will be refered to as **`DESeq2_results`**.",
"_____no_output_____"
],
[
"### 2.1 Perform differential gene expression using DESeq2 ",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n Create a new cell bellow this one and use the <b>DESeq2</b> GenePattern module using the following parameters:\n<ul>\n <li><b>input file</b>: From the dropdown menu, choose the output from the PreprocessDataset module (i.e., <b>workshop_BRCA_filtered.gct</b> if you used the suggested parameters in section 1).</li>\n <li><b>cls file</b>: From the dropdown menu, choose the output from the <b>`Load URL Into Notebook {}`</b>> UIBuilder cell (i.e., <b>WP_0_BRCA_cp_40_samples.cls</b> if you used the suggested parameters in section 1).</li>\n <li>Click on <b>Run</b> and move on to step 2.2 of this section once the job is complete. </li></ul>\n</div>",
"_____no_output_____"
]
],
[
[
"deseq2_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00362')\ndeseq2_job_spec = deseq2_task.make_job_spec()\ndeseq2_job_spec.set_parameter(\"input.file\", \"\")\ndeseq2_job_spec.set_parameter(\"cls.file\", \"\")\ndeseq2_job_spec.set_parameter(\"confounding.variable.cls.file\", \"\")\ndeseq2_job_spec.set_parameter(\"output.file.base\", \"<input.file_basename>\")\ndeseq2_job_spec.set_parameter(\"qc.plot.format\", \"skip\")\ndeseq2_job_spec.set_parameter(\"fdr.threshold\", \"0.1\")\ndeseq2_job_spec.set_parameter(\"top.N.count\", \"20\")\ndeseq2_job_spec.set_parameter(\"random.seed\", \"779948241\")\ndeseq2_job_spec.set_parameter(\"job.memory\", \"2 Gb\")\ndeseq2_job_spec.set_parameter(\"job.walltime\", \"02:00:00\")\ndeseq2_job_spec.set_parameter(\"job.cpuCount\", \"1\")\ngenepattern.display(deseq2_task)",
"_____no_output_____"
]
],
[
[
"### 2.2 Extract top 25 differentially expressed genes and save them to a DataFrame for later use",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">We will parse the one of the TXT files from the previous cell (<strong>DESeq2</strong>) and extract only the information that we want (i.e., the name and rank of the 100 most differentially expressed genes) and save that list in a python dictionary named <strong><code>DESeq2_results</code></strong>. To do so, we are using the GenePattern UI Builder in the next cell. Feel free to check out the underlying code if you want. Set the input parameters as follows:\n\n<ul>\n\t<li>Send the <strong>first output</strong> of <strong>DESeq2</strong> to Extract Ranked Gene List From TXT GenePattern Variable { }\n\t<ul>\n\t\t<li>Hint: the name of the file should be <strong>workshop_BRCA_filtered.normal.vs.tumor.DESeq2_results_report.txt</strong></li>\n\t\t<li>From the dropdown menu, choose the output from the DESeq2 module (i.e., <b>...results_report.txt</b> if you used the suggested parameters in section 1)</li>\n\t</ul>\n\t</li>\n\t<li><strong>file var</strong>: the action just before this one should have populated this parameter with a long URL similar to this one: <em>https://gp-beta-ami.genepattern.org/gp/jobResults/1234567/workshop_BRCA_filtered.normal.vs.tumor.DESeq2_results_report.txt</em>.</li>\n\t<li><strong>number of genes</strong>: 25 (default)</li>\n\t<li><strong>verbose</strong>: true (default)</li>\n\t<li>Confirm that the <strong>output variable</strong> is is set to be <strong>DESeq2_results</strong></li>\n\t<li>Run the cell.</li>\n</ul>\n</div>\n",
"_____no_output_____"
]
],
[
[
"import genepattern\[email protected]_ui(name=\"Extract Ranked Gene List From TXT GenePattern Variable\",\n parameters={\n \"file_var\": {\n \"type\": \"file\",\n \"kinds\": [\"txt\"],\n },\n \"number_of_genes\": {\"default\":25},\n \"output_var\": {\"default\":\"DESeq2_results\"},\n })\ndef extract_genes_from_txt(file_var:'URL of the results_report_txt file from DESeq2', \n number_of_genes:'How many genes to extract'=100, \n verbose:'Whether or not to print the gene list'=True):\n \n genes_dict = {} # Initializing the dictionary of genes and rankings\n \n # Get the job number and name of the file\n temp = file_var.split('/')\n # programatically access that job to open the file\n gp_file = eval('job'+temp[5]+'.get_file(\"'+temp[6]+'\")')\n py_file = gp_file.open()\n py_file.readline()\n \n rank = 1\n for line in py_file.readlines():\n formatted_line = str(line,'utf-8').strip('\\n').split('\\t')\n genes_dict[formatted_line[0]] = rank\n if rank >= number_of_genes:\n break\n rank += 1\n \n if verbose:\n # For display only\n for gene in genes_dict:\n print(\"{}: {}\".format(genes_dict[gene],gene))\n \n return genes_dict",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Section 3: Analyzing HTSeq Counts Using ComparativeMarkerSelection",
"_____no_output_____"
],
[
"These results will be used for comparison later in this notebook and will be refered to as **`CMS_results`**",
"_____no_output_____"
],
[
"### 3.1 Transform HTSeq counts by using VoomNormalize",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n<h3 style=\"margin-top: 0;\"> Instructions <i class=\"fa fa-info-circle\"></i></h3>\nCreate a new cell bellow this one and use the <strong>VoomNormalize</strong> GenePattern module with the following parameters:\n\n<ul>\n\t<li><strong>input file</strong>: The output from the <strong>PreprocessDataset</strong> module (i.e., <strong>workshop_BRCA_filtered.gct</strong> if you used the suggested parameters in section 1).</li>\n\t<li><strong>cls file</strong>: The output from the <strong>`Load URL Into Notebook {}`</strong> UIBuilder cell (i.e., <strong>WP_0_BRCA_cp_40_samples.cls</strong> is you used the suggested parameters in section 1).</li>\n\t<li><strong>output file</strong>: leave as default.</li>\n</ul>\n</div>",
"_____no_output_____"
]
],
[
[
"voomnormalize_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00355')\nvoomnormalize_job_spec = voomnormalize_task.make_job_spec()\nvoomnormalize_job_spec.set_parameter(\"input.file\", \"\")\nvoomnormalize_job_spec.set_parameter(\"cls.file\", \"\")\nvoomnormalize_job_spec.set_parameter(\"output.file\", \"<input.file_basename>.preprocessed.gct\")\nvoomnormalize_job_spec.set_parameter(\"expression.value.filter.threshold\", \"1\")\nvoomnormalize_job_spec.set_parameter(\"job.memory\", \"2 Gb\")\nvoomnormalize_job_spec.set_parameter(\"job.walltime\", \"02:00:00\")\nvoomnormalize_job_spec.set_parameter(\"job.cpuCount\", \"1\")\ngenepattern.display(voomnormalize_task)",
"_____no_output_____"
]
],
[
[
"### 3.2 Perform differential gene expression analysis on transformed counts using ComparativeMarkerSelection",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">Create a new cell bellow this one and use the <strong>ComparativeMarkerSelection</strong> GenePattern module with the following parameters:\n\n<ul>\n\t<li><strong>input file</strong>: The output from the <strong>PreprocessReadCounts</strong> module (i.e., <strong>workshop_BRCA_filtered.preprocessed.gct</strong> if you used the suggested parameters in step 5.1 of this section).</li>\n\t<li><strong>cls file</strong>: The output from the <strong>`Load URL Into Notebook {}`</strong> UIBuilder cell (i.e., <strong>WP_0_BRCA_cp_40_samples.cls</strong> is you used the suggested parameters in section 1).</li>\n\t<li>The rest of the parameters can be left as default.</li>\n</ul>\n</div>\n",
"_____no_output_____"
]
],
[
[
"comparativemarkerselection_task = gp.GPTask(genepattern.session.get(0), 'urn:lsid:broad.mit.edu:cancer.software.genepattern.module.analysis:00044')\ncomparativemarkerselection_job_spec = comparativemarkerselection_task.make_job_spec()\ncomparativemarkerselection_job_spec.set_parameter(\"input.file\", \"\")\ncomparativemarkerselection_job_spec.set_parameter(\"cls.file\", \"\")\ncomparativemarkerselection_job_spec.set_parameter(\"confounding.variable.cls.file\", \"\")\ncomparativemarkerselection_job_spec.set_parameter(\"test.direction\", \"2\")\ncomparativemarkerselection_job_spec.set_parameter(\"test.statistic\", \"0\")\ncomparativemarkerselection_job_spec.set_parameter(\"min.std\", \"\")\ncomparativemarkerselection_job_spec.set_parameter(\"number.of.permutations\", \"10000\")\ncomparativemarkerselection_job_spec.set_parameter(\"log.transformed.data\", \"false\")\ncomparativemarkerselection_job_spec.set_parameter(\"complete\", \"false\")\ncomparativemarkerselection_job_spec.set_parameter(\"balanced\", \"false\")\ncomparativemarkerselection_job_spec.set_parameter(\"random.seed\", \"779948241\")\ncomparativemarkerselection_job_spec.set_parameter(\"smooth.p.values\", \"true\")\ncomparativemarkerselection_job_spec.set_parameter(\"phenotype.test\", \"one versus all\")\ncomparativemarkerselection_job_spec.set_parameter(\"output.filename\", \"<input.file_basename>.comp.marker.odf\")\ncomparativemarkerselection_job_spec.set_parameter(\"job.memory\", \"2 Gb\")\ncomparativemarkerselection_job_spec.set_parameter(\"job.walltime\", \"02:00:00\")\ncomparativemarkerselection_job_spec.set_parameter(\"job.cpuCount\", \"1\")\ngenepattern.display(comparativemarkerselection_task)",
"_____no_output_____"
]
],
[
[
"### 3.3 Extract top 100 genes and save to a dictionary for later use.",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n<p>We will parse the ODF file from the <strong>ComparativeMarkerSelection</strong> you just ran (using the <strong>preprocessed</strong> data) and extract only the information that we want (i.e., the name and rank of the 100 most differentially expressed genes) and save that list in a python dictionary named <strong><code>transformed_normal_results</code></strong>. To do so, we are using the GenePattern UI Builder in the next cell. Feel free to check out the underlying code if you want. Set the input parameters as follows:</p>\n\n<ul>\n <li>Choose the <em>workshop_BRCA_filtered.preprocessed.comp.marker.odf</em> from the dropdown menu of the cell below.</li>\n\tThe action just before this one should have populated this parameter with a long URL similar to this one: <em>https://gp-beta-ami.genepattern.org/gp/jobResults/1234567/workshop_BRCA_filtered.preprocessed.comp.marker.odf</em>.\n\t<li><strong>number of genes</strong>: 100 (default)</li>\n\t<li><strong>verbose</strong>: true (default)</li>\n\t<li>Confirm that the <strong>output variable</strong> is is set to be <strong>CMS_results</strong></li>\n\t<li>Run the cell.</li>\n</ul>\n\n<em>The Pandas warning can be ignored</em>\n\n</div>\n",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings('ignore')\n\nfrom gp.data import ODF\n#transformed_normal_results = custom_CMSreader(**INSERT_THE_VALUE_YOU_COPIED_IN_THE_PREVIOUS_CELL_HERE**, number_of_genes=100)\ndef custom_CMSreader(GP_ODF:'URL of the ODF output from ComparativeMarkerSelection',\n number_of_genes:'How many genes to extract'=100, \n verbose:'Whether or not to print the gene list'=True):\n \n # Get the job number and name of the file\n temp = GP_ODF.split('/')\n # programatically access that job to open the file\n GP_ODF = eval('ODF(job'+temp[5]+'.get_file(\"'+temp[6]+'\"))')\n# GP_ODF = GP_ODF.dataframe\n GP_ODF = GP_ODF.loc[GP_ODF['Rank']<=number_of_genes,['Rank','Feature']]\n GP_ODF.set_index('Feature', inplace=True)\n to_return = GP_ODF.to_dict()['Rank']\n if verbose:\n # For display only\n genes_list = sorted([[v,k] for k,v in to_return.items()])\n for gene in genes_list:\n print(\"{}: {}\".format(gene[0],gene[1]))\n return to_return\n# naive_normal_results = custom_CMSreader(**INSERT_THE_VALUE_YOU_COPIED_IN_THE_PREVIOUS_CELL_HERE**, number_of_genes=100)\n\ngenepattern.GPUIBuilder(custom_CMSreader, \n name=\"Extract Ranked Gene List From ODF GenePattern Variable\",\n parameters={\n \"GP_ODF\": { \"name\": \"Comparative Marker Selection ODF filename\",\n \"type\": \"file\",\n \"kinds\": [\"Comparative Marker Selection\", \"odf\",\"ODF\"],\n \"description\":\"The output from ComparativeMarkerSelection\",\n },\n \"number_of_genes\": {\"default\":25},\n \"output_var\": {\"default\":\"CMS_results\"},\n })",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Section 4: Comparing Results of the Negative Binomial and Transformed Normal Models",
"_____no_output_____"
],
[
"In this short section we compare the dictionaries which contain the lists of top differentially expressed genes and their ranks. Use the following parameters: \n- **reference list**: DESeq2_results\n- **new list**: CMS_results",
"_____no_output_____"
]
],
[
[
"from scipy.stats import kendalltau as kTau\n\ndef compare_dictionaries(reference_list, new_list):\n# print(reference_list)\n# print(new_list)\n # compute how many of the genes in ref are in new\n common = (list(set(reference_list) & set(new_list)))\n \n ref_common = [reference_list[temp] for temp in common]\n new_common = [new_list[temp] for temp in common]\n kendall_tau = kTau(ref_common,new_common)[0] # Kendall's Tau measures the similarity between to ordered lists.\n \n metric = 0.5*(1+kendall_tau) * len(common)/len(reference_list) # Penalizing low overlap between lists.\n \n \n print(\"There is a {:.3g}% overlap.\".format(100*len(common)/len(reference_list)),\n \"Custom metric is {:.3g} (simmilarity metric range [0,1])\".format(metric),\n \"Kendall's tau is {:.3g}\".format(kendall_tau))\n print(\"---\")\n print(f'Here are the ranks of the new the {len(ref_common)} genes which overlap:') \n print(ref_common)\n print(new_common)\n# print( len(common)/len(reference_list))\n return metric\n\n# compare_dictionaries(negative_binomial_results, naive_normal_results)\n\ngenepattern.GPUIBuilder(compare_dictionaries, name=\"Compare Two Ranked Lists\",\n parameters={\n \"output_var\":{\"default\":\"temp_result_1\",\n \"hide\":True}\n })\n# compare_dictionaries(negative_binomial_results, transformed_normal_results)",
"_____no_output_____"
]
],
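[
[
"# A tiny self-contained sketch of the Kendall's tau statistic used inside compare_dictionaries:\n# identical orderings give +1, reversed orderings give -1, so 0.5*(1+tau) maps it onto [0, 1].\nfrom scipy.stats import kendalltau\nsame_order = kendalltau([1, 2, 3, 4], [1, 2, 3, 4])[0]\nreversed_order = kendalltau([1, 2, 3, 4], [4, 3, 2, 1])[0]\nprint(same_order, reversed_order) # expected: 1.0 -1.0",
"_____no_output_____"
]
],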
[
[
"---",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a86097aedea443ba3303629866b276b37fce3bb
| 13,304 |
ipynb
|
Jupyter Notebook
|
formation/02-Machine learning avec Pandas et Scikit-learn/2.4.1 Forets d'arbre aleatoire.ipynb
|
hdossot/formation-machine-learning
|
5733511a7770bd9f528ff4005f747496db7c014c
|
[
"Apache-2.0"
] | 1 |
2020-11-23T14:32:05.000Z
|
2020-11-23T14:32:05.000Z
|
formation/02-Machine learning avec Pandas et Scikit-learn/2.4.1 Forets d'arbre aleatoire.ipynb
|
hdossot/formation-machine-learning
|
5733511a7770bd9f528ff4005f747496db7c014c
|
[
"Apache-2.0"
] | null | null | null |
formation/02-Machine learning avec Pandas et Scikit-learn/2.4.1 Forets d'arbre aleatoire.ipynb
|
hdossot/formation-machine-learning
|
5733511a7770bd9f528ff4005f747496db7c014c
|
[
"Apache-2.0"
] | null | null | null | 33.766497 | 428 | 0.623271 |
[
[
[
"## Apprentissage supervisé: Forêts d'arbres aléatoires (Random Forests)",
"_____no_output_____"
],
[
"Intéressons nous maintenant à un des algorithmes les plus popualires de l'état de l'art. Cet algorithme est non-paramétrique et porte le nom de **forêts d'arbres aléatoires**",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nplt.style.use('seaborn')",
"_____no_output_____"
]
],
[
[
"## A l'origine des forêts d'arbres aléatoires : l'arbre de décision",
"_____no_output_____"
],
[
"Les fôrets aléatoires appartiennent à la famille des méthodes d'**apprentissage ensembliste** et sont construits à partir d'**arbres de décision**. Pour cette raison, nous allons tout d'abord présenter les arbres de décisions.\n\nUn arbre de décision est une manière très intuitive de résoudre un problème de classification. On se contente de définir un certain nombre de questions qui vont permetre d'identifier la classe adéquate.",
"_____no_output_____"
]
],
[
[
"import fig_code.figures as fig\nfig.plot_example_decision_tree()",
"_____no_output_____"
]
],
[
[
"Le découpage binaire des données est rapide a mettre en oeuvre. La difficulté va résider dans la manière de déterminer quelle est la \"bonne\" question à poser.\n\nC'est tout l'enjeu de la phase d'apprentissage d'un arbre de décision. L'algorithme va déterminer, au vue d'un ensemble de données, quelle question (ou découpage...) va apporter le plus gros gain d'information.\n\n### Construction d'un arbre de décision\nVoici un exemple de classifieur à partir d'un arbre de décision en utlisiant la libraire scikit-learn.\n\nNous commencons par définir un jeu de données en 2 dimensions avec des labels associés:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=1.0)\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow');",
"_____no_output_____"
]
],
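[
[
"For instance, this is (roughly) how the quality of one candidate question/split could be scored: a sketch only, using the Gini impurity on the `X`, `y` blobs created above; scikit-learn's actual criterion and split search are more involved.\n\n```python\nimport numpy as np\n\ndef gini(labels):\n    # Gini impurity of a set of class labels\n    _, counts = np.unique(labels, return_counts=True)\n    proba = counts / counts.sum()\n    return 1.0 - np.sum(proba ** 2)\n\ndef split_gain(X, y, feature, threshold):\n    # Impurity decrease obtained by asking: is feature <= threshold?\n    left, right = y[X[:, feature] <= threshold], y[X[:, feature] > threshold]\n    n = len(y)\n    children = (len(left) / n) * gini(left) + (len(right) / n) * gini(right)\n    return gini(y) - children\n\n# Score one candidate question on the data above\nsplit_gain(X, y, feature=0, threshold=X[:, 0].mean())\n```",
"_____no_output_____"
]
],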
[
[
"Nous avons précemment défini une fonction qui va faciliter la visualisation du processus :",
"_____no_output_____"
]
],
[
[
"from fig_code.figures import visualize_tree, plot_tree_interactive",
"_____no_output_____"
]
],
[
[
"On utilise maintenant le module ``interact`` dans Ipython pour visualiser les découpages effectués par l'arbre de décision en fonction de la profondeur de l'arbre (*depth* en anglais), i.e. le nombre de questions que l'arbre peut poser :",
"_____no_output_____"
]
],
[
[
"plot_tree_interactive(X, y);",
"_____no_output_____"
]
],
[
[
"**Remarque** : à chaque augmentation de la profondeur de l'arbre, chaque branche est découpée en deux **à l'expection** des branches qui contiennent uniquement des points d'une unique classe.\n\nL'arbre de décision est une méthode de classification non paramétrique facile à mettre en oeuvre\n\n**Question: Observez-vous des problèmes avec cette modélisation ?**",
"_____no_output_____"
],
[
"## Arbre de décision et sur-apprentissage\n\nUn problème avec les arbres de décision est qu'ils ont tendance à **sur-apprendre** rapidement sur les données d'apprentissage. En effet, ils ont une forte tendance à capturer le bruit présent dans les données plutôt que la vraie distribution recherchée. Par exemple, si on construit 2 arbres à partir de sous ensembles des données définies précédemment, on obtient les deux classifieurs suivants:",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\nclf = DecisionTreeClassifier()\n\nplt.figure()\nvisualize_tree(clf, X[:200], y[:200], boundaries=False)\nplt.figure()\nvisualize_tree(clf, X[-200:], y[-200:], boundaries=False)",
"_____no_output_____"
]
],
[
[
"Les 2 classifieurs ont des différences notables si on regarde en détails les figures. Lorsque'on va prédire la classe d'un nouveau point, cela risque d'être impacté par le bruit dans les données plus que par le signal que l'on cherche à modéliser.\n",
"_____no_output_____"
],
[
"## Prédictions ensemblistes: Forêts aléatoires\nUne façon de limiter ce problème de sur-apprentissage est d'utiliser un **modèle ensembliste**: un méta-estimateur qui va aggréger les predictions de mutliples estimateurs (qui peuvent sur-apprendre individuellement). Grace à des propriétés mathématiques plutôt magiques (!), la prédiction aggrégée de ces estimateurs s'avère plus performante et robuste que les performances des estimateurs considérés individuellement.\n\nUne des méthodes ensemblistes les plus célèbres est la méthode des **forêts d'arbres aléatoires** qui aggrège les prédictions de multiples arbres de décision.\n\nIl y a beaucoup de littératures scientifiques pour déterminer la façon de rendre aléatoires ces arbres mais donner un exemple concret, voici un ensemble de modèle qui utilise seulement un sous échantillon des données :\n",
"_____no_output_____"
]
],
[
[
"X, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=2.0)\n\ndef fit_randomized_tree(random_state=0):\n rng = np.random.RandomState(random_state)\n i = np.arange(len(y))\n rng.shuffle(i)\n \n clf = DecisionTreeClassifier(max_depth=5)\n #on utilise seulement 250 exemples choisis aléatoirement sur les 300 disponibles\n visualize_tree(clf, X[i[:250]], y[i[:250]], boundaries=False,\n xlim=(X[:, 0].min(), X[:, 0].max()),\n ylim=(X[:, 1].min(), X[:, 1].max()))\n \nfrom ipywidgets import interact\ninteract(fit_randomized_tree, random_state=(0, 100));",
"_____no_output_____"
]
],
[
[
"On peut observer dans le détail les changements du modèle en fonction du tirage aléatoire des données qu'il utilise pour l'apprentissage, alors que la distribution des données est figée !\n\nLa forêt aléatoire va faire des caluls similaires, mais va aggréger l'ensemble des arbres aléatoires générés pour construire une unique prédiction:",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=0)\nvisualize_tree(clf, X, y, boundaries=False);",
"_____no_output_____"
],
[
"from sklearn.svm import SVC \nclf = SVC(kernel='linear')\nclf.fit(X, y)\nvisualize_tree(clf,X, y, boundaries=False)\nplt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n s=200, facecolors='none');",
"_____no_output_____"
]
],
[
[
"En moyennant 100 arbres de décision \"perturbés\" aléatoirement, nous obtenons une prédiction aggrégé qui modélise avec plus de précision nos données.\n\n*(Remarque: ci dessus, notre perturbation aléatoire est effectué en echantillonant de manière aléatoire nos données... Les arbres aléatoires utilisent des techniques plus sophistiquées, pour plus de détails voir la [documentation de scikit-learn](http://scikit-learn.org/stable/modules/ensemble.html#forest)*)",
"_____no_output_____"
],
[
"## Exemple 1 : utilisation en régression\nOn considère pour cet exemple un cas d'tétude différent des exemples précédent de classification. Les arbres aléatoires peuvent être également utilisés sur des problèmes de régression (c'est à dire la prédiction d'une variable continue plutôt que discrète).\n\nL'estimateur que nous utiliserons est le suivant: ``sklearn.ensemble.RandomForestRegressor``.\n\nNous présentons rapidement comment il peut être utilisé:",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestRegressor\n# On commence par créer un jeu de données d'apprentissage\nx = 10 * np.random.rand(100)\n\ndef model(x, sigma=0.):\n # sigma controle le bruit \n # sigma=0 pour avoir une distribution \"parfaite\"\n oscillation_rapide = np.sin(5 * x)\n oscillation_lente = np.sin(0.5 * x)\n bruit = sigma * np.random.randn(len(x))\n\n return oscillation_rapide + oscillation_lente + bruit\n\ny = model(x)\nplt.figure(figsize=(10,5))\nplt.scatter(x, y);",
"_____no_output_____"
],
[
"xfit = np.linspace(0, 10, num=1000)\n# yfit contient les prédictions de la forêt aléatoire à partir des données bruités\nyfit = RandomForestRegressor(100).fit(x[:, None], y).predict(xfit[:, None])\n# ytrue contient les valuers du modèle qui génèrent nos données avec un bruit nul \nytrue = model(xfit, sigma=0) \n\nplt.figure(figsize=(10,5))\n#plt.scatter(x, y)\nplt.plot(xfit, yfit, '-r', label = 'forêt aléatoire')\nplt.plot(xfit, ytrue, '-g', alpha=0.5, label = 'distribution non bruitée')\nplt.legend();",
"_____no_output_____"
]
],
[
[
"On observe que les forêts aléatoires, de manière non-paramétrique, arrivent à estimer une distribution avec de mutliples périodicités sans aucune intervention de notre part pour définir ces périodicités !",
"_____no_output_____"
],
[
"---\n**Hyperparamètres**\n\nUtilisons l'outil d'aide inclus dans Ipython pour explorer la classe ``RandomForestRegressor``. Pour cela on rajoute un ? à la fin de l'objet",
"_____no_output_____"
]
],
[
[
"RandomForestRegressor?",
"_____no_output_____"
]
],
[
[
"Quelle sont les options disponibles pour le ``RandomForestRegressor``?\nQuelle influence sur le graphique précédent si on modifie ces valeurs?\n\nCes paramètres de classe sont appelés les **hyperparamètres** d'un modèle.",
"_____no_output_____"
],
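[
"A small sketch of that exploration (it assumes the variables `x`, `y`, `xfit` and `ytrue` from the cells above are still in memory):\n\n```python\nfrom sklearn.ensemble import RandomForestRegressor\nimport matplotlib.pyplot as plt\n\n# Compare a few hyperparameter settings on the same data\nfor n_estimators, max_depth in [(5, 2), (50, 5), (200, None)]:\n    reg = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)\n    yfit = reg.fit(x[:, None], y).predict(xfit[:, None])\n    plt.plot(xfit, yfit, label=f'n_estimators={n_estimators}, max_depth={max_depth}')\n\nplt.plot(xfit, ytrue, '-g', alpha=0.5, label='noise-free distribution')\nplt.legend();\n```",
"_____no_output_____"
],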
[
"---",
"_____no_output_____"
]
],
[
[
"# Exercice : proposer un modèle de régression à vecteur support permettant de modéliser le phénomène\nfrom sklearn.svm import SVR\n\nSVMreg = SVR().fit(x[:, None], y)\n\nyfit_SVM = SVMreg.predict(xfit[:, None])\n\nplt.figure(figsize=(10,5))\nplt.scatter(x, y)\nplt.plot(xfit, yfit_SVM, '-r', label = 'SVM')\nplt.plot(xfit, ytrue, '-g', alpha=0.5, label = 'distribution non bruitée')\nplt.legend();",
"_____no_output_____"
],
[
"SVR?",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
4a860a8e2eddc25163470318a9e83ded28e5e207
| 3,861 |
ipynb
|
Jupyter Notebook
|
tutorials/.ipynb_checkpoints/0 - Retrieve and parse EEG-checkpoint.ipynb
|
wmvanvliet/dyconnmap
|
15a830a5755ce198a33b245b18927c494c767a60
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/.ipynb_checkpoints/0 - Retrieve and parse EEG-checkpoint.ipynb
|
wmvanvliet/dyconnmap
|
15a830a5755ce198a33b245b18927c494c767a60
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/.ipynb_checkpoints/0 - Retrieve and parse EEG-checkpoint.ipynb
|
wmvanvliet/dyconnmap
|
15a830a5755ce198a33b245b18927c494c767a60
|
[
"BSD-3-Clause"
] | null | null | null | 23.981366 | 109 | 0.526807 |
[
[
[
"# Retrieve\n\nYou have to download manually the EEG data from https://physionet.org/content/eegmmidb",
"_____no_output_____"
],
[
"# Parse\n\nGo through all subjects from the dataset, read the EDF files and store them into NumPy arrays.\n\n**Notes**\n\n* You have to download the dataset yourself, and modify the `edf_dir` variable.\n\n* In some subjects, we drop the last 170 samples, to make sure equal number of samples across subjects.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pyedflib\nimport os",
"_____no_output_____"
],
[
"eyes_open = np.zeros((109, 64, 9600))\neyes_closed = np.zeros((109, 64, 9600))",
"_____no_output_____"
]
],
[
[
"Define the directory where dataset is located ",
"_____no_output_____"
]
],
[
[
"edf_dir = '/opt/Temp/physionet.nlm.nih.gov/pn4/eegmmidb/'",
"_____no_output_____"
]
],
[
[
"### Process the baseline file for \"eyes open\" ",
"_____no_output_____"
]
],
[
[
"for sub_id in range(0, 109):\n subj_prefix = \"S{0:03}\".format(sub_id + 1)\n subj_dir = \"{0}/{1}\".format(edf_dir, subj_prefix)\n baseline_eyes_open = \"{0}/{1}R01\".format(subj_dir, subj_prefix)\n \n f = pyedflib.EdfReader(baseline_eyes_open + \".edf\")\n a = f.read_annotation()\n n = f.signals_in_file\n signal_labels = f.getSignalLabels()\n\n for chan in np.arange(n):\n eyes_open[sub_id, chan, :] = f.readSignal(chan)[0:9600]",
"_____no_output_____"
]
],
[
[
"### Process the baseline file for \"eyes closed\" ",
"_____no_output_____"
]
],
[
[
"for sub_id in range(0, 109):\n subj_prefix = \"S{0:03}\".format(sub_id + 1)\n subj_dir = \"{0}/{1}\".format(edf_dir, subj_prefix)\n baseline_eyes_closed = \"{0}/{1}R02\".format(subj_dir, subj_prefix)\n \n f = pyedflib.EdfReader(baseline_eyes_closed + \".edf\")\n a = f.read_annotation() #baseline_eyes_open + \".edf.event\")\n n = f.signals_in_file\n signal_labels = f.getSignalLabels()\n\n for chan in np.arange(n):\n eyes_closed[sub_id, chan, :] = f.readSignal(chan)[0:9600]",
"_____no_output_____"
]
],
[
[
"### Store files ",
"_____no_output_____"
]
],
[
[
"if not os.path.exists(\"data/\"):\n os.makedirs(\"data/\")\n\nnp.save('data/eyes_opened.npy', eyes_open)\nnp.save('data/eyes_closed.npy', eyes_closed)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a86117703dfb801182b1dd675ef6d8a93968a58
| 154,755 |
ipynb
|
Jupyter Notebook
|
doc/tutorials/.ipynb_checkpoints/TestScratchAssayTutorial-checkpoint.ipynb
|
jmsgrogan/PyChaste
|
48a9863d2c941c71e47ecb72e917b477ba5c1413
|
[
"FTL"
] | 6 |
2017-02-04T16:10:53.000Z
|
2021-07-01T08:03:16.000Z
|
doc/tutorials/.ipynb_checkpoints/TestScratchAssayTutorial-checkpoint.ipynb
|
jmsgrogan/PyChaste
|
48a9863d2c941c71e47ecb72e917b477ba5c1413
|
[
"FTL"
] | 6 |
2017-06-22T08:50:41.000Z
|
2019-12-15T20:17:29.000Z
|
doc/tutorials/.ipynb_checkpoints/TestScratchAssayTutorial-checkpoint.ipynb
|
jmsgrogan/PyChaste
|
48a9863d2c941c71e47ecb72e917b477ba5c1413
|
[
"FTL"
] | 3 |
2017-05-15T21:33:58.000Z
|
2019-10-27T21:43:07.000Z
| 352.517084 | 62,603 | 0.917075 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a86184615fd1d58cdc1288d7a7dc477b521e0b4
| 7,396 |
ipynb
|
Jupyter Notebook
|
clipping.ipynb
|
euribates/Jupyter-Intro
|
a199655436cc4ccd41ec22398a1c5212c541f24b
|
[
"MIT"
] | null | null | null |
clipping.ipynb
|
euribates/Jupyter-Intro
|
a199655436cc4ccd41ec22398a1c5212c541f24b
|
[
"MIT"
] | null | null | null |
clipping.ipynb
|
euribates/Jupyter-Intro
|
a199655436cc4ccd41ec22398a1c5212c541f24b
|
[
"MIT"
] | null | null | null | 37.165829 | 107 | 0.562061 |
[
[
[
"### Clipping\n\nEn un juego a menudo tenemos la necesidad de dibujar solo en una pate \nde la pantalla. Por ejemplo, en un juego de estrategia, al estilo\ndel [Command and Conquer](https://es.wikipedia.org/wiki/Command_%26_Conquer), podemos\nquerer dividir la pantalla en dos. Una parte superior grande donse se muestra un mapa\ny un panel inferior, más pequeño, donde se muestre información de nuestras tropas,\ndel estado de municiones, etc... Obviamente, no queremos que al dibujar el mapa\npinte cosas enciam del panel de estado, ni al contrario. \n\nPara solucionar esto se puede usar una característica de las superficies de pygame\nque es al _clipping area_ o área de recorte. Cada superficie tiene una área de recorte, \nun rectángulo (`un objeto de la clase `pygame.Rect`) que puede estar activa a no. Si \ndefinimos y activamos el area, todas las operaciones de\ndibujo se verán limitadas al rectángulo del área, dejando el resto de la superficie \nintacta.\n\nPara definir un area de recorte,se usa el método `set_clip` de las superficies, pasandole\ncomo parámetro un objeto de tipo `Rect`. Se puede obtener el aárea definida en cualquier\nmomento llamanda a `get_clip`. \n\nEl siguiente codigo muestra una simulacion de un juego de estrategia con la configuración \nexplicada antes. La primera llamada a `set_clip` define como área sobre la que se puede pintar\nla parte superior, correspondiente al mapa (definido en el rectángulo `map_area`, y la\nsegunda limita el área utilizable a el area de información:\n\nOften when you are building a screen for a game, you will want to draw only to a portion of the\ndisplay. For instance, in a strategy Command & Conquer–like game, you might have the top of\nthe screen as a scrollable map, and below it a panel that displays troop information. But when\nyou start to draw the troop images to the screen, you don’t want to have them draw over the\ninformation panel. To solve this problem, surfaces have a clipping area, which is a rectangle\nthat defines what part of the screen can be drawn to. To set the clipping area, call the set_clip\nmethod of a surface object with a Rect-style object. You can also retrieve the current clipping\nregion by calling get_clip.\nThe following snippet shows how we might use clipping to construct the screen for a strat-\negy game. The first call to clip sets the region so that the call to draw_map will only be able to\ndraw onto the top half of the screen. The second call to set_clip sets the clipping area to the\nremaining portion of the screen:\nscreen.set_clip(0, 0, 640, 300)\ndraw_map()\nscreen.set_clip(0, 300, 640, 180)\ndraw_panel()",
"_____no_output_____"
]
],
[
[
"import pygame\nimport random\n\nSIZE = WIDTH, HEIGHT = 800, 600\nFPS = 60\nBLACK = (0, 0, 0)\nGRAY = (128, 128, 128)\nCYAN = (0, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\n\nclass Soldier:\n \n def __init__(self, x=None, y=None):\n self.x = x or random.randrange(WIDTH)\n self.y = y or random.randrange(HEIGHT)\n \n def update(self):\n self.x += random.choice([-2, -1, -1, 0, 0, 0, 1, 1, 2])\n self.y += random.choice([-2, -1, -1, 0, 0, 0, 1, 1, 2])\n \nclass Troop(Soldier):\n \n def draw(self, canvas):\n pygame.draw.circle(canvas, CYAN, (self.x, self.y), 5)\n \n def is_enemy(self):\n return False\n\nclass Enemy(Soldier):\n \n def draw(self, canvas):\n r = pygame.Rect(self.x-5, self.y-5, 11, 11)\n pygame.draw.rect(canvas, RED, r)\n \n def is_enemy(self):\n return True\n \nmap_area = pygame.Rect((0, 0), (WIDTH, HEIGHT-40))\ninfo_area = pygame.Rect((0, HEIGHT-32), (WIDTH, 40))\n \npygame.init()\ntry:\n pygame.display.set_caption(\"Clippeng Demo\")\n screen = pygame.display.set_mode(SIZE, 0, 24)\n\n # Parte de inicialización del juego\n crowd = [Troop() for _ in range(10)] + [Enemy() for _ in range(12)]\n clock = pygame.time.Clock() \n in_game = True\n\n while in_game:\n # Obtener datos de entrada\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n in_game = False\n # Recalcular el estado del juego, en base al estado actual y a las entradas\n for soldier in crowd:\n soldier.update()\n # Representamos el nuevo estado\n screen.set_clip(map_area)\n screen.fill(BLACK)\n for soldier in crowd:\n soldier.draw(screen)\n screen.set_clip(info_area)\n screen.fill(GRAY)\n pos = 5\n for soldier in crowd: \n r = pygame.Rect((pos, 0), (4, HEIGHT))\n if soldier.is_enemy():\n pygame.draw.rect(screen, RED, r)\n else:\n pygame.draw.rect(screen, GREEN, r)\n pos += 5\n pygame.display.update()\n clock.tick(FPS)\nfinally:\n pygame.quit()",
"_____no_output_____"
]
],
[
[
"**Ejercicio**: Modificar el programa para que detecte los eventos de pulsar \nla tecla del ratón. Si en las coordenadas pulsadas hay un soldado (Ya \nsea nuestro o del enemigo), borrarlo. Para nuestros efectos, borrarlo es\nsimplemente eliminarlo de la lista `crowd`. Para eso se puede usar \nel método `remove(elem)` de las listas.",
"_____no_output_____"
]
],
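[
[
"One possible sketch of the exercise (an illustrative assumption, not the only solution): inside the main loop's event handling, react to `pygame.MOUSEBUTTONDOWN`, look for a soldier near the click position, and remove it from `crowd`.\n\n```python\nCLICK_RADIUS = 10\n\nfor event in pygame.event.get():\n    if event.type == pygame.QUIT:\n        in_game = False\n    elif event.type == pygame.MOUSEBUTTONDOWN:\n        mx, my = event.pos\n        # Iterate over a copy so we can remove safely while looping\n        for soldier in crowd[:]:\n            if (soldier.x - mx) ** 2 + (soldier.y - my) ** 2 <= CLICK_RADIUS ** 2:\n                crowd.remove(soldier)\n```",
"_____no_output_____"
]
],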
[
[
"l = [1, 2, 3, 4, 5, 6]\nprint(l)\nl.remove(4)\nprint(l)",
"[1, 2, 3, 4, 5, 6]\n[1, 2, 3, 5, 6]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a862e6c774a9a3ad11a18dd660ff1890640f4e4
| 15,459 |
ipynb
|
Jupyter Notebook
|
test/Models/pheno_pkg/test/f90/Ptq.ipynb
|
cyrillemidingoyi/PyCropML
|
b866cc17374424379142d9162af985c1f87c74b6
|
[
"MIT"
] | 5 |
2020-06-21T18:58:04.000Z
|
2022-01-29T21:32:28.000Z
|
test/Models/pheno_pkg/test/f90/Ptq.ipynb
|
cyrillemidingoyi/PyCropML
|
b866cc17374424379142d9162af985c1f87c74b6
|
[
"MIT"
] | 27 |
2018-12-04T15:35:44.000Z
|
2022-03-11T08:25:03.000Z
|
test/Models/pheno_pkg/test/f90/Ptq.ipynb
|
cyrillemidingoyi/PyCropML
|
b866cc17374424379142d9162af985c1f87c74b6
|
[
"MIT"
] | 7 |
2019-04-20T02:25:22.000Z
|
2021-11-04T07:52:35.000Z
| 45.201754 | 131 | 0.40326 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a862ee20a1ea16ecd9a501ed9610eba0baeafa9
| 19,963 |
ipynb
|
Jupyter Notebook
|
docs/examples/matrix-factorization-for-recommender-systems-part-2.ipynb
|
etiennekintzler/river
|
1fea6987e0caad3a3e174aa5a212adc7165963ed
|
[
"BSD-3-Clause"
] | null | null | null |
docs/examples/matrix-factorization-for-recommender-systems-part-2.ipynb
|
etiennekintzler/river
|
1fea6987e0caad3a3e174aa5a212adc7165963ed
|
[
"BSD-3-Clause"
] | null | null | null |
docs/examples/matrix-factorization-for-recommender-systems-part-2.ipynb
|
etiennekintzler/river
|
1fea6987e0caad3a3e174aa5a212adc7165963ed
|
[
"BSD-3-Clause"
] | null | null | null | 35.58467 | 585 | 0.580624 |
[
[
[
"# Matrix Factorization for Recommender Systems - Part 2",
"_____no_output_____"
],
[
"As seen in [Part 1](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-1), strength of [Matrix Factorization (MF)](https://en.wikipedia.org/wiki/Matrix_factorization_(recommender_systems)) lies in its ability to deal with sparse and high cardinality categorical variables. In this second tutorial we will have a look at Factorization Machines (FM) algorithm and study how it generalizes the power of MF.",
"_____no_output_____"
],
[
"**Table of contents of this tutorial series on matrix factorization for recommender systems:**\n\n- [Part 1 - Traditional Matrix Factorization methods for Recommender Systems](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-1)\n- [Part 2 - Factorization Machines and Field-aware Factorization Machines](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-2)\n- [Part 3 - Large scale learning and better predictive power with multiple pass learning](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-3)",
"_____no_output_____"
],
[
"## Factorization Machines",
"_____no_output_____"
],
[
"Steffen Rendel came up in 2010 with [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf), an algorithm able to handle any real valued feature vector, combining the advantages of general predictors with factorization models. It became quite popular in the field of online advertising, notably after winning several Kaggle competitions. The modeling technique starts with a linear regression to capture the effects of each variable individually:\n\n$$\n\\normalsize\n\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j}\n$$\n\nThen are added interaction terms to learn features relations. Instead of learning a single and specific weight per interaction (as in [polynomial regression](https://en.wikipedia.org/wiki/Polynomial_regression)), a set of latent factors is learnt per feature (as in MF). An interaction is calculated by multiplying involved features product with their latent vectors dot product. The degree of factorization — or model order — represents the maximum number of features per interaction considered. The model equation for a factorization machine of degree $d$ = 2 is defined as:\n\n$$\n\\normalsize\n\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\n$$\n\nWhere $\\normalsize \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle$ is the dot product of $j$ and $j'$ latent vectors:\n\n$$\n\\normalsize\n\\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{j, f} \\cdot \\mathbf{v}_{j', f}\n$$\n\nHigher-order FM will be covered in a following section, just note that factorization models express their power in sparse settings, which is also where higher-order interactions are hard to estimate.\n\nStrong emphasis must be placed on feature engineering as it allows FM to mimic most factorization models and significantly impact its performance. High cardinality categorical variables one hot encoding is the most frequent step before feeding the model with data. For more efficiency, `river` FM implementation considers string values as categorical variables and automatically one hot encode them. FM models have their own module [river.facto](https://online-ml.github.io/api/overview/#facto).",
"_____no_output_____"
],
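[
"To make the degree-2 equation above concrete, here is a small NumPy sketch of an FM prediction for a single dense sample (illustration only: the weights are random placeholders, and `river`'s `facto` module handles all of this internally and online):\n\n```python\nimport numpy as np\n\nrng = np.random.default_rng(0)\n\np, k = 6, 3                  # number of features, number of latent factors\nx = rng.random(p)            # one sample\nw0 = 0.1                     # intercept\nw = rng.normal(size=p)       # linear weights\nV = rng.normal(size=(p, k))  # one latent vector per feature\n\n# Pairwise term, using the O(p*k) identity:\n# sum_{j<j'} <v_j, v_j'> x_j x_j' = 0.5 * sum_f [(sum_j v_jf x_j)^2 - sum_j v_jf^2 x_j^2]\npairwise = 0.5 * np.sum((V.T @ x) ** 2 - (V.T ** 2) @ (x ** 2))\n\ny_hat = w0 + w @ x + pairwise\n```",
"_____no_output_____"
],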
[
" ## Mimic Biased Matrix Factorization (BiasedMF)",
"_____no_output_____"
],
[
"Let's start with a simple example where we want to reproduce the Biased Matrix Factorization model we trained in the previous tutorial. For a fair comparison with [Part 1 example](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-1/#biased-matrix-factorization-biasedmf), let's set the same evaluation framework:",
"_____no_output_____"
]
],
[
[
"from river import datasets\nfrom river import metrics\nfrom river.evaluate import progressive_val_score\n\ndef evaluate(model):\n X_y = datasets.MovieLens100K()\n metric = metrics.MAE() + metrics.RMSE()\n _ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)",
"_____no_output_____"
]
],
[
[
"In order to build an equivalent model we need to use the same hyper-parameters. As we can't replace FM intercept by the global running mean we won't be able to build the exact same model:",
"_____no_output_____"
]
],
[
[
"from river import compose\nfrom river import facto\nfrom river import meta\nfrom river import optim\nfrom river import stats\n\nfm_params = {\n 'n_factors': 10,\n 'weight_optimizer': optim.SGD(0.025),\n 'latent_optimizer': optim.SGD(0.05),\n 'sample_normalization': False,\n 'l1_weight': 0.,\n 'l2_weight': 0.,\n 'l1_latent': 0.,\n 'l2_latent': 0.,\n 'intercept': 3,\n 'intercept_lr': .01,\n 'weight_initializer': optim.initializers.Zeros(),\n 'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor |= facto.FMRegressor(**fm_params)\n\nmodel = meta.PredClipper(\n regressor=regressor,\n y_min=1,\n y_max=5\n)\n\nevaluate(model)",
"[25,000] MAE: 0.761778, RMSE: 0.960803 – 0:00:04 – 1.15 MB\n[50,000] MAE: 0.751986, RMSE: 0.949941 – 0:00:09 – 1.34 MB\n[75,000] MAE: 0.750044, RMSE: 0.948911 – 0:00:13 – 1.56 MB\n[100,000] MAE: 0.748609, RMSE: 0.947994 – 0:00:17 – 1.75 MB\n"
]
],
[
[
"Both MAE are very close to each other (0.7486 vs 0.7485) showing that we almost reproduced [reco.BiasedMF](https://online-ml.github.io/api/reco/BiasedMF/) algorithm. The cost is a naturally slower running time as FM implementation offers more flexibility.",
"_____no_output_____"
],
[
"## Feature engineering for FM models",
"_____no_output_____"
],
[
"Let's study the basics of how to properly encode data for FM models. We are going to keep using MovieLens 100K as it provides various feature types:",
"_____no_output_____"
]
],
[
[
"import json\n\nfor x, y in datasets.MovieLens100K():\n print(f'x = {json.dumps(x, indent=4)}\\ny = {y}')\n break",
"x = {\n \"user\": \"259\",\n \"item\": \"255\",\n \"timestamp\": 874731910000000000,\n \"title\": \"My Best Friend's Wedding (1997)\",\n \"release_date\": 866764800000000000,\n \"genres\": \"comedy, romance\",\n \"age\": 21.0,\n \"gender\": \"M\",\n \"occupation\": \"student\",\n \"zip_code\": \"48823\"\n}\ny = 4.0\n"
]
],
[
[
"The features we are going to add to our model don't improve its predictive power. Nevertheless, they are useful to illustrate different methods of data encoding:",
"_____no_output_____"
],
[
"1. Set-categorical variables\n\nWe have seen that categorical variables are one hot encoded automatically if set to strings, in the other hand, set-categorical variables must be encoded explicitly by the user. A good way of doing so is to assign them a value of $1/m$, where $m$ is the number of elements of the sample set. It gives the feature a constant \"weight\" across all samples preserving model's stability. Let's create a routine to encode movies genres this way:",
"_____no_output_____"
]
],
[
[
"def split_genres(x):\n genres = x['genres'].split(', ')\n return {f'genre_{genre}': 1 / len(genres) for genre in genres}",
"_____no_output_____"
]
],
[
[
"2. Numerical variables\n\nIn practice, transforming numerical features into categorical ones works better in most cases. Feature binning is the natural way, but finding good bins is sometimes more an art than a science. Let's encode users age with something simple:",
"_____no_output_____"
]
],
[
[
"def bin_age(x):\n if x['age'] <= 18:\n return {'age_0-18': 1}\n elif x['age'] <= 32:\n return {'age_19-32': 1}\n elif x['age'] < 55:\n return {'age_33-54': 1}\n else:\n return {'age_55-100': 1}",
"_____no_output_____"
]
],
[
[
"Let's put everything together:",
"_____no_output_____"
]
],
[
[
"fm_params = {\n 'n_factors': 14,\n 'weight_optimizer': optim.SGD(0.01),\n 'latent_optimizer': optim.SGD(0.025),\n 'intercept': 3,\n 'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n compose.Select('genres') |\n compose.FuncTransformer(split_genres)\n)\nregressor += (\n compose.Select('age') |\n compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FMRegressor(**fm_params)\n\nmodel = meta.PredClipper(\n regressor=regressor,\n y_min=1,\n y_max=5\n)\n\nevaluate(model)",
"[25,000] MAE: 0.759838, RMSE: 0.961281 – 0:00:10 – 1.42 MB\n[50,000] MAE: 0.751307, RMSE: 0.951391 – 0:00:21 – 1.66 MB\n[75,000] MAE: 0.750361, RMSE: 0.951393 – 0:00:32 – 1.93 MB\n[100,000] MAE: 0.749994, RMSE: 0.951435 – 0:00:42 – 2.18 MB\n"
]
],
[
[
"Note that using more variables involves factorizing a larger latent space, then increasing the number of latent factors $k$ often helps capturing more information.\n\nSome other feature engineering tips from [3 idiots' winning solution](https://www.kaggle.com/c/criteo-display-ad-challenge/discussion/10555) for Kaggle [Criteo display ads](https://www.kaggle.com/c/criteo-display-ad-challenge) competition in 2014:\n\n- Infrequent modalities often bring noise and little information, transforming them into a special tag can help\n- In some cases, sample-wise normalization seems to make the optimization problem easier to be solved",
"_____no_output_____"
],
[
"## Higher-Order Factorization Machines (HOFM)",
"_____no_output_____"
],
[
"The model equation generalized to any order $d \\geq 2$ is defined as:\n\n$$\n\\normalsize\n\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right)\n$$",
"_____no_output_____"
]
],
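[
[
"As an illustration of what the degree-3 term adds, here is a naive NumPy sketch of the third-order interaction sum for one sample (random placeholder weights; `facto.HOFMRegressor` below does this for you, and far more efficiently):\n\n```python\nimport numpy as np\nfrom itertools import combinations\n\nrng = np.random.default_rng(0)\n\np, k3 = 5, 2                   # features, latent factors for the order-3 term\nx = rng.random(p)\nV3 = rng.normal(size=(p, k3))  # latent vectors dedicated to triple interactions\n\n# Naive sum over all triples j1 < j2 < j3\norder3 = sum(\n    x[a] * x[b] * x[c] * np.sum(V3[a] * V3[b] * V3[c])\n    for a, b, c in combinations(range(p), 3)\n)\n```",
"_____no_output_____"
]
],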
[
[
"hofm_params = {\n 'degree': 3,\n 'n_factors': 12,\n 'weight_optimizer': optim.SGD(0.01),\n 'latent_optimizer': optim.SGD(0.025),\n 'intercept': 3,\n 'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n compose.Select('genres') |\n compose.FuncTransformer(split_genres)\n)\nregressor += (\n compose.Select('age') |\n compose.FuncTransformer(bin_age)\n)\nregressor |= facto.HOFMRegressor(**hofm_params)\n\nmodel = meta.PredClipper(\n regressor=regressor,\n y_min=1,\n y_max=5\n)\n\nevaluate(model)",
"[25,000] MAE: 0.761297, RMSE: 0.962054 – 0:00:55 – 2.59 MB\n[50,000] MAE: 0.751865, RMSE: 0.951499 – 0:01:55 – 3.05 MB\n[75,000] MAE: 0.750853, RMSE: 0.951526 – 0:02:52 – 3.56 MB\n[100,000] MAE: 0.750607, RMSE: 0.951982 – 0:03:51 – 4.03 MB\n"
]
],
[
[
"As said previously, high-order interactions are often hard to estimate due to too much sparsity, that's why we won't spend too much time here.",
"_____no_output_____"
],
[
"## Field-aware Factorization Machines (FFM)",
"_____no_output_____"
],
[
"[Field-aware variant of FM (FFM)](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) improved the original method by adding the notion of \"*fields*\". A \"*field*\" is a group of features that belong to a specific domain (e.g. the \"*users*\" field, the \"*items*\" field, or the \"*movie genres*\" field).\n\nFFM restricts itself to pairwise interactions and factorizes separated latent spaces — one per combination of fields (e.g. users/items, users/movie genres, or items/movie genres) — instead of a common one shared by all fields. Therefore, each feature has one latent vector per field it can interact with — so that it can learn the specific effect with each different field.\n\nThe model equation is defined by:\n\n$$\n\\normalsize\n\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_{j}} \\rangle x_{j} x_{j'}\n$$\n\nWhere $f_j$ and $f_{j'}$ are the fields corresponding to $j$ and $j'$ features, respectively.",
"_____no_output_____"
]
],
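[
[
"A tiny sketch of how the field-aware pairwise term differs from plain FM (random placeholder weights and a toy field layout; in practice `facto.FFMRegressor` manages the per-field latent vectors for you):\n\n```python\nimport numpy as np\nfrom itertools import combinations\n\nrng = np.random.default_rng(0)\n\nfields = ['user', 'item', 'genre', 'genre']  # field of each feature\nfield_index = {f: i for i, f in enumerate(sorted(set(fields)))}\np, k = len(fields), 2\nx = rng.random(p)\n\n# One latent vector per (feature, field it can interact with)\nV = rng.normal(size=(p, len(field_index), k))\n\npairwise = sum(\n    (V[j, field_index[fields[j2]]] @ V[j2, field_index[fields[j]]]) * x[j] * x[j2]\n    for j, j2 in combinations(range(p), 2)\n)\n```",
"_____no_output_____"
]
],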
[
[
"ffm_params = {\n 'n_factors': 8,\n 'weight_optimizer': optim.SGD(0.01),\n 'latent_optimizer': optim.SGD(0.025),\n 'intercept': 3,\n 'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n compose.Select('genres') |\n compose.FuncTransformer(split_genres)\n)\nregressor += (\n compose.Select('age') |\n compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FFMRegressor(**ffm_params)\n\nmodel = meta.PredClipper(\n regressor=regressor,\n y_min=1,\n y_max=5\n)\n\nevaluate(model)",
"[25,000] MAE: 0.757718, RMSE: 0.958158 – 0:00:15 – 3 MB\n[50,000] MAE: 0.749502, RMSE: 0.948065 – 0:00:32 – 3.54 MB\n[75,000] MAE: 0.749275, RMSE: 0.948918 – 0:00:48 – 4.14 MB\n[100,000] MAE: 0.749542, RMSE: 0.949769 – 0:01:04 – 4.69 MB\n"
]
],
[
[
"Note that FFM usually needs to learn smaller number of latent factors $k$ than FM as each latent vector only deals with one field.",
"_____no_output_____"
],
[
"## Field-weighted Factorization Machines (FwFM)",
"_____no_output_____"
],
[
"[Field-weighted Factorization Machines (FwFM)](https://arxiv.org/abs/1806.03514) address FFM memory issues caused by its large number of parameters, which is in the order of *feature number* times *field number*. As FFM, FwFM is an extension of FM restricted to pairwise interactions, but instead of factorizing separated latent spaces, it learns a specific weight $r_{f_j, f_{j'}}$ for each field combination modelling the interaction strength.\n\nThe model equation is defined as:\n\n$$\n\\normalsize\n\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\n$$",
"_____no_output_____"
]
],
[
[
"fwfm_params = {\n 'n_factors': 10,\n 'weight_optimizer': optim.SGD(0.01),\n 'latent_optimizer': optim.SGD(0.025),\n 'intercept': 3,\n 'seed': 73,\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n compose.Select('genres') |\n compose.FuncTransformer(split_genres)\n)\nregressor += (\n compose.Select('age') |\n compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FwFMRegressor(**fwfm_params)\n\nmodel = meta.PredClipper(\n regressor=regressor,\n y_min=1,\n y_max=5\n)\n\nevaluate(model)",
"[25,000] MAE: 0.761539, RMSE: 0.962241 – 0:00:21 – 1.17 MB\n[50,000] MAE: 0.754089, RMSE: 0.953181 – 0:00:43 – 1.36 MB\n[75,000] MAE: 0.754806, RMSE: 0.954979 – 0:01:05 – 1.58 MB\n[100,000] MAE: 0.755404, RMSE: 0.95604 – 0:01:27 – 1.77 MB\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
4a86326522385483587a5da00894654cd0820092
| 193,714 |
ipynb
|
Jupyter Notebook
|
nbody.ipynb
|
AlbertoRosado1/desihigh
|
a9f9d78c0c7605a704ae8008633f7aa9f641f7e4
|
[
"BSD-3-Clause"
] | null | null | null |
nbody.ipynb
|
AlbertoRosado1/desihigh
|
a9f9d78c0c7605a704ae8008633f7aa9f641f7e4
|
[
"BSD-3-Clause"
] | null | null | null |
nbody.ipynb
|
AlbertoRosado1/desihigh
|
a9f9d78c0c7605a704ae8008633f7aa9f641f7e4
|
[
"BSD-3-Clause"
] | null | null | null | 232.54982 | 41,537 | 0.896156 |
[
[
[
"<a href=\"https://colab.research.google.com/github/AlbertoRosado1/desihigh/blob/main/nbody.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
],
[
"from IPython.display import clear_output\nfrom time import sleep",
"_____no_output_____"
],
[
"import sys\nsys.path.append('/content/drive/MyDrive/desihigh')",
"_____no_output_____"
],
[
"import time\nimport astropy\nimport itertools\nimport matplotlib\n\nimport numpy as np\nimport pylab as pl\nimport matplotlib.pyplot as plt\nimport astropy.units as u\n\nfrom astropy.cosmology import FlatLambdaCDM\nfrom IPython.display import YouTubeVideo\nfrom tools.flops import flops",
"_____no_output_____"
],
[
"#%matplotlib notebook\n%matplotlib inline\n\nplt.style.use('dark_background')",
"_____no_output_____"
]
],
[
[
"# DESI and the fastest supercomputer in the West",
"_____no_output_____"
],
[
"Understanding _how_ the 30 million galaxies surveyed by DESI actually formed in the Universe is hard, really hard. So hard in fact that DESI scientists exploit [Summit](https://www.olcf.ornl.gov/summit/), the world's fastest supercomputer[<sup>1</sup>](#Footnotes) at Oak Ridge National Lab to calculate how the distribution of galaxies should look depending on the type of Dark Energy: ",
"_____no_output_____"
],
[
"<img src=\"https://github.com/AlbertoRosado1/desihigh/blob/main/desihigh/images/summit.jpg?raw=1\" alt=\"Drawing\" style=\"width: 800px;\"/>",
"_____no_output_____"
],
[
"Costing a cool 325 million dollars to build, Summit is capable of calculating addition and multiplication operations $1.486 \\times 10^{17}$ times a second, equivalent to $1.486 \\times 10^{11}$ MegaFlops or MFLOPS. For comparison, let's see what Binder provides (you'll need some patience, maybe leave this to later): ",
"_____no_output_____"
]
],
[
[
"_ = flops()",
"_____no_output_____"
]
],
[
[
"So Summit is at least a billion times more powerful! With Summit, we can resolve the finest details of the distribution of _dark matter_ that all galaxies trace:",
"_____no_output_____"
],
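[
"A rough back-of-the-envelope check of that claim (the Binder number below is a placeholder assumption: plug in whatever the `flops()` call above reported for you):\n\n```python\nsummit_mflops = 1.486e11  # Summit, from the text above\nbinder_mflops = 1.0e2     # placeholder; replace with your measured value\n\nprint(f'Summit is roughly {summit_mflops / binder_mflops:.0e} times faster')  # ~1e9 with this placeholder\n```",
"_____no_output_____"
],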
[
"<img src=\"https://github.com/AlbertoRosado1/desihigh/blob/main/desihigh/images/abacus.png?raw=1\" alt=\"Drawing\" style=\"width: 600px;\"/>",
"_____no_output_____"
],
[
"Here the brightest regions signify the densest regions of dark matter in the Universe, in which we expect to find more galaxies (for some zoom-ins, [click here](https://lgarrison.github.io/halos/)). The video below shows that we have observed this predicted structure in the distribution of real galaxies observed with experiments prior to DESI:",
"_____no_output_____"
]
],
[
[
"YouTubeVideo('08LBltePDZw', width=800, height=400)",
"_____no_output_____"
]
],
[
[
"[Dark matter](https://en.wikipedia.org/wiki/Dark_matter#:~:text=Dark%20matter%20is%20a%20form,%E2%88%9227%20kg%2Fm3.) is a pervasive element in our Universe, making up 25% of the total (energy) density. With Dark Energy and the common atom (\"baryonic matter\") making up the remainder. We know next to nothing about Dark Matter, beyond its gravitational attraction of other matter and light in the Universe.",
"_____no_output_____"
],
[
"Fortunately, the equations that describe the evolution of dark matter, rather than the [complex formation of galaxies](https://www.space.com/15680-galaxies.html), are relatively simple for the Universe in which we seem to live. All that is required is to track the gravitational attraction of dark matter particles (on an expanding stage).",
"_____no_output_____"
],
[
"We can predict the evolution of dark matter by sampling the gravitational force, velocity and position with a set of (fictitious) particles that each represent a 'clump' of dark matter with some total mass. Of course, this means we cannot solve for the distribution of dark matter within these clump sized regions, but just the distribution amongst clumps that leads to the structure you can see above. With Summit, the smallest clump we can resolve is not far from the combined mass of all the stars in the [Milky Way](https://www.nasa.gov/feature/goddard/2019/what-does-the-milky-way-weigh-hubble-and-gaia-investigate):",
"_____no_output_____"
],
[
"<img src=\"https://github.com/AlbertoRosado1/desihigh/blob/main/desihigh/images/MilkyWay.jpg?raw=1\" alt=\"Drawing\" style=\"width: 1000px;\"/>",
"_____no_output_____"
],
[
"To start, we'll initially postition a set of clumps at random positions within a 3D cube and give them zero initial velocities. Velocities will be generated at subsequent times as the ($1/r^2$) gravitational attraction of a particle to all others causes a net acceleration.",
"_____no_output_____"
]
],
[
[
"def init_dof(npt=1):\n # Create a set of particles at random positions in a box, which will soon predict the distribution of dark matter \n # as we see above.\n xs = np.random.uniform(0., 1., npt)\n ys = np.random.uniform(0., 1., npt)\n zs = np.random.uniform(0., 1., npt)\n\n pos = np.vstack((xs,ys,zs)).T\n vel = np.zeros_like(pos)\n\n return pos, vel",
"_____no_output_____"
],
[
"pos[0][0] = 1\npos[0]\n\nls = []\n\nfor i in ls:\n for j in \n pos[i]",
"_____no_output_____"
],
[
"mass_r = np.random.uniform(0., 1., npt)\nmass_r",
"_____no_output_____"
]
],
[
[
"The gravitational force experienced by each dark matter particle is [Newton's](https://en.wikipedia.org/wiki/Isaac_Newton) $F = \\frac{GmM}{r^2} \\hat r$ that you may be familiar with. We just need to do a thorough job on the book keeping required for to calculate the total force experienced by one particle due to all others:",
"_____no_output_____"
]
],
[
[
"def g_at_pos(pos, particles, mass, epsilon=1.0, doimages=True):\n # eqn. (10) of http://www.skiesanduniverses.org/resources/KlypinNbody.pdf.\n # Here epsilon is a fudge factor to stop a blow up of the gravitational force at zero distance.\n \n delta_r = particles - pos\n result = mass * np.sum(delta_r / (delta_r**2. + epsilon**2.)**(3./2.), axis=0)\n \n # If 'pos' is one of the particles, then technically we've including the \"self-force\"\n # But such a pos will have delta_r = 0, and thus contribute nothing to the total force, as it should!\n\n if doimages:\n # Our simulation assumes periodic boundary conditions, so for the acceleration of each particle, there's a \n # corresponding acceleration due to the image of the particle produced by applying periodic shifts to its\n # position.\n shift = np.array([-1, 0, 1])\n images = []\n\n for triple in itertools.product(shift, repeat=3):\n images.append(triple)\n \n images.remove((0, 0, 0))\n \n images = np.array(images)\n \n for image in images:\n delta_r_displaced = delta_r + image\n result += mass * np.sum(delta_r_displaced / (delta_r_displaced**2. + epsilon**2.)**(3./2.), axis=0)\n\n return result",
"_____no_output_____"
]
],
[
[
"In a remarkable experiment in 1941, Erik Holmberg used the fact that the brightness of light decays with distance at the same ($1/r^2$) rate as gravity. To calculate the total force on a 'particle' in his 'simulation', Holmberg placed a lightbulb at the position of each particle and calculated the effective force on a given particle by measuring the total brightness at each point! The figure below illustrates this idea.\n\nTry running the following cell a few times! You'll get a different random layout of \"lightbulbs\" each time.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1, 1, figsize=(5,5), dpi=150)\n\nxmin, xmax, ymin, ymax = (0., 1., 0., 1.)\n\nNgrid = 100\nxx, yy = np.meshgrid(np.linspace(xmin, xmax, Ngrid), np.linspace(ymin, ymax, Ngrid))\nepsilon = 0.1\n\nweights = np.zeros_like(xx)\n\nnpt = 10\npos, vel = init_dof(npt=npt)\n\nfor par in pos:\n weights += 1. / ((xx - par[0])**2 + (yy - par[1])**2 + epsilon**2.)\n\nax.imshow(weights, extent=(xmin, xmax, ymin, ymax), cmap=plt.cm.afmhot, alpha=1., origin='lower')\n\nax.scatter(pos[:,0], pos[:,1], color='k', edgecolor='w')\nax.tick_params(labelbottom=False, labelleft=False, left=False, bottom=False)\nax.set_title(f\"Holmberg's Lightbulb Experiment with $N={npt}$ Bulbs\")\n\nax.set_xlim(0., 1.)\nax.set_ylim(0., 1.)\nfig.tight_layout()",
"_____no_output_____"
]
],
[
[
"This work was the original concept of gravitational 'N-body' simulations that are described here. It's almost criminal that only 118 authors have referenced this groundbreaking idea!",
"_____no_output_____"
],
[
"<img src=\"https://github.com/AlbertoRosado1/desihigh/blob/main/desihigh/images/Holmberg.png?raw=1\" alt=\"Drawing\" style=\"width: 800px;\"/>",
"_____no_output_____"
],
[
"Today, given the mini supercomputers we often have at our fingertips, we can determine the final distribution of dark matter more accurately with computers than light bulbs. By evolving an initial homogeneous distribution (a nearly uniform distribution of dark matter clumps, as the universe produces in the Big Bang), we can accurately predict the locations of galaxies (the places where the biggest dark matter clumps form).\n\nTo do this, we just need to calculate the acceleration on each particle at a series of time steps and update the velocity and position accordingly according to the acceleration that particle experiences. You'll be familiar with this as the sensation you feel as a car turns a corner, or speeds up. ",
"_____no_output_____"
]
],
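[
[
"Stripped of the plotting, the update rule described above boils down to a few lines per time step (a schematic sketch only; the full loop below also applies a drag term and keeps the figure up to date):\n\n```python\ndef step(pos, vel, dt, mass):\n    # One integration step: compute all accelerations first, then move everything\n    acc = np.array([g_at_pos(p_i, pos, mass) for p_i in pos])\n    pos = (pos + dt * vel) % 1.   # move with the current velocities (periodic box)\n    vel = vel + dt * acc          # update the velocities from the accelerations\n    return pos, vel\n```",
"_____no_output_____"
]
],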
[
[
"# We'll sample the equations of motion in discrete time steps. \ndt = 5e-4\nnsteps = 500\ntimesteps = np.linspace(0, (nsteps)*dt, nsteps, endpoint=False)\n\n# Number and mass of particles\nnpt = 2\nmass = 0.25\n\n# Whether to draw arrows for the acceleration and velocity\ndraw_acc = True\ndraw_vel = False\n\n# A small drag term to simulate the real drag dark matter particles experience due to the expanding universe\ndrag = 1e-2",
"_____no_output_____"
]
],
[
[
"Now we simply have to run the simulation!",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1,1, figsize=(5,5), dpi=150)\n\nax.tick_params(labelbottom=False, labelleft=False, left=False, bottom=False)\n\n# Reinitialise particles.\npos, vel = init_dof(npt=npt)\n\n# A helper function to make a nice-looking legend for our arrows\n# from https://stackoverflow.com/a/22349717\ndef make_legend_arrow(legend, orig_handle,\n xdescent, ydescent,\n width, height, fontsize):\n p = matplotlib.patches.FancyArrow(0, 0.5*height, width, 0, length_includes_head=True, head_width=0.75*height)\n return p\n\nfor index_in_timestep, time in enumerate(timesteps):\n ax.clear()\n\n ax.set_title(f'N-body simulation with $N={npt}$ particles')\n step_label = ax.text(0.03, .97, f'Step {index_in_timestep}',\n transform=ax.transAxes, verticalalignment='top', c='k',\n bbox=dict(color='w', alpha=0.8))\n\n dvel = np.zeros_like(vel)\n dpos = np.zeros_like(pos)\n acc = np.zeros_like(pos)\n\n for index_in_particle in range(npt):\n acc[index_in_particle] = g_at_pos(pos[index_in_particle], pos, mass, epsilon=0.1)\n\n # Update velocities. \n dvel[index_in_particle] = dt * acc[index_in_particle]\n\n # Update positions.\n dpos[index_in_particle] = dt * vel[index_in_particle] \n\n vel += dvel - drag*vel\n pos += dpos\n\n # Our simulation has periodic boundaries, if you go off one side you come back on the other!\n pos = pos % 1.\n\n ax.scatter(pos[:,0], pos[:,1], color='darkorange', edgecolor='w')\n\n # Draw arrows representing the velocity and acceleration vectors, if requested\n # The code here is a little verbose to get nice-looking arrows in the legend\n arrows = []\n if draw_vel:\n ax.quiver(pos[:,0], pos[:,1], vel[:,0], vel[:,1], color='w', zorder=0)\n arrows += [matplotlib.patches.FancyArrow(0,0, 0.5, 0.6, label='Velocity', color='w')]\n\n if draw_acc:\n ax.quiver(pos[:,0], pos[:,1], acc[:,0], acc[:,1], color='darkorange', zorder=0)\n arrows += [matplotlib.patches.FancyArrow(0,0, 0.5, 0.6, label='Accel', color='darkorange')]\n\n if draw_vel or draw_acc:\n ax.legend(handles=arrows, handler_map={matplotlib.patches.FancyArrow:matplotlib.legend_handler.HandlerPatch(patch_func=make_legend_arrow)},\n facecolor='k', edgecolor='white', framealpha=0.8,\n loc='lower right')\n\n ax.set_xlim(0., 1.)\n ax.set_ylim(0., 1.)\n\n fig.canvas.draw()",
"_____no_output_____"
],
[
"# Reinitialise particles.\npos, vel = init_dof(npt=npt)\n# A helper function to make a nice-looking legend for our arrows\n# from https://stackoverflow.com/a/22349717\ndef make_legend_arrow(legend, orig_handle,\n xdescent, ydescent,\n width, height, fontsize):\n p = matplotlib.patches.FancyArrow(0, 0.5*height, width, 0, length_includes_head=True, head_width=0.75*height)\n return p\nfor index_in_timestep, time in enumerate(timesteps):\n clear_output(wait=True)\n fig, ax = plt.subplots(1,1, figsize=(5,5), dpi=150)\n ax.tick_params(labelbottom=False, labelleft=False, left=False, bottom=False)\n ax.clear()\n ax.set_title(f'N-body simulation with $N={npt}$ particles')\n step_label = ax.text(0.03, .97, f'Step {index_in_timestep}',\n transform=ax.transAxes, verticalalignment='top', c='k',\n bbox=dict(color='w', alpha=0.8))\n dvel = np.zeros_like(vel)\n dpos = np.zeros_like(pos)\n acc = np.zeros_like(pos)\n for index_in_particle in range(npt):\n acc[index_in_particle] = g_at_pos(pos[index_in_particle], pos, mass, epsilon=0.1,doimages=False)\n # Update velocities.\n dvel[index_in_particle] = dt * acc[index_in_particle]\n # Update positions.\n dpos[index_in_particle] = dt * vel[index_in_particle]\n vel += dvel - drag*vel\n pos += dpos\n # Our simulation has periodic boundaries, if you go off one side you come back on the other!\n pos = pos % 1.\n \n ax.scatter(pos[:,0], pos[:,1], color='darkorange', edgecolor='w')\n # Draw arrows representing the velocity and acceleration vectors, if requested\n # The code here is a little verbose to get nice-looking arrows in the legend\n arrows = []\n if draw_vel:\n ax.quiver(pos[:,0], pos[:,1], vel[:,0], vel[:,1], color='w', zorder=0)\n arrows += [matplotlib.patches.FancyArrow(0,0, 0.5, 0.6, label='Velocity', color='w')]\n if draw_acc:\n ax.quiver(pos[:,0], pos[:,1], acc[:,0], acc[:,1], color='darkorange', zorder=0)\n arrows += [matplotlib.patches.FancyArrow(0,0, 0.5, 0.6, label='Accel', color='darkorange')]\n if draw_vel or draw_acc:\n ax.legend(handles=arrows, handler_map={matplotlib.patches.FancyArrow:matplotlib.legend_handler.HandlerPatch(patch_func=make_legend_arrow)},\n facecolor='k', edgecolor='white', framealpha=0.8,\n loc='lower right')\n #if index_in_timestep%10 == 1:\n ax.set_xlim(0., 1.)\n ax.set_ylim(0., 1.)\n #fig.canvas.draw()\n plt.show(fig)\n sleep(0.001)\n #temp_points.remove()\n ",
"_____no_output_____"
]
],
[
[
"Try playing around with the settings! More than 100 particles won't run very smoothly, however.\n\nWith the default settings, you'll find that the particles tend fall into one or two clumps before too long. This is due to the drag that we put in. The drag simulates the effect that the expanding universe has on real dark matter particles, which is to slow them down and cause them to group together. These clumps are known as *halos*, and form \"galactic nurseries\" where gas can gather to form new stars and galaxies.",
"_____no_output_____"
],
[
"Now, when DESI scientists run huge simulations, such as those run on Summit, a total of ~48 _trillion_ particles are solved for. Don't try this here! But the results are really quite extraordinary (skip to 6 mins 45 seconds if you're impatient to see the result!):",
"_____no_output_____"
]
],
[
[
"YouTubeVideo('LQMLFryA_7k', width=800, height=400)",
"_____no_output_____"
]
],
[
[
"With this great success, comes added responsibility. Global computing infrastructure (the data centers that power the internet, the cloud, and supercomputers like Summit), while fantastic for DESI and science, now has a [carbon footprint](https://en.wikipedia.org/wiki/Carbon_footprint) comparable to the [world airline industry](https://www.hpcwire.com/solution_content/ibm/cross-industry/five-tips-to-reduce-your-hpc-carbon-footprint/) and consumes the same amount of electicity as the country of Iran (82 million people!). \n\nMore worrying still, this will soon grow from 2% of the World's energy consumption, to ~30%. An extraordinary rate! ",
"_____no_output_____"
],
[
"Fortunately, Summit is also among the greenest of supercomputers. It's 14.7 GFlops/watt means a #1 ranking on the [global Green 500 list 2019](https://www.top500.org/lists/green500/2019/06/).",
"_____no_output_____"
],
[
"<img src=\"https://github.com/AlbertoRosado1/desihigh/blob/main/desihigh/images/Sequoia.jpg?raw=1\" alt=\"Drawing\" style=\"width: 800px;\"/>",
"_____no_output_____"
],
[
"### Footnote\n1. Well, at least Summit *was* the world's fastest supercomputer while DESI scientists were using it in early 2020. Japan's Fugaku supercomputer overtook Summit in June 2020. The world's 500 fastest supercomputers are tracked on the \"Top500\" website here: https://www.top500.org/lists/top500/2020/06/. Better luck next year, USA!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"raw"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a86492ba55a7f9ffae2a73a69825f7fa580706c
| 335,408 |
ipynb
|
Jupyter Notebook
|
bonus content/feature engineering text data/Feature Engineering Text Data - Advanced Deep Learning Strategies.ipynb
|
sweet-addy/practical-machine-learning-with-python
|
533d061f5bd8f8b7542cd40c6735c0fe079c8846
|
[
"Apache-2.0"
] | null | null | null |
bonus content/feature engineering text data/Feature Engineering Text Data - Advanced Deep Learning Strategies.ipynb
|
sweet-addy/practical-machine-learning-with-python
|
533d061f5bd8f8b7542cd40c6735c0fe079c8846
|
[
"Apache-2.0"
] | null | null | null |
bonus content/feature engineering text data/Feature Engineering Text Data - Advanced Deep Learning Strategies.ipynb
|
sweet-addy/practical-machine-learning-with-python
|
533d061f5bd8f8b7542cd40c6735c0fe079c8846
|
[
"Apache-2.0"
] | null | null | null | 104.946183 | 52,374 | 0.771565 |
[
[
[
"# Import necessary dependencies and settings",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport re\nimport nltk\nimport matplotlib.pyplot as plt\n\npd.options.display.max_colwidth = 200\n%matplotlib inline",
"_____no_output_____"
],
[
"# Sample corpus of text documents",
"_____no_output_____"
],
[
"corpus = ['The sky is blue and beautiful.',\n 'Love this blue and beautiful sky!',\n 'The quick brown fox jumps over the lazy dog.',\n \"A king's breakfast has sausages, ham, bacon, eggs, toast and beans\",\n 'I love green eggs, ham, sausages and bacon!',\n 'The brown fox is quick and the blue dog is lazy!',\n 'The sky is very blue and the sky is very beautiful today',\n 'The dog is lazy but the brown fox is quick!' \n]\nlabels = ['weather', 'weather', 'animals', 'food', 'food', 'animals', 'weather', 'animals']\n\ncorpus = np.array(corpus)\ncorpus_df = pd.DataFrame({'Document': corpus, \n 'Category': labels})\ncorpus_df = corpus_df[['Document', 'Category']]\ncorpus_df",
"_____no_output_____"
]
],
[
[
"# Simple text pre-processing",
"_____no_output_____"
]
],
[
[
"wpt = nltk.WordPunctTokenizer()\nstop_words = nltk.corpus.stopwords.words('english')\n\ndef normalize_document(doc):\n # lower case and remove special characters\\whitespaces\n doc = re.sub(r'[^a-zA-Z\\s]', '', doc, re.I|re.A)\n doc = doc.lower()\n doc = doc.strip()\n # tokenize document\n tokens = wpt.tokenize(doc)\n # filter stopwords out of document\n filtered_tokens = [token for token in tokens if token not in stop_words]\n # re-create document from filtered tokens\n doc = ' '.join(filtered_tokens)\n return doc\n\nnormalize_corpus = np.vectorize(normalize_document)",
"_____no_output_____"
],
[
"norm_corpus = normalize_corpus(corpus)\nnorm_corpus",
"_____no_output_____"
]
],
[
[
"# Word Embeddings",
"_____no_output_____"
],
[
"## Load up sample corpus - Bible",
"_____no_output_____"
]
],
[
[
"from nltk.corpus import gutenberg\nfrom string import punctuation\n\nbible = gutenberg.sents('bible-kjv.txt') \nremove_terms = punctuation + '0123456789'\n\nnorm_bible = [[word.lower() for word in sent if word not in remove_terms] for sent in bible]\nnorm_bible = [' '.join(tok_sent) for tok_sent in norm_bible]\nnorm_bible = filter(None, normalize_corpus(norm_bible))\nnorm_bible = [tok_sent for tok_sent in norm_bible if len(tok_sent.split()) > 2]\n\nprint('Total lines:', len(bible))\nprint('\\nSample line:', bible[10])\nprint('\\nProcessed line:', norm_bible[10])",
"Total lines: 30103\n\nSample line: ['1', ':', '6', 'And', 'God', 'said', ',', 'Let', 'there', 'be', 'a', 'firmament', 'in', 'the', 'midst', 'of', 'the', 'waters', ',', 'and', 'let', 'it', 'divide', 'the', 'waters', 'from', 'the', 'waters', '.']\n\nProcessed line: god said let firmament midst waters let divide waters waters\n"
]
],
[
[
"## Implementing a word2vec model using a CBOW (Continuous Bag of Words) neural network architecture",
"_____no_output_____"
],
[
"### Build Vocabulary",
"_____no_output_____"
]
],
[
[
"from keras.preprocessing import text\nfrom keras.utils import np_utils\nfrom keras.preprocessing import sequence\n\ntokenizer = text.Tokenizer()\ntokenizer.fit_on_texts(norm_bible)\nword2id = tokenizer.word_index\n\nword2id['PAD'] = 0\nid2word = {v:k for k, v in word2id.items()}\nwids = [[word2id[w] for w in text.text_to_word_sequence(doc)] for doc in norm_bible]\n\nvocab_size = len(word2id)\nembed_size = 100\nwindow_size = 2\n\nprint('Vocabulary Size:', vocab_size)\nprint('Vocabulary Sample:', list(word2id.items())[:10])",
"Vocabulary Size: 12425\nVocabulary Sample: [('perceived', 1460), ('flagon', 7287), ('gardener', 11641), ('named', 973), ('remain', 732), ('sticketh', 10622), ('abstinence', 11848), ('rufus', 8190), ('adversary', 2018), ('jehoiachin', 3189)]\n"
]
],
[
[
"### Build (context_words, target_word) pair generator",
"_____no_output_____"
]
],
[
[
"def generate_context_word_pairs(corpus, window_size, vocab_size):\n context_length = window_size*2\n for words in corpus:\n sentence_length = len(words)\n for index, word in enumerate(words):\n context_words = []\n label_word = [] \n start = index - window_size\n end = index + window_size + 1\n \n context_words.append([words[i] \n for i in range(start, end) \n if 0 <= i < sentence_length \n and i != index])\n label_word.append(word)\n\n x = sequence.pad_sequences(context_words, maxlen=context_length)\n y = np_utils.to_categorical(label_word, vocab_size)\n yield (x, y)",
"_____no_output_____"
],
[
"i = 0\nfor x, y in generate_context_word_pairs(corpus=wids, window_size=window_size, vocab_size=vocab_size):\n if 0 not in x[0]:\n print('Context (X):', [id2word[w] for w in x[0]], '-> Target (Y):', id2word[np.argwhere(y[0])[0][0]])\n \n if i == 10:\n break\n i += 1",
"Context (X): ['old', 'testament', 'james', 'bible'] -> Target (Y): king\nContext (X): ['first', 'book', 'called', 'genesis'] -> Target (Y): moses\nContext (X): ['beginning', 'god', 'heaven', 'earth'] -> Target (Y): created\nContext (X): ['earth', 'without', 'void', 'darkness'] -> Target (Y): form\nContext (X): ['without', 'form', 'darkness', 'upon'] -> Target (Y): void\nContext (X): ['form', 'void', 'upon', 'face'] -> Target (Y): darkness\nContext (X): ['void', 'darkness', 'face', 'deep'] -> Target (Y): upon\nContext (X): ['spirit', 'god', 'upon', 'face'] -> Target (Y): moved\nContext (X): ['god', 'moved', 'face', 'waters'] -> Target (Y): upon\nContext (X): ['god', 'said', 'light', 'light'] -> Target (Y): let\nContext (X): ['god', 'saw', 'good', 'god'] -> Target (Y): light\n"
]
],
[
[
"### Build CBOW Deep Network Model",
"_____no_output_____"
]
],
[
[
"import keras.backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, Lambda\n\ncbow = Sequential()\ncbow.add(Embedding(input_dim=vocab_size, output_dim=embed_size, input_length=window_size*2))\ncbow.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(embed_size,)))\ncbow.add(Dense(vocab_size, activation='softmax'))\n\ncbow.compile(loss='categorical_crossentropy', optimizer='rmsprop')\nprint(cbow.summary())",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_6 (Embedding) (None, 4, 100) 1242500 \n_________________________________________________________________\nlambda_2 (Lambda) (None, 100) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 12425) 1254925 \n=================================================================\nTotal params: 2,497,425\nTrainable params: 2,497,425\nNon-trainable params: 0\n_________________________________________________________________\nNone\n"
],
[
"from IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\n\nSVG(model_to_dot(cbow, show_shapes=True, show_layer_names=False, \n rankdir='TB').create(prog='dot', format='svg'))",
"_____no_output_____"
]
],
[
[
"### Train model for 5 epochs",
"_____no_output_____"
]
],
[
[
"for epoch in range(1, 6):\n loss = 0.\n i = 0\n for x, y in generate_context_word_pairs(corpus=wids, window_size=window_size, vocab_size=vocab_size):\n i += 1\n loss += cbow.train_on_batch(x, y)\n if i % 100000 == 0:\n print('Processed {} (context, word) pairs'.format(i))\n\n print('Epoch:', epoch, '\\tLoss:', loss)\n print()",
"Processed 100000 (context, word) pairs\nProcessed 200000 (context, word) pairs\nProcessed 300000 (context, word) pairs\nEpoch: 1 \tLoss: 4257900.60084\n\nProcessed 100000 (context, word) pairs\nProcessed 200000 (context, word) pairs\nProcessed 300000 (context, word) pairs\nEpoch: 2 \tLoss: 4256209.59646\n\nProcessed 100000 (context, word) pairs\nProcessed 200000 (context, word) pairs\nProcessed 300000 (context, word) pairs\nEpoch: 3 \tLoss: 4247990.90456\n\nProcessed 100000 (context, word) pairs\nProcessed 200000 (context, word) pairs\nProcessed 300000 (context, word) pairs\nEpoch: 4 \tLoss: 4225663.18927\n\nProcessed 100000 (context, word) pairs\nProcessed 200000 (context, word) pairs\nProcessed 300000 (context, word) pairs\nEpoch: 5 \tLoss: 4104501.48929\n\n"
]
],
[
[
"### Get word embeddings",
"_____no_output_____"
]
],
[
[
"weights = cbow.get_weights()[0]\nweights = weights[1:]\nprint(weights.shape)\n\npd.DataFrame(weights, index=list(id2word.values())[1:]).head()",
"(12424, 100)\n"
]
],
[
[
"### Build a distance matrix to view the most similar words (contextually)",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics.pairwise import euclidean_distances\n\n# compute pairwise distance matrix\ndistance_matrix = euclidean_distances(weights)\nprint(distance_matrix.shape)\n\n# view contextually similar words\nsimilar_words = {search_term: [id2word[idx] for idx in distance_matrix[word2id[search_term]-1].argsort()[1:6]+1] \n for search_term in ['god', 'jesus', 'noah', 'egypt', 'john', 'gospel', 'moses','famine']}\n\nsimilar_words",
"(12424, 12424)\n"
]
],
[
[
"## Implementing a word2vec model using a skip-gram neural network architecture",
"_____no_output_____"
],
[
"### Build Vocabulary",
"_____no_output_____"
]
],
[
[
"from keras.preprocessing import text\n\ntokenizer = text.Tokenizer()\ntokenizer.fit_on_texts(norm_bible)\n\nword2id = tokenizer.word_index\nid2word = {v:k for k, v in word2id.items()}\n\nvocab_size = len(word2id) + 1 \nembed_size = 100\n\nwids = [[word2id[w] for w in text.text_to_word_sequence(doc)] for doc in norm_bible]\nprint('Vocabulary Size:', vocab_size)\nprint('Vocabulary Sample:', list(word2id.items())[:10])",
"Vocabulary Size: 12425\nVocabulary Sample: [('perceived', 1460), ('flagon', 7287), ('gardener', 11641), ('named', 973), ('remain', 732), ('sticketh', 10622), ('abstinence', 11848), ('rufus', 8190), ('adversary', 2018), ('jehoiachin', 3189)]\n"
]
],
[
[
"### Build and View sample skip grams ((word1, word2) -> relevancy)",
"_____no_output_____"
]
],
[
[
"from keras.preprocessing.sequence import skipgrams\n\n# generate skip-grams\nskip_grams = [skipgrams(wid, vocabulary_size=vocab_size, window_size=10) for wid in wids]\n\n# view sample skip-grams\npairs, labels = skip_grams[0][0], skip_grams[0][1]\nfor i in range(10):\n print(\"({:s} ({:d}), {:s} ({:d})) -> {:d}\".format(\n id2word[pairs[i][0]], pairs[i][0], \n id2word[pairs[i][1]], pairs[i][1], \n labels[i]))",
"(james (1154), king (13)) -> 1\n(king (13), james (1154)) -> 1\n(james (1154), perform (1249)) -> 0\n(bible (5766), dismissed (6274)) -> 0\n(king (13), alter (5275)) -> 0\n(james (1154), bible (5766)) -> 1\n(king (13), bible (5766)) -> 1\n(bible (5766), king (13)) -> 1\n(king (13), compassion (1279)) -> 0\n(james (1154), foreskins (4844)) -> 0\n"
]
],
[
[
"### Build Skip-gram Deep Network Model",
"_____no_output_____"
]
],
[
[
"from keras.layers import Merge\nfrom keras.layers.core import Dense, Reshape\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import Sequential\n\nword_model = Sequential()\nword_model.add(Embedding(vocab_size, embed_size,\n embeddings_initializer=\"glorot_uniform\",\n input_length=1))\nword_model.add(Reshape((embed_size, )))\n\ncontext_model = Sequential()\ncontext_model.add(Embedding(vocab_size, embed_size,\n embeddings_initializer=\"glorot_uniform\",\n input_length=1))\ncontext_model.add(Reshape((embed_size,)))\n\nmodel = Sequential()\nmodel.add(Merge([word_model, context_model], mode=\"dot\"))\nmodel.add(Dense(1, kernel_initializer=\"glorot_uniform\", activation=\"sigmoid\"))\nmodel.compile(loss=\"mean_squared_error\", optimizer=\"rmsprop\")\nprint(model.summary())",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nmerge_3 (Merge) (None, 1) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 1) 2 \n=================================================================\nTotal params: 2,485,002\nTrainable params: 2,485,002\nNon-trainable params: 0\n_________________________________________________________________\nNone\n"
],
[
"from IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\n\nSVG(model_to_dot(model, show_shapes=True, show_layer_names=False, \n rankdir='TB').create(prog='dot', format='svg'))",
"_____no_output_____"
]
],
[
[
"### Train the model for 5 epochs",
"_____no_output_____"
]
],
[
[
"for epoch in range(1, 6):\n loss = 0\n for i, elem in enumerate(skip_grams):\n pair_first_elem = np.array(list(zip(*elem[0]))[0], dtype='int32')\n pair_second_elem = np.array(list(zip(*elem[0]))[1], dtype='int32')\n labels = np.array(elem[1], dtype='int32')\n X = [pair_first_elem, pair_second_elem]\n Y = labels\n if i % 10000 == 0:\n print('Processed {} (skip_first, skip_second, relevance) pairs'.format(i))\n loss += model.train_on_batch(X,Y) \n\n print('Epoch:', epoch, 'Loss:', loss)",
"Processed 0 (skip_first, skip_second, relevance) pairs\nProcessed 10000 (skip_first, skip_second, relevance) pairs\nProcessed 20000 (skip_first, skip_second, relevance) pairs\nEpoch: 1 Loss: 4529.63803683\nProcessed 0 (skip_first, skip_second, relevance) pairs\nProcessed 10000 (skip_first, skip_second, relevance) pairs\nProcessed 20000 (skip_first, skip_second, relevance) pairs\nEpoch: 2 Loss: 3750.71884749\nProcessed 0 (skip_first, skip_second, relevance) pairs\nProcessed 10000 (skip_first, skip_second, relevance) pairs\nProcessed 20000 (skip_first, skip_second, relevance) pairs\nEpoch: 3 Loss: 3752.47489296\nProcessed 0 (skip_first, skip_second, relevance) pairs\nProcessed 10000 (skip_first, skip_second, relevance) pairs\nProcessed 20000 (skip_first, skip_second, relevance) pairs\nEpoch: 4 Loss: 3793.9177565\nProcessed 0 (skip_first, skip_second, relevance) pairs\nProcessed 10000 (skip_first, skip_second, relevance) pairs\nProcessed 20000 (skip_first, skip_second, relevance) pairs\nEpoch: 5 Loss: 3716.07605051\n"
]
],
[
[
"### Get word embeddings",
"_____no_output_____"
]
],
[
[
"merge_layer = model.layers[0]\nword_model = merge_layer.layers[0]\nword_embed_layer = word_model.layers[0]\nweights = word_embed_layer.get_weights()[0][1:]\n\nprint(weights.shape)\npd.DataFrame(weights, index=id2word.values()).head()",
"(12424, 100)\n"
]
],
[
[
"### Build a distance matrix to view the most similar words (contextually)",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics.pairwise import euclidean_distances\n\ndistance_matrix = euclidean_distances(weights)\nprint(distance_matrix.shape)\n\nsimilar_words = {search_term: [id2word[idx] for idx in distance_matrix[word2id[search_term]-1].argsort()[1:6]+1] \n for search_term in ['god', 'jesus', 'noah', 'egypt', 'john', 'gospel', 'moses','famine']}\n\nsimilar_words",
"(12424, 12424)\n"
]
],
[
[
"### Visualize word embeddings",
"_____no_output_____"
]
],
[
[
"from sklearn.manifold import TSNE\n\nwords = sum([[k] + v for k, v in similar_words.items()], [])\nwords_ids = [word2id[w] for w in words]\nword_vectors = np.array([weights[idx] for idx in words_ids])\nprint('Total words:', len(words), '\\tWord Embedding shapes:', word_vectors.shape)\n\ntsne = TSNE(n_components=2, random_state=0, n_iter=10000, perplexity=3)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(word_vectors)\nlabels = words\n\nplt.figure(figsize=(14, 8))\nplt.scatter(T[:, 0], T[:, 1], c='steelblue', edgecolors='k')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')",
"Total words: 48 \tWord Embedding shapes: (48, 100)\n"
]
],
[
[
"## Leveraging gensim for building a word2vec model",
"_____no_output_____"
]
],
[
[
"from gensim.models import word2vec\n\n# tokenize sentences in corpus\nwpt = nltk.WordPunctTokenizer()\ntokenized_corpus = [wpt.tokenize(document) for document in norm_bible]\n\n# Set values for various parameters\nfeature_size = 100 # Word vector dimensionality \nwindow_context = 30 # Context window size \nmin_word_count = 1 # Minimum word count \nsample = 1e-3 # Downsample setting for frequent words\n\nw2v_model = word2vec.Word2Vec(tokenized_corpus, size=feature_size, \n window=window_context, min_count=min_word_count,\n sample=sample, iter=50)\n\n# view similar words based on gensim's model\nsimilar_words = {search_term: [item[0] for item in w2v_model.wv.most_similar([search_term], topn=5)]\n for search_term in ['god', 'jesus', 'noah', 'egypt', 'john', 'gospel', 'moses','famine']}\nsimilar_words",
"_____no_output_____"
]
],
[
[
"## Visualizing word embeddings",
"_____no_output_____"
]
],
[
[
"from sklearn.manifold import TSNE\n\nwords = sum([[k] + v for k, v in similar_words.items()], [])\nwvs = w2v_model.wv[words]\n\ntsne = TSNE(n_components=2, random_state=0, n_iter=10000, perplexity=2)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(wvs)\nlabels = words\n\nplt.figure(figsize=(14, 8))\nplt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')",
"_____no_output_____"
]
],
[
[
"## Applying the word2vec model on our sample corpus",
"_____no_output_____"
]
],
[
[
"wpt = nltk.WordPunctTokenizer()\ntokenized_corpus = [wpt.tokenize(document) for document in norm_corpus]\n\n# Set values for various parameters\nfeature_size = 10 # Word vector dimensionality \nwindow_context = 10 # Context window size \nmin_word_count = 1 # Minimum word count \nsample = 1e-3 # Downsample setting for frequent words\n\nw2v_model = word2vec.Word2Vec(tokenized_corpus, size=feature_size, \n window=window_context, min_count = min_word_count,\n sample=sample, iter=100)",
"_____no_output_____"
]
],
[
[
"## Visualize word embeddings",
"_____no_output_____"
]
],
[
[
"from sklearn.manifold import TSNE\n\nwords = w2v_model.wv.index2word\nwvs = w2v_model.wv[words]\n\ntsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=2)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(wvs)\nlabels = words\n\nplt.figure(figsize=(12, 6))\nplt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')",
"_____no_output_____"
]
],
[
[
"## Sample word embedding",
"_____no_output_____"
]
],
[
[
"w2v_model.wv['sky']",
"_____no_output_____"
]
],
[
[
"## Build framework for getting document level embeddings",
"_____no_output_____"
]
],
[
[
"def average_word_vectors(words, model, vocabulary, num_features):\n \n feature_vector = np.zeros((num_features,),dtype=\"float64\")\n nwords = 0.\n \n for word in words:\n if word in vocabulary: \n nwords = nwords + 1.\n feature_vector = np.add(feature_vector, model[word])\n \n if nwords:\n feature_vector = np.divide(feature_vector, nwords)\n \n return feature_vector\n \n \ndef averaged_word_vectorizer(corpus, model, num_features):\n vocabulary = set(model.wv.index2word)\n features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)\n for tokenized_sentence in corpus]\n return np.array(features)",
"_____no_output_____"
],
[
"w2v_feature_array = averaged_word_vectorizer(corpus=tokenized_corpus, model=w2v_model,\n num_features=feature_size)\npd.DataFrame(w2v_feature_array)",
"_____no_output_____"
]
],
[
[
"## Clustering with word embeddings",
"_____no_output_____"
]
],
[
[
"from sklearn.cluster import AffinityPropagation\n\nap = AffinityPropagation()\nap.fit(w2v_feature_array)\ncluster_labels = ap.labels_\ncluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])\npd.concat([corpus_df, cluster_labels], axis=1)",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=2, random_state=0)\npcs = pca.fit_transform(w2v_feature_array)\nlabels = ap.labels_\ncategories = list(corpus_df['Category'])\nplt.figure(figsize=(8, 6))\n\nfor i in range(len(labels)):\n label = labels[i]\n color = 'orange' if label == 0 else 'blue' if label == 1 else 'green'\n annotation_label = categories[i]\n x, y = pcs[i]\n plt.scatter(x, y, c=color, edgecolors='k')\n plt.annotate(annotation_label, xy=(x+1e-4, y+1e-3), xytext=(0, 0), textcoords='offset points')",
"_____no_output_____"
]
],
[
[
"## GloVe Embeddings with spaCy",
"_____no_output_____"
]
],
[
[
"# Use the following command to install spaCy\n> pip install -U spacy\n\nOR\n\n> conda install -c conda-forge spacy\n\n\n# Download the following language model and store it in disk\nhttps://github.com/explosion/spacy-models/releases/tag/en_vectors_web_lg-2.0.0\n\n\n# Link the same to spacy \n> python -m spacy link ./spacymodels/en_vectors_web_lg-2.0.0/en_vectors_web_lg en_vecs\n\n Linking successful\n ./spacymodels/en_vectors_web_lg-2.0.0/en_vectors_web_lg --> ./Anaconda3/lib/site-packages/spacy/data/en_vecs\n\n You can now load the model via spacy.load('en_vecs')",
"_____no_output_____"
]
],
[
[
"import spacy\n\nnlp = spacy.load('en_vecs')\n\ntotal_vectors = len(nlp.vocab.vectors)\nprint('Total word vectors:', total_vectors)",
"Total word vectors: 1070971\n"
]
],
[
[
"## Visualize GloVe word embeddings",
"_____no_output_____"
]
],
[
[
"unique_words = list(set([word for sublist in [doc.split() for doc in norm_corpus] for word in sublist]))\n\nword_glove_vectors = np.array([nlp(word).vector for word in unique_words])\npd.DataFrame(word_glove_vectors, index=unique_words)",
"_____no_output_____"
],
[
"from sklearn.manifold import TSNE\n\ntsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=3)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(word_glove_vectors)\nlabels = unique_words\n\nplt.figure(figsize=(12, 6))\nplt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')",
"_____no_output_____"
]
],
[
[
"## Cluster documents with GloVe Embeddings",
"_____no_output_____"
]
],
[
[
"doc_glove_vectors = np.array([nlp(str(doc)).vector for doc in norm_corpus])\n\nkm = KMeans(n_clusters=3, random_state=0)\nkm.fit_transform(doc_glove_vectors)\ncluster_labels = km.labels_\ncluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])\npd.concat([corpus_df, cluster_labels], axis=1)",
"_____no_output_____"
]
],
[
[
"# Leveraging gensim for building a FastText model",
"_____no_output_____"
]
],
[
[
"from gensim.models.fasttext import FastText\n\nwpt = nltk.WordPunctTokenizer()\ntokenized_corpus = [wpt.tokenize(document) for document in norm_bible]\n\n# Set values for various parameters\nfeature_size = 100 # Word vector dimensionality \nwindow_context = 50 # Context window size \nmin_word_count = 5 # Minimum word count \nsample = 1e-3 # Downsample setting for frequent words\n\n\nft_model = FastText(tokenized_corpus, size=feature_size, window=window_context, \n min_count=min_word_count,sample=sample, sg=1, iter=50)\n",
"_____no_output_____"
],
[
"# view similar words based on gensim's model\nsimilar_words = {search_term: [item[0] for item in ft_model.wv.most_similar([search_term], topn=5)]\n for search_term in ['god', 'jesus', 'noah', 'egypt', 'john', 'gospel', 'moses','famine']}\nsimilar_words",
"_____no_output_____"
],
[
"from sklearn.decomposition import PCA\n\nwords = sum([[k] + v for k, v in similar_words.items()], [])\nwvs = ft_model.wv[words]\n\npca = PCA(n_components=2)\nnp.set_printoptions(suppress=True)\nP = pca.fit_transform(wvs)\nlabels = words\n\nplt.figure(figsize=(18, 10))\nplt.scatter(P[:, 0], P[:, 1], c='lightgreen', edgecolors='g')\nfor label, x, y in zip(labels, P[:, 0], P[:, 1]):\n plt.annotate(label, xy=(x+0.06, y+0.03), xytext=(0, 0), textcoords='offset points')",
"_____no_output_____"
],
[
"ft_model.wv['jesus']",
"_____no_output_____"
],
[
"print(ft_model.wv.similarity(w1='god', w2='satan'))\nprint(ft_model.wv.similarity(w1='god', w2='jesus'))",
"0.333260876685\n0.698824900473\n"
],
[
"st1 = \"god jesus satan john\"\nprint('Odd one out for [',st1, ']:', ft_model.wv.doesnt_match(st1.split()))\n\nst2 = \"john peter james judas\"\nprint('Odd one out for [',st2, ']:', ft_model.wv.doesnt_match(st2.split()))",
"Odd one out for [ god jesus satan john ]: satan\nOdd one out for [ john peter james judas ]: judas\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a864cea9ee8bb3ed44f822595e423dc1e4a8604
| 4,616 |
ipynb
|
Jupyter Notebook
|
ft/FourierTransformation_A8A2DEE53A.ipynb
|
spatialaudio/signals-and-systems-exercises
|
d1dbeb5bce74abbd211f6888186556cbe46869f2
|
[
"CC-BY-4.0"
] | 6 |
2020-05-20T10:01:29.000Z
|
2021-08-05T17:48:25.000Z
|
ft/FourierTransformation_A8A2DEE53A.ipynb
|
spatialaudio/signals-and-systems-exercises
|
d1dbeb5bce74abbd211f6888186556cbe46869f2
|
[
"CC-BY-4.0"
] | 10 |
2021-06-23T19:36:40.000Z
|
2021-10-03T15:39:48.000Z
|
ft/FourierTransformation_A8A2DEE53A.ipynb
|
spatialaudio/signals-and-systems-exercises
|
d1dbeb5bce74abbd211f6888186556cbe46869f2
|
[
"CC-BY-4.0"
] | null | null | null | 34.192593 | 116 | 0.594021 |
[
[
[
"[Sascha Spors](https://orcid.org/0000-0001-7225-9992),\nProfessorship Signal Theory and Digital Signal Processing,\n[Institute of Communications Engineering (INT)](https://www.int.uni-rostock.de/),\nFaculty of Computer Science and Electrical Engineering (IEF),\n[University of Rostock, Germany](https://www.uni-rostock.de/en/)\n\n# Tutorial Signals and Systems (Signal- und Systemtheorie)\n\nSummer Semester 2021 (Bachelor Course #24015)\n\n- lecture: https://github.com/spatialaudio/signals-and-systems-lecture\n- tutorial: https://github.com/spatialaudio/signals-and-systems-exercises\n\nWIP...\nThe project is currently under heavy development while adding new material for the summer semester 2021\n\nFeel free to contact lecturer [[email protected]](https://orcid.org/0000-0002-3010-0294)\n\n## Fourier Series Right Time Shift <-> Phase Mod\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef my_sinc(x): # we rather use definition sinc(x) = sin(x)/x, thus:\n return np.sinc(x/np.pi)",
"_____no_output_____"
],
[
"Th_des = [1, 0.2]\nom = np.linspace(-100, 100, 1000)\nplt.figure(figsize=(10, 8))\nplt.subplot(2,1,1)\nfor idx, Th in enumerate(Th_des):\n A = 1/Th # such that sinc amplitude is always 1\n # Fourier transform for single rect pulse\n Xsinc = A*Th * my_sinc(om*Th/2) \n Xsinc_phase = Xsinc*np.exp(-1j*om*Th/2)\n plt.plot(om, Xsinc, 'C7', lw=1) \n plt.plot(om, np.abs(Xsinc_phase), label=r'$T_h$=%1.0e s' % Th, lw=5-idx)\nplt.legend()\nplt.title(r'Fourier transform of single rectangular impulse with $A=1/T_h$ right-shifted by $\\tau=T_h/2$')\nplt.ylabel(r'magnitude $|X(\\mathrm{j}\\omega)|$')\nplt.xlim(om[0], om[-1])\nplt.grid(True)\n\n\nplt.subplot(2,1,2)\nfor idx, Th in enumerate(Th_des):\n Xsinc = A*Th * my_sinc(om*Th/2) \n Xsinc_phase = Xsinc*np.exp(-1j*om*Th/2)\n plt.plot(om, np.angle(Xsinc_phase), label=r'$T_h$=%1.0e s' % Th, lw=5-idx)\nplt.legend()\nplt.xlabel(r'$\\omega$ / (rad/s)')\nplt.ylabel(r'phase $\\angle X(\\mathrm{j}\\omega)$')\nplt.xlim(om[0], om[-1])\nplt.ylim(-4, +4)\nplt.grid(True)\nplt.savefig('A8A2DEE53A.pdf')",
"_____no_output_____"
]
],
[
[
"## Copyright\n\nThis tutorial is provided as Open Educational Resource (OER), to be found at\nhttps://github.com/spatialaudio/signals-and-systems-exercises\naccompanying the OER lecture\nhttps://github.com/spatialaudio/signals-and-systems-lecture.\nBoth are licensed under a) the Creative Commons Attribution 4.0 International\nLicense for text and graphics and b) the MIT License for source code.\nPlease attribute material from the tutorial as *Frank Schultz,\nContinuous- and Discrete-Time Signals and Systems - A Tutorial Featuring\nComputational Examples, University of Rostock* with\n``main file, github URL, commit number and/or version tag, year``.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a864de95108837b8de533022de28b737ae9cbb1
| 52,856 |
ipynb
|
Jupyter Notebook
|
scratch/random_forest.ipynb
|
pezpet/group-1-project-4
|
6258685ec50b48f39cbff60f6d13d584f7706e98
|
[
"CC0-1.0"
] | null | null | null |
scratch/random_forest.ipynb
|
pezpet/group-1-project-4
|
6258685ec50b48f39cbff60f6d13d584f7706e98
|
[
"CC0-1.0"
] | null | null | null |
scratch/random_forest.ipynb
|
pezpet/group-1-project-4
|
6258685ec50b48f39cbff60f6d13d584f7706e98
|
[
"CC0-1.0"
] | null | null | null | 58.275634 | 13,646 | 0.709342 |
[
[
[
"# !pip install scikeras\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn.compose import make_column_selector, make_column_transformer\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.metrics import ConfusionMatrixDisplay, balanced_accuracy_score\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n\n# from scikeras.wrappers import KerasClassifier\n\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.callbacks import EarlyStopping\n\n",
"_____no_output_____"
]
],
[
[
"## Import Data",
"_____no_output_____"
]
],
[
[
"data = pd.read_csv('./demographics-data/classification_data_demographics.csv')\ndata.head(2)",
"_____no_output_____"
]
],
[
[
"## Transform and Scale Data",
"_____no_output_____"
],
[
"### Column Transformer",
"_____no_output_____"
]
],
[
[
"X = data.drop(columns=['labels'])\nct = make_column_transformer(\n (OneHotEncoder(sparse=False, handle_unknown='ignore'), make_column_selector(dtype_include=object)),\n remainder='passthrough',\n verbose_feature_names_out=False\n)\nX_encoded = ct.fit_transform(X)\nX_encoded\nct.get_feature_names_out()\nX_encoded = pd.DataFrame(X_encoded, columns=ct.get_feature_names_out())\n\nX_encoded.head(2)",
"_____no_output_____"
]
],
[
[
"### Scaling",
"_____no_output_____"
]
],
[
[
"X_encoded_scaled = StandardScaler().fit_transform(X_encoded)",
"_____no_output_____"
]
],
[
[
"## Target",
"_____no_output_____"
]
],
[
[
"y = data['labels']\n\ny_categorical = to_categorical(y, 3)",
"_____no_output_____"
]
],
[
[
"## Baseline",
"_____no_output_____"
]
],
[
[
"y.value_counts(normalize=True)",
"_____no_output_____"
]
],
[
[
"## Test/Train Split\n",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X_encoded_scaled, y_categorical, stratify=y, random_state=13)\n\nX_train.shape\n\ny_train.shape",
"_____no_output_____"
]
],
[
[
"## Random Forest",
"_____no_output_____"
]
],
[
[
"rf = RandomForestClassifier(n_jobs=-1)\nrf.fit(X_train, y_train)\nrf.score(X_test, y_test)",
"_____no_output_____"
]
],
[
[
"### Confusion Matrix Display",
"_____no_output_____"
]
],
[
[
"y_preds = rf.predict(X_test)",
"_____no_output_____"
],
[
"ConfusionMatrixDisplay.from_predictions(y_test.argmax(axis=1), np.rint(y_preds).argmax(axis=1), cmap='Blues')\n;\nplt.title(\"Random Forest Confusion Matrix\")\nplt.savefig('./figures/confusion_matrix_random_forest.png')",
"_____no_output_____"
]
],
[
[
"### Balanced Accuracy Score",
"_____no_output_____"
]
],
[
[
"balanced_accuracy_score(y_test.argmax(axis=1), y_preds.argmax(axis=1))",
"_____no_output_____"
]
],
[
[
"### Feature Importances",
"_____no_output_____"
]
],
[
[
"pd.DataFrame([ct.get_feature_names_out(), rf.feature_importances_]).T.sort_values(by=1, ascending=False).head(10)",
"_____no_output_____"
]
],
[
[
"## Extra Trees",
"_____no_output_____"
]
],
[
[
"et = ExtraTreesClassifier(n_jobs=-1)\net.fit(X_train, y_train)\net.score(X_test, y_test)",
"_____no_output_____"
]
],
[
[
"### Confusion Matrix Display",
"_____no_output_____"
]
],
[
[
"y_preds = et.predict(X_test)",
"_____no_output_____"
],
[
"ConfusionMatrixDisplay.from_predictions(y_test.argmax(axis=1), np.rint(y_preds).argmax(axis=1), cmap='Blues')\n;\nplt.title(\"Extra Trees Confusion Matrix\")\nplt.savefig('./figures/confusion_matrix_extra_trees.png')",
"_____no_output_____"
]
],
[
[
"### Balanced Accuracy Score",
"_____no_output_____"
]
],
[
[
"balanced_accuracy_score(y_test.argmax(axis=1), y_preds.argmax(axis=1))",
"_____no_output_____"
]
],
[
[
"## RandomizedSearchCV on Extra Trees",
"_____no_output_____"
],
[
"### Extra Trees",
"_____no_output_____"
]
],
[
[
"et = ExtraTreesClassifier()",
"_____no_output_____"
],
[
"et.get_params().keys()",
"_____no_output_____"
],
[
"params = {\n 'n_estimators': range(100, 1000)\n}",
"_____no_output_____"
]
],
[
[
"### RandomizedSearchCV",
"_____no_output_____"
]
],
[
[
"# rs = RandomizedSearchCV(\n# et,\n# params,\n# n_jobs=-1\n# )\n\n# rs_result = rs.fit(X_train, y_train)\n\n# # Result summary\n# print(f\"Best score: {rs_result.best_score_}. Used these parameters: {rs_result.best_params_}\")\n\n# # This part copied from machine learning mastery prints out all results to check where improvements can be made\n# means = rs_result.cv_results_['mean_test_score']\n# stds = rs_result.cv_results_['std_test_score']\n# params = rs_result.cv_results_['params']\n# for mean, stdev, param in zip(means, stds, params):\n# print(\"%f (%f) with: %r\" % (mean, stdev, param))",
"_____no_output_____"
],
[
"# y_preds = rs_result.best_estimator_.predict(X_test)",
"_____no_output_____"
],
[
"# balanced_accuracy_score(y_test.argmax(axis=1), y_preds.argmax(axis=1))",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |