**Dataset columns**

| Column | Type | Range / Notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 6 - 14.9M |
| ext | string | 1 class |
| lang | string | 1 class |
| max_stars_repo_path | string | length 6 - 260 |
| max_stars_repo_name | string | length 6 - 119 |
| max_stars_repo_head_hexsha | string | length 40 - 41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 - 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 6 - 260 |
| max_issues_repo_name | string | length 6 - 119 |
| max_issues_repo_head_hexsha | string | length 40 - 41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 - 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 6 - 260 |
| max_forks_repo_name | string | length 6 - 119 |
| max_forks_repo_head_hexsha | string | length 40 - 41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 - 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| avg_line_length | float64 | 2 - 1.04M |
| max_line_length | int64 | 2 - 11.2M |
| alphanum_fraction | float64 | 0 - 1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
c527bf62165709ac880f5814cde4a3d0393e176c
| 62,471 |
ipynb
|
Jupyter Notebook
|
internal/add_chapterwise_readme.ipynb
|
patel-zeel/pyprobml
|
027ef3c13a2a63d958e05fdedb68fd7b8f0e0261
|
[
"MIT"
] | null | null | null |
internal/add_chapterwise_readme.ipynb
|
patel-zeel/pyprobml
|
027ef3c13a2a63d958e05fdedb68fd7b8f0e0261
|
[
"MIT"
] | 1 |
2022-03-27T04:59:50.000Z
|
2022-03-27T04:59:50.000Z
|
internal/add_chapterwise_readme.ipynb
|
patel-zeel/pyprobml
|
027ef3c13a2a63d958e05fdedb68fd7b8f0e0261
|
[
"MIT"
] | 2 |
2022-03-26T11:52:36.000Z
|
2022-03-27T05:17:48.000Z
| 36.682913 | 1,161 | 0.428071 |
[
[
[
"import re\nfrom glob import glob\nimport requests\nimport pandas as pd\nimport os\nfrom probml_utils.url_utils import is_dead_url,make_url_from_chapter_no_and_script_name, extract_scripts_name_from_caption\nfrom TexSoup import TexSoup",
"_____no_output_____"
]
],
[
[
"## Get chapter names",
"_____no_output_____"
]
],
[
[
"chap_names = {}\nfor chap_no in range(1,24):\n suppl = f\"../../pml-book/pml1/supplements/chap{chap_no}.md\"\n with open(suppl, \"r\") as fp:\n text = fp.read()\n names = re.findall(r\"Chapter.+?[(](.+)[)]\",text)\n chap_names[chap_no] = names[0]\n print(chap_no, names)",
"1 ['Introduction']\n2 ['Probability: univariate models']\n3 ['Probability: multivariate models']\n4 ['Statistics']\n5 ['Decision theory']\n6 ['Information theory']\n7 ['Linear algebra']\n8 ['Optimization']\n9 ['Linear discriminant analysis']\n10 ['Logistic regression']\n11 ['Linear regression']\n12 ['Generalized linear models']\n13 ['Neural networks for unstructured data']\n14 ['Neural networks for images']\n15 ['Neural networks for sequences']\n16 ['Exemplar-based methods']\n17 ['Kernel methods']\n18 ['Trees']\n19 ['Learning with fewer labeled examples']\n20 ['Dimensionality reduction']\n21 ['Clustering']\n22 ['Recommender systems']\n23 ['Graph embeddings']\n"
],
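A note on the extraction above: the pattern `r"Chapter.+?[(](.+)[)]"` pulls the parenthesised title out of each supplement heading. A minimal standalone sketch of what it matches (the `sample` heading below is hypothetical; the real text lives in the supplement files):

```python
import re

# Hypothetical heading in the style of the supplement files.
sample = "Supplementary material for Chapter 4 (Statistics)"

# Non-greedy ".+?" advances to the first "(" after "Chapter";
# the group then captures everything up to the final ")".
print(re.findall(r"Chapter.+?[(](.+)[)]", sample))  # ['Statistics']
```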
[
"df = pd.DataFrame(chap_names.items(), columns=[\"chap_no\",\"chap_name\"])\ndf",
"_____no_output_____"
],
[
"df.to_csv(\"chapter_no_to_name_mapping.csv\", index=None)",
"_____no_output_____"
]
],
[
[
"## Create a Readme.md",
"_____no_output_____"
]
],
[
[
"content = '''\n# \"Probabilistic Machine Learning: An Introduction\"\n\n## Chapters\n|Chapter|Name| Notebooks|\n|-|-|-|\n'''\n\nfor chap_no in range(1,24):\n chap_url = f\"https://github.com/probml/pyprobml/tree/master/notebooks/book1/{chap_no:02d}\"\n content+=f\"| {chap_no} | {chap_names[chap_no]} | [{chap_no:02d}/]({chap_no:02d}/) |\\n\"\ncontent",
"_____no_output_____"
],
[
"readme_file = \"../notebooks/book1/README.md\"\nwith open(readme_file,\"w\") as fp:\n fp.write(content)",
"_____no_output_____"
]
],
[
[
"## Chapterwise README.md",
"_____no_output_____"
]
],
[
[
"with open(\"pml1.lof\") as fp:\n LoF_File_Contents = fp.read()\n soup = TexSoup(LoF_File_Contents)\n \n # create mapping of fig_no to list of script_name\n\n url_mapping = {}\n for caption in soup.find_all(\"numberline\"):\n fig_no = str(caption.contents[0])\n extracted_scripts = extract_scripts_name_from_caption(str(caption))\n if len(extracted_scripts) == 1:\n url_mapping[fig_no] = extracted_scripts[0]+\"\"\n elif len(extracted_scripts) > 1:\n url_mapping[fig_no] = \"fig_\"+fig_no.replace(\".\",\"_\")+\".ipynb\"\n else:\n url_mapping[fig_no] = \"\"",
"_____no_output_____"
],
[
"url_mapping",
"_____no_output_____"
],
[
"chapter_wise_mappping = {}\nfor fig_no in url_mapping:\n chap_no = int(fig_no.split(\".\")[0])\n if chap_no not in chapter_wise_mappping:\n chapter_wise_mappping[chap_no] = {}\n chapter_wise_mappping[chap_no][fig_no] = url_mapping[fig_no]\nchapter_wise_mappping",
"_____no_output_____"
],
[
"book1_figures = os.listdir(\"../../pml-book/book1-figures/\")\nimage_mapping = {}\nfor each in book1_figures:\n fig_no = re.findall(r\"\\d+\\.\\d+\", each)[0]\n try:\n image_mapping[fig_no].append(each)\n except:\n image_mapping[fig_no] = [each]\nimage_mapping",
"_____no_output_____"
],
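As an aside, the try/except insertion above can be written more idiomatically with `collections.defaultdict`; a sketch assuming the same `book1_figures` list from the cell above:

```python
from collections import defaultdict
import re

# Group figure files by their figure number, e.g. "3.12", without try/except.
image_mapping = defaultdict(list)
for each in book1_figures:
    fig_no = re.findall(r"\d+\.\d+", each)[0]
    image_mapping[fig_no].append(each)
```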
[
"def get_figure_text(fig_no):\n if fig_no not in image_mapping:\n return \"-\"\n\n url = \"https://github.com/probml/pml-book/blob/main/book1-figures/\"\n text = \"\"\n for fig in image_mapping[fig_no]:\n text += f\"[{fig}]({os.path.join(url,fig)})<br/>\"\n return text",
"_____no_output_____"
],
[
"def extract_url(line):\n links = re.findall(r\"(https.+)?\\)\" ,line)\n if links:\n return links\n return None",
"_____no_output_____"
],
[
"dead = []\nfor chap_no in chapter_wise_mappping:\n if chap_no == 23:\n continue #not present in pyprobml\n content = f'''\n# Chapter {chap_no}: {chap_names[chap_no]}\n\n## Figures\n\n|Figure No. | Notebook | Figure |\n|--|--|--|\n'''\n for fig_no in chapter_wise_mappping[chap_no]:\n notebook_link = f\"[{chapter_wise_mappping[chap_no][fig_no]}]({chapter_wise_mappping[chap_no][fig_no]})\" if chapter_wise_mappping[chap_no][fig_no] != \"\" else \"-\"\n content += f\"| {fig_no} | {notebook_link} \"\n content+= f\"| {get_figure_text(fig_no)} |\\n\"\n \n # append supplementary \n \n \n \n suppl = f\"../../pml-book/pml1/supplements/chap{chap_no}.md\"\n with open(suppl, \"r\") as fp:\n text = fp.read()\n print(chap_no,len(text.split(\"\\n\")))\n if len(text.split(\"\\n\")) > 3:\n content += \"## Supplementary material\\n\"\n text = \"\\n\".join(text.split(\"\\n\")[1:])\n #change tutorial location from probml_notebooks to pyprobml\n text = text.replace(\"https://github.com/probml/probml-notebooks/blob/main/markdown/\",\"https://github.com/probml/pyprobml/tree/master/tutorials/\")\n content+=text\n \n #print(content)\n \n # save this as README.md\n readme_file = f\"../notebooks/book1/{chap_no:02d}/README.md\"\n with open(readme_file,\"w\") as fp:\n fp.write(content)\n",
"1 12\n2 4\n3 4\n4 5\n5 5\n6 3\n7 4\n8 9\n9 3\n10 6\n11 9\n12 3\n13 13\n14 17\n15 20\n16 4\n17 3\n18 5\n19 6\n20 10\n21 3\n22 5\n"
],
[
" \n# for line in lines:\n# links = extract_url(line)\n# if links:\n# for link in links:\n# if \"http\" in link and is_dead_url(link):\n# print(link)\n# dead.append(link)\n# text = \"\\n\".join(lines)\n# content+=text",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c527c0ae862ebc6e730146b65e91e318e7be8f12
| 26,298 |
ipynb
|
Jupyter Notebook
|
data_specs/Landsat_C2_SR_specs.ipynb
|
vikineema/deafrica-docs
|
3e82b1dee56508fe31341263cd1ddb6857532715
|
[
"Apache-2.0"
] | null | null | null |
data_specs/Landsat_C2_SR_specs.ipynb
|
vikineema/deafrica-docs
|
3e82b1dee56508fe31341263cd1ddb6857532715
|
[
"Apache-2.0"
] | null | null | null |
data_specs/Landsat_C2_SR_specs.ipynb
|
vikineema/deafrica-docs
|
3e82b1dee56508fe31341263cd1ddb6857532715
|
[
"Apache-2.0"
] | null | null | null | 42.0768 | 693 | 0.610389 |
[
[
[
"# Landsat Collection 2 Level-2 Surface Reflectance",
"_____no_output_____"
]
],
[
[
".. contents::\n :local:",
"_____no_output_____"
]
],
[
[
"**Date modified:** 23 August 2021",
"_____no_output_____"
],
[
"## Product overview",
"_____no_output_____"
],
[
"### Background",
"_____no_output_____"
],
[
"Digital Earth Africa (DE Africa) provides free and open access to a copy of [Landsat Collection 2 Level-2](https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-level-2-science-products) products over Africa. These products are produced and provided by the United States Geological Survey (USGS).\n\nThe [Landsat series](https://www.usgs.gov/core-science-systems/nli/landsat) of Earth Observation satellites, jointly led by USGS and NASA, have been continuously acquiring images of the Earth’s land surface since 1972. DE Africa provides data from Landsat 5, 7 and 8 satellites, including historical observations dating back to late 1980s and regularly updated new acquisitions.\n\nNew Level-2 Landsat 7 and Landsat 8 data are available after 15 to 27 days from acquisition. See [Landsat Collection 2 Generation Timeline](https://www.usgs.gov/media/images/landsat-collection-2-generation-timeline) for details.\n\nUSGS Landsat Collection 2 was released early 2021 and offers improved processing, geometric accuracy, and radiometric calibration compared to previous Collection 1 products. The Level-2 products are endorsed by the Committee on Earth Observation Satellites (CEOS) to be Analysis Ready Data for Land ([CARD4L](https://ceos.org/ard/))-compliant. This internationally-recognised certification ensures these products have been processed to a minimum set of requirements and organised into a form that allows immediate analysis with a minimum of additional user effort and interoperability both through time and with other datasets.\n\nUSGS Landsat Collection 2 Level-2 includes:\n\n* Surface Reflectance\n* Surface Temperature\n\nThis document provides technical specifications for the Surface Reflectance product. Information for the Surface Temperature product can be found in the [Landsat Collection 2 Level-2 Surface Temperature specification](https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html).\n\nSurface reflectance is the fraction of incoming solar radiation that is reflected from Earth's surface. Variations in satellite measured radiance due to atmospheric properties have been corrected for so images acquired over the same area at different times are comparable and can be used readily to detect changes on Earth's surface.\n",
"_____no_output_____"
],
[
"### Specifications",
"_____no_output_____"
],
[
"#### Spatial and temporal coverage",
"_____no_output_____"
],
[
"DE Africa provides Landsat Collection 2 Level-2 from Landsat 5, 7 and 8 as seperate products. Relevant coverage and metadata can be viewed on DE Africa Metadata Exploer:\n\n* [Landsat 5 Collection 2 Level-2 Surface Reflectance](https://explorer.digitalearth.africa/products/ls5_sr)\n* [Landsat 7 Collection 2 Level-2 Surface Reflectance](https://explorer.digitalearth.africa/products/ls7_sr)\n* [Landsat 8 Collection 2 Level-2 Surface Reflectance](https://explorer.digitalearth.africa/products/ls8_sr)",
"_____no_output_____"
],
[
"**Table 1: Landsat Collection 2 Level-2 Surface Reflectance product specifications**",
"_____no_output_____"
],
[
"|Satellite | Landsat 5 | Landsat 7 | Landsat 8 | \n|----------|:---------:|:---------:|:---------:|\n|Instrument| Multispectral Scanner (MSS), Thematic Mapper (TM)| Enhanced Thematic Mapper (ETM+)| Operational Land Imager (OLI), Thermal Infrared Sensor (TIRS) |\n|Number of bands | 10 | 10 | 10 |\n|Cell size - X (metres) | 30 | 30 | 30 |\n|Cell size - Y (metres) | 30 | 30 | 30 |\n|Coordinate reference system |Universal Transverse Mercator (UTM) | UTM | UTM |\n|Temporal resolution | Every 16 days | Every 16 days | Every 16 days |\n|Temporal range| 1984 – 2012 | 1999 – present | 2013 – present |\n|Parent dataset| [Landsat Collection 2 Level-1](https://www.usgs.gov/land-resources/nli/landsat/landsat-collection-2-level-1-data) | [Landsat Collection 2 Level-1](https://www.usgs.gov/land-resources/nli/landsat/landsat-collection-2-level-1-data) | [Landsat Collection 2 Level-1](https://www.usgs.gov/land-resources/nli/landsat/landsat-collection-2-level-1-data) |\n|Update frequency| NA (archive)| Daily | Daily |",
"_____no_output_____"
],
[
"#### Measurements",
"_____no_output_____"
],
[
"**Table 2: Landsat 5 and Landsat 7 Level-2 Surface Reflectance measurements**\n\n|Band ID|Description |Units | Range | Data type| No data$^\\dagger$ | Conversion$^\\ddagger$ |\n|----------|-------------|----------------|----------------|:---------:|:----------:|:----------:|\n|SR_B1 | Surface reflectance band 1 (Blue) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B2 | Surface reflectance band 2 (Green) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B3 | Surface reflectance band 3 (Red) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B4 | Surface reflectance band 4 (Near-Infrared (NIR)) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B5 | Surface reflectance band 5 (Short Wavelength Infrared (SWIR) 1) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B7 | Surface reflectance band 7 (SWIR 2) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|QA_PIXEL | Pixel quality | Bit Index | `0-65535` | `uint16` | `1` | NA |\n|QA_RADSAT | Radiometric saturation | Bit Index | `0-65535` | `uint16` | `0` | NA |\n|SR_ATMOS \\_OPACITY | Atmospheric opacity | Unitless | `0-32767` | `int16` | `-9999` | 0.001 \\* DN |\n|SR_CLOUD \\_QA | Cloud mask quality | Bit Index | `0-255` | `uint8` | `0` | NA |\n\n$^\\dagger$ No data or fill value.\n\n$^\\ddagger$ Physical measurement can be derived from the Digital Number (DN) stored in the product using the conversion equation listed. \n\nMore inforamtion can be found from the [Landsat 4-7 Collection 2 Science Product Guide](https://www.usgs.gov/media/files/landsat-4-7-collection-2-level-2-science-product-guide).",
"_____no_output_____"
],
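The conversion column can be applied directly in code. A minimal numpy sketch, assuming `dn` holds raw digital numbers and using the fill value `0` from the table (the sample values are illustrative):

```python
import numpy as np

# Raw digital numbers as stored in the product; 0 marks fill/no-data pixels.
dn = np.array([0, 7273, 20000, 65455], dtype=np.uint16)

# Mask fill values, then apply the documented scaling 2.75e-05 * DN - 0.2.
reflectance = np.where(dn == 0, np.nan, dn * 2.75e-5 - 0.2)
print(reflectance)  # [nan, ~0.0000075, 0.35, ~1.6]
```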
[
"**Table 3: Landsat 8 Level-2 Surface Reflectance measurements**\n\nLandsat 8 Level-2 science product is generated using a different algorithm and has different output measurements compared to Landsat 5 and Landsat 7.\n\n|Band ID|Description |Units | Range | Data type| No data$^\\dagger$ | Conversion$^\\ddagger$ |\n|----------|-------------|----------------|----------------|:---------:|:----------:|:----------:|\n|SR_B1 | Surface reflectance band 1 (Coastal Aerosol) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B2 | Surface reflectance band 2 (Blue) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B3 | Surface reflectance band 3 (Green) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B4 | Surface reflectance band 4 (Red) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B5 | Surface reflectance band 5 (NIR) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B6 | Surface reflectance band 6 (SWIR 1) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|SR_B7 | Surface reflectance band 7 (SWIR 2) | Unitless | `1-65455`| `uint16` | `0` | 2.75e-05 \\* DN - 0.2 |\n|QA_PIXEL | Pixel quality | Bit Index | `0-65535` | `uint16` | `1` | NA |\n|QA_RADSAT | Radiometric saturation | Bit Index | `0-65535` | `uint16` | `0` | NA |\n|SR_QA \\_AEROSOL | Aerosol level | Bit Index | `0-255` | `uint8` | `1` | NA |\n\n$^\\dagger$ No data or fill value.\n\n$^\\ddagger$ Physical measurement can be derived from the Digital Number (DN) stored in the product using the conversion equation listed. \n\nMore inforamtion can be found from [Landsat 8 OLI/TIRS Collection 2 Science Product Guide](https://www.usgs.gov/media/files/landsat-8-collection-2-level-2-science-product-guide) and [Landsat 8-9 OLI/TIRS Collection 2 Level 2 Data Format Control Book](https://www.usgs.gov/media/files/landsat-8-9-olitirs-collection-2-level-2-data-format-control-book)",
"_____no_output_____"
],
[
"#### Quality assessment bands",
"_____no_output_____"
],
[
"Pixel quality assessment (QA_PIXEL) bands for Landsat 5, 7 and 8 are generated by the CFMask algorithm. Different bit definitions are used because the cirrus band is only available on Landsat 8. This band is relevant to both Surface Reflectance and Surface Temperature products.",
"_____no_output_____"
],
[
"**Table 4: Pixel quality assessment (QA_PIXEL) bit index.**",
"_____no_output_____"
],
[
"| Bit | Landat 5 & 7 | Landsat 8 | Description Values |\n|-----|------|------------|-------------------|\n| 0 | Fill | Fill | 0 for image data; 1 for fill data |\n| 1 | Dilated Cloud | Dilated Cloud | 0 for cloud is not dilated or no cloud; 1 for cloud dilation |\n| 2 | Unused | Cirrus | 0 for cirrus confidence is not; 1 for high confidence cirrus |\n| 3 | Cloud | Cloud | 0 for cloud confidence is not high; 1 for high confidence cloud |\n| 4 | Cloud Shadow | Cloud Shadow | 0 for Cloud Shadow Confidence is not high; 1 for high confidence cloud shadow |\n| 5 | Snow | Snow | 0 for Snow/Ice Confidence is not high; 1 for high confidence snow cover |\n| 6 | Clear | Clear | 0 if Cloud or Dilated Cloud bits are set; 1 if Cloud and Dilated Cloud bits are not set |\n| 7 | Water | Water | 0 for land or cloud; 1 for water |\n| 8-9 | Cloud Confidence | Cloud Confidence | 00 for no confidence level set; 01 Low confidence; 10 Medium confidence; 11 High confidence |\n| 10-11 | Cloud Shadow Confidence | Cloud Shadow Confidence | 00 for no confidence level set; 01 Low confidence; 10 Reserved; 11 High confidence |\n| 12-13 | Snow/Ice Confidence | Snow/Ice Confidence | 00 for no confidence level set; 01 Low confidence; 10 Reserved; 11 High confidence |\n| 14-15 | Unused | Cirrus Confidence | 00 for no confidence level set; 01 Low confidence; 10 Reserved; 11 High confidence |",
"_____no_output_____"
],
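The single-bit flags in Table 4 are tested with bitwise AND, and the two-bit confidence fields with a shift and mask. A minimal sketch (the `qa` values are illustrative):

```python
import numpy as np

# Illustrative QA_PIXEL values: a fill pixel, a clear pixel, and a cloudy pixel.
qa = np.array([0b0000000000000001, 0b0000000001000000, 0b0000001100001000],
              dtype=np.uint16)

fill  = (qa & (1 << 0)) > 0  # bit 0: fill
cloud = (qa & (1 << 3)) > 0  # bit 3: high-confidence cloud
clear = (qa & (1 << 6)) > 0  # bit 6: clear

# Two-bit fields are shifted then masked, e.g. cloud confidence in bits 8-9.
cloud_conf = (qa >> 8) & 0b11
print(fill, cloud, clear, cloud_conf)
```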
[
"Radiometric saturation quality assessment (QA_RADSAT) bands are different for Landsat 5, 7 and 8 because the sensors have different spectral bands. This band is relevant to both Surface Reflectance and Surface Temperature products.",
"_____no_output_____"
],
[
"**Table 5: Radiometric saturation quality assessment (QA_RADSAT) bit index.**",
"_____no_output_____"
],
[
"| Bit | Landsat 5 | Landsat 7 | Landsat 8 | Description Values |\n|-----|-----------|-----------|-----------|--------------------|\n| 0 | Band 1 (Blue) | Band 1 (Blue) | Band 1 (Coastal)| 0 no saturation; 1 saturated data |\n| 1 | Band 2 (Green) | Band 2 (Green) | Band 2 (Blue) | 0 no saturation; 1 saturated data |\n| 2 | Band 3 (Red) | Band 3 (Red) | Band 3 (Green) | 0 no saturation; 1 saturated data |\n| 3 | Band 4 (NIR) | Band 4 (NIR) | Band 4 (Red) | 0 no saturation; 1 saturated data |\n| 4 | Band 5 (SWIR1) | Band 5 (SWIR1) | Band 5 (NIR) | 0 no saturation; 1 saturated data |\n| 5 | Band 6 (TIR) | Band 6L (TIR)† | Band 6 (SWIR1) | 0 no saturation; 1 saturated data |\n| 6 | Band 7 (SWIR2) | Band 7 (SWIR2) | Band 7 (SWIR2) | 0 no saturation; 1 saturated data |\n| 7 | Unused | Unused | Unused | 0 |\n| 8 | Unused | Band 6H (TIR)‡ | Band 9 (Cirrus) | 0 no saturation; 1 saturated data |\n| 9 | Dropped Pixel | Dropped Pixel | Unused | 0 Pixel present; 1 detector doesn’t have a value – no data |\n| 10 | Unused | Unused | Unused | 0 |\n| 11 | Unused | Unused | Terrain occlusion | 0 no terrain occlusion; 1 terrain occlusion |\n| 12 | Unused | Unused | Unused | 0 |\n| 13 | Unused | Unused | Unused | 0 |\n| 14 | Unused | Unused | Unused | 0 |\n| 15 | Unused | Unused |Unused | 0 |\n\n$^\\dagger$, $^\\ddagger$ For Landsat 7 products, the Band 6 TOA brightness temperature product is generated from ETM+ Band 6 High gain (6H) and Band 6 Low gain (6L) merged together. The merged Band 6 is comprised of pixels that are not saturated in Band 6H. When Band 6H pixels are saturated with a brightness temperature outside of the 6H dynamic range (from 240K to 322K), they will be filled with pixels from the 6L band even if those pixels are saturated.",
"_____no_output_____"
],
[
"For Landsat 5 and 7, another cloud mask band (SR_CLOUD_QA) is available but is less accurate than the QA_PIXEL band.",
"_____no_output_____"
],
[
"**Table 6: Landsat 5 and Landsat 7 cloud mask (SR_CLOUD_QA) bit index.**",
"_____no_output_____"
],
[
"| Bit | Attribute |\n|-----|-----------|\n| 0 | Dark Dense Vegetation (DDV) |\n| 1 | Cloud | \n| 2 | Cloud shadow |\n| 3 | Adjacent to cloud |\n| 4 | Snow |\n| 5 | Water |\n| 6 | Unused |\n| 7 | Unused |",
"_____no_output_____"
],
[
"For Landsat 8, aerosol retrieval information that may have impacted the product is provided in a SR_Aerosol_QA band. The default \"Aerosol Level\" is Climatology (00), which means no aerosol correction was applied. Pixels with an \"Aerosol Level\" classified as high are not recommended for use.",
"_____no_output_____"
],
[
"**Table 7: Landsat 8 aerosol level (SR_Aerosol_QA) bit index.**",
"_____no_output_____"
],
[
"| Bit | Flag | Description Values |\n|-----|------|--------------------|\n| 0 | Fill | 0 Pixel is not fill; 1 Pixel is fill |\n| 1 | Valid aerosol retrieval | 0 Pixel retrieval is not valid; 1 Pixel retrieval is valid |\n| 2 | Water | 0 Pixel is not water; 1 Pixel is water |\n| 3 | Unused | 0 |\n| 4 | Unused | 0 |\n| 5 | Interpolated Aerosol | 0 Pixel is not aerosol interpolated; 1 Pixel is aerosol interpolated |\n| 6-7 | Aerosol Level | 00 Climatology; 01 Low; 10 Medium; 11 High |",
"_____no_output_____"
],
[
"### Processing",
"_____no_output_____"
],
[
"Landsat Collection 2 Level-2 products are processed by the USGS from Collection 2 Level-1 inputs.\nLandsat 8 OLI surface reflectance products are generated using the Land Surface Reflectance Code (LaSRC) algorithm. Landsat 4-5 TM and Landsat 7 ETM+ surface reflectance products are generated using the Landsat Ecosystem Disturbance Adaptive Processing System (LEDAPS) algorithm.",
"_____no_output_____"
],
[
"### Media and example images",
"_____no_output_____"
],
[
"<img src=\"../_static/data_specs/Landsat_C2_specs/ls_libya.png\" alt=\"Landsat composites for Libya\" width=\"600\" align=\"left\"/>",
"_____no_output_____"
],
[
"**Figure 1: Landsat false color composites (highlighting vegetation) over an area in Tripoli District, Libya, showing changes between selected dates from 1984 to 2021.**",
"_____no_output_____"
],
[
"### Related services",
"_____no_output_____"
],
[
"* [Water Observations from Space](https://docs.digitalearthafrica.org/en/latest/data_specs/WOfS_specs.html)\n* [GeoMAD cloud-free composite services](https://docs.digitalearthafrica.org/en/latest/data_specs/GeoMAD_specs.html)\n* [Landsat Collection 2 Level-2 Surface Temperature](https://docs.digitalearthafrica.org/en/latest/data_specs/Landsat_C2_ST_specs.html)",
"_____no_output_____"
],
[
"### References\n\n[USGS Collection Level-2 website](https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2)\n",
"_____no_output_____"
],
[
"### License\n\nThere are no restrictions on Landsat data downloaded from the USGS; it can be used or redistributed as desired. USGS request that you include a [statement of the data source](https://www.usgs.gov/centers/eros/data-citation?qt-science_support_page_related_con=0#qt-science_support_page_related_con) when citing, copying, or reprinting USGS Landsat data or images.",
"_____no_output_____"
],
[
"### Acknowledgements\n\nLandsat Level- 2 Surface Reflectance Science Product courtesy of the U.S. Geological Survey.",
"_____no_output_____"
],
[
"## Data access",
"_____no_output_____"
],
[
"### Amazon Web Services S3 ",
"_____no_output_____"
],
[
"Landsat Collection 2 Level-2 is available in AWS S3, sponsored by the [Public Dataset Program](https://registry.opendata.aws/deafrica-landsat/).",
"_____no_output_____"
],
[
"**Table 8: AWS data access details.**",
"_____no_output_____"
],
[
"|AWS S3 details | |\n|----------|-------------|\n|Bucket ARD | `arn:aws:s3:::deafrica-landsat`|\n|Region | `af-south-1` |",
"_____no_output_____"
],
[
"The bucket is in the AWS region `af-south-1` (Cape Town). Additional region specifications can be applied as follows:\n\n`aws s3 ls --region=af-south-1 s3://deafrica-landsat/`\n\nThe file paths follow the format `collection02/level-2/standard/<sensor>/<year>/<path>/<row>/<scene_id>/`.\n",
"_____no_output_____"
],
[
"**Table 9: AWS file path convention.**\n\n|File path element | Description |Example |\n|----------|-------------|-----------------|\n|`sensor`| Landsat sensor name, `tm`, `etm` or `oli-tirs` for landsat 5, 7 and 8 | `oli-tirs` | \n|`year` | Observation year | `2021` | \n|`path` | Landsat orbit path id | `172` | \n|`row` | Landsat orbit row id | `057` | \n|`scene_id` | Landsat scene id | `LC08_L2SP_172057_20210101_20210308_02_T1` |",
"_____no_output_____"
],
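A minimal `boto3` sketch for listing scenes under this convention, assuming anonymous (unsigned) access to the public bucket; the prefix values are illustrative and follow Table 9:

```python
import boto3
from botocore import UNSIGNED
from botocore.config import Config

# Anonymous client against the af-south-1 (Cape Town) bucket.
s3 = boto3.client("s3", region_name="af-south-1",
                  config=Config(signature_version=UNSIGNED))

# collection02/level-2/standard/<sensor>/<year>/<path>/<row>/<scene_id>/
prefix = "collection02/level-2/standard/oli-tirs/2021/172/057/"
resp = s3.list_objects_v2(Bucket="deafrica-landsat", Prefix=prefix, MaxKeys=20)
for obj in resp.get("Contents", []):
    print(obj["Key"])
```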
[
"### OGC Web Services (OWS)",
"_____no_output_____"
],
[
"This product is available through DE Africa's OWS.",
"_____no_output_____"
],
[
"**Table 10: OWS data access details.**\n\n|OWS details | |\n|----------|-------------|\n|Name | `DE Africa Services` |\n|Web Map Services (WMS) URL | `https://ows.digitalearth.africa/wms?version=1.3.0` |\n| Web Coverage Service (WCS) URL | `https://ows.digitalearth.africa/wcs?version=2.1.0`|\n| Layer name | `ls5_sr`, `ls7_sr`, `ls8_sr` |",
"_____no_output_____"
],
[
"Digital Earth Africa OWS details can be found at [https://ows.digitalearth.africa/](https://ows.digitalearth.africa/).\n\nFor instructions on how to connect to OWS, see [this tutorial](https://training.digitalearthafrica.org/en/latest/OWS_tutorial.html).",
"_____no_output_____"
],
[
"### Open Data Cube (ODC)",
"_____no_output_____"
],
[
"The Landsat Collection 2 Level-2 products can be accessed through the Digital Earth Africa ODC API, which is available through the [Digital Earth Africa Sandbox](https://sandbox.digitalearth.africa/hub/login).\n\n**ODC product names:** `ls5_sr`, `ls7_sr`, `ls8_sr`",
"_____no_output_____"
],
[
"Specific bands of data can be called by using either the default names or any of a band's alternative names, as listed in the table below. ODC `Datacube.load` commands without specified bands will load all bands.",
"_____no_output_____"
],
[
"**Table 11: Landsat 5 and Landsat 7 Level-2 Surface Reflectance (ODC product ls5_sr and ls7_sr) band names.**",
"_____no_output_____"
],
[
"|Band name| Alternative names| Fill value |\n|----------|-------------|:------:|\n|SR_B1 | band_1, blue | `0` |\n|SR_B2 | band_2, green | `0` |\n|SR_B3 | band_3, red | `0` |\n|SR_B4 | band_4, nir | `0` |\n|SR_B5 | band_5, swir_1 | `0` |\n|SR_B7 | band_7, swir_2 | `0` |\n|QA_PIXEL | pq, pixel_quality | `1` |\n|QA_RADSAT | radsat, radiometric_saturation | `0` |\n|SR_ATMOS_OPACITY | atmos_opacity | `-9999` |\n|SR_CLOUD_QA | cloud_qa | `0` |",
"_____no_output_____"
],
[
"**Table 12: Landsat 8 Level-2 Surface Reflectance (ODC product ls8_sr) band names.**",
"_____no_output_____"
],
[
"|Band name| Alternative names| Fill value |\n|----------|-------------|:------:|\n|SR_B1 | band_1, coastal_aerosol | `0` |\n|SR_B2 | band_2, blue | `0` |\n|SR_B3 | band_3, green | `0` |\n|SR_B4 | band_4, red | `0` |\n|SR_B5 | band_5, nir | `0` |\n|SR_B6 | band_6, swir_1 | `0` |\n|SR_B7 | band_7, swir_2 | `0` |\n|QA_PIXEL | pq, pixel_quality | `1` |\n|QA_RADSAT | radsat, radiometric_saturation | `0` |\n|SR_QA_AEROSOL | qa_aerosol, aerosol_qa | `1` |",
"_____no_output_____"
],
[
"Band names are case-sensitive. \n\nFor examples on how to use the ODC API, see the DE Africa [example notebook repository](https://github.com/digitalearthafrica/deafrica-sandbox-notebooks).",
"_____no_output_____"
],
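A minimal `dc.load` sketch using the product and band aliases above; it assumes an environment such as the DE Africa Sandbox where the datacube is already configured, and the spatial and temporal extents are illustrative:

```python
import datacube

dc = datacube.Datacube(app="ls8_sr_example")

ds = dc.load(
    product="ls8_sr",
    measurements=["red", "green", "blue", "pixel_quality"],  # aliases from Table 12
    x=(25.40, 25.50),             # illustrative longitude range
    y=(-33.95, -33.85),           # illustrative latitude range
    time=("2021-01-01", "2021-03-31"),
    output_crs="EPSG:6933",
    resolution=(-30, 30),
)

# Convert DN to surface reflectance, masking the fill value 0 (see Table 3).
red = ds.red.where(ds.red != 0) * 2.75e-5 - 0.2
```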
[
"## Technical information",
"_____no_output_____"
],
[
"### Surface Reflectance",
"_____no_output_____"
],
[
"The surface reflectance products for Landat 5, 7 and 8 are generated using two different methods. \n\nLandsat 5 TM and Landsat 7 ETM+ Collection 2 Surface Reflectance are generated using the Landsat Ecosystem Disturbance Adaptive Processing System (LEDAPS) algorithm. The software applies Moderate Resolution Imaging Spectroradiometer (MODIS) atmospheric correction routines to Level-1 data products. Water vapor, ozone, atmospheric height, aerosol optical thickness, and digital elevation are input with Landsat data to the Second Simulation of a Satellite Signal in the Solar Spectrum (6S) radiative transfer models to generate top of atmosphere (TOA) reflectance, surface reflectance, TOA brightness temperature, and masks for clouds, cloud shadows, adjacent clouds, land, and water.\n\nLandsat 8 OLI Collection 2 Surface Reflectance data are generated using the Land Surface Reflectance Code (LaSRC), which makes use of the coastal aerosol band to perform aerosol inversion tests, uses auxiliary climate data from MODIS, and a unique radiative transfer model.\n\nFor more information on the different processing algorithms and caveats of the products, visit the [Landsat Collection 2 Surface Reflectance webpage](https://www.usgs.gov/core-science-systems/nli/landsat/landsat-collection-2-surface-reflectance).\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"raw",
"markdown"
] |
[
[
"markdown"
],
[
"raw"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c527c7465253a7b053533450fc4373e6d626b264
| 61,225 |
ipynb
|
Jupyter Notebook
|
mlxtend/docs/sources/user_guide/frequent_patterns/association_rules.ipynb
|
WhiteWolf21/fp-growth
|
01e1d853b09f244f14e66d7d0c87f139a0f67c81
|
[
"MIT"
] | null | null | null |
mlxtend/docs/sources/user_guide/frequent_patterns/association_rules.ipynb
|
WhiteWolf21/fp-growth
|
01e1d853b09f244f14e66d7d0c87f139a0f67c81
|
[
"MIT"
] | null | null | null |
mlxtend/docs/sources/user_guide/frequent_patterns/association_rules.ipynb
|
WhiteWolf21/fp-growth
|
01e1d853b09f244f14e66d7d0c87f139a0f67c81
|
[
"MIT"
] | null | null | null | 34.551354 | 634 | 0.375745 |
[
[
[
"## Association Rules Generation from Frequent Itemsets",
"_____no_output_____"
],
[
"Function to generate association rules from frequent itemsets",
"_____no_output_____"
],
[
"> from mlxtend.frequent_patterns import association_rules",
"_____no_output_____"
],
[
"## Overview",
"_____no_output_____"
],
[
"Rule generation is a common task in the mining of frequent patterns. _An association rule is an implication expression of the form $X \\rightarrow Y$, where $X$ and $Y$ are disjoint itemsets_ [1]. A more concrete example based on consumer behaviour would be $\\{Diapers\\} \\rightarrow \\{Beer\\}$ suggesting that people who buy diapers are also likely to buy beer. To evaluate the \"interest\" of such an association rule, different metrics have been developed. The current implementation make use of the `confidence` and `lift` metrics. \n\n\n### Metrics\n\nThe currently supported metrics for evaluating association rules and setting selection thresholds are listed below. Given a rule \"A -> C\", *A* stands for antecedent and *C* stands for consequent.\n\n\n#### 'support':\n\n$$\\text{support}(A\\rightarrow C) = \\text{support}(A \\cup C), \\;\\;\\; \\text{range: } [0, 1]$$\n\n- introduced in [3]\n\nThe support metric is defined for itemsets, not assocication rules. The table produced by the association rule mining algorithm contains three different support metrics: 'antecedent support', 'consequent support', and 'support'. Here, 'antecedent support' computes the proportion of transactions that contain the antecedent A, and 'consequent support' computes the support for the itemset of the consequent C. The 'support' metric then computes the support of the combined itemset A $\\cup$ C -- note that 'support' depends on 'antecedent support' and 'consequent support' via min('antecedent support', 'consequent support').\n\n\nTypically, support is used to measure the abundance or frequency (often interpreted as significance or importance) of an itemset in a database. We refer to an itemset as a \"frequent itemset\" if you support is larger than a specified minimum-support threshold. Note that in general, due to the *downward closure* property, all subsets of a frequent itemset are also frequent.\n\n\n#### 'confidence': \n\n$$\\text{confidence}(A\\rightarrow C) = \\frac{\\text{support}(A\\rightarrow C)}{\\text{support}(A)}, \\;\\;\\; \\text{range: } [0, 1]$$\n\n- introduced in [3]\n\nThe confidence of a rule A->C is the probability of seeing the consequent in a transaction given that it also contains the antecedent. Note that the metric is not symmetric or directed; for instance, the confidence for A->C is different than the confidence for C->A. The confidence is 1 (maximal) for a rule A->C if the consequent and antecedent always occur together. \n\n\n#### 'lift':\n\n$$\\text{lift}(A\\rightarrow C) = \\frac{\\text{confidence}(A\\rightarrow C)}{\\text{support}(C)}, \\;\\;\\; \\text{range: } [0, \\infty]$$\n\n\n- introduced in [4]\n\n\nThe lift metric is commonly used to measure how much more often the antecedent and consequent of a rule A->C occur together than we would expect if they were statistically independent. If A and C are independent, the Lift score will be exactly 1.\n\n\n#### 'leverage':\n\n$$\\text{levarage}(A\\rightarrow C) = \\text{support}(A\\rightarrow C) - \\text{support}(A) \\times \\text{support}(C), \\;\\;\\; \\text{range: } [-1, 1]$$\n\n\n- introduced in [5]\n\nLeverage computes the difference between the observed frequency of A and C appearing together and the frequency that would be expected if A and C were independent. 
An leverage value of 0 indicates independence.\n\n#### 'conviction':\n\n$$\\text{conviction}(A\\rightarrow C) = \\frac{1 - \\text{support}(C)}{1 - \\text{confidence}(A\\rightarrow C)}, \\;\\;\\; \\text{range: } [0, \\infty]$$\n\n- introduced in [6]\n\nA high conviction value means that the consequent is highly depending on the antecedent. For instance, in the case of a perfect confidence score, the denominator becomes 0 (due to 1 - 1) for which the conviction score is defined as 'inf'. Similar to lift, if items are independent, the conviction is 1.\n ",
"_____no_output_____"
],
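The definitions above are easy to verify by hand. A short sketch computing each metric from raw supports (the three support values are illustrative):

```python
# Illustrative supports for a rule A -> C.
support_A, support_C, support_AC = 0.6, 0.5, 0.4

confidence = support_AC / support_A                 # 0.667
lift = confidence / support_C                       # 1.333
leverage = support_AC - support_A * support_C       # 0.100
conviction = ((1 - support_C) / (1 - confidence)
              if confidence < 1 else float("inf"))  # 1.500
print(confidence, lift, leverage, conviction)
```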
[
"## References\n",
"_____no_output_____"
],
[
"[1] Tan, Steinbach, Kumar. Introduction to Data Mining. Pearson New International Edition. Harlow: Pearson Education Ltd., 2014. (pp. 327-414).\n\n[2] Michael Hahsler, http://michael.hahsler.net/research/association_rules/measures.html\n\n[3] R. Agrawal, T. Imielinski, and A. Swami. Mining associations between sets of items in large databases. In Proc. of the ACM SIGMOD Int'l Conference on Management of Data, pages 207-216, Washington D.C., May 1993\n\n[4] S. Brin, R. Motwani, J. D. Ullman, and S. Tsur. Dynamic itemset counting and implication rules for market basket data\n\n[5] Piatetsky-Shapiro, G., Discovery, analysis, and presentation of strong rules. Knowledge Discovery in Databases, 1991: p. 229-248.\n\n[6] Sergey Brin, Rajeev Motwani, Jeffrey D. Ullman, and Shalom Turk. Dynamic itemset counting and implication rules for market basket data. In SIGMOD 1997, Proceedings ACM SIGMOD International Conference on Management of Data, pages 255-264, Tucson, Arizona, USA, May 1997",
"_____no_output_____"
],
[
"## Example 1 -- Generating Association Rules from Frequent Itemsets",
"_____no_output_____"
],
[
"The `generate_rules` takes dataframes of frequent itemsets as produced by the `apriori` function in *mlxtend.association*. To demonstrate the usage of the `generate_rules` method, we first create a pandas `DataFrame` of frequent itemsets as generated by the [`apriori`](./apriori.md) function:\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom mlxtend.frequent_patterns import apriori\n\n\ndataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'],\n ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'],\n ['Milk', 'Apple', 'Kidney Beans', 'Eggs'],\n ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'],\n ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']]\n\nte = TransactionEncoder()\nte_ary = te.fit(dataset).transform(dataset)\ndf = pd.DataFrame(te_ary, columns=te.columns_)\nfrequent_itemsets = apriori(df, min_support=0.6, use_colnames=True)\n\nfrequent_itemsets",
"_____no_output_____"
]
],
[
[
"The `generate_rules()` function allows you to (1) specify your metric of interest and (2) the according threshold. Currently implemented measures are **confidence** and **lift**. Let's say you are interested in rules derived from the frequent itemsets only if the level of confidence is above the 70 percent threshold (`min_threshold=0.7`):",
"_____no_output_____"
]
],
[
[
"from mlxtend.frequent_patterns import association_rules\n\nassociation_rules(frequent_itemsets, metric=\"confidence\", min_threshold=0.7)",
"_____no_output_____"
]
],
[
[
"## Example 2 -- Rule Generation and Selection Criteria",
"_____no_output_____"
],
[
"If you are interested in rules according to a different metric of interest, you can simply adjust the `metric` and `min_threshold` arguments . E.g. if you are only interested in rules that have a lift score of >= 1.2, you would do the following:",
"_____no_output_____"
]
],
[
[
"rules = association_rules(frequent_itemsets, metric=\"lift\", min_threshold=1.2)\nrules",
"_____no_output_____"
]
],
[
[
"Pandas `DataFrames` make it easy to filter the results further. Let's say we are ony interested in rules that satisfy the following criteria:\n\n1. at least 2 antecedents\n2. a confidence > 0.75\n3. a lift score > 1.2\n\nWe could compute the antecedent length as follows:",
"_____no_output_____"
]
],
[
[
"rules[\"antecedent_len\"] = rules[\"antecedents\"].apply(lambda x: len(x))\nrules",
"_____no_output_____"
]
],
[
[
"Then, we can use pandas' selection syntax as shown below:",
"_____no_output_____"
]
],
[
[
"rules[ (rules['antecedent_len'] >= 2) &\n (rules['confidence'] > 0.75) &\n (rules['lift'] > 1.2) ]",
"_____no_output_____"
]
],
[
[
"Similarly, using the Pandas API, we can select entries based on the \"antecedents\" or \"consequents\" columns:",
"_____no_output_____"
]
],
[
[
"rules[rules['antecedents'] == {'Eggs', 'Kidney Beans'}]",
"_____no_output_____"
]
],
[
[
"**Frozensets**\n\nNote that the entries in the \"itemsets\" column are of type `frozenset`, which is built-in Python type that is similar to a Python `set` but immutable, which makes it more efficient for certain query or comparison operations (https://docs.python.org/3.6/library/stdtypes.html#frozenset). Since `frozenset`s are sets, the item order does not matter. I.e., the query\n\n`rules[rules['antecedents'] == {'Eggs', 'Kidney Beans'}]`\n \nis equivalent to any of the following three\n\n- `rules[rules['antecedents'] == {'Kidney Beans', 'Eggs'}]`\n- `rules[rules['antecedents'] == frozenset(('Eggs', 'Kidney Beans'))]`\n- `rules[rules['antecedents'] == frozenset(('Kidney Beans', 'Eggs'))]`\n\n\n",
"_____no_output_____"
],
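A quick sketch confirming the order-insensitivity described above:

```python
a = frozenset(('Eggs', 'Kidney Beans'))
b = frozenset(('Kidney Beans', 'Eggs'))

print(a == b)                          # True: element order is irrelevant
print(a == {'Eggs', 'Kidney Beans'})   # True: frozenset compares equal to a set
```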
[
"## Example 3 -- Frequent Itemsets with Incomplete Antecedent and Consequent Information",
"_____no_output_____"
],
[
"Most metrics computed by `association_rules` depends on the consequent and antecedent support score of a given rule provided in the frequent itemset input DataFrame. Consider the following example:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ndict = {'itemsets': [['177', '176'], ['177', '179'],\n ['176', '178'], ['176', '179'],\n ['93', '100'], ['177', '178'],\n ['177', '176', '178']],\n 'support':[0.253623, 0.253623, 0.217391,\n 0.217391, 0.181159, 0.108696, 0.108696]}\n\nfreq_itemsets = pd.DataFrame(dict)\nfreq_itemsets",
"_____no_output_____"
]
],
[
[
"Note that this is a \"cropped\" DataFrame that doesn't contain the support values of the item subsets. This can create problems if we want to compute the association rule metrics for, e.g., `176 => 177`.\n\nFor example, the confidence is computed as\n\n$$\\text{confidence}(A\\rightarrow C) = \\frac{\\text{support}(A\\rightarrow C)}{\\text{support}(A)}, \\;\\;\\; \\text{range: } [0, 1]$$\n\nBut we do not have $\\text{support}(A)$. All we know about \"A\"'s support is that it is at least 0.253623.\n\nIn these scenarios, where not all metric's can be computed, due to incomplete input DataFrames, you can use the `support_only=True` option, which will only compute the support column of a given rule that does not require as much info:\n\n$$\\text{support}(A\\rightarrow C) = \\text{support}(A \\cup C), \\;\\;\\; \\text{range: } [0, 1]$$\n\n\n\"NaN's\" will be assigned to all other metric columns:",
"_____no_output_____"
]
],
[
[
"from mlxtend.frequent_patterns import association_rules\n\nres = association_rules(freq_itemsets, support_only=True, min_threshold=0.1)\nres",
"_____no_output_____"
]
],
[
[
"To clean up the representation, you may want to do the following:",
"_____no_output_____"
]
],
[
[
"res = res[['antecedents', 'consequents', 'support']]\nres",
"_____no_output_____"
]
],
[
[
"## API",
"_____no_output_____"
]
],
[
[
"with open('../../api_modules/mlxtend.frequent_patterns/association_rules.md', 'r') as f:\n print(f.read())",
"## association_rules\n\n*association_rules(df, metric='confidence', min_threshold=0.8, support_only=False)*\n\nGenerates a DataFrame of association rules including the\nmetrics 'score', 'confidence', and 'lift'\n\n**Parameters**\n\n- `df` : pandas DataFrame\n\n pandas DataFrame of frequent itemsets\n with columns ['support', 'itemsets']\n\n\n- `metric` : string (default: 'confidence')\n\n Metric to evaluate if a rule is of interest.\n**Automatically set to 'support' if `support_only=True`.**\n Otherwise, supported metrics are 'support', 'confidence', 'lift',\n\n'leverage', and 'conviction'\n These metrics are computed as follows:\n\n - support(A->C) = support(A+C) [aka 'support'], range: [0, 1]\n\n - confidence(A->C) = support(A+C) / support(A), range: [0, 1]\n\n - lift(A->C) = confidence(A->C) / support(C), range: [0, inf]\n\n - leverage(A->C) = support(A->C) - support(A)*support(C),\n range: [-1, 1]\n\n - conviction = [1 - support(C)] / [1 - confidence(A->C)],\n range: [0, inf]\n\n\n\n- `min_threshold` : float (default: 0.8)\n\n Minimal threshold for the evaluation metric,\n via the `metric` parameter,\n to decide whether a candidate rule is of interest.\n\n\n- `support_only` : bool (default: False)\n\n Only computes the rule support and fills the other\n metric columns with NaNs. This is useful if:\n\n a) the input DataFrame is incomplete, e.g., does\n not contain support values for all rule antecedents\n and consequents\n\n b) you simply want to speed up the computation because\n you don't need the other metrics.\n\n**Returns**\n\npandas DataFrame with columns \"antecedents\" and \"consequents\"\n that store itemsets, plus the scoring metric columns:\n \"antecedent support\", \"consequent support\",\n \"support\", \"confidence\", \"lift\",\n \"leverage\", \"conviction\"\n of all rules for which\n metric(rule) >= min_threshold.\n Each entry in the \"antecedents\" and \"consequents\" columns are\n of type `frozenset`, which is a Python built-in type that\n behaves similarly to sets except that it is immutable\n (For more info, see\n https://docs.python.org/3.6/library/stdtypes.html#frozenset).\n\n**Examples**\n\nFor usage examples, please see\n [http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/](http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/association_rules/)\n\n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c527cd6ab0ea0a4e253703980375cba21ddefd96
| 13,940 |
ipynb
|
Jupyter Notebook
|
.ipynb_checkpoints/2.1-a-first-look-at-a-neural-network-checkpoint.ipynb
|
memari-majid/deep-learning-with-python-notebooks
|
8f08975a93d590251160fd83c246cfab94a71e3a
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/2.1-a-first-look-at-a-neural-network-checkpoint.ipynb
|
memari-majid/deep-learning-with-python-notebooks
|
8f08975a93d590251160fd83c246cfab94a71e3a
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/2.1-a-first-look-at-a-neural-network-checkpoint.ipynb
|
memari-majid/deep-learning-with-python-notebooks
|
8f08975a93d590251160fd83c246cfab94a71e3a
|
[
"MIT"
] | null | null | null | 30.840708 | 368 | 0.587805 |
[
[
[
"import keras\nkeras.__version__",
"Using TensorFlow backend.\n"
]
],
[
[
"# A first look at a neural network\n\nThis notebook contains the code samples found in Chapter 2, Section 1 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.\n\n----\n\nWe will now take a look at a first concrete example of a neural network, which makes use of the Python library Keras to learn to classify \nhand-written digits. Unless you already have experience with Keras or similar libraries, you will not understand everything about this \nfirst example right away. You probably haven't even installed Keras yet. Don't worry, that is perfectly fine. In the next chapter, we will \nreview each element in our example and explain them in detail. So don't worry if some steps seem arbitrary or look like magic to you! \nWe've got to start somewhere.\n\nThe problem we are trying to solve here is to classify grayscale images of handwritten digits (28 pixels by 28 pixels), into their 10 \ncategories (0 to 9). The dataset we will use is the MNIST dataset, a classic dataset in the machine learning community, which has been \naround for almost as long as the field itself and has been very intensively studied. It's a set of 60,000 training images, plus 10,000 test \nimages, assembled by the National Institute of Standards and Technology (the NIST in MNIST) in the 1980s. You can think of \"solving\" MNIST \nas the \"Hello World\" of deep learning -- it's what you do to verify that your algorithms are working as expected. As you become a machine \nlearning practitioner, you will see MNIST come up over and over again, in scientific papers, blog posts, and so on.",
"_____no_output_____"
],
[
"The MNIST dataset comes pre-loaded in Keras, in the form of a set of four Numpy arrays:",
"_____no_output_____"
]
],
[
[
"from keras.datasets import mnist\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()",
"_____no_output_____"
]
],
[
[
"`train_images` and `train_labels` form the \"training set\", the data that the model will learn from. The model will then be tested on the \n\"test set\", `test_images` and `test_labels`. Our images are encoded as Numpy arrays, and the labels are simply an array of digits, ranging \nfrom 0 to 9. There is a one-to-one correspondence between the images and the labels.\n\nLet's have a look at the training data:",
"_____no_output_____"
]
],
[
[
"train_images.shape",
"_____no_output_____"
],
[
"len(train_labels)",
"_____no_output_____"
],
[
"train_labels",
"_____no_output_____"
]
],
[
[
"Let's have a look at the test data:",
"_____no_output_____"
]
],
[
[
"test_images.shape",
"_____no_output_____"
],
[
"len(test_labels)",
"_____no_output_____"
],
[
"test_labels",
"_____no_output_____"
]
],
[
[
"Our workflow will be as follow: first we will present our neural network with the training data, `train_images` and `train_labels`. The \nnetwork will then learn to associate images and labels. Finally, we will ask the network to produce predictions for `test_images`, and we \nwill verify if these predictions match the labels from `test_labels`.\n\nLet's build our network -- again, remember that you aren't supposed to understand everything about this example just yet.",
"_____no_output_____"
]
],
[
[
"from keras import models\nfrom keras import layers\n\nnetwork = models.Sequential()\nnetwork.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))\nnetwork.add(layers.Dense(10, activation='softmax'))",
"_____no_output_____"
]
],
[
[
"\nThe core building block of neural networks is the \"layer\", a data-processing module which you can conceive as a \"filter\" for data. Some \ndata comes in, and comes out in a more useful form. Precisely, layers extract _representations_ out of the data fed into them -- hopefully \nrepresentations that are more meaningful for the problem at hand. Most of deep learning really consists of chaining together simple layers \nwhich will implement a form of progressive \"data distillation\". A deep learning model is like a sieve for data processing, made of a \nsuccession of increasingly refined data filters -- the \"layers\".\n\nHere our network consists of a sequence of two `Dense` layers, which are densely-connected (also called \"fully-connected\") neural layers. \nThe second (and last) layer is a 10-way \"softmax\" layer, which means it will return an array of 10 probability scores (summing to 1). Each \nscore will be the probability that the current digit image belongs to one of our 10 digit classes.\n\nTo make our network ready for training, we need to pick three more things, as part of \"compilation\" step:\n\n* A loss function: the is how the network will be able to measure how good a job it is doing on its training data, and thus how it will be \nable to steer itself in the right direction.\n* An optimizer: this is the mechanism through which the network will update itself based on the data it sees and its loss function.\n* Metrics to monitor during training and testing. Here we will only care about accuracy (the fraction of the images that were correctly \nclassified).\n\nThe exact purpose of the loss function and the optimizer will be made clear throughout the next two chapters.",
"_____no_output_____"
]
],
[
[
"network.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"\nBefore training, we will preprocess our data by reshaping it into the shape that the network expects, and scaling it so that all values are in \nthe `[0, 1]` interval. Previously, our training images for instance were stored in an array of shape `(60000, 28, 28)` of type `uint8` with \nvalues in the `[0, 255]` interval. We transform it into a `float32` array of shape `(60000, 28 * 28)` with values between 0 and 1.",
"_____no_output_____"
]
],
[
[
"train_images = train_images.reshape((60000, 28 * 28))\ntrain_images = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((10000, 28 * 28))\ntest_images = test_images.astype('float32') / 255",
"_____no_output_____"
]
],
[
[
"We also need to categorically encode the labels, a step which we explain in chapter 3:",
"_____no_output_____"
]
],
[
[
"from keras.utils import to_categorical\n\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)",
"_____no_output_____"
]
],
[
[
"We are now ready to train our network, which in Keras is done via a call to the `fit` method of the network: \nwe \"fit\" the model to its training data.",
"_____no_output_____"
]
],
[
[
"network.fit(train_images, train_labels, epochs=5, batch_size=128)",
"Epoch 1/5\n60000/60000 [==============================] - 2s - loss: 0.2577 - acc: 0.9245 \nEpoch 2/5\n60000/60000 [==============================] - 1s - loss: 0.1042 - acc: 0.9690 \nEpoch 3/5\n60000/60000 [==============================] - 1s - loss: 0.0687 - acc: 0.9793 \nEpoch 4/5\n60000/60000 [==============================] - 1s - loss: 0.0508 - acc: 0.9848 \nEpoch 5/5\n60000/60000 [==============================] - 1s - loss: 0.0382 - acc: 0.9890 \n"
]
],
[
[
"Two quantities are being displayed during training: the \"loss\" of the network over the training data, and the accuracy of the network over \nthe training data.\n\nWe quickly reach an accuracy of 0.989 (i.e. 98.9%) on the training data. Now let's check that our model performs well on the test set too:",
"_____no_output_____"
]
],
[
[
"test_loss, test_acc = network.evaluate(test_images, test_labels)",
" 9536/10000 [===========================>..] - ETA: 0s"
],
[
"print('test_acc:', test_acc)",
"test_acc: 0.9777\n"
]
],
[
[
"\nOur test set accuracy turns out to be 97.8% -- that's quite a bit lower than the training set accuracy. \nThis gap between training accuracy and test accuracy is an example of \"overfitting\", \nthe fact that machine learning models tend to perform worse on new data than on their training data. \nOverfitting will be a central topic in chapter 3.\n\nThis concludes our very first example -- you just saw how we could build and a train a neural network to classify handwritten digits, in \nless than 20 lines of Python code. In the next chapter, we will go in detail over every moving piece we just previewed, and clarify what is really \ngoing on behind the scenes. You will learn about \"tensors\", the data-storing objects going into the network, about tensor operations, which \nlayers are made of, and about gradient descent, which allows our network to learn from its training examples.",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
c527e07d3d84472d2bb0fde35de39c7410042990
| 332,907 |
ipynb
|
Jupyter Notebook
|
praise_to_and_from.ipynb
|
jdclifton2/praiseanalysis
|
e527e49b45e2551401a64004338e23eba2d78ea9
|
[
"MIT"
] | null | null | null |
praise_to_and_from.ipynb
|
jdclifton2/praiseanalysis
|
e527e49b45e2551401a64004338e23eba2d78ea9
|
[
"MIT"
] | null | null | null |
praise_to_and_from.ipynb
|
jdclifton2/praiseanalysis
|
e527e49b45e2551401a64004338e23eba2d78ea9
|
[
"MIT"
] | null | null | null | 74.259871 | 48,650 | 0.658232 |
[
[
[
"<a href=\"https://colab.research.google.com/github/jdclifton2/praiseanalysis/blob/main/praise_to_and_from.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# The first part of this notebook loads the data. It was done by @ygg_anderson ",
"_____no_output_____"
]
],
[
[
"import panel as pn\npn.extension()\nimport pandas as pd\nimport numpy as np\n#import hvplot.pandas\nimport param, random\nimport datetime as dt\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"periods = [\n \"#17 May 7\",\n \"#16 Apr 24\",\n \"#15 Apr 9\",\n \"#14 Mar 26\",\n \"#13 Mar 12\",\n \"#12 Feb 26\",\n \"#11 Feb 12\",\n \"#10 Jan 29\",\n \"#9 Jan 15\", \n \"#8 Jan 1\",\n \"#7 Dec 18\",\n \"#6 Dec 4\",\n \"#5 Nov 20\", #\n \"#4 Nov 6\", #\n \"#3 Oct 23\", #\n \"#2 Oct 9\",\n \"#1 Sept 24\", #\n \"#0 Sept 7 (historic)\", #\n]",
"_____no_output_____"
],
[
"data = []\nfor i, period in enumerate(periods):\n \n if i not in (17, 16, 14, 12, 13):\n \n df = pd.read_excel('data/TEC Praise Quantification.xlsx', skiprows=2, sheet_name=period,engine='openpyxl', usecols=\"A:M\")\n\n df[['v1','v2','v3']] = list(df.columns[6:9])\n\n df.columns = list(df.columns[:6]) + ['v1 norm', 'v2 norm', 'v3 norm'] + list(df.columns[9:])\n\n df['period'] = period\n\n df = df.dropna(thresh=8)\n\n data.append(df)",
"_____no_output_____"
],
[
"combined_data = pd.concat(data)",
"_____no_output_____"
],
[
"combined_data",
"_____no_output_____"
]
],
[
[
"quantifiers = combined_data[combined_data[['IH per Praise', 'IH per person', 'Unnamed: 12']].isna().all(axis=1)]\nquantifiers",
"_____no_output_____"
]
],
[
[
"receivers = combined_data[~combined_data[['IH per Praise', 'IH per person', 'Unnamed: 12']].isna().all(axis=1)]\nreceivers",
"_____no_output_____"
]
],
[
[
"---\n\n# Investigations from octopus🐙",
"_____no_output_____"
],
[
"I'm going to do some basic analysis of the parts of the data set that are encoded with words. ",
"_____no_output_____"
]
],
[
[
"import seaborn as sns #seaborn is my plotting tool of choice",
"_____no_output_____"
],
[
"receivers.columns",
"_____no_output_____"
]
],
[
[
"### Where does praise happen?",
"_____no_output_____"
]
],
[
[
"sources = receivers.groupby(\"Unnamed: 3\").count()\nsources\n\nax = sns.barplot(y = sources.index, x = sources[\"To\"], order = sources.sort_values(\"To\", ascending = False).index)\nax.set(xlabel=\"Count\", ylabel=\"Source\", title = \"Where Praise is Given\")",
"_____no_output_____"
],
[
"room_sources = receivers.groupby(\"Room\").count()\nroom_sources\n\nplt.figure(figsize=(10,8))\nax = sns.barplot(y = room_sources.index, x = room_sources[\"To\"], order = room_sources.sort_values(\"To\", ascending = False).index)\nax.set(xlabel=\"Count\", ylabel=\"Source\", title = \"Where Praise is Given\")",
"_____no_output_____"
]
],
[
[
"### Do some basic cleaning and analysis. ",
"_____no_output_____"
],
[
"Now I'm going to create a new data frame that incorporates how many times each user gave and received praise. The next few cells accomplish this by using pivot_tables to create data frames which are then merged, with missing values replaced by 0.",
"_____no_output_____"
]
],
[
[
"praise_to = receivers.pivot_table(index = [\"To\"], aggfunc = 'size' ).to_frame(name = \"to\")\npraise_to",
"_____no_output_____"
],
[
"praise_from = receivers.pivot_table(index = [\"From\"], aggfunc = 'size').to_frame(name = \"from\")\npraise_from",
"_____no_output_____"
],
[
"praise_to_and_from = pd.concat([praise_from, praise_to], axis = 1)\npraise_to_and_from = praise_to_and_from.fillna(0)\npraise_to_and_from.head(5)",
"_____no_output_____"
]
],
[
[
"### A naming issue in our data ",
"_____no_output_____"
],
[
"So now we have **praise_to_and_from** as a data frame where each row is a user, and we see how many times they gave oraise (measured in **from**) and received praise (measured in **to**). \n\nThere is a **naming** issue that should be addressed at some point. Look below. \n",
"_____no_output_____"
]
],
[
[
"zep_df = praise_to_and_from.filter(like = \"zep\", axis = 0)\nzep_df",
"_____no_output_____"
],
[
"ygg_df = praise_to_and_from.filter(like = \"ygg\", axis = 0)\nygg_df",
"_____no_output_____"
]
],
[
[
"The issue is that some users receive praise with variations on their names. It would be good to consoliate these users for this analysis. However, this is an issue that's unlikely to affect most users, so we wll come back to it. ",
"_____no_output_____"
],
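[
"As a hedged sketch, one way to consolidate such variants is to map each alias to a canonical handle and re-aggregate. The `aliases` mapping below is hypothetical -- a real fix would enumerate the actual variants found in the data:\n\n```python\n# Hypothetical alias -> canonical-handle mapping (illustrative only).\naliases = {\"zeptimus\": \"zeptimusQ\", \"ygg\": \"ygg_anderson\"}\n\nconsolidated = praise_to_and_from.rename(index=lambda name: aliases.get(name, name))\nconsolidated = consolidated.groupby(level=0).sum()  # merge rows that now share a handle\n```",
"_____no_output_____"
],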
[
"### Relationship Between Givers and Receivers",
"_____no_output_____"
],
[
"Let's make a scatterplot to see if we think there is a relationship betwen these two quantities. ",
"_____no_output_____"
]
],
[
[
"ax = sns.scatterplot(x = \"from\", y = \"to\", data = praise_to_and_from)\nax.set(title = \"Praise: To and From\")",
"_____no_output_____"
]
],
[
[
"There doen't apear to be much of a relationship, primarily because **so many users** have zero values of **from**; this includes many users who are frequent praise recipients but do not give much. This is partially distorted by the naming issues above. ",
"_____no_output_____"
]
],
[
[
"sns.histplot(data = praise_to_and_from, x = \"to\")",
"_____no_output_____"
],
[
"sns.histplot(data = praise_to_and_from.query(\"to > 0\"), x = \"to\")",
"_____no_output_____"
],
[
"sum(praise_to_and_from[\"to\"] <= 1)/len(praise_to_and_from)",
"_____no_output_____"
],
[
"praise_to_and_from[\"from\"] == 0",
"_____no_output_____"
],
[
"sum(praise_to_and_from[\"from\"] == 0)/len(praise_to_and_from)",
"_____no_output_____"
],
[
"praise_to_and_from[praise_to_and_from[\"from\"] > 0][\"from\"].mean()",
"_____no_output_____"
]
],
[
[
"Almost 80% of users here have not given any praise (this might be skewed *slightly* by the naming issue above, but probably not much). This is probably described by a Pareto distribution -- with 20% of the users giving 100% of the praise. Among users who gave some praise, they gave an average of roughly 90 praises.",
"_____no_output_____"
]
],
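[
[
"As a quick, hedged check of that 80/20 intuition (a sketch reusing the columns defined above):\n\n```python\ngivers = praise_to_and_from[\"from\"].sort_values(ascending=False)\ntop_20_percent = givers.head(int(len(givers) * 0.2))\ntop_20_percent.sum() / givers.sum()  # share of all praise given by the top 20% of users\n```",
"_____no_output_____"
]
],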
[
[
"praise_to_and_from.describe()",
"_____no_output_____"
],
[
"praise_to_and_from[\"generosity\"] = praise_to_and_from[\"from\"]/(praise_to_and_from[\"to\"] + 1)\npraise_to_and_from",
"_____no_output_____"
],
[
"praise_to_and_from.query(\"generosity > 0\").query(\"generosity < 1\")",
"_____no_output_____"
]
],
[
[
"# Investigations from courier",
"_____no_output_____"
]
],
[
[
"# Run this cell to install the pygraphviz library\n#!apt-get install python3-dev graphviz libgraphviz-dev pkg-config\n#!pip install pygraphviz",
"_____no_output_____"
],
[
"import networkx as nx\nimport pygraphviz",
"_____no_output_____"
]
],
[
[
"We will attempt to make a graph from the dataframe. \n\nFirst we will begin with a simple proof of concept. ",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"recievers.csv\")\ndf.head()\ndf.columns\nG = nx.DiGraph()\nedges = df[['To','From']]\n\n\nfor ind, tup in edges[1:2].iterrows():\n to = tup[0]\n fro = tup[1]\n G.add_edge(fro, to)\n\n \nG.edges\nG = nx.nx_agraph.to_agraph(G)",
"_____no_output_____"
],
[
"G.layout(prog=\"dot\")\nG.draw(\"file.png\") ",
"_____no_output_____"
],
[
"edges[1:2]\n",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"Now we create a function that will automate this process.",
"_____no_output_____"
]
],
[
[
"def viz_graph(df, file_name):\n \"\"\"\n Creates a visualization of a graph created from a dataset of praise. \n\n :param df: The dataframe that contains who the praise is from and who it is to.\n Note that this dataframe should ONLY contain columns To and From.\n :param file_name: The name of the image that will be saved to your computer. \n \"\"\"\n\n G = nx.DiGraph()\n\n for ind, tup in df.iterrows():\n to = tup[0]\n fro = tup[1]\n G.add_edge(fro, to)\n\n G = nx.nx_agraph.to_agraph(G)\n G.layout(prog=\"dot\")\n G.draw(file_name) \n return G",
"_____no_output_____"
],
[
"#viz_graph(edges, \"praise_graph.png\")",
"_____no_output_____"
]
],
[
[
"You can use boolean conditions to get more specific graphs. ",
"_____no_output_____"
]
],
[
[
"#viz_graph(edges[edges[\"To\"] == \"zeptimusQ\"], \"zeptimus_praise_graph.png\")",
"_____no_output_____"
],
[
"def to_user_graph(df, user, file_name):\n \"\"\"\n Creates a visualization of a graph created from a dataset of praise. \n\n :param df: The dataframe that contains who the praise is from and who it is to.\n Note that this dataframe should ONLY contain columns To and From.\n :param user: All donations to this user will be graphed. \n :param file_name: The name of the image that will be saved to your computer. \n \"\"\"\n df = pd.read_csv(\"recievers.csv\")\n G = nx.DiGraph()\n\n for ind, tup in edges[edges[\"To\"] == user].iterrows():\n to = tup[0]\n fro = tup[1]\n G.add_edge(fro, to)\n\n G = nx.nx_agraph.to_agraph(G)\n G.layout(prog=\"dot\")\n G.draw(file_name) \n return G",
"_____no_output_____"
],
[
"def from_user_graph(df, user, file_name):\n \"\"\"\n Creates a visualization of a graph created from a dataset of praise. \n\n :param df: The dataframe that contains who the praise is from and who it is to.\n Note that this dataframe should ONLY contain columns To and From.\n :param user: All donations from this user will be graphed. \n :param file_name: The name of the image that will be saved to your computer. \n \"\"\"\n df = pd.read_csv(\"recievers.csv\")\n G = nx.DiGraph()\n\n for ind, tup in edges[edges[\"To\"] == user].iterrows():\n to = tup[0]\n fro = tup[1]\n G.add_edge(to, fro)\n\n G = nx.nx_agraph.to_agraph(G)\n G.layout(prog=\"dot\")\n G.draw(file_name) \n return G",
"_____no_output_____"
]
],
[
[
"# You can utilize these 2 forms to create user graphs. The first one will create a graph of donations from the provided user and the second form will graph donations to the provided user.",
"_____no_output_____"
]
],
[
[
"df_str = \"recievers.csv\" #@param {type:\"string\"}\nuser = \"zeptimusQ\" #@param {type:\"string\"}\nfile_name = \"zeptimusQ_to_graph.png\" #@param {type:\"string\"}\nfrom_user_graph(df=df_str, user=user,file_name=file_name)",
"_____no_output_____"
],
[
"df_str = \"recievers.csv\" #@param {type:\"string\"}\nuser = \"recievers.csv\" #@param {type:\"string\"}\nfile_name = \"zeptimusQ_from_graph.png\" #@param {type:\"string\"}\nto_user_graph(df=df_str, user=user,file_name=file_name)\n",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df.isna().sum()",
"_____no_output_____"
]
],
[
[
"We observe many null values in the final 3 columns. We will drop these for now.",
"_____no_output_____"
]
],
[
[
"df.drop(['Cred per Praise', 'Cred per person', 'To.1'], inplace=True, axis=1)",
"_____no_output_____"
]
],
[
[
"# Date inconsistency\n\nIt appears that the format of the dates in the data is inconsistent. Some entries opt for the format Month-Day-Year while others use Year-Month-Day.",
"_____no_output_____"
]
],
[
[
"df['Date']",
"_____no_output_____"
],
[
"df[\"Date\"].tail()",
"_____no_output_____"
],
[
"df[\"Date\"].head()",
"_____no_output_____"
]
],
[
[
"We will drop any NA dates.",
"_____no_output_____"
]
],
[
[
"df[\"Date\"] = df[\"Date\"].dropna()",
"_____no_output_____"
],
[
"from dateutil import parser",
"_____no_output_____"
]
],
[
[
"First we begin by turning all of the dates in the column into strings.",
"_____no_output_____"
]
],
[
[
"df[\"Date\"] = df[\"Date\"].apply(str)",
"_____no_output_____"
]
],
[
[
"Some other dates just have the word \"dup\" instead of a date. We can get rid of these. ",
"_____no_output_____"
]
],
[
[
"df[df[\"Date\"] == \"dup\"]",
"_____no_output_____"
],
[
"df = df[df[\"Date\"] != \"dup\"]",
"_____no_output_____"
],
[
"df = df[df[\"Date\"] != \"nan\"]",
"_____no_output_____"
],
[
"def normalize_dates(date_str):\n \"\"\"\n This function takes in a string that contains a date and converts it to the \n format Year-Month-Day.\n \n :param date_str: The string containing the date. \n :return: The date in the format Year-Month-Day.\n \"\"\"\n d = parser.parse(date_str)\n return d.strftime(\"%Y-%m-%d\")",
"_____no_output_____"
]
],
[
[
"A quick sanity check to ensure that we have the correct results after this function was applied.",
"_____no_output_____"
]
],
[
[
"df[\"Date\"].apply(normalize_dates)",
"_____no_output_____"
],
[
"df[\"Date\"]",
"_____no_output_____"
],
[
"df[\"Date\"] = pd.to_datetime(df[\"Date\"])",
"_____no_output_____"
]
],
[
[
"Now we add features for the year month and day.",
"_____no_output_____"
]
],
[
[
"df[\"Year\"] = df[\"Date\"].dt.year",
"_____no_output_____"
],
[
"df[\"Month\"] = df[\"Date\"].dt.month",
"_____no_output_____"
],
[
"df[\"Day\"] = df[\"Date\"].dt.day",
"_____no_output_____"
]
],
[
[
"Now we can group the data based on these features.",
"_____no_output_____"
]
],
[
[
"days = df.groupby(['Day']).To.nunique().reset_index()",
"_____no_output_____"
],
[
"days",
"_____no_output_____"
]
],
[
[
"Consider a plot of the number of praises given broken down by day of the month. We see that the most praise is given on the 5th day of the month.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.bar(days[\"Day\"], days[\"To\"], color='k')\nax.set_ylim(0, days[\"To\"].max()+5)\nfig.autofmt_xdate()\nplt.xlabel('Day of Month')\nplt.ylabel('Counts')\nplt.show()\n",
"_____no_output_____"
],
[
"months = df.groupby(['Month']).To.nunique().reset_index()",
"_____no_output_____"
]
],
[
[
"Consider a plot of the number of praises given broken down by the month. We see that the most praise is given in April. Interestingly, no praise at all is given between the months of June-October. ",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nax.bar(months[\"Month\"], months[\"To\"], color='k')\nax.set_ylim(0, months[\"To\"].max()+5)\nfig.autofmt_xdate()\nplt.xlabel('Month')\nplt.ylabel('Counts')\nplt.show()",
"_____no_output_____"
],
[
"dates = df.groupby(['Date']).To.nunique().reset_index()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"\nfig, ax = plt.subplots()\nax.plot_date(dates[\"Date\"], dates[\"To\"], color='k', xdate=True)\nax.set_ylim(0, dates[\"To\"].max()+5)\nfig.autofmt_xdate()\nplt.xlabel('Date')\nplt.ylabel('Counts')\nplt.show()",
"_____no_output_____"
],
[
"sns.lineplot(x=\"Date\", y=\"To\", data=dates)",
"_____no_output_____"
],
[
"dates.sort_values(by=['To'], ascending=False)",
"_____no_output_____"
],
[
"df.to_csv(\"processed_praise.csv\")",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
c527e908d3711d1430a520251d20de41985809e6
| 84,493 |
ipynb
|
Jupyter Notebook
|
jupyter_notebooks/scratch/intro_cartpole.ipynb
|
doMConSwiss/easyagents
|
7306c4b73d4c30a3037ad9b242767e895e08fbb5
|
[
"MIT"
] | 40 |
2019-07-02T21:24:47.000Z
|
2021-10-05T20:58:58.000Z
|
jupyter_notebooks/scratch/intro_cartpole.ipynb
|
doMConSwiss/easyagents
|
7306c4b73d4c30a3037ad9b242767e895e08fbb5
|
[
"MIT"
] | 49 |
2019-08-09T14:11:01.000Z
|
2022-03-11T23:50:28.000Z
|
jupyter_notebooks/scratch/intro_cartpole.ipynb
|
doMConSwiss/easyagents
|
7306c4b73d4c30a3037ad9b242767e895e08fbb5
|
[
"MIT"
] | 18 |
2019-07-15T11:05:47.000Z
|
2020-10-15T20:02:59.000Z
| 343.46748 | 78,296 | 0.936184 |
[
[
[
"<a href=\"https://colab.research.google.com/github/christianhidber/easyagents/blob/master/jupyter_notebooks/intro_cartpole.ipynb\" \n target=\"_parent\">\n <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n</a>",
"_____no_output_____"
],
[
"# CartPole Gym environment with TfAgents",
"_____no_output_____"
],
[
"## Install packages (gym, tfagents, tensorflow,....)",
"_____no_output_____"
],
[
"#### suppress package warnings, prepare matplotlib, if in colab: load additional packages for rendering",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport sys\nimport warnings\nwarnings.filterwarnings('ignore')\n\nif 'google.colab' in sys.modules:\n !apt-get update >/dev/null\n !apt-get install xvfb >/dev/null\n !pip install pyvirtualdisplay >/dev/null \n \n from pyvirtualdisplay import Display\n Display(visible=0, size=(960, 720)).start() \nelse:\n # for local installation\n sys.path.append('..')",
"_____no_output_____"
]
],
[
[
"#### install easyagents",
"_____no_output_____"
]
],
[
[
"import sys\nif 'google.colab' in sys.modules:\n !pip install easyagents >/dev/null",
"_____no_output_____"
]
],
[
[
"The fc_layers argument defines the policy's neural network architecture. Here we use 3 fully connected layers\nwith 100 neurons in the first, 50 in the second and 25 in the final layer. \nBy default fc_layers=(75,75) is used.\n\nThe first argument of the train method is a list of callbacks. Through callbacks we define the plots generated during \ntraining, the logging behaviour or control training duration. \nBy passing [plot.State(), plot.Loss(), plot.Actions(), plot.Rewards()] we add in particular the State() plot, \ndepicting the last observation state of the last evaluation episode. plot.Actions() displays a histogram of the \nactions taken for each episode played during the last evaluation period. \n\nBesides num_iterations there are quite a few parameters to specify the exact training duration (e.g. \nnum_episodes_per_iteration, num_epochs_per_iteration, max_steps_per_episode,...).",
"_____no_output_____"
],
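[
"For reference, a minimal PPO run consistent with the parameters described above might look like the following sketch (not the original cell; `num_iterations=50` is only an illustrative value):\n\n```python\nfrom easyagents.agents import PpoAgent\nfrom easyagents.callbacks import plot\n\nppoAgent = PpoAgent('CartPole-v0', fc_layers=(100, 50, 25))\nppoAgent.train([plot.State(), plot.Loss(), plot.Actions(), plot.Rewards()],\n               num_iterations=50)\n```",
"_____no_output_____"
],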
[
"## Switching the algorithm\n\nSwitching from Ppo to Dqn is easy, essentially just replace PpoAgent with DqnAgent (the evaluation may take a few\nminuites):",
"_____no_output_____"
]
],
[
[
"from easyagents.agents import DqnAgent\nfrom easyagents.callbacks import plot",
"_____no_output_____"
],
[
"%%time\n\ndqnAgent = DqnAgent('CartPole-v0', fc_layers=(100, ))\ndqnAgent.train([plot.State(), plot.Loss(), plot.Actions(), plot.Rewards()], \n num_iterations=20000, num_iterations_between_eval=1000)",
"Wall time: 4min 50s\n"
]
],
[
[
"Since Dqn by default only takes 1 step per iteration (and thus an episode spans over several iterations) we increased\nthe num_iterations parameter.",
"_____no_output_____"
],
[
"## Next: custom training, creating a movie & switching backends.\n\n* see \n [Orso on colab](https://colab.research.google.com/github/christianhidber/easyagents/blob/master/jupyter_notebooks/intro_orso.ipynb)\n (an example of a gym environment implementation based on a routing problem)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
c527eafbecdf1b9d93c07cc3504ecee42eae7a55
| 16,566 |
ipynb
|
Jupyter Notebook
|
notebooks/tutorials/Tutorial-3_Use_homemade_dataset.ipynb
|
HeytemBou/distributed-learning-contributivity
|
99d276ac94817fcc1f1d9c294eb89a8b13206edd
|
[
"Apache-2.0"
] | 1 |
2021-04-06T13:02:12.000Z
|
2021-04-06T13:02:12.000Z
|
notebooks/tutorials/Tutorial-3_Use_homemade_dataset.ipynb
|
mshuaic/distributed-learning-contributivity
|
47419667e11f16c5846328c1e19979cb04045904
|
[
"Apache-2.0"
] | null | null | null |
notebooks/tutorials/Tutorial-3_Use_homemade_dataset.ipynb
|
mshuaic/distributed-learning-contributivity
|
47419667e11f16c5846328c1e19979cb04045904
|
[
"Apache-2.0"
] | null | null | null | 37.65 | 731 | 0.566763 |
[
[
[
"Tutorials table of content:\n\n- [Tutorial 1: Run a first scenario](./Tutorial-1_Run_your_first_scenario.ipynb)\n- [Tutorial 2: Add contributivity measurements methods](./Tutorial-2_Add_contributivity_measurement.ipynb)\n- Tutorial 3: Use a custom dataset\n\n\n# Tutorial 3 : Use homemade dataset \n\nWith this example, we dive deeper into the potential of the library, and run a scenario on a new dataset, that we will implement ",
"_____no_output_____"
],
[
"## 1 - Prerequisites\n\nIn order to run this example, you'll need to:\n\n* use python 3.7 +\n* install this package https://pypi.org/project/mplc/\n\nIf you did not follow our firsts tutorials, it is highly recommended to [take a look at it !](https://github.com/SubstraFoundation/distributed-learning-contributivity/tree/master/notebooks/examples/) \n",
"_____no_output_____"
]
],
[
[
"!pip install mplc",
"_____no_output_____"
]
],
[
[
"## 2 - Context \n\nIn collaborative data science projects partners sometimes need to train a model on multiple datasets, contributed by different data providing partners. In such cases the partners might have to measure how much each dataset involved contributed to the performance of the model. This is useful for example as a basis to agree on how to share the reward of the ML challenge or the future revenues derived from the predictive model, or to detect possible corrupted datasets or partners not playing by the rules. The library explores this question and the opportunity to implement some mechanisms helping partners in such scenarios to measure each dataset's *contributivity* (as *contribution to the performance of the model*).\n\nIn the [first tutorial](./Tutorial-1_Run_your_first_scenario.ipynb), you learned how to parametrize and run a scenario.\nIn the [second tutorial](./Tutorial-2_Add_contributivity_measurement.ipynb), you discovered how to add to your scenario run one of the contributivity measurement methods available.\nIn this third tutorial, we are going to use a custom dataset. ",
"_____no_output_____"
],
[
"### The dataset : Sentiment140\nWe are going to use a subset of the [sentiment140](http://help.sentiment140.com/for-students) dataset and try to \nclassified short film review, between positive sentiments and negative sentiments for movies. \n\n*The whole machine learning process is inspired from this [article](https://medium.com/@alyafey22/sentiment-classification-from-keras-to-the-browser-7eda0d87cdc6)*\nPlease note that the library provided a really easy way to adapt a single partner, common machine learning use case with tensorflow, to a multipartner case, with contributivity measurement. ",
"_____no_output_____"
]
],
[
[
"# imports\nimport seaborn as sns\nimport pandas as pd \nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nimport re\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, GRU, Embedding\n\nfrom mplc.dataset import Dataset\nfrom mplc.scenario import Scenario\n\nsns.set()",
"Using TensorFlow backend.\n"
]
],
[
[
"## 3 - Generation, and preparation of the dataset\n \nThe scenario object needs a dataset object to run. In the previous tutorials, we indicate which one to generate automatically by passing a name of a pre-implemented dataset to the scenario constructor. \nHere, we will create this dataset object and pass it to the scenario constructor. To do so, we are going to create a new class, which inherit from the mplc.Dataset abstract class.\n\nA sub-class of Dataset needs few attribute and method. First, the constructor of the Dataset object needs few arguments.\n### Dataset generator :\n\nThe structure of the dataset generator is represented below:\n\n```python\ndataset = Dataset(\n \"name\",\n x_train,\n x_test,\n y_train,\n y_test,\n input_shape,\n num_classes,\n)\n```\n#### Data labels\nThe data labels can take whatever shape you need, with only one condition. \nThe labels need to be convertible into string format, and with respect to the condition that if label1 is equal to label2 (\nreciprocally different from), therefore str(label1) must be equal to str(label2) (reciprocally different from)\n#### Model generator\nThis method needs to be implemented, and provides the model use, which will be trained by the `Scenario` object.\nNote: It is mandatory to have loss and accuracy as metrics for your model.\n\n#### Train/validation/test splits\n\nThe `Dataset` constructor (called via `super()`) must be provided some separated train and test sets (referred to as global train set and global test set).\nThe global train set is then further split into a global train set and a global validation set, by the function `train_val_split_global`. Please denote that if this function is not overwritten, the sklearn's `train_test_split` function will be called by default, and 10% of the training set will be use as validation set.\nIn the multi-partner learning computations, the global validation set is used for early stopping and the global test set is used for performance evaluation.\nThe global train set is then split amongst partners (according to the scenario configuration) to populate the partner's local datasets.\nFor each partner, the local dataset will be split into separated train, validation and test sets, using the `train_test_split_local` and `train_val_split_local` methods.\nThese are not mandatory, by default the local dataset will not be split. \nDenote that currently, the local validation and test set are not used, but they are available for further developments of multi-partner learning and contributivity measurement approaches.\n\n### Dataset construction\nNow that we know all of that, we can create our dataset class.\n#### Download and unzip data if needed",
"_____no_output_____"
]
],
[
[
"!curl https://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip --output trainingandtestdata.zip\n!unzip trainingandtestdata.zip",
" % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 77.5M 100 77.5M 0 0 15.0M 0 0:00:05 0:00:05 --:--:-- 15.9M\nArchive: trainingandtestdata.zip\n inflating: testdata.manual.2009.06.14.csv \n inflating: training.1600000.processed.noemoticon.csv \n"
]
],
[
[
"#### Define our Dataset class",
"_____no_output_____"
]
],
[
[
"class Sentiment140(Dataset):\n def __init__(self):\n\n x, y = self.load_data()\n self.max_tokens = self.getMax(x)\n\n self.num_words = None\n self.word_index = self.tokenize()\n self.num_words = len(self.word_index)\n\n x = self.create_sequences(x)\n y = self.preprocess_dataset_labels(y)\n\n self.input_shape = self.max_tokens\n self.num_classes = len(np.unique(y))\n\n\n print('length of the dictionary ',len(self.word_index))\n print('max token ', self.max_tokens)\n print('num classes', self.num_classes)\n\n (x_train, x_test) = train_test_split(x, shuffle = False)\n (y_train, y_test) = train_test_split(y, shuffle = False)\n\n super(Sentiment140, self).__init__(dataset_name='sentiment140',\n num_classes=self.num_classes,\n input_shape=self.input_shape,\n x_train=x_train,\n y_train=y_train,\n x_test=x_test,\n y_test=y_test)\n\n @staticmethod\n def load_data(): # load the data, transform the .csv into usable dataframe\n\n df_train = pd.read_csv(\"training.1600000.processed.noemoticon.csv\", encoding = \"raw_unicode_escape\", header=None)\n df_test = pd.read_csv(\"testdata.manual.2009.06.14.csv\", encoding = \"raw_unicode_escape\", header=None)\n\n df_train.columns = [\"polarity\", \"id\", \"date\", \"query\", \"user\", \"text\"]\n df_test.columns = [\"polarity\", \"id\", \"date\", \"query\", \"user\", \"text\"]\n\n # We keep only a fraction of the whole dataset\n\n df_train = df_train.sample(frac = 0.1)\n\n x = df_train[\"text\"]\n y = df_train[\"polarity\"]\n\n return x, y\n\n # Preprocessing methods\n @staticmethod\n def process( txt):\n out = re.sub(r'[^a-zA-Z0-9\\s]', '', txt)\n out = out.split()\n out = [word.lower() for word in out]\n return out\n\n @staticmethod\n def getMax( data):\n max_tokens = 0\n for txt in data:\n if max_tokens < len(txt.split()):\n max_tokens = len(txt.split())\n return max_tokens\n\n\n def tokenize(self, thresh = 5):\n count = dict()\n idx = 1\n word_index = dict()\n for txt in x:\n words = self.process(txt)\n for word in words:\n if word in count.keys():\n count[word] += 1\n else:\n count[word] = 1\n most_counts = [word for word in count.keys() if count[word]>=thresh]\n for word in most_counts:\n word_index[word] = idx\n idx+=1\n return word_index\n\n\n def create_sequences(self,data):\n tokens = []\n for txt in data:\n words = self.process(txt)\n seq = [0] * self.max_tokens\n i = 0\n for word in words:\n start = self.max_tokens-len(words)\n if word.lower() in self.word_index.keys():\n seq[i+start] = self.word_index[word]\n i+=1\n tokens.append(seq)\n return np.array(tokens)\n\n @staticmethod\n def preprocess_dataset_labels( label):\n label = np.array([e/4 for e in label])\n return label\n\n\n def generate_new_model(self): # Define the model generator\n model = Sequential()\n embedding_size = 8\n model.add(Embedding(input_dim=self.num_words,\n output_dim=embedding_size,\n input_length=self.max_tokens,\n name='layer_embedding'))\n\n model.add(GRU(units=16, name = \"gru_1\",return_sequences=True))\n model.add(GRU(units=8, name = \"gru_2\" ,return_sequences=True))\n model.add(GRU(units=4, name= \"gru_3\"))\n model.add(Dense(1, activation='sigmoid',name=\"dense_1\"))\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n return model\n",
"_____no_output_____"
]
],
[
[
"#### Create dataset\n\nAnd we can eventually generate our object!",
"_____no_output_____"
]
],
[
[
"my_dataset = Sentiment140()",
"_____no_output_____"
],
[
"## 4 - Create the custom scenario\nThe dataset can be passed to the scenario, through the `dataset` argument.",
"_____no_output_____"
]
],
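[
[
"As a hedged sketch of such a scenario (the partner split, epoch and minibatch counts below are illustrative values, not prescribed by this tutorial -- see Tutorial 1 for the `Scenario` parameters):\n\n```python\nmy_scenario = Scenario(partners_count=3,\n                       amounts_per_partner=[0.2, 0.3, 0.5],\n                       dataset=my_dataset,\n                       epoch_count=2,\n                       minibatch_count=2)\nmy_scenario.run()\n```",
"_____no_output_____"
]
],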
[
[
"# That's it!\n\nNow you can explore our other tutorials for a better overview of what can be done with `mplc`!\n\nThis work is collaborative, enthusiasts are welcome to comment open issues and PRs or open new ones.\n\nShould you be interested in this open effort and would like to share any question, suggestion or input, you can use the following channels:\n\n- This Github repository (issues or PRs)\n- Substra Foundation's [Slack workspace](https://substra-workspace.slack.com/join/shared_invite/zt-cpyedcab-FHYgpy08efKJ2FCadE2yCA), channel `#workgroup-mpl-contributivity`\n- Email: [email protected]\n\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
c527ffacd7bea5ccd38f39d066696a039f790029
| 26,002 |
ipynb
|
Jupyter Notebook
|
notebooks/ClassesObjects.ipynb
|
UBprojects/eas503-ub
|
94f7a207c0c50629f8eefa475a0eb5c6cd3b4069
|
[
"MIT"
] | 1 |
2021-03-07T06:37:00.000Z
|
2021-03-07T06:37:00.000Z
|
notebooks/ClassesObjects.ipynb
|
UBprojects/eas503-ub
|
94f7a207c0c50629f8eefa475a0eb5c6cd3b4069
|
[
"MIT"
] | null | null | null |
notebooks/ClassesObjects.ipynb
|
UBprojects/eas503-ub
|
94f7a207c0c50629f8eefa475a0eb5c6cd3b4069
|
[
"MIT"
] | 1 |
2022-03-15T04:22:56.000Z
|
2022-03-15T04:22:56.000Z
| 23.876951 | 889 | 0.500538 |
[
[
[
" # Programming and Database Fundamentals for Data Scientists - EAS503",
"_____no_output_____"
],
[
"Python classes and objects.\n\nIn this notebook we will discuss the notion of classes and objects, which are a fundamental concept. Using the keyword `class`, one can define a class.\n\nBefore learning about how to define classes, we will first understand the need for defining classes.",
"_____no_output_____"
],
[
"### A Simple Banking Application\nRead data from `csv` files containing customer and account information and find all customers with more than \\$25,000 in their bank account, and send a letter to them with some scheme (find their address).",
"_____no_output_____"
]
],
[
[
"# Logical design\nimport csv\n# load customer information\ncustomerMap = {}\nwith open('customers.csv','r') as f:\n rd = csv.reader(f)\n next(rd)\n for row in rd:\n customerMap[int(row[0])] = (row[1],row[2])\n# load account information\naccountsMap = {}\nwith open('accounts.csv','r') as f:\n rd = csv.reader(f)\n next(rd)\n for row in rd:\n if int(row[1]) not in accountsMap.keys():\n accountsMap[int(row[1])] = []\n l = accountsMap[int(row[1])]\n l.append(int(row[2]))\n accountsMap[int(row[1])] = l",
"_____no_output_____"
],
[
"customerMap",
"_____no_output_____"
],
[
"accountsMap",
"_____no_output_____"
],
[
"for k in accountsMap.keys():\n if sum(accountsMap[k]) > 25000:\n print(customerMap[k])",
"('Jane', '123 Main Street')\n('Alice', '111 Central Ave')\n"
],
[
"# OOD\nclass Customer:\n def __init__(self, customerid, name, address):\n self.__name = name\n self.__customerid = customerid\n self.__address = address\n self.__accounts = []\n \n def add_account(self,account):\n self.__accounts.append(account)\n\n\n def get_total(self):\n s = 0\n for a in self.__accounts:\n s = s + a.get_amount()\n return s\n \n def get_name(self):\n return self.__name\n\n\nclass Account:\n def __init__(self,accounttype,amount):\n self.__accounttype = accounttype\n self.__amount = amount\n \n def get_amount(self):\n return self.__amount\n \n ",
"_____no_output_____"
],
[
"import csv\ncustomers = {}\nwith open('./customers.csv') as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n customer = Customer(row[0],row[1],row[2])\n customers[row[0]] = customer\n\nwith open('./accounts.csv') as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n customerid = row[1]\n account = Account(row[0],int(row[2]))\n customers[customerid].add_account(account)",
"_____no_output_____"
],
[
"for c in customers.keys():\n if customers[c].get_total() > 25000:\n print(customers[c].get_name())",
"Jane\nAlice\n"
]
],
[
[
"## Defining Classes\nMore details about `class` definition",
"_____no_output_____"
]
],
[
[
"# this class has no __init__ function\nclass myclass:\n def mymethod_myclass(self):\n print(\"hey\")",
"_____no_output_____"
],
[
"myobj = myclass()\nmyobj.mymethod_myclass()",
"hey\n"
],
[
"# this class has no __init__ function\nclass myclass:\n # we define a field \n __classtype='My Class'\n def mymethod(self):\n print(\"This is \"+self.__classtype)\n \n def getClasstype(self):\n return self.__classtype",
"_____no_output_____"
],
[
"# making fields private\nmyobj = myclass()\nmyobj.mymethod()\nprint(myobj.getClasstype())",
"This is My Class\nMy Class\n"
],
[
"myobj = myclass()\nmyobj.mymethod()",
"This is My Class\n"
],
[
"# this class has not __init__ function\nclass myclass:\n # we define a global field \n classtype='My Class'\n def mymethod(self):\n print(\"this is a method\")\n self.a = 'g'\n #print(\"This is \"+self.classtype) # note that we are explicitly referencing the field of the class\n \n def mymethod2(self):\n print(\"This is\"+self.classtype)\n print(self.a)",
"_____no_output_____"
],
[
"m = myclass()\nm.mymethod()",
"this is a method\n"
],
[
"type(m)",
"_____no_output_____"
],
[
"myobj = myclass()\nmyobj.mymethod()\nmyobj.mymethod2()",
"this is a method\nThis isMy Class\ng\n"
]
],
[
[
"#### Issues with defining fields outside the `__init__` function\nIf global field is mutable",
"_____no_output_____"
]
],
[
[
"# this class has not __init__ function\nclass myclass:\n # we define a field \n version='1.0.1'\n classtypes=['int']\n def __init__(self):\n self.a = ['m']\n def mymethod(self):\n print(self.classtypes) # note that we are explicitly referencing the field of the class\n print(self.a)\n def mystaticmethod():\n print('This class is open source')",
"_____no_output_____"
],
[
"myobj1 = myclass()\nmyobj2 = myclass()\n\nmyobj1.mymethod()\nmyobj2.mymethod()",
"['int']\n['m']\n['int']\n['m']\n"
],
[
"\nmyobj1.classtypes.append('float')\nmyobj1.a.append('n')\nmyobj1.mymethod()\n",
"['int', 'float']\n['m', 'n']\n"
],
[
"myobj2.mymethod()",
"['int', 'float']\n['m']\n"
]
],
[
[
"#### How to avoid the above issue?\nDefine mutable fields within `__init__`",
"_____no_output_____"
]
],
[
[
"# this class has an __init__ function\nclass myclass:\n def __init__(self):\n # we define a field \n self.classtypes=['int']\n def mymethod(self):\n print(self.classtypes) # note that we are explicitly referencing the field of the class",
"_____no_output_____"
],
[
"myobj1 = myclass()\nmyobj2 = myclass()\n\nmyobj1.mymethod()\nmyobj2.mymethod()\n\nmyobj1.classtypes.append('float')\n\nmyobj1.mymethod()\nmyobj2.mymethod()",
"['int']\n['int']\n['int', 'float']\n['int']\n"
],
[
"# you can directly access the field\nmyobj1.mymethod()\n",
"_____no_output_____"
]
],
[
[
"#### Hide fields from external use",
"_____no_output_____"
]
],
[
[
"class account:\n def __init__(self,u,p):\n self.username = u\n self.password = p\nact = account('chandola','chandola')\nprint(act.password)",
"chandola\n"
],
[
"class account:\n def __init__(self,u,p):\n self.__username = u\n self.__password = p\n \n def getUsername(self):\n return self.__username\n \n def checkPassword(self,p):\n if p == self.__getPassword():\n return True\n else:\n return False\n def __getPassword(self):\n return self.__password\n \nact = account('chandola','chandola')\nprint(act.getUsername())\nprint(act.checkPassword('chandola'))\nprint(act.__getPassword())",
"chandola\nTrue\n"
],
[
"# this class has an __init__ function\nclass myclass:\n def __init__(self):\n # we define a field \n self.__classtypes=['int']",
"_____no_output_____"
],
[
"myobj1 = myclass()\nmyobj1.__classtypes",
"_____no_output_____"
],
[
"# the private field will be accessible to the class methods\nclass myclass:\n def __init__(self):\n # we define a field \n self.__classtypes=['int']\n \n def appendType(self,newtype):\n self.__classtypes.append(newtype)",
"_____no_output_____"
],
[
"myobj1 = myclass()\nmyobj1.appendType('float')",
"_____no_output_____"
],
[
"# still cannot access the field outside\nmyobj1.__classtypes",
"_____no_output_____"
],
[
"# solution -- create a getter method\nclass myclass:\n def __init__(self):\n # we define a field \n self.__classtypes=['int']\n \n def appendType(self,newtype):\n self.__classtypes.append(newtype)\n \n def getClasstypes(self):\n return self.__classtypes",
"_____no_output_____"
],
[
"myobj1 = myclass()\nmyobj1.appendType('float')\nmyobj1.getClasstypes()",
"_____no_output_____"
],
[
"print(['s','g','h'])",
"['s', 'g', 'h']\n"
]
],
[
[
"One can create `getter` and `setter` methods to manipulate fields. While the name of the methods can be arbitrary, a good programming practice is to use get`FieldNameWithoutUnderscores()` and set`FieldNameWithoutUnderscores()`",
"_____no_output_____"
],
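[
"As a minimal sketch of that convention for the `__classtypes` field used above:\n\n```python\nclass myclass:\n    def __init__(self):\n        self.__classtypes = ['int']\n\n    def getClasstypes(self):        # getter\n        return self.__classtypes\n\n    def setClasstypes(self, value): # setter\n        self.__classtypes = value\n```",
"_____no_output_____"
],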
[
"## Inheritance in Python\nAbility to define subclasses. \n\nLet us assume that we want to have defined a class called `Employee` that has some information about a bank employee and some supporting methods.",
"_____no_output_____"
]
],
[
[
"class Employee:\n def __init__(self,firstname,lastname,empid):\n self.__firstname = firstname\n self.__lastname = lastname\n self.__empid = empid\n \n # following is a special function used by the Python in-built print() function\n def __str__(self):\n return \"Employee name is \"+self.__firstname+\" \"+self.__lastname\n \n def checkid(self,inputid):\n if inputid == self.__empid:\n return True\n else:\n return False\n \n def getfirstname(self):\n return self.__firstname\n \n def getlastname(self):\n return self.__lastname\n ",
"_____no_output_____"
],
[
"emp1 = Employee(\"Homer\",\"Simpson\",777)\nprint(emp1)",
"Employee name is Homer Simpson\n"
],
[
"print(emp1.checkid(777))",
"True\n"
]
],
[
[
"Now we want to create a new class called `Manager` which retains some properties of an `Employee` buts add some more",
"_____no_output_____"
]
],
[
[
"class Manager(Employee):\n def __init__(self,firstname,lastname,empid):\n super().__init__(firstname,lastname,empid)",
"_____no_output_____"
],
[
"mng1 = Manager(\"Charles\",\"Burns\",666)\nprint(mng1)",
"Employee name is Charles Burns\n"
]
],
[
[
"But we want to add extra fields and set them in the constructor",
"_____no_output_____"
]
],
[
[
"class Manager(Employee):\n def __init__(self,firstname,lastname,empid,managerid):\n super().__init__(firstname,lastname,empid)\n self.__managerid = managerid\n \n def checkmanagerid(self,inputid):\n if inputid == self.__managerid:\n return True\n else:\n return False",
"_____no_output_____"
],
[
"mng1 = Manager(\"Charles\",\"Burns\",666,111)\nprint(mng1)",
"Employee name is Charles Burns\n"
],
[
"mng1.checkid(666)",
"_____no_output_____"
],
[
"mng1.checkmanagerid(111)",
"_____no_output_____"
]
],
[
[
"You can modify methods of base classes",
"_____no_output_____"
]
],
[
[
"class Manager(Employee):\n def __init__(self,firstname,lastname,empid,managerid):\n super().__init__(firstname,lastname,empid)\n self.__managerid = managerid\n \n def checkmanagerid(self,inputid):\n if inputid == self.__managerid:\n return True\n else:\n return False\n \n def __str__(self):\n # why will the first line not work and the second one will\n #return \"Manager name is \"+self.__firstname+\" \"+self.__lastname\n return \"Manager name is \"+self.getfirstname()+\" \"+self.getlastname()",
"_____no_output_____"
],
[
"mng1 = Manager(\"Charles\",\"Burns\",666,111)\nprint(mng1)",
"Manager name is Charles Burns\n"
]
],
[
[
"**Remember** - Derived classes cannot access private fields of the base class directly\n\n### Inheriting from multiple classes\nConsider a scenario where you have additional class, `Citizen`, that has other information about a person. Can we create a derived class that inherits properties of both `Employee` and `Citizen` class?",
"_____no_output_____"
]
],
[
[
"class Citizen:\n def __init__(self,ssn,homeaddress):\n self.__ssn = ssn\n self.__homeaddress = homeaddress\n \n def __str__(self):\n return \"Person located at \"+self.__homeaddress\n ",
"_____no_output_____"
],
[
"ctz1 = Citizen(\"123-45-6789\",\"742 Evergreen Terrace\")\nprint(ctz1)",
"Person located at 742 Evergreen Terrace\n"
],
[
"# it is easy\nclass Manager2(Employee,Citizen):\n def __init__(self,firstname,lastname,empid,managerid,ssn,homeaddress):\n Citizen.__init__(self,ssn,homeaddress)\n Employee.__init__(self,firstname,lastname,empid)\n self.__managerid = managerid\n \n def __str__(self):\n return \"Manager name is \"+Employee.getfirstname(self)+\" \"+Employee.getlastname(self)+\", \"+Citizen.__str__(self)",
"_____no_output_____"
],
[
"mgr2 = Manager2(\"Charles\",\"Burns\",666,111,\"123-45-6789\",\"742 Evergreen Terrace\")\nprint(mgr2)",
"Manager name is Charles Burns, Person located at 742 Evergreen Terrace\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c52801724cae25507c992a8f7fa178ea0226f2c5
| 405,703 |
ipynb
|
Jupyter Notebook
|
Stochastic Population Genetics Basic Models.ipynb
|
Beatris/biomath_models
|
7c68d112aefa5d14917d110a7323098b271cec03
|
[
"MIT"
] | null | null | null |
Stochastic Population Genetics Basic Models.ipynb
|
Beatris/biomath_models
|
7c68d112aefa5d14917d110a7323098b271cec03
|
[
"MIT"
] | null | null | null |
Stochastic Population Genetics Basic Models.ipynb
|
Beatris/biomath_models
|
7c68d112aefa5d14917d110a7323098b271cec03
|
[
"MIT"
] | null | null | null | 544.567785 | 59,948 | 0.932542 |
[
[
[
"# Случайни ефекти върху генетичната структура на популации\n\n\n***\n\nИзследвани са основни популационни модели върху генетичната структура на популации, както са представени в курса във ФМИ [\"Въведение в изчислителната биология\", доц. П. Рашков (ИМИ-БАН)](http://www.math.bas.bg/nummeth/rashkov/teaching/), 2018-2019г\n***",
"_____no_output_____"
],
[
"## Стохастичен модел за популация с два фенотипа\n\nРазглеждаме популация от белокрили и чернокрили молци.\n\nЦветът на крилата на индивид от популацията се определя от локус с два алела. Означаваме ги с **W** и **w**.\n\nКогато локусът е:\n- **WW** или **Ww** - молецът е белокрил\n- **ww** - молецът е чернокрил\n\n#### Закон на Харди-Вайнберг\n\n>В идеална популация съотношението между алелите остава непроменено с течение на поколенията.\n\nПод идеална се имат предвид следните условия (за един локус с 2 алела):\n- Поколенията са неприпокриващи се\n- Чифтосването между индивидите е на случаен принцип\n- Всички са еднакво способни да оцелеят\n- Няма мутации\n- Честотата на даден алел е еднаква за цялата популация\n\n#### Подобрен модел\n\nНека бележим с $\\alpha$ делът на белокрилите молци (WW или Ww), които оцеляват. А, с $\\gamma$ делът на чернокрилите (ww), които оцеляват.\n\nОчевидно, ако $\\alpha > \\gamma$, белокрилите имат предимство. Обратно, ако $\\gamma > \\alpha$ чернокрилите са повече.\n\nНека $p_n = \\frac{\\text{бр. W-алели}}{\\text{бр. всички алели}}$\n\nЗа да изследваме генотипа на бъдещите поколения, ще използваме [квадрата на Пънет](https://en.wikipedia.org/wiki/Punnett_square):\n\n\n<table style=\"width: 50%; text-align: center !important;\">\n <tbody>\n <tr>\n <td></td>\n <td></td>\n <th colspan=2>Женски</th>\n </tr>\n <tr>\n <td></td>\n <td></td>\n <th>W</th>\n <th>w</th>\n </tr>\n <tr>\n <th rowspan=2>Мъжки</th>\n <th>W</th>\n <td>$\\alpha p_n^2$</td>\n <td>$\\alpha p_n (1 - p_n)$</td>\n </tr>\n <tr>\n <th>w</th>\n <td>$\\alpha p_n (1 - p_n)$</td>\n <td>$\\gamma (1 - p_n)^2$</td>\n </tr>\n </tbody>\n</table>\n\nОт тук следва, че вероятностите за генотипа на потомството са:\n- **WW**: $\\alpha p_n^2$\n- **Ww**: $2 \\alpha p_n (1 - p_n)$\n- **ww**: $\\gamma (1 - p_n)^2$\n\nТогава за $p_{n+1}$ получаваме:\n\\begin{equation}\np_{n+1} = \\frac{\\alpha p_n^2 + 2 \\alpha p_n (1 - p_n) \\frac{1}{2}}{\\alpha p_n^2 + 2 \\alpha p_n (1 - p_n) + \\gamma (1 - p_n)^2} \\Leftrightarrow p_{n+1} = \\frac{\\alpha p_n}{p_n^2(\\gamma - \\alpha) - 2 p_n (\\gamma - \\alpha) + \\gamma}\n\\end{equation}\n\n- Ако $\\alpha = \\gamma$ => Закон на Харди-Вайнберг (съотношението между алелите остава непроменено)\n- Ако $\\alpha \\neq \\gamma$, търсим равновесни точки. Можем да ги видим и на графиката:",
"_____no_output_____"
]
],
[
[
"import math\nimport matplotlib.pyplot as plt\n\nfrom numpy import *",
"_____no_output_____"
],
[
"def f(pn, alpha=0.5, gamma=0.7):\n return (alpha * pn) / (pn**2 * (gamma - alpha) - 2 * pn * (gamma - alpha) + gamma)",
"_____no_output_____"
],
[
"def show_balance_points(initial, **kwargs):\n previous = initial\n for i in range(50):\n current = f(previous, **kwargs)\n plt.plot(previous, current, 'bo')\n previous = current\n\n plt.xlabel(\"p_n\")\n plt.ylabel(\"p_n+1\")\n plt.show()",
"_____no_output_____"
]
],
[
[
"Когато $\\alpha > \\gamma$, $p_n \\rightarrow 1$ и следователно белокрилите имат предимство:",
"_____no_output_____"
]
],
[
[
"show_balance_points(0.3, alpha=0.7, gamma=0.3)",
"_____no_output_____"
]
],
[
[
"Когато $\\alpha > \\gamma$, $p_n \\rightarrow 0$ и следователно чернокрилите имат предимство:",
"_____no_output_____"
]
],
[
[
"show_balance_points(0.3, alpha=0.4, gamma=0.6)",
"_____no_output_____"
]
],
[
[
"## Стохастични модели за генетичния дрейф\n\n\n> **Генетичният дрейф** представлява промяна в честотата на гените (алелите), която е случайна и няма приспособителен\nхарактер.\n\n>Генетичната структура на природните популации е **силно зависими от случайния генетичен дрейф**, тъй като случайните\nефекти могат:\n>- да унищожат генетичното разнообразие, изградено чрез мутация\n>- да противодействат на естествения отбор,\n>- да изградят статистически асоциации между различни локуси в генома\n\n>Затова изясняването на повечето въпроси в еволюционната биология изисква да се вземат под внимание случайните ефекти.\n\n>Значението на случайните ефекти **зависи от много параметри на модела** (размера на популацията или скоростта на мутация).\n\n>В това упражнение ще изведем прости случайни модели, за да разграничим ролята различните фактори и за да получим по-ясна представа за генетичния дрейф.",
"_____no_output_____"
],
[
"### Основен модел\n\nВ основния модел разглеждаме само един локус с алели A и a.\n\nДвата алела имат честоти fA и fa = 1 − fA .\n\nЗа простота предполагаме, че поколенията са дискретни и във всяко има мутация и естествен отбор.\nПо принцип трябва да симулираме мутацията и естествения отбор като случайни процеси.\n\nЗа да опростим модела, разглеждаме приближение, в което:\n1. мутацията и отборът са едновременно детерминистични,\n2. крайният размер на популацията се определя чрез вземане„\n\nна извадка от N генома (N е размерът на популацията) след като са се случили мутацията и естественият отбор.",
"_____no_output_____"
]
],
[
[
"def model(population_size=1000, mutation_rate=0.0001, selection_strenght=0, generations=5000, initial_frequency=0.1, sampling=True):\n frequencies = []\n previous_frequency = initial_frequency\n for i in range(1, generations):\n # Mutation:\n frequency = mutation_rate * (1 - previous_frequency) + (1 - mutation_rate) * previous_frequency # m(1 − fA) + (1 − m)fA\n # Selection:\n average_adaptability = (1 - selection_strenght) * frequency + (1 - frequency)\n frequency = frequency * (1 - selection_strenght) / average_adaptability\n # Sampling:\n if sampling:\n frequency = random.binomial(population_size, frequency) / population_size\n frequencies.append(frequency)\n previous_frequency = frequency\n\n plt.plot(frequencies)\n plt.show()",
"_____no_output_____"
],
[
"model(population_size=1000, generations=5000, initial_frequency=0.1)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1)",
"_____no_output_____"
],
[
"model(population_size=10000000, generations=5000, initial_frequency=0.1)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1, sampling=False)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=500, initial_frequency=0.1, sampling=False)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.6, sampling=False)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.6, sampling=True)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.5, sampling=False)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.5, sampling=True)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1, sampling=False, mutation_rate=0.03)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1, sampling=True, mutation_rate=0.03)",
"_____no_output_____"
],
[
"model(population_size=10000000, generations=5000, initial_frequency=0.1, sampling=True, mutation_rate=0.03)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1, mutation_rate=0.03, selection_strenght=0.09)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1, mutation_rate=0.01, selection_strenght=0.01, sampling=False)",
"_____no_output_____"
],
[
"model(population_size=10000, generations=5000, initial_frequency=0.1, mutation_rate=0.01, selection_strenght=0.09, sampling=False)",
"_____no_output_____"
]
],
[
[
"### Разширен модел (кръстосване)",
"_____no_output_____"
]
],
[
[
"DEFAULT_FREQUENCIES = {'ab': 0.1, 'Ab': 0.25, 'aB': 0.25, 'AB': 0.4}\n\ndef _opposite_allel(allel):\n if ord(allel) < 97:\n return allel.lower()\n return allel.upper()\n\n\ndef _different_allel(locus, diff_index):\n new_locus = ''\n for i, allel in enumerate(locus):\n if (i + 1) in diff_index:\n new_locus += _opposite_allel(allel)\n else:\n new_locus += allel\n return new_locus\n\n\ndef _mutate(frequencies, m): # m - mutation_rate\n new_frequencies = dict()\n for locus, freq in frequencies.items():\n new_frequencies[locus] = ((1 - m)**2)*freq +\\\n (1 - m) * m * (frequencies[_different_allel(locus, (1,))] + frequencies[_different_allel(locus, (2,))]) +\\\n (m**2)*frequencies[_different_allel(locus, (1,2))]\n return new_frequencies\n\n\ndef _select(f, s): # f - frequencies, s - selection strengths (dicts per locus type)\n new_frequencies = dict()\n average_adaptability = sum([s[locus] * f[locus] for locus in f])\n for locus in f:\n new_frequencies[locus] = f[locus] * s[locus] / average_adaptability\n return new_frequencies\n\n\ndef _recombinate(f, r): # f - frequencies, r - recombination rate\n D = f['ab'] * f['AB'] - f['Ab'] * f['aB'] # коефициентът на неравновесие на свързване на гените\n new_frequencies = dict()\n new_frequencies['ab'] = f['ab'] - r * D\n new_frequencies['AB'] = f['AB'] - r * D\n new_frequencies['Ab'] = f['Ab'] + r * D\n new_frequencies['aB'] = f['aB'] + r * D\n return new_frequencies\n\n\ndef model_two_loci(\n population_size=1000, mutation_rate=0.0001, selection_strengths=None,\n generations=5000, initial_frequencies=None, recombination_rate=0.1):\n initial_frequencies = initial_frequencies or DEFAULT_FREQUENCIES\n assert sum(list(initial_frequencies.values())) == 1.0, \"All frequencies must equal 1.0\"\n frequencies_values = list()\n previous_frequencies = initial_frequencies\n for i in range(1, generations):\n # Mutation:\n frequencies = _mutate(previous_frequencies, mutation_rate)\n # Selection:\n frequencies = _select(frequencies, selection_strengths)\n # Recombination:\n frequencies = _recombinate(frequencies, recombination_rate)\n # Sampling:\n freqs = sorted(frequencies.items())\n freqs_values = [x[1] for x in freqs]\n freqs_values = [x / population_size for x in random.multinomial(population_size, freqs_values)]\n frequencies_values.append(freqs_values)\n frequencies = dict(zip([x[0] for x in freqs], freqs_values))\n previous_frequencies = frequencies\n\n graph = plt.plot(frequencies_values)\n first_legend = plt.legend(graph, sorted(previous_frequencies))\n plt.ylabel(\"frequency\")\n plt.xlabel(\"generation\")\n plt.rcParams[\"figure.figsize\"] = 10, 8\n plt.show()",
"_____no_output_____"
],
[
"model_two_loci(\n initial_frequencies={'ab': 1, 'Ab': 0, 'aB': 0, 'AB': 0},\n selection_strengths={'ab': 1, 'Ab': 1.01, 'aB': 1.01, 'AB': 1.02},\n population_size=10000, generations=1000, recombination_rate=0.1)",
"_____no_output_____"
],
[
"model_two_loci(\n initial_frequencies={'ab': 1, 'Ab': 0, 'aB': 0, 'AB': 0},\n selection_strengths={'ab': 1, 'Ab': 1.01, 'aB': 1.01, 'AB': 1.02},\n population_size=10000, generations=1000, recombination_rate=0.4)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
c52832bc0e25076230e2028bb043c7800e4b396c
| 96,071 |
ipynb
|
Jupyter Notebook
|
notebooks/experiment_stash/twint_data_collection.ipynb
|
puevigreven/TweetFluencers
|
f726e6506428774d8bf1c2cd3d31f2f94386b5e5
|
[
"MIT"
] | null | null | null |
notebooks/experiment_stash/twint_data_collection.ipynb
|
puevigreven/TweetFluencers
|
f726e6506428774d8bf1c2cd3d31f2f94386b5e5
|
[
"MIT"
] | 4 |
2021-06-08T22:07:31.000Z
|
2022-03-12T00:43:11.000Z
|
notebooks/experiment_stash/twint_data_collection.ipynb
|
puevigreven/TweetFluencers
|
f726e6506428774d8bf1c2cd3d31f2f94386b5e5
|
[
"MIT"
] | null | null | null | 30.431105 | 643 | 0.485745 |
[
[
[
"import twint\nimport os\nimport nest_asyncio\nnest_asyncio.apply()\nimport logging\nimport pandas as pd\n# logging.basicConfig(filename='twint_data_collection.log',level=logging.DEBUG)\n# logging.debug('This message should go to the log file')\n# logging.warning('And this, too')\npd.set_option('display.max_colwidth', None) ",
"_____no_output_____"
],
[
"# Configure\nc = twint.Config()\nc.Username = \"neuripsconf\"\nc.Store_object = True\n# c.User_full = True\n# Run\ntwint.run.Following(c)\nfollow_list = twint.output.follows_list",
"shakir_za\niclr_conf\nrichhickey\njohnplattml\njbilmes\nCorinnaCortes\nrherbrich\ngxr\nAndrewYNg\nlawrennd\nsmolix\nylecun\nhannawallach\nkarpathy\nmxlearn\nGoogleAI\nmemming\nMSFTResearchCam\nbschoelkopf\nmikiobraun\nNandoDF\nMSFTResearch\nogrisel\nandrewmccallum\nSebastianThrun\ndriainmurray\nearnmyturns\nPASCALNetwork\nicmlconf\nsejnowski\ncnl_salk\n"
],
[
"len(follow_list)",
"_____no_output_____"
],
[
"import json\n",
"_____no_output_____"
],
[
"# for i in [\"AndrewYNg\", \"ylecun\", \"icmlconf\"]:\n\nall_users_info_dict = []\ndef get_user_info(username):\n c = twint.Config()\n c.Username = str(username)\n c.Store_object = True\n twint.run.Lookup(c)\n users = twint.output.users_list[-1]\n print(i)\n print(users.username)\n user_info_dict = twint.storage.write_meta.userData(users)\n print(user_info_dict)\n all_users_info_dict.append(user_info_dict)\n ",
"216939636 | Andrew Ng | @AndrewYNg | Private: 0 | Verified: 1 | Bio: Co-Founder of Coursera; Stanford CS adjunct faculty. Former head of Baidu AI Group/Google Brain. #ai #machinelearning, #deeplearning #MOOCs | Location: Palo Alto, CA | Url: http://www.andrewng.org | Joined: 17 Nov 2010 7:39 PM | Tweets: 1289 | Following: 466 | Followers: 513045 | Likes: 987 | Media: 236 | Avatar: https://pbs.twimg.com/profile_images/733174243714682880/oyG30NEH_400x400.jpg\nAndrewYNg\nAndrewYNg\n{'id': 216939636, 'name': 'Andrew Ng', 'username': 'AndrewYNg', 'bio': 'Co-Founder of Coursera; Stanford CS adjunct faculty. Former head of Baidu AI Group/Google Brain. #ai #machinelearning, #deeplearning #MOOCs', 'location': 'Palo Alto, CA', 'url': 'http://www.andrewng.org', 'join_date': '17 Nov 2010', 'join_time': '7:39 PM', 'tweets': 1289, 'following': 466, 'followers': 513045, 'likes': 987, 'media': 236, 'private': 0, 'verified': 1, 'profile_image_url': 'https://pbs.twimg.com/profile_images/733174243714682880/oyG30NEH_400x400.jpg', 'background_image': 'https://pbs.twimg.com/profile_banners/216939636/1483126470/1500x500'}\n48008938 | Yann LeCun | @ylecun | Private: 0 | Verified: 0 | Bio: Professor at NYU. Chief AI Scientist at Facebook. Researcher in AI, Machine Learning, etc. ACM Turing Award Laureate. | Location: New York | Url: http://yann.lecun.com | Joined: 17 Jun 2009 9:05 AM | Tweets: 5376 | Following: 288 | Followers: 211636 | Likes: 4282 | Media: 39 | Avatar: https://pbs.twimg.com/profile_images/2387565623/7gew8nz1z7ik1ch148so_400x400.jpeg\nylecun\nylecun\n{'id': 48008938, 'name': 'Yann LeCun', 'username': 'ylecun', 'bio': 'Professor at NYU. Chief AI Scientist at Facebook. Researcher in AI, Machine Learning, etc. ACM Turing Award Laureate.', 'location': 'New York', 'url': 'http://yann.lecun.com', 'join_date': '17 Jun 2009', 'join_time': '9:05 AM', 'tweets': 5376, 'following': 288, 'followers': 211636, 'likes': 4282, 'media': 39, 'private': 0, 'verified': 0, 'profile_image_url': 'https://pbs.twimg.com/profile_images/2387565623/7gew8nz1z7ik1ch148so_400x400.jpeg', 'background_image': None}\n387156826 | ICML Conference | @icmlconf | Private: 0 | Verified: 0 | Bio: International Conference on Machine Learning • July 12-18, 2020 (virtual) • #icml2020 • Contact: http://icml.cc/Help/Contact | Location: | Url: http://icml.cc/ | Joined: 8 Oct 2011 8:28 AM | Tweets: 941 | Following: 6 | Followers: 33188 | Likes: 886 | Media: 12 | Avatar: https://pbs.twimg.com/profile_images/1264614967908552704/ea0u5NgU_400x400.jpg\nicmlconf\nicmlconf\n{'id': 387156826, 'name': 'ICML Conference', 'username': 'icmlconf', 'bio': 'International Conference on Machine Learning • July 12-18, 2020 (virtual) • #icml2020 • Contact: http://icml.cc/Help/Contact\\xa0', 'location': '', 'url': 'http://icml.cc/', 'join_date': '8 Oct 2011', 'join_time': '8:28 AM', 'tweets': 941, 'following': 6, 'followers': 33188, 'likes': 886, 'media': 12, 'private': 0, 'verified': 0, 'profile_image_url': 'https://pbs.twimg.com/profile_images/1264614967908552704/ea0u5NgU_400x400.jpg', 'background_image': 'https://pbs.twimg.com/profile_banners/387156826/1514158533/1500x500'}\n"
],
[
"users.bio",
"_____no_output_____"
],
[
"c.Username = \"ylecun\"\nc.Store_object = True\n\ntwint.run.Lookup(c)\nusers = twint.output.users_list[0]",
"48008938 | Yann LeCun | @ylecun | Private: 0 | Verified: 0 | Bio: Professor at NYU. Chief AI Scientist at Facebook. Researcher in AI, Machine Learning, etc. ACM Turing Award Laureate. | Location: New York | Url: http://yann.lecun.com | Joined: 17 Jun 2009 9:05 AM | Tweets: 5372 | Following: 284 | Followers: 211067 | Likes: 4275 | Media: 39 | Avatar: https://pbs.twimg.com/profile_images/2387565623/7gew8nz1z7ik1ch148so_400x400.jpeg\n"
],
[
"users.name",
"_____no_output_____"
],
[
"follow_list = twint.output.follows_list",
"_____no_output_____"
],
[
"len(follow_list)",
"_____no_output_____"
],
[
"ML_KEYWORDS = [\"ai\", \"ml\", \"artificial intelligence\", \"machine learning\", \"deeplearning\", \"machinelearning\", \"nlproc\", \"nlp\", \"computer vision\", \"computervision\", \"cv\", \" reinforcement learning\", \"rl\", \"kaggle\", \"datascience\", \"google brain\", \"deepmind\", \"googleai\"] \n\n\ntest = \"\"\n\nif any(word in str(test).lower() for word in ML_KEYWORDS):\n print (\"hi\")",
"_____no_output_____"
],
[
"relevant_user = []",
"_____no_output_____"
],
[
"import twint\nimport os\nimport nest_asyncio\nnest_asyncio.apply()\nimport logging\nlogging.basicConfig(filename='twint_data_collection.log',level=logging.DEBUG)\n\ndef is_relevant_user(user):\n twint.output.clean_lists()\n c = twint.Config()\n c.Username = str(user)\n c.Store_object = True\n\n twint.run.Lookup(c)\n# print (twint.output.users_list)\n user = twint.output.users_list[0]\n print (user.name)\n if any(word in str(user.bio).lower() for word in ML_KEYWORDS):\n return True, user\n else:\n return False, None\n\ndef main():\n relevant_user.append(\"neuripsconf\")\n\n for target in relevant_user:\n # logging.info('target user: ', target,\" ====> Starting to find followings <====\")\n # Configure\n c = twint.Config()\n c.Username = str(target)\n c.Store_object = True\n # Run\n twint.run.Following(c)\n follow_list = twint.output.follows_list\n # logging.info(\"====> Starting to find relevant users <====\") \n for user in follow_list:\n rel, rel_user_info = is_relevant_user(user)\n if rel:\n if user not in relevant_user:\n relevant_user.append(user)\n # logging.info (\"==> User :\" + str(user) + \" added!\")\n\n if len(relevant_user) % 100 == 0:\n # logging.info (\"====> Starting to find write to files <====\") \n\n with open('relevant_user.txt', 'w') as f:\n for item in relevant_user:\n f.write(\"%s\\n\" % item)\n # logging.info(\"====> Completed Writting to file! <====\")\n # else:\n # logging.info(\"==> User :\" + str(user) + \" already present in the list!\")\n # else:\n # logging.info(\"==> User :\" + str(user) + \" not added!\")\n \n",
"_____no_output_____"
],
[
"import twint\nimport os\nimport nest_asyncio\nimport logging\nimport json\nimport pandas as pd\nnest_asyncio.apply()\n\nlogging.basicConfig(filename=\"twint_data_collection.log\", level=logging.DEBUG)\n\nML_KEYWORDS = [\n \"ai \",\n \"ml \",\n \".ai\" \"fast.ai\" \"artificial intelligence\",\n \"machine learning\",\n \"deeplearning\",\n \"machinelearning\",\n \"nlproc\",\n \"nlp \",\n \"computer vision\",\n \"computervision\",\n \"cv \",\n \"reinforcement learning\",\n \"rl \",\n \"kaggle\",\n \"datascience\",\n \"data science\",\n \"google brain\",\n \"deepmind\",\n \"googleai\",\n \"data scientist\",\n \"pattern analysis\",\n \"statistical modelling\",\n \"computational learning\",\n \"natural language processing\",\n \"vision and learning\",\n \"data visualization\",\n \"matplotlib\",\n \"computer science\",\n \"data ethics\",\n \"stats \",\n \"deepmind\",\n \"intelligent systems\",\n \"a.i.\",\n \"pytorch\",\n \"tensorflow\",\n \"keras\",\n \"theano\",\n \"bayesian statistics\",\n \"openai\",\n \"forecasting\"\n ]\n\n\ndef write_to_file(relevant_user, ):\n if len(relevant_user) % 10 == 0:\n with open(\"relevant_user.txt\", \"w+\") as f:\n for item in relevant_user:\n f.write(\"%s\\n\" % item)\n \ndef write_user_info(relevant_user_info_list):\n if len(relevant_user_info_list) > 100:\n relevant_user_info_df = pd.DataFrame(relevant_user_info_list)\n main_csv = pd.read_csv(\"relevant_user_info.csv\")\n main_csv.append(relevant_user_info_df, ignore_index=True)\n main_csv.to_csv(\"relevant_user_info.csv\",index=False)\n empty_list = []\n return empty_list\n return relevant_user_info_list\n \ndef is_relevant_user(user):\n\n \n twint.output.clean_lists()\n c = twint.Config()\n c.Username = str(user)\n c.Store_object = True\n# c.Hide_output = True\n twint.run.Lookup(c)\n users_list = twint.output.users_list\n user = twint.output.users_list[0]\n user_info_dict = twint.storage.write_meta.userData(user)\n \n if any(word in str(user.bio).lower() for word in ML_KEYWORDS):\n print ( \"====> \", user_info_dict)\n return True, user_info_dict\n else:\n return False, None\n\n\ndef main():\n relevant_user = []\n relevant_user_info_list = []\n relevant_user.append(\"neuripsconf\")\n \n for target in relevant_user:\n print(\"==> \", target)\n c = twint.Config()\n c.Username = str(target)\n# c.Hide_output = True\n c.Store_object = True\n twint.run.Following(c)\n follow_list = twint.output.follows_list\n \n for user in follow_list:\n print(\"hi\")\n rel, rel_user_info = is_relevant_user(user)\n if rel: \n relevant_user_info_list.append(rel_user_info)\n relevant_user_info_list = write_user_info(relevant_user_info_list)\n if rel and user not in relevant_user: \n relevant_user.append(user)\n write_to_file(relevant_user)\n \n \n break\n\n\nmain()\n",
"_____no_output_____"
],
[
"if True and True:\n print (\"hi\")\n ",
"hi\n"
],
[
"c = twint.Config()\nc.Username = \"noneprivacy\"\n# c.Hide_output = True\nc.Pandas = True\n# c.Store_pandas = True\nc.Pandas_clean=True\nc.Store_object = True\nc.Limit= 20 \ntwint.run.Lookup(c)\n\nfollowed = twint.storage.panda.User_df\ntwint.storage.panda.clean()",
"2550600458 | Francesco Poldi | @noneprivacy | Private: 0 | Verified: 0 | Bio: 👀👤 Experienced Open Source Intelligence developer, hunter and privacy advocate. | Location: Phantásien | Url: | Joined: 6 Jun 2014 10:46 AM | Tweets: 6187 | Following: 1300 | Followers: 3339 | Likes: 8231 | Media: 300 | Avatar: https://pbs.twimg.com/profile_images/1224048725171023877/7jHVO_YW_400x400.jpg\n"
],
[
"followed.shape",
"_____no_output_____"
],
[
"followed.values",
"_____no_output_____"
],
[
"twint.output.clean_lists()\nc = twint.Config()\n\nc.Username = \"noneprivacy\"\n\nc.Store_object = True\nc.Hide_output = True\n\nc.Store_object = True\ntwint.run.Lookup(c)\nusers_list = twint.output.users_list\nuser = twint.output.users_list[0]\nuser_info_dict = twint.storage.write_meta.userData(user)\n",
"2550600458 | Francesco Poldi | @noneprivacy | Private: 0 | Verified: 0 | Bio: 👀👤 Experienced Open Source Intelligence developer, hunter and privacy advocate. | Location: Phantásien | Url: | Joined: 6 Jun 2014 10:46 AM | Tweets: 6187 | Following: 1300 | Followers: 3339 | Likes: 8231 | Media: 300 | Avatar: https://pbs.twimg.com/profile_images/1224048725171023877/7jHVO_YW_400x400.jpg\n"
],
[
"relevant_user_info_list = []\nrelevant_user_info_list.append(user_info_dict)",
"_____no_output_____"
],
[
"with open('relevant_user_info.json', 'w') as fout:\n json.dump(relevant_user_info_list , fout)",
"_____no_output_____"
],
[
"main_csv = pd.DataFrame({})",
"_____no_output_____"
],
[
"\nmain_csv.head()",
"_____no_output_____"
],
[
"with open('relevant_user.txt') as f:\n lines = f.read().splitlines()",
"_____no_output_____"
],
[
"# lines[24:]",
"_____no_output_____"
],
[
"import twint\n\nc = twint.Config()\n\nc.Username = \"neuripsconf\"\n# c.Custom[\"tweet\"] = [\"id\"]\n# c.Custom[\"user\"] = [\"bio\"]\nc.Limit = 10\nc.Store_csv = True\nc.Output = \"neuripsconf.csv\"\n\ntwint.run.Search(c)",
"1283862778290348034 2020-07-17 02:05:14 +0530 <NeurIPSConf> Wondering how the NeurIPS review process is going? New blog post from @RaiaHadsell @MarcRanzato @hsuantienlin : https://medium.com/@NeurIPSConf/reviewing-is-underway-a5532d4615ec …\n1283779502313811970 2020-07-16 20:34:19 +0530 <NeurIPSConf> Want to write about your research but avoid the hype? @AIhubOrg has some suggestions https://aihub.org/wp-content/uploads/2020/01/Guidelines-for-promoting-your-AI-research.pdf …\n1283153889471205388 2020-07-15 03:08:21 +0530 <NeurIPSConf> @AIhubOrg is a place for research without the hype. Supported by NeurIPS, it's a great place to share and explore work! https://aihub.org/contribute/ \n1281359035422388226 2020-07-10 04:16:15 +0530 <NeurIPSConf> NeurIPS is a supporter of @AIhubOrg, they're always looking for practitioners to write about research. https://aihub.org/contribute/ \n1267874210682146816 2020-06-02 23:12:22 +0530 <NeurIPSConf> Important notice to all authors: the paper submission deadline has been extended by 48 hours. The new deadline is Friday June 5, 2020 at 1pm PDT Find the official announcement here: https://neurips.cc/Conferences/2020/DeadlineExtension …\n1260989820966506497 2020-05-14 23:16:16 +0530 <NeurIPSConf> Call for meetups! https://twitter.com/EmtiyazKhan/status/1260568561199788035 …\n1250525403187490818 2020-04-16 02:14:24 +0530 <NeurIPSConf> We're excited to welcome the @repro_challenge back for 2020! Check out their plans for this year and all the amazing work they've done so far! https://reproducibility-challenge.github.io/neurips2019/ https://twitter.com/repro_challenge/status/1248072072309420039 …\n1243662644520923136 2020-03-28 03:44:15 +0530 <NeurIPSConf> Check out the latest update from the NeurIPS 2020 program chairs, touching on: 1) the upcoming submission deadline 2) recruitment of SACs, ACs and reviewers 3) the new ethical review process 4) the new early rejection process: https://medium.com/@NeurIPSConf/updates-on-program-committee-desk-rejections-353adb8dc1ae …\n1230953304613515264 2020-02-22 02:01:52 +0530 <NeurIPSConf> If you would like to recommend or become a reviewer for NeurIPS 2020, please fill out the form linked below. Thanks! https://docs.google.com/forms/d/11N0OhXKn9wB428Qq7jP8XPctM_DoJ-8lT-5EIDn4Y0M/edit …\n1230242921250770946 2020-02-20 02:59:03 +0530 <NeurIPSConf> Now with Raia's correct handle: @RaiaHadsell\n1230239787769970689 2020-02-20 02:46:36 +0530 <NeurIPSConf> Call for Papers for NeurIPS 2020 is out: https://neurips.cc/Conferences/2020/CallForPapers … Learn more about what is new this year in this video: https://www.youtube.com/watch?v=361h6lHZGDg … Also checkout this blog post by the PCs, Marc'Aurelio Ranzato, Nina Balcan, @hsuantienlin and @raiahadsel : https://medium.com/@NeurIPSConf/getting-started-with-neurips-2020-e350f9b39c28 …\n1227978472963764224 2020-02-13 21:00:57 +0530 <NeurIPSConf> The NeurIPS 2019 proceedings are posted. https://papers.nips.cc/book/advances-in-neural-information-processing-systems-32-2019 …\n1204482188101738497 2019-12-11 00:55:06 +0530 <NeurIPSConf> NeurIPS Live Streaming https://slideslive.com/neurips/ \n1191452357831225344 2019-11-05 01:59:12 +0530 <NeurIPSConf> Mobile friendly NeurIPS 2019 schedule: https://nips.cc/Conferences/2019/Schedule …\n1191068131839528960 2019-11-04 00:32:26 +0530 <NeurIPSConf> The schedule is almost complete for NeurIPS 2019 https://neurips.cc/Conferences/2019/ScheduleMultitrack …\n1186357882557784070 2019-10-22 00:35:35 +0530 <NeurIPSConf> Announcing NeurIPS Meetups! 
https://medium.com/@NeurIPSConf/announcing-neurips-meetups-44b2385c67a2 …\n1184492430118096896 2019-10-16 21:02:56 +0530 <NeurIPSConf> NeurIPS 2019 Call for socials. https://nips.cc/Conferences/2019/Socials … We are very happy to inaugurate social events at the coming NeurIPS 2019. Given the steadily increasing amount of attendees of NeurIPS, it's becoming more and more difficult to meet colleagues with similar interests ...\n1182459045233356800 2019-10-11 06:23:00 +0530 <NeurIPSConf> There will be a rejection letter, but we haven't yet finished giving out all the awards. There are over 1600 applications to process.\n1174440782301761538 2019-09-19 03:21:17 +0530 <NeurIPSConf> The demo submission deadline for NeurIPS 2019 is Sept. 19, 2019, 4:59 p.m. pacific time.\n1172239263909515264 2019-09-13 01:33:14 +0530 <NeurIPSConf> All authors on accepted papers at NeurIPS 2019 who want to attend should be registering now. Be sure to log into the website first, so it recognizes that you have an accepted paper. Authors only have access to reserved tickets until September 26th or 27th depending on timezone.\n"
],
[
"ls ../twitter_thought_leader/data/raw",
"\u001b[0m\u001b[01;34mbackup_july_22\u001b[0m/ relevant_user.txt user_info.json\nrelevant_user_info.csv \u001b[01;34mtweet_since_2019\u001b[0m/\n"
],
[
"import json\ndata = json.load(open(\"../twitter_thought_leader/data/raw/user_info.json\"))",
"_____no_output_____"
],
[
"len(data)",
"_____no_output_____"
],
[
"import pandas as pd\ndf = pd.DataFrame(data)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.to_csv(\"../twitter_thought_leader/data/raw/results_user_info.csv\", index=False)",
"_____no_output_____"
],
[
"# /home/hustle/playground/twitter_thought_leader/data/raw/backup_july_22/tweets_1800+_users_1K_tweets_22_July",
"_____no_output_____"
],
[
"from os import listdir\nfrom os.path import isfile, join\nmypath = \"/home/hustle/playground/twitter_thought_leader/data/raw/backup_july_22/tweets_1800+_users_1K_tweets_22_July/tweets\"\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\ndownloaded_user = []\nfor i in onlyfiles:\n fname = i.split(\".\")[0]\n downloaded_user.append(fname)",
"_____no_output_____"
],
[
"len(set(downloaded_user))",
"_____no_output_____"
],
[
"with open(\"../twitter_thought_leader/data/raw/\"+ \"relevant_user.txt\") as f:\n relevant_user = f.read().splitlines() ",
"_____no_output_____"
],
[
"len(set(relevant_user))",
"_____no_output_____"
],
[
"relevant_user_b2 = list(set(relevant_user) - set(downloaded_user))",
"_____no_output_____"
],
[
"len(relevant_user_b2)",
"_____no_output_____"
],
[
"with open(\"../twitter_thought_leader/data/raw/\"+ \"relevant_user_batch_2.txt\", \"w+\") as f:\n for item in relevant_user_b2:\n f.write(\"%s\\n\" % item)",
"_____no_output_____"
],
[
"import tweepy\nimport os\n\ndef twitter_auth():\n access_token = os.environ.get('TWITTER_ACCESS_KEY')\n access_token_secret = os.environ.get('TWITTER_ACCESS_SECRET_KEY')\n consumer_key = os.environ.get('TWITTER_API_KEY')\n consumer_secret = os.environ.get('TWITTER_API_SECRET_KEY')\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n \ndef collect_follower_using_twitter_api():\n follow_dict = {}\n with open(\"../twitter_thought_leader/data/raw/\"+ \"relevant_user.txt\") as f:\n relevant_user = f.read().splitlines() \n for i in relevant_user:\n follow_dict[str(i)] = tweepy.Cursor(api.friends, screen_name=\"neuripsconf\").items()\n time.sleep(2)",
"friend: shakir_za\nfriend: iclr_conf\nfriend: richhickey\nfriend: johnplattml\nfriend: jbilmes\nfriend: CorinnaCortes\nfriend: rherbrich\nfriend: gxr\nfriend: AndrewYNg\nfriend: lawrennd\nfriend: smolix\nfriend: ylecun\nfriend: hannawallach\nfriend: karpathy\nfriend: mxlearn\nfriend: GoogleAI\nfriend: memming\nfriend: MSFTResearchCam\nfriend: bschoelkopf\nfriend: mikiobraun\nfriend: NandoDF\nfriend: MSFTResearch\nfriend: ogrisel\nfriend: andrewmccallum\nfriend: SebastianThrun\nfriend: driainmurray\nfriend: earnmyturns\nfriend: PASCALNetwork\nfriend: icmlconf\nfriend: sejnowski\nfriend: cnl_salk\n"
],
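[
"# Hedged usage sketch for the fixed twitter_auth() above: it assumes the four\n# TWITTER_* environment variables are set; verify_credentials() is a cheap way\n# to confirm the client is authenticated before paginating friends.\napi = twitter_auth()\nme = api.verify_credentials()\nprint(me.screen_name)",
"_____no_output_____"
],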
[
"2 +2 ",
"_____no_output_____"
],
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv(\"../twitter_thought_leader/data/raw/relevant_user_info_complete.csv\",engine='python')",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"drop_dup_df = df.drop_duplicates()",
"_____no_output_____"
],
[
"drop_dup_df.shape",
"_____no_output_____"
],
[
"drop_dup_df[\"bio\"]",
"_____no_output_____"
],
[
"ML_KEYWORDS =[\n \"#ai\",\n \" ai\",\n \"ai \",\n \"_ai\",\n \"ai,\",\n \"ai.\",\n \"ai+\",\n \"a.i.\",\n \"#ml\",\n \"ml.\",\n \".ml\",\n \"ml@\",\n \"ml,\",\n \"ml \",\n \".ai\",\n \"#rl\",\n \" rl \",\n \"#cv\",\n \"cv \",\n \"ai/\",\n \"ml/\",\n \"rl/\",\n \"fast.ai\",\n \"artificial intelligence\",\n \"artificialintelligence\",\n \"machine learn\",\n \"machinelearning\",\n \"deep learn\",\n \"deeplearning\",\n \"nlproc\",\n \"nlp\",\n \"stanfordnlp\",\n \"stanfordai\",\n \"stanfordailabs\",\n \"data sci\",\n \"computer vision\",\n \"computervision\",\n \"reinforcement learning\",\n \"kaggle\",\n \"datascience\",\n \"data science\",\n \"google brain\",\n \"deepmind\",\n \"deep mind\",\n \"googleai\",\n \"google ai\",\n \"googlebrain\",\n \"data scientist\",\n \"pattern analysis\",\n \"statistical modelling\",\n \"computational learning\",\n \"natural language processing\",\n \"vision and learning\",\n \"data visualization\",\n \"matplotlib\",\n \"computer science\",\n \"data ethics\",\n \"stats \",\n \"autonomous cars\",\n \"gan\",\n \"openai\",\n \"icml\",\n \"neurips\",\n \"intelligent systems\", \n \"pytorch\",\n \"tensorflow\",\n \"keras\",\n \"theano\",\n \"bayesian statistics\",\n \"openai\",\n \"forecasting\",\n \"iclr\"\n ]\n\ntest = \"\"\n\nif any(word in str(test).lower() for word in ML_KEYWORDS):\n print (\"hi\")\n\ndef is_relevant(test):\n if any(word in str(test).lower() for word in ML_KEYWORDS):\n return True\n return False",
"_____no_output_____"
],
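[
"# Quick spot-check of is_relevant() on a few made-up bios (hypothetical examples,\n# not real Twitter users) -- substring matching like this is crude, so it's worth\n# confirming it behaves as intended on obvious positives and negatives.\nfor bio in [\"Research scientist working on deep learning\", \"I bake sourdough bread\", \"PyTorch core developer\"]:\n    print(bio, '->', is_relevant(bio))",
"_____no_output_____"
],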
[
"df = pd.read_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/2nd_degree_relevant_user_info.csv\",engine=\"python\")",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"rel_df = df[df[\"bio\"].apply(is_relevant)]",
"_____no_output_____"
],
[
"rel_df = rel_df.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"rel_df.shape",
"_____no_output_____"
],
[
"non_rel_df = df[~df[\"bio\"].apply(is_relevant)]",
"_____no_output_____"
],
[
"non_rel_df = non_rel_df.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"non_rel_df.shape",
"_____no_output_____"
],
[
"non_rel_df.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/non_rel_user_info.csv\", index=False)",
"_____no_output_____"
],
[
"rel_df = rel_df.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"rel_df.shape",
"_____no_output_____"
],
[
"rel_df.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/2nd_degree_relevant_user_info.csv\", index=False)",
"_____no_output_____"
],
[
"import os\nimport glob\nimport pandas as pd\nos.chdir(\"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists\")\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n\nall_filenames\n\ncombined_csv = pd.concat([pd.read_csv(f, engine=\"python\") for f in all_filenames ])\n#export to csv\n\ncombined_csv.shape\n\ncombined_csv_rel_df = combined_csv[combined_csv[\"bio\"].apply(is_relevant)]\n\nnon_rel_df = combined_csv[~combined_csv[\"bio\"].apply(is_relevant)]\n\ncombined_csv_rel_df = combined_csv_rel_df.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"combined_csv_rel_df.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/rel_user_info.csv\", mode=\"a\", index=False, header=False)",
"_____no_output_____"
],
[
"rel_user_info = pd.read_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/rel_user_info.csv\",engine=\"python\")",
"_____no_output_____"
],
[
"rel_user_info.shape",
"_____no_output_____"
],
[
"combined_rel_df = rel_user_info[rel_user_info[\"bio\"].apply(is_relevant)]",
"_____no_output_____"
],
[
"combined_rel_df.shape",
"_____no_output_____"
],
[
"combined_rel_df = combined_rel_df.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"combined_rel_df.shape",
"_____no_output_____"
],
[
"combined_rel_df.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/rel_user_info.csv\", index=False)",
"_____no_output_____"
],
[
"pd.Series(combined_rel_df.username).to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/4k_rel_user.txt\",index=False)",
"_____no_output_____"
],
[
"df = pd.read_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/rel_user_info.csv\",engine=\"python\")",
"_____no_output_____"
],
[
"rel_df = df[df[\"bio\"].apply(is_relevant)]",
"_____no_output_____"
],
[
"non_rel_df = df[~df[\"bio\"].apply(is_relevant)]",
"_____no_output_____"
],
[
"rel_df.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/rel_user_info.csv\", index=False)",
"_____no_output_____"
],
[
"# non_rel_df",
"_____no_output_____"
],
[
"with open(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"refined_relevant_user_list.txt\") as f:\n relevant_user = f.read().splitlines() ",
"_____no_output_____"
],
[
"len(relevant_user)",
"_____no_output_____"
],
[
"len(set(relevant_user))",
"_____no_output_____"
],
[
"len(set(rel_df.username) - set(relevant_user))",
"_____no_output_____"
],
[
"relevant_user = list(set(rel_df.username) - set(relevant_user))",
"_____no_output_____"
],
[
"ser = pd.Series(relevant_user)",
"_____no_output_____"
],
[
"ser.shape",
"_____no_output_____"
],
[
"ser.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"refined_relevant_user_list.txt\", mode='a', index=False, header = False)",
"_____no_output_____"
],
[
"non_rel_df.shape",
"_____no_output_____"
],
[
"non_rel_df",
"_____no_output_____"
],
[
"from os import listdir\nfrom os.path import isfile, join\nimport time\ndef check_if_file_present(username):\n mypath = '/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/'\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n downloaded_user = []\n for i in onlyfiles:\n fname = i.split(\".\")[0]\n downloaded_user.append(fname)\n \n if username in downloaded_user:\n return \n raise\n \ndef subprocess_cmd(command):\n process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n print( proc_stdout)\n\nlist_of_user = [ \"HEPfeickert\",\n\"ClementDelangue\",\n\"FedPernici\"] \ndef get_follow_list_with_retry(username):\n count = 0\n while count < 5 :\n print(\"attempt: \" + str(count) + \" for user: \" + username)\n count = count + 1\n try:\n command = 'cd /home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/; conda activate tp; twint -u ' + username + ' --following -o ' + username + '.txt'\n print(command)\n subprocess_cmd(command)\n \n check_if_file_present(username)\n except:\n print (\"sleeping for 20 secs\")\n time.sleep(20)\n continue\n print(\"Completed for user: \" + username)\n break",
"_____no_output_____"
],
[
"for i in list_of_user:\n get_follow_list_with_retry(i)",
"_____no_output_____"
],
[
"follow_list",
"_____no_output_____"
],
[
"follow_list",
"_____no_output_____"
],
[
"import os\ncommand = \"twint -u neuripsconf --following --user-full -o neuripsconf.csv --csv\"\nos.system(command)",
"_____no_output_____"
],
[
"import subprocess\nresult = subprocess.run(['ls', '-l'], stdout=subprocess.PIPE)\nresult.stdout",
"_____no_output_____"
],
[
"def subprocess_cmd(command):\n process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n print( proc_stdout)\n\nsubprocess_cmd('conda activate tp; twint -u neuripsconf --following -o neuripsconf.txt --csv')",
"b'shakir_za\\niclr_conf\\nrichhickey\\njohnplattml\\njbilmes\\nCorinnaCortes\\nrherbrich\\ngxr\\nAndrewYNg\\nlawrennd\\nsmolix\\nylecun\\nhannawallach\\nkarpathy\\nmxlearn\\nGoogleAI\\nmemming\\nMSFTResearchCam\\nbschoelkopf\\nmikiobraun\\nNandoDF\\nMSFTResearch\\nogrisel\\nandrewmccallum\\nSebastianThrun\\ndriainmurray\\nearnmyturns\\nPASCALNetwork\\nicmlconf\\nsejnowski\\ncnl_salk'\n"
],
[
"import glob\n\nread_files = glob.glob(\"/home/hustle/playground/twitter_thought_leader/data/raw/test/*.txt\")\n\nwith open(\"/home/hustle/playground/twitter_thought_leader/data/raw/selected_2nd_degree_result.txt\", \"wb\") as outfile:\n for f in read_files:\n with open(f, \"rb\") as infile:\n outfile.write(infile.read())",
"_____no_output_____"
],
[
"len(read_files)",
"_____no_output_____"
],
[
"fd = pd.read_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/pandeyparul_relevant_user_info.csv\",engine='python')",
"_____no_output_____"
],
[
"\nfd.bio",
"_____no_output_____"
],
[
"rel_df = fd[fd[\"bio\"].apply(is_relevant)]",
"_____no_output_____"
],
[
"len(set(rel_df.username))",
"_____no_output_____"
],
[
"with open(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"4k_rel_user.txt\") as f:\n relevant_user = f.read().splitlines() ",
"_____no_output_____"
],
[
"with open(\"/home/hustle/playground/twitter_thought_leader/data/raw/2nd_degree_follow_list.txt\") as f:\n new_relevant_user = f.read().splitlines() \n\nlen(set(new_relevant_user))",
"_____no_output_____"
],
[
"new_users = set(rel_df.username) - set(relevant_user)",
"_____no_output_____"
],
[
"len(new_users)",
"_____no_output_____"
],
[
"ser = pd.Series(list(new_users))\n\nser.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"4k_rel_user.txt\", mode='a', index=False, header = False)",
"_____no_output_____"
],
[
"ser = list(set(rel_df.username) - set(relevant_user))",
"_____no_output_____"
],
[
"ser.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"refined_relevant_user_list.txt\", mode='a', index=False, header = False)",
"_____no_output_____"
],
[
"with open(\"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/\"+ \"pandeyparul.txt\") as f:\n pandeyparul_relevant_user = f.read().splitlines() ",
"_____no_output_____"
],
[
"len(pandeyparul_relevant_user)",
"_____no_output_____"
],
[
"with open(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"4k_rel_user.txt\") as f:\n relevant_user = f.read().splitlines() ",
"_____no_output_____"
],
[
"mypath = '/home/hustle/playground/twitter_thought_leader/data/raw/tweets_last_1500/'\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\ndownloaded_user = []\nfor i in onlyfiles:\n fname = i.split(\".\")[0]\n downloaded_user.append(fname)",
"_____no_output_____"
],
[
"len(relevant_user)",
"_____no_output_____"
],
[
"len(set(pandeyparul_relevant_user) - set(downloaded_user))",
"_____no_output_____"
],
[
"rel_user_info = pd.read_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/rel_user_info.csv\",engine=\"python\")",
"_____no_output_____"
],
[
"rel_user_info.shape",
"_____no_output_____"
],
[
"rel_user_info = rel_user_info.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"rel_user_info.shape",
"_____no_output_____"
],
[
"rel_user_info_list = list(rel_user_info.username)",
"_____no_output_____"
],
[
"rel_user_info_list",
"_____no_output_____"
],
[
"len(set(pandeyparul_relevant_user) - set(rel_user_info_list))",
"_____no_output_____"
],
[
"user_list = [\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/JayAlammar.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/abhi1thakur.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/suzatweet.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/fchollet.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/A_K_Nain.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/NirantK.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/omarsar0.txt\",\n \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/lexfridman.txt\",\n \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/bhutanisanyam1.txt\",\n \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/drfeifei.txt\",\n# \"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/PralayRamteke.txt\",\n ]\n\n\nimport glob\n\n# read_files = glob.glob(\"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists/*.txt\")\n\nwith open(\"/home/hustle/playground/twitter_thought_leader/data/raw/user_list_1_result.txt\", \"wb\") as outfile:\n for f in user_list:\n with open(f, \"rb\") as infile:\n outfile.write(infile.read())",
"_____no_output_____"
],
[
"with open(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"selected_2nd_degree_result.txt\") as f:\n relevant_user = f.read().splitlines() ",
"_____no_output_____"
],
[
"len(relevant_user)",
"_____no_output_____"
],
[
"len(set(relevant_user))",
"_____no_output_____"
],
[
"import os\nimport glob\nimport pandas as pd\nos.chdir(\"/home/hustle/playground/twitter_thought_leader/data/raw/follow_lists\")\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\n\nall_filenames\n\n",
"_____no_output_____"
],
[
"combined_csv = pd.concat([pd.read_csv(f, engine=\"python\") for f in all_filenames ])\n#export to csv\n\ncombined_csv.shape\n\n",
"_____no_output_____"
],
[
"combined_csv_rel_df = combined_csv[combined_csv[\"bio\"].apply(is_relevant)]\n\nnon_rel_df = combined_csv[~combined_csv[\"bio\"].apply(is_relevant)]\n\ncombined_csv_rel_df.shape\n",
"_____no_output_____"
],
[
"\ncombined_csv_rel_df = combined_csv_rel_df.drop_duplicates(subset=['id'])",
"_____no_output_____"
],
[
"combined_csv_rel_df.shape\n",
"_____no_output_____"
],
[
"(set(combined_csv_rel_df.username) - set(downloaded_user))",
"_____no_output_____"
],
[
"ser = list(set(relevant_user) - set(downloaded_user))",
"_____no_output_____"
],
[
"ser = pd.Series(ser)",
"_____no_output_____"
],
[
"ser.to_csv(\"/home/hustle/playground/twitter_thought_leader/data/raw/\"+ \"selected_2nd_degree.txt\", index=False, header = False)",
"_____no_output_____"
],
[
" ",
"_____no_output_____"
],
[
"os.chdir(\"/home/hustle/playground/twitter_thought_leader/data/interim/cleaned_tweets\")\nextension = 'txt'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\nprint(len(all_filenames))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c5283ff0b473010734207ea12def70677464e460
| 3,155 |
ipynb
|
Jupyter Notebook
|
tutorials/example.ipynb
|
Asjidkalam/simple-playgrounds
|
72ec42987a33175103191fa9722e0e002f889954
|
[
"MIT"
] | null | null | null |
tutorials/example.ipynb
|
Asjidkalam/simple-playgrounds
|
72ec42987a33175103191fa9722e0e002f889954
|
[
"MIT"
] | 1 |
2021-02-19T20:55:05.000Z
|
2021-02-19T20:55:05.000Z
|
tutorials/example.ipynb
|
Asjidkalam/simple-playgrounds
|
72ec42987a33175103191fa9722e0e002f889954
|
[
"MIT"
] | 1 |
2021-02-19T20:46:00.000Z
|
2021-02-19T20:46:00.000Z
| 23.37037 | 144 | 0.557528 |
[
[
[
"from simple_playgrounds.entities.agents import BaseInteractiveAgent\nfrom simple_playgrounds.entities.agents.sensors.visual_sensors import RgbSensor\nfrom simple_playgrounds.controllers import Random\nfrom simple_playgrounds.playgrounds.collection.test import Interactives\nfrom simple_playgrounds import Engine\n\n\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"Let's create a very simple random agent, with an RGB sensor.",
"_____no_output_____"
]
],
[
[
"my_agent = BaseInteractiveAgent(name = 'mercotte', controller=Random())\nmy_agent.add_sensor(RgbSensor(name='rgb_1', anchor= my_agent.base_platform, invisible_elements=my_agent.parts, resolution=128, range=300))",
"_____no_output_____"
]
],
[
[
"We build a playground, and associate it to an engine.\nEngines run the playgrounds.\n",
"_____no_output_____"
]
],
[
[
"playground = Interactives()\ngame = Engine(playground=playground, agents=my_agent, time_limit=100000, replay=True, screen=False)",
"_____no_output_____"
],
[
"from IPython.display import display, clear_output\nfig = plt.figure()\nax_1 = fig.add_subplot(1,1,1)\n\nwhile game.game_on:\n\n actions = {}\n for agent in game.agents:\n actions[agent.name] = agent.controller.generate_actions()\n\n game.step(actions)\n game.update_observations()\n\n #img = game.generate_sensor_image(my_agent)\n \n if my_agent.reward != 0: print(my_agent.name, my_agent.reward)\n\n img = game.generate_topdown_image()[:,:,::-1]\n #ax_1.cla()\n ax_1.imshow(img)\n display(fig)\n clear_output(wait=True)\n ",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
c52840fc61f94c74911abf3f3d1a822441f1717f
| 19,406 |
ipynb
|
Jupyter Notebook
|
quests/serverlessml/05_feateng/labs/feateng_bqml.ipynb
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | 2 |
2022-01-06T11:52:57.000Z
|
2022-01-09T01:53:56.000Z
|
quests/serverlessml/05_feateng/labs/feateng_bqml.ipynb
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | null | null | null |
quests/serverlessml/05_feateng/labs/feateng_bqml.ipynb
|
Glairly/introduction_to_tensorflow
|
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
|
[
"Apache-2.0"
] | null | null | null | 36.68431 | 314 | 0.601309 |
[
[
[
"# BigQuery ML models with feature engineering\n\nIn this notebook, we will use BigQuery ML to build more sophisticated models for taxifare prediction.\n\nThis is a continuation of our [first models](../../02_bqml/solution/first_model.ipynb) we created earlier with BigQuery ML but now with more feature engineering.\n\n## Learning Objectives\n1. Create and train a new Linear Regression model with BigQuery ML\n2. Evaluate and predict with the linear model\n3. Apply transformations using SQL to prune the taxi cab dataset\n4. Create a feature cross for day-hour combination using SQL\n5. Examine ways to reduce model overfitting with regularization\n6. Create and train a DNN model with BigQuery ML\n\nEach learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solution/feateng_bqml.ipynb). ",
"_____no_output_____"
]
],
[
[
"%%bash\nexport PROJECT=$(gcloud config list project --format \"value(core.project)\")\necho \"Your current GCP Project Name is: \"$PROJECT",
"_____no_output_____"
],
[
"import os\n\nPROJECT = \"your-gcp-project-here\" # REPLACE WITH YOUR PROJECT NAME\nREGION = \"us-central1\" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1\n\n# Do not change these\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"REGION\"] = REGION\nos.environ[\"BUCKET\"] = PROJECT # DEFAULT BUCKET WILL BE PROJECT ID\n\nif PROJECT == \"your-gcp-project-here\":\n print(\"Don't forget to update your PROJECT name! Currently:\", PROJECT)",
"_____no_output_____"
]
],
[
[
"## Create a BigQuery Dataset and Google Cloud Storage Bucket\n\nA BigQuery dataset is a container for tables, views, and models built with BigQuery ML. Let's create one called __serverlessml__ if we have not already done so in an earlier lab. We'll do the same for a GCS bucket for our project too.",
"_____no_output_____"
]
],
[
[
"%%bash\n\n## Create a BigQuery dataset for serverlessml if it doesn't exist\ndatasetexists=$(bq ls -d | grep -w serverlessml)\n\nif [ -n \"$datasetexists\" ]; then\n echo -e \"BigQuery dataset already exists, let's not recreate it.\"\n\nelse\n echo \"Creating BigQuery dataset titled: serverlessml\"\n \n bq --location=US mk --dataset \\\n --description 'Taxi Fare' \\\n $PROJECT:serverlessml\n echo \"\\nHere are your current datasets:\"\n bq ls\nfi \n \n## Create GCS bucket if it doesn't exist already...\nexists=$(gsutil ls -d | grep -w gs://${PROJECT}/)\n\nif [ -n \"$exists\" ]; then\n echo -e \"Bucket exists, let's not recreate it.\"\n \nelse\n echo \"Creating a new GCS bucket.\"\n gsutil mb -l ${REGION} gs://${PROJECT}\n echo \"\\nHere are your current buckets:\"\n gsutil ls\nfi",
"_____no_output_____"
]
],
[
[
"## Model 4: With some transformations\n\nBigQuery ML automatically scales the inputs. so we don't need to do scaling, but human insight can help.\n\nSince we we'll repeat this quite a bit, let's make a dataset with 1 million rows. ",
"_____no_output_____"
]
],
[
[
"%%bigquery\nCREATE OR REPLACE TABLE serverlessml.feateng_training_data AS\n\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_datetime,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers\nFROM `nyc-tlc.yellow.trips`\n# The full dataset has 1+ Billion rows, let's take only 1 out of 1,000 (or 1 Million total)\nWHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 1000)) = 1\n# placeholder for additional filters as part of TODO 3 later",
"_____no_output_____"
],
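[
"%%bigquery\n-- Added check (hedged): the 1-in-1,000 hash sample above should yield roughly\n-- one million rows; confirm before training on it.\nSELECT COUNT(*) AS num_rows FROM serverlessml.feateng_training_data",
"_____no_output_____"
],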
[
"%%bigquery\n# Tip: You can CREATE MODEL IF NOT EXISTS as well\nCREATE OR REPLACE MODEL serverlessml.model4_feateng\nTRANSFORM(\n * EXCEPT(pickup_datetime)\n , ST_Distance(ST_GeogPoint(pickuplon, pickuplat), ST_GeogPoint(dropofflon, dropofflat)) AS euclidean\n , CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek\n , CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday\n)\n\n# TODO 1: Specify the BigQuery ML options for a linear model to predict fare amount\n# OPTIONS()\n\nAS\n\nSELECT * FROM serverlessml.feateng_training_data",
"_____no_output_____"
]
],
[
[
"Once the training is done, visit the [BigQuery Cloud Console](https://console.cloud.google.com/bigquery) and look at the model that has been trained. Then, come back to this notebook.",
"_____no_output_____"
],
[
"Note that BigQuery automatically split the data we gave it, and trained on only a part of the data and used the rest for evaluation. We can look at eval statistics on that held-out data:",
"_____no_output_____"
]
],
[
[
"%%bigquery\nSELECT *, SQRT(loss) AS rmse FROM ML.TRAINING_INFO(MODEL serverlessml.model4_feateng)",
"_____no_output_____"
],
[
"%%bigquery\n# TODO 2: Evaluate and predict with the linear model\n# Write a SQL query to take the SQRT() of the Mean Squared Error as your loss metric for evaluation\n# Hint: Use ML.EVALUATE on your newly trained model\n",
"_____no_output_____"
]
],
[
[
"What is the RMSE? Could we do any better? \n\nTry re-creating the above feateng_training_data table with additional filters and re-running training and evaluation.\n\n### TODO 3: Apply transformations using SQL to prune the taxi cab dataset\n\nNow let's reduce the noise in our training dataset by only training on trips with a non-zero distance and fares above $2.50. Additionally, we will apply some geo location boundaries for New York City. Copy the below into your previous feateng_training_data table creation and re-train your model. \n\n```sql\nAND\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n```",
"_____no_output_____"
],
[
"Yippee! We're now below our target of 6 dollars in RMSE.\nWe are now beating our goals, and with just a linear model. \n\n## Making predictions with BigQuery ML\n\nThis is how the prediction query would look that we saw earlier [heading 1.3 miles uptown](https://www.google.com/maps/dir/'40.742104,-73.982683'/'40.755174,-73.983766'/@40.7481394,-73.993579,15z/data=!3m1!4b1!4m9!4m8!1m3!2m2!1d-73.982683!2d40.742104!1m3!2m2!1d-73.983766!2d40.755174) in New York City.",
"_____no_output_____"
]
],
[
[
"%%bigquery\nSELECT * FROM ML.PREDICT(MODEL serverlessml.model4_feateng, (\n SELECT \n -73.982683 AS pickuplon,\n 40.742104 AS pickuplat,\n -73.983766 AS dropofflon,\n 40.755174 AS dropofflat,\n 3.0 AS passengers,\n TIMESTAMP('2019-06-03 04:21:29.769443 UTC') AS pickup_datetime\n))",
"_____no_output_____"
]
],
[
[
"## Improving the model with feature crosses\n\nLet's do a [feature cross](https://developers.google.com/machine-learning/crash-course/feature-crosses/video-lecture) of the day-hour combination instead of using them raw",
"_____no_output_____"
]
],
[
[
"%%bigquery\nCREATE OR REPLACE MODEL serverlessml.model5_featcross\nTRANSFORM(\n * EXCEPT(pickup_datetime)\n , ST_Distance(ST_GeogPoint(pickuplon, pickuplat), ST_GeogPoint(dropofflon, dropofflat)) AS euclidean\n \n # TODO 4: Create a feature cross for day-hour combination using SQL \n , ML.( # <--- Enter the correct function for a BigQuery ML feature cross ahead of the (\n STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,\n CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday)\n ) AS day_hr\n)\nOPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg') \nAS\n\nSELECT * FROM serverlessml.feateng_training_data",
"_____no_output_____"
],
[
"%%bigquery\nSELECT *, SQRT(loss) AS rmse FROM ML.TRAINING_INFO(MODEL serverlessml.model5_featcross)",
"_____no_output_____"
],
[
"%%bigquery\nSELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model5_featcross)",
"_____no_output_____"
]
],
[
[
"Sometimes (not the case above), the training RMSE is quite reasonable, but the evaluation RMSE is terrible. This is an indication of overfitting.\nWhen we do feature crosses, we run into the risk of overfitting (for example, when a particular day-hour combo doesn't have enough taxirides).\n\n## Reducing overfitting\n\nLet's add [L2 regularization](https://developers.google.com/machine-learning/glossary/#L2_regularization) to help reduce overfitting. Let's set it to 0.1",
"_____no_output_____"
]
],
[
[
"%%bigquery\nCREATE OR REPLACE MODEL serverlessml.model6_featcross_l2\nTRANSFORM(\n * EXCEPT(pickup_datetime)\n , ST_Distance(ST_GeogPoint(pickuplon, pickuplat), ST_GeogPoint(dropofflon, dropofflat)) AS euclidean\n , ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,\n CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday)) AS day_hr\n)\n# TODO 5: Set the model options for a linear regression model to predict fare amount with 0.1 L2 Regularization\n# Tip: Refer to the documentation for syntax: \n# https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create\nOPTIONS() \nAS\n\nSELECT * FROM serverlessml.feateng_training_data",
"_____no_output_____"
],
[
"%%bigquery\nSELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model6_featcross_l2)",
"_____no_output_____"
]
],
[
[
"These sorts of experiment would have taken days to do otherwise. We did it in minutes, thanks to BigQuery ML! The advantage of doing all this in the TRANSFORM is the client code doing the PREDICT doesn't change. Our model improvement is transparent to client code.",
"_____no_output_____"
]
],
[
[
"%%bigquery\nSELECT * FROM ML.PREDICT(MODEL serverlessml.model6_featcross_l2, (\n SELECT \n -73.982683 AS pickuplon,\n 40.742104 AS pickuplat,\n -73.983766 AS dropofflon,\n 40.755174 AS dropofflat,\n 3.0 AS passengers,\n TIMESTAMP('2019-06-03 04:21:29.769443 UTC') AS pickup_datetime\n))",
"_____no_output_____"
]
],
[
[
"## Let's try feature crossing the locations too\n\nBecause the lat and lon by themselves don't have meaning, but only in conjunction, it may be useful to treat the fields as a pair instead of just using them as numeric values. However, lat and lon are continuous numbers, so we have to discretize them first. That's what ML.BUCKETIZE does.\n\nHere are some of the preprocessing functions in BigQuery ML:\n* ML.FEATURE_CROSS(STRUCT(features)) does a feature cross of all the combinations\n* ML.POLYNOMIAL_EXPAND(STRUCT(features), degree) creates x, x^2, x^3, etc.\n* ML.BUCKETIZE(f, split_points) where split_points is an array ",
"_____no_output_____"
]
],
[
[
"%%bigquery\n\n-- BQML chooses the wrong gradient descent strategy here. It will get fixed in (b/141429990)\n-- But for now, as a workaround, explicitly specify optimize_strategy='BATCH_GRADIENT_DESCENT'\n\nCREATE OR REPLACE MODEL serverlessml.model7_geo\nTRANSFORM(\n fare_amount\n , ST_Distance(ST_GeogPoint(pickuplon, pickuplat), ST_GeogPoint(dropofflon, dropofflat)) AS euclidean\n , ML.FEATURE_CROSS(STRUCT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING) AS dayofweek,\n CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING) AS hourofday), 2) AS day_hr\n , CONCAT(\n ML.BUCKETIZE(pickuplon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(pickuplat, GENERATE_ARRAY(37, 45, 0.01)),\n ML.BUCKETIZE(dropofflon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(dropofflat, GENERATE_ARRAY(37, 45, 0.01))\n ) AS pickup_and_dropoff\n)\nOPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg', l2_reg=0.1, optimize_strategy='BATCH_GRADIENT_DESCENT') \nAS\n\nSELECT * FROM serverlessml.feateng_training_data",
"_____no_output_____"
],
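[
"%%bigquery\n-- Illustrative aside (hedged, added for clarity): ML.BUCKETIZE, as used in the\n-- TRANSFORM above, maps a continuous value onto a named bin given an array of\n-- split points. The exact bin label depends on where 40.7421 falls in the\n-- 0.01-degree grid.\nSELECT ML.BUCKETIZE(40.7421, GENERATE_ARRAY(37, 45, 0.01)) AS pickup_lat_bin",
"_____no_output_____"
],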
[
"%%bigquery\nSELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model7_geo)",
"_____no_output_____"
]
],
[
[
"Yippee! We're now below our target of 6 dollars in RMSE.",
"_____no_output_____"
],
[
"## DNN\n\nYou could, of course, train a more sophisticated model. Change \"linear_reg\" above to \"dnn_regressor\" and see if it improves things.\n\n__Note: This takes 20 - 25 minutes to run.__",
"_____no_output_____"
]
],
[
[
"%%bigquery\n-- This is alpha and may not work for you.\nCREATE OR REPLACE MODEL serverlessml.model8_dnn\nTRANSFORM(\n fare_amount\n , ST_Distance(ST_GeogPoint(pickuplon, pickuplat), ST_GeogPoint(dropofflon, dropofflat)) AS euclidean\n , CONCAT(CAST(EXTRACT(DAYOFWEEK FROM pickup_datetime) AS STRING),\n CAST(EXTRACT(HOUR FROM pickup_datetime) AS STRING)) AS day_hr\n , CONCAT(\n ML.BUCKETIZE(pickuplon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(pickuplat, GENERATE_ARRAY(37, 45, 0.01)),\n ML.BUCKETIZE(dropofflon, GENERATE_ARRAY(-78, -70, 0.01)),\n ML.BUCKETIZE(dropofflat, GENERATE_ARRAY(37, 45, 0.01))\n ) AS pickup_and_dropoff\n)\n-- at the time of writing, l2_reg wasn't supported yet.\n\n# TODO 6: Create a DNN model (dnn_regressor) with hidden_units [32,8]\nOPTIONS() \nAS\n\nSELECT * FROM serverlessml.feateng_training_data",
"_____no_output_____"
],
[
"%%bigquery\nSELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model8_dnn)",
"_____no_output_____"
]
],
[
[
"We really need the L2 reg (recall that we got 4.77 without the feateng). It's time to do [Feature Engineering in Keras](../../06_feateng_keras/labs/taxifare_fc.ipynb).",
"_____no_output_____"
],
[
"Copyright 2019 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
c5284790eec6fdc45322740272e1c2cc047391b4
| 984,635 |
ipynb
|
Jupyter Notebook
|
jupyter_notebooks/3_eda.ipynb
|
Ihza430/capstone
|
f30bf8273794b78e22cdf35e05c61336400cc343
|
[
"CC0-1.0"
] | null | null | null |
jupyter_notebooks/3_eda.ipynb
|
Ihza430/capstone
|
f30bf8273794b78e22cdf35e05c61336400cc343
|
[
"CC0-1.0"
] | null | null | null |
jupyter_notebooks/3_eda.ipynb
|
Ihza430/capstone
|
f30bf8273794b78e22cdf35e05c61336400cc343
|
[
"CC0-1.0"
] | null | null | null | 515.515707 | 182,828 | 0.932524 |
[
[
[
"# Exploratory Data Analysis\n---\n*By Ihza Gonzales*\n\nThis notebook aims to explore the data collected. A series of graphs specific for time series data will be used. The distribution of stats and salaries will also be explored.",
"_____no_output_____"
],
[
"## Import Libraries\n---",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#%matplotlib inline\n\nfrom statsmodels.graphics.tsaplots import plot_acf\nfrom statsmodels.graphics.tsaplots import plot_pacf\n\npd.set_option('display.max_rows', None)\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## Functions Implemented\n---",
"_____no_output_____"
]
],
[
[
"# Code modified from code written by Matthew Garton.\n\ndef plot_series(df, cols=None, title='Title', xlab=None, ylab=None, steps=1):\n \n # Set figure size to be (18, 9).\n plt.figure(figsize=(18,9))\n \n # Iterate through each column name.\n for col in cols:\n \n # Generate a line plot of the column name.\n # You only have to specify Y, since our\n # index will be a datetime index.\n plt.plot(df[col])\n \n # Generate title and labels.\n plt.title(title, fontsize=26)\n plt.xlabel(xlab, fontsize=20)\n plt.ylabel(ylab, fontsize=20)\n \n # Enlarge tick marks.\n plt.yticks(fontsize=18)\n plt.xticks(df.index[0::steps], fontsize=15);",
"_____no_output_____"
]
],
[
[
"## Exploring a Batter for Time Series\n---",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../data/clean_players_bat/Enrique-Hernandez-571771.csv')\ndf.head()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"plot_series(df, ['AVG'], title = \"AVG\", steps= 50)",
"_____no_output_____"
]
],
[
[
"The spikes in the AVG is due to the fact that the stats are reset for the beginning of each season. If the player does well in the first few games his AVG will be really high and vice versa if the player does badly in the beginning of the season.",
"_____no_output_____"
]
],
[
[
"games_30 = df.rolling(30).mean()\nplot_series(games_30, ['AVG'], title = \"AVG for rolling mean of 30 for Enrique (Kike) Hernandez\", steps= 50)",
"_____no_output_____"
]
],
[
[
"There seems to be a trend towards the 2021 season and typically when the season is almost at the end. Kike looks like for this season to have an upward trend. Besides that his average has had a pretty stationary trend.",
"_____no_output_____"
]
],
[
[
"plot_acf(df['AVG'], lags = 30);",
"_____no_output_____"
]
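,
[
"# Supporting check (added, hedged): if the large low-lag autocorrelations really\n# reflect a trend, first-differencing the series should remove it, and the\n# differenced ACF should drop to near zero after lag 0.\nplot_acf(df['AVG'].diff().dropna(), lags = 30);",
"_____no_output_____"
]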
],
[
[
"Since there are large and positive values for the lower lags, there is a trend in the data. As for seasonality, a scallop shaped must be present but there is not one. This means the data has no seasonlity.",
"_____no_output_____"
]
],
[
[
"plot_pacf(df['AVG'], lags = 30);",
"_____no_output_____"
]
],
[
[
"There does not seem to have a pattern of fluctuations which means this data has no seasonlity",
"_____no_output_____"
],
[
"## Exploring Pitcher for Time Series",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../data/clean_players_pitch/Max-Scherzer-453286.csv')\ndf.head()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"plot_series(df, ['ERA'], title = \"ERA\", steps= 30)",
"_____no_output_____"
]
],
[
[
"There does not seem to be any consistent fluctuations in the data. There is a considerable spike around 2020-07-23.",
"_____no_output_____"
]
],
[
[
"games_30 = df.rolling(30).mean()\nplot_series(games_30, ['ERA'], title = \"AVG for rolling mean of 30 for Max Scherzer\", steps= 50)",
"_____no_output_____"
]
],
[
[
"It looks like had a very high ERA in 2020 which means he pitched bad during that season. But as he gets into 2021 his ERA has started to trend down.",
"_____no_output_____"
]
],
[
[
"plot_acf(df['ERA'], lags = 30);",
"_____no_output_____"
]
],
[
[
"There is a trend in the data because there is positive large values in the lower lags. There is no obvious scallop shape which means the data has no seasonlity.",
"_____no_output_____"
]
],
[
[
"plot_pacf(df['ERA'], lags = 30);",
"_____no_output_____"
]
],
[
[
"There is no obvious patterns in the fluctuations which means that the data has no seasonlity",
"_____no_output_____"
],
[
"## Exploring Stats and Salaries of Batters\n---",
"_____no_output_____"
]
],
[
[
"bat = pd.read_csv('../data/mlb_players_bat.csv').drop('Unnamed: 0', axis = 1)\nbat.head()",
"_____no_output_____"
],
[
"# Convert salary from object to int\nbat['salary'] = bat['salary'].str.replace(',', '').str.replace('$', '').astype(int)\n\n#Copied from https://stackoverflow.com/questions/38516481/trying-to-remove-commas-and-dollars-signs-with-pandas-in-python",
"_____no_output_____"
],
[
"bat.describe()",
"_____no_output_____"
]
],
[
[
"This provides the summary statistics of each of the batter stats. There is a lot of variability between how many at bats a person has. There are some players with only 50 at bats while others have a max of 664 at bats. This would be something to consider becuase it shows that some players might not as many data on them to be able to forecast well.",
"_____no_output_____"
]
],
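[
[
"If sparse histories turn out to be a problem, one option is to filter out players below a minimum number of at bats before modeling. A sketch with a hypothetical threshold, assuming the at-bat column is named `AB`:\n\n```python\n# Hypothetical 100 at-bat floor; the right cutoff is a modeling choice\nreliable_bat = bat[bat['AB'] >= 100]\nprint(reliable_bat.shape)\n```",
"_____no_output_____"
]
],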
[
[
"corr_matrix = bat.corr()\n\nplt.figure(figsize=(25,20))\n\nsns.heatmap(corr_matrix,\n annot = True,\n vmin = -1,\n vmax = 1,\n cmap = 'rocket');\n\n#copied from lesson 3.04",
"_____no_output_____"
]
],
[
[
"This is the correlation of all stats and salary of batters. Many of the stats are very highly correlare with each other with some even reaching 1. This makes sense the more hits a player has the more runs, home runs, or runs batted in they can have. For salary there is no stat that it is highly correlated with. A triple (3B) even has a very low correlation with salary. ",
"_____no_output_____"
]
],
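[
[
"To make the heatmap reading concrete, the stats can be ranked by their correlation with salary. A sketch, assuming `bat` is loaded and cleaned as above:\n\n```python\n# Correlation of each stat with salary, strongest first\nprint(bat.corr()['salary'].drop('salary').sort_values(ascending=False))\n```",
"_____no_output_____"
]
],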
[
[
"sns.pairplot(data = bat, \n y_vars=['salary'],\n x_vars = ['R',\n 'HR',\n 'RBI',\n 'BB'], \n hue = 'Pos',\n diag_kind=None);",
"_____no_output_____"
]
],
[
[
"This is the salary based on how many runs (R), home runs(HR), runs batted in(RBI), or walks(BB) a player has and the color represents the position of the batter. None of the graphs show any distinct pattern. Walks probably is the most disiguishable with a slight linear pattern. The other thing to note is that many players have a low salary but they could be performing well.",
"_____no_output_____"
]
],
[
[
"bat.hist(figsize = (17, 18));",
"_____no_output_____"
]
],
[
[
"Many of the stats as well as the salary is left skewed. The average(AVG), on base percentage(OBP), slugging(SLG) and on base plus slugging percentage (OPS) have a normal distribution. ",
"_____no_output_____"
]
],
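[
[
"The skew visible in the histograms can be quantified with pandas, where positive values indicate a long right tail and values near 0 are roughly symmetric. A sketch, assuming `bat` is loaded as above:\n\n```python\n# Skewness per numeric column, most right-skewed first\nprint(bat.select_dtypes('number').skew().sort_values(ascending=False))\n```",
"_____no_output_____"
]
],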
[
[
"plt.style.use('fivethirtyeight')\nbat['salary'].hist()\n\nplt.xlabel('Salary')\nplt.ylabel('Count')\nplt.title('Distribution of Batter Salary');",
"_____no_output_____"
]
],
[
[
"Just to look at the Batter salary closer. It is highly left skewed with over half of the batters having a salary of less than 5 million.",
"_____no_output_____"
],
[
"## Exploring Stats and Salaries of Pitchers\n---",
"_____no_output_____"
]
],
[
[
"pitch = pd.read_csv('../data/mlb_players_pitch.csv').drop('Unnamed: 0', axis = 1)\npitch.head()",
"_____no_output_____"
],
[
"# Convert salary from object to int\npitch['salary'] = pitch['salary'].str.replace(',', '').str.replace('$', '').astype(int)\n\n#Copied from https://stackoverflow.com/questions/38516481/trying-to-remove-commas-and-dollars-signs-with-pandas-in-python",
"_____no_output_____"
],
[
"pitch.describe()",
"_____no_output_____"
]
],
[
[
"For this summary statistics of the statistics of pitchers. Lets look at innings pitched (ip), the min is 10 while the max is 213. This is also indicative that there are different kinds of pitchers. There are closers that pitch one maybe less that one inning and then you have starting pitchers who will pitch 5-7 innings and sometimes a whole game. ",
"_____no_output_____"
]
],
[
[
"corr_matrix = pitch.corr()\n\nplt.figure(figsize=(25,20))\n\nsns.heatmap(corr_matrix,\n annot = True,\n vmin = -1,\n vmax = 1,\n cmap = 'rocket');\n\n#copied from lesson 3.04",
"_____no_output_____"
]
],
[
[
"Like the stats for batters, the stats for pitchers are very highly correlated with each other. The stats that are not highly correlated is earned run average(ERA) and walks/hits per inning pitch (WHIP). As for salary it is not very correlated with the stats with even some almost being 0. ",
"_____no_output_____"
]
],
[
[
"sns.pairplot(data = pitch, \n y_vars=['ERA'],\n x_vars = ['WHIP',\n 'K',\n 'salary',\n 'IP'], \n diag_kind=None);",
"_____no_output_____"
]
],
[
[
"Based on the heatmap, it would be behooving to look at ERA further. It makes sense that ERA and WHIP would be higly correlated because the a greater ERA means a pitchers let more people score. WHIP is walks and hits per inning pitched. If a pitcher has a high WHIP means they are more likely to let batters score. It makes sense that the correlation for ERA and strikeout(K) would be a straight line at zero. If a pitcher strikesout the batter less likely that a run will score. As for the salary it sort of makes sense as there are plenty of \"rookie\" pitchers. There are only a handful of really good pitchers and they are typically starting pitchers which allows them that higher ERA because they are expected to pitch more. ",
"_____no_output_____"
]
],
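[
[
"The pairwise reads above can be backed up by ranking the correlations with ERA directly. A sketch, assuming `pitch` is loaded and cleaned as above:\n\n```python\n# Correlation of each stat with ERA, strongest first\nprint(pitch.corr()['ERA'].drop('ERA').sort_values(ascending=False))\n```",
"_____no_output_____"
]
],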
[
[
"pitch.hist(figsize = (17, 18));",
"_____no_output_____"
]
],
[
[
"All the stats aside from ERA and WHIP are left skewed.",
"_____no_output_____"
]
],
[
[
"plt.style.use('fivethirtyeight')\npitch['salary'].hist()\n\nplt.xlabel('Salary')\nplt.ylabel('Count')\nplt.title('Distribution of Pitcher Salary');",
"_____no_output_____"
]
],
[
[
"Salary is heavily skewed to the left with having well over pitchers making less than 5 million. There also seems to be a break between pitchers making 25 million and around 20 million.",
"_____no_output_____"
],
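[
"The claim about the share of low salaries can be checked directly. A sketch, assuming `pitch['salary']` has been converted to integers as above:\n\n```python\n# Fraction of pitchers earning under 5 million\nprint((pitch['salary'] < 5_000_000).mean())\n```",
"_____no_output_____"
],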
[
"## Recap\n---\nExplored the time series data, batter salary and stats, and pitcher salary and stats. This information will allow for better modeling decisions. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
c52847b45b5f47d3e3f87ec99381a2ffb73d2ee6
| 513,038 |
ipynb
|
Jupyter Notebook
|
LV/plotting.ipynb
|
Wrede/BNN-LFI
|
8c5094f01c1eef286bdd84613c7259d534d2eb7e
|
[
"MIT"
] | null | null | null |
LV/plotting.ipynb
|
Wrede/BNN-LFI
|
8c5094f01c1eef286bdd84613c7259d534d2eb7e
|
[
"MIT"
] | null | null | null |
LV/plotting.ipynb
|
Wrede/BNN-LFI
|
8c5094f01c1eef286bdd84613c7259d534d2eb7e
|
[
"MIT"
] | null | null | null | 762.315007 | 76,156 | 0.945183 |
[
[
[
"import seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
]
],
[
[
"### Load data\nNote the data files needed are large, and can be generated by the cells supplied in the experiement notebook. However, these will take some time to generate. One can therefore reduce the number of runs needed and use those instead.",
"_____no_output_____"
]
],
[
[
"if False:\n #BCNN and SNPE-C results\n ID = 'data'\n \n ## SNPE-C\n sbi_post = np.load(f'{ID}/sbi_{ID}_post.npy')\n sbi_time = np.load(f'{ID}/sbi_{ID}_time.npy')\n \n ## BCNN\n bcnn_post = np.load(f'{ID}/bcnn_{ID}_post.npy') \n bcnn_time = np.load(f'{ID}/bcnn_{ID}_time.npy')\n\nelse:\n ## SNPE-C\n sbi_post = np.load(f'SBI_10_10gen_large_sample.npy')\n sbi_time = np.load('SBI_10_10gen_large_sample_times.npy')\n sbi_post = sbi_post[:5,:8,:,:]\n sbi_time = sbi_time[:5,:8]\n \n ## BCNN\n bcnn_post = np.load('bnn_res_5_5round_8gen_theta_thresh.npy')\n bcnn_time = np.load('bnn_res_5_5round_8gen_time_thresh.npy')\n \nbcnn_post = bcnn_post[:,1:,:,:] # first sample is simply from prior, remove.\n \n## ABC-SMC\nsmc_post = np.load('smcabc_posterior_5gen.npy',allow_pickle=True)\nY = np.empty(shape=(5,8,1000,3))\nfor i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i,j,:,:] = smc_post[i][j][:1000][:]\nsmc_post = Y\nsmc_time = np.load('smcabc_posterior_5gen_time.npy')\n\n",
"_____no_output_____"
]
],
[
[
"# Main paper figures",
"_____no_output_____"
],
[
"### Compute mean and std",
"_____no_output_____"
]
],
[
[
"sbi_post_mean = sbi_post.mean(axis=2)\nsbi_post_std = sbi_post.std(axis=2)\n\nsbi_time_mean = sbi_time.mean(axis=0)\nsbi_time_std = sbi_time.std(axis=0)\n\nbcnn_post_mean = bcnn_post.mean(axis=2)\nbcnn_post_std = bcnn_post.std(axis=2)\n\nbcnn_time_mean = bcnn_time.mean(axis=0)\nbcnn_time_std = bcnn_time.std(axis=0)\n\nsmc_post_mean = np.mean(smc_post, axis=2)\nsmc_post_std = np.std(smc_post, axis=2)",
"_____no_output_____"
]
],
[
[
"### Compute the MSE",
"_____no_output_____"
]
],
[
[
"theta_true = np.log([[1.0,0.005, 1.0]])\ntheta_ = np.expand_dims(theta_true,axis=[0,1])\n\nsbi_post_mse = ((theta_ - sbi_post)**2).mean(axis=(2,3))\nbcnn_post_mse = ((theta_ - bcnn_post)**2).mean(axis=(2,3))\nsmc_post_mse = ((theta_ - smc_post)**2).mean(axis=(2,3))",
"_____no_output_____"
]
],
[
[
"## Figure 4 - MSE of BNN, SNPE-C, and ABC-SMC",
"_____no_output_____"
]
],
[
[
"import matplotlib\nmatplotlib.rcParams['ps.useafm'] = True\nmatplotlib.rcParams['pdf.use14corefonts'] = True\nmatplotlib.rcParams['text.usetex'] = True",
"_____no_output_____"
],
[
"sns.set_theme()\n\nfont_size = 8\nsns.set_context(\"paper\", rc={\"font.size\":font_size,\"axes.titlesize\":font_size,\"axes.labelsize\":font_size, \"axis.legendsize\":font_size })\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n\n\n\nf = plt.figure(figsize=(4.25, 2), constrained_layout=True)\ngs = f.add_gridspec(1, 1)\n\n#Theta 1,2,3 mse(<post>)\nax = f.add_subplot(gs[0, 0])\nax.errorbar(x=np.arange(8)+1, y=sbi_post_mse.mean(axis=0)[:], \n yerr=sbi_post_mse.std(axis=0)[:], \n capsize=5, color='C0', label='SNPE-C')\nax.errorbar(x=np.arange(8)+1, y=bcnn_post_mse.mean(axis=0)[:], \n yerr=bcnn_post_mse.std(axis=0)[:],\n capsize=5, color='C2', label='BCNN')\nax.errorbar(x=np.arange(8)+1, y=smc_post_mse.mean(axis=0)[:], \n yerr=smc_post_mse.std(axis=0)[:],\n capsize=5, color='C1', label='ABC-SMC')\nax.set_ylabel('MSE')\nax.set_xlabel('Round')\n\nplt.legend(loc='upper right')\n#plt.yscale('log')\n\n#plt.savefig('lv_mse.pdf',dpi=350, bbox_inches = 'tight', pad_inches = 0)",
"_____no_output_____"
]
],
[
[
"## Figure 5 - Snapshot of $p(\\theta | D)$",
"_____no_output_____"
]
],
[
[
"\ndef posterior_snaps(run_idx=0, save=True): \n \n def multivar(grid, x, y, xlabel='', ylabel='', label='',color='C0'):\n ax = f.add_subplot(grid)\n sns.kdeplot(x=x, y=y, ax=ax, label=label,color=color)\n ax.set_ylim(np.log(0.002),np.log(2))\n ax.set_xlim(np.log(0.002),np.log(2))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n return ax\n\n def singlevar(grid, x, y, xlabel='', ylabel='', label='',color='C0'):\n ax = f.add_subplot(grid)\n ax.plot(x, y, marker='x', ms=5, label=label,color=color)\n ax.set_ylim(np.log(0.002),np.log(2))\n ax.set_xlim(np.log(0.002),np.log(2))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n return ax\n\n\n font_size = 8\n sns.set_context(\"paper\", rc={\"font.size\":font_size,\"axes.titlesize\":font_size,\"axes.labelsize\":font_size, \"axis.legendsize\":font_size })\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n\n f = plt.figure(figsize=(4.25, 4.25), constrained_layout=True)\n\n widths = [1, 1, 1]# 1, 1, 1, 1, 1]\n heights = [1, 1, 1]#, 1, 1, 1, 1, 1, 1]\n gs = f.add_gridspec(3,3, width_ratios=widths, height_ratios=heights)\n # k1 x k2\n\n ax = multivar(gs[0, 0],x=smc_post[run_idx, 1, :, 1], y=smc_post[run_idx, 1, :, 0],ylabel=r'$(\\theta_2, \\theta_1)$',color='C1')\n ax = multivar(gs[0, 1],x=smc_post[run_idx, 3, :, 1], y=smc_post[run_idx, 3, :, 0],color='C1')\n ax = multivar(gs[0, 2],x=smc_post[run_idx, 7, :, 1], y=smc_post[run_idx, 7, :, 0], label='ABC-SMC',color='C1')\n\n ax = multivar(gs[0, 0],x=bcnn_post[run_idx, 1, :, 1], y=bcnn_post[run_idx, 1, :, 0],ylabel=r'$(\\theta_2, \\theta_1)$',color='C2')\n ax = multivar(gs[0, 1],x=bcnn_post[run_idx, 3, :, 1], y=bcnn_post[run_idx, 3, :, 0],color='C2')\n ax = multivar(gs[0, 2],x=bcnn_post[run_idx, 7, :, 1], y=bcnn_post[run_idx, 7, :, 0],label='BCNN',color='C2')\n\n ax = multivar(gs[0, 0],x=sbi_post[run_idx, 1, :, 1], y=sbi_post[run_idx, 1, :, 0],ylabel=r'$(\\theta_2, \\theta_1)$',color='C0')\n ax = multivar(gs[0, 1],x=sbi_post[run_idx, 3, :, 1], y=sbi_post[run_idx, 3, :, 0],color='C0')\n ax = multivar(gs[0, 2],x=sbi_post[run_idx, 7, :, 1], y=sbi_post[run_idx, 7, :, 0], label='SNPE-C',color='C0')\n\n ax = singlevar(gs[0, 0],x=theta_true[0,1],y=theta_true[0,0],color='C3',ylabel=r'$(\\theta_2, \\theta_1)$')\n ax = singlevar(gs[0, 1],x=theta_true[0,1],y=theta_true[0,0],color='C3')\n ax = singlevar(gs[0, 2],x=theta_true[0,1],y=theta_true[0,0],color='C3',label='truth')\n\n\n\n\n\n ax.legend(loc='lower right')\n\n # k1 x k3\n\n ax = multivar(gs[1, 0],x=smc_post[run_idx, 1, :, 2], y=smc_post[run_idx, 1, :, 0],ylabel=r'$(\\theta_3, \\theta_1)$',color='C1')\n ax = multivar(gs[1, 1],x=smc_post[run_idx, 3, :, 2], y=smc_post[run_idx, 3, :, 0],color='C1')\n ax = multivar(gs[1, 2],x=smc_post[run_idx, 7, :, 2], y=smc_post[run_idx, 7, :, 0], label='ABC-SMC',color='C1')\n\n ax = multivar(gs[1, 0],x=bcnn_post[run_idx, 1, :, 2], y=bcnn_post[run_idx, 1, :, 0],ylabel=r'$(\\theta_3, \\theta_1)$',color='C2')\n ax = multivar(gs[1, 1],x=bcnn_post[run_idx, 3, :, 2], y=bcnn_post[run_idx, 3, :, 0],color='C2')\n ax = multivar(gs[1, 2],x=bcnn_post[run_idx, 7, :, 2], y=bcnn_post[run_idx, 7, :, 0],color='C2')\n\n ax = multivar(gs[1, 0],x=sbi_post[run_idx, 1, :, 2], y=sbi_post[run_idx, 1, :, 0],ylabel=r'$(\\theta_3, \\theta_1)$', color='C0')\n ax = multivar(gs[1, 1],x=sbi_post[run_idx, 3, :, 2], y=sbi_post[run_idx, 3, :, 0], color='C0')\n ax = multivar(gs[1, 2],x=sbi_post[run_idx, 7, :, 2], y=sbi_post[run_idx, 7, :, 0], color='C0')\n\n ax = 
singlevar(gs[1, 0],x=theta_true[0,2],y=theta_true[0,0],color='C3',ylabel=r'$(\\theta_3, \\theta_1)$')\n ax = singlevar(gs[1, 1],x=theta_true[0,2],y=theta_true[0,0],color='C3')\n ax = singlevar(gs[1, 2],x=theta_true[0,2],y=theta_true[0,0],color='C3',label='truth')\n\n\n # k2 x k3\n\n\n ax = multivar(gs[2, 0],x=smc_post[run_idx, 1, :, 2], y=smc_post[run_idx, 1, :, 1], xlabel='Round 2',ylabel=r'$(\\theta_3, \\theta_2)$',color='C1')\n ax = multivar(gs[2, 1],x=smc_post[run_idx, 3, :, 2], y=smc_post[run_idx, 3, :, 1], xlabel='Round 2',color='C1')\n ax = multivar(gs[2, 2],x=smc_post[run_idx, 7, :, 2], y=smc_post[run_idx, 7, :, 1], xlabel='Round 2', label='ABC-SMC',color='C1')\n\n\n ax = multivar(gs[2, 0],x=bcnn_post[run_idx, 1, :, 2], y=bcnn_post[run_idx, 1, :, 1],xlabel='Round 2',ylabel=r'$(\\theta_3, \\theta_2)$',color='C2')\n ax = multivar(gs[2, 1],x=bcnn_post[run_idx, 3, :, 2], y=bcnn_post[run_idx, 3, :, 1],xlabel='Round 4',color='C2')\n ax = multivar(gs[2, 2],x=bcnn_post[run_idx, 7, :, 2], y=bcnn_post[run_idx, 7, :, 1],xlabel='Round 8',color='C2')\n\n\n ax = multivar(gs[2, 0],x=sbi_post[run_idx, 1, :, 2], y=sbi_post[run_idx, 1, :, 1], xlabel='Round 2',ylabel=r'$(\\theta_3, \\theta_2)$', color='C0')\n ax = multivar(gs[2, 1],x=sbi_post[run_idx, 3, :, 2], y=sbi_post[run_idx, 3, :, 1], xlabel='Round 4', color='C0')\n ax = multivar(gs[2, 2],x=sbi_post[run_idx, 7, :, 2], y=sbi_post[run_idx, 7, :, 1], xlabel='Round 8', color='C0')\n\n\n ax = singlevar(gs[2, 0],x=theta_true[0,2],y=theta_true[0,1],color='C3',xlabel='Round 2',ylabel=r'$(\\theta_3, \\theta_2)$')\n ax = singlevar(gs[2, 1],x=theta_true[0,2],y=theta_true[0,1],color='C3',xlabel='Round 4')\n ax = singlevar(gs[2, 2],x=theta_true[0,2],y=theta_true[0,1],color='C3',label='truth',xlabel='Round 8')\n\n if save:\n plt.savefig(f'lv_dens_{run_idx}.pdf',dpi=350, bbox_inches = 'tight', pad_inches = 0)\n\n",
"_____no_output_____"
],
[
"posterior_snaps(run_idx=0,save=False) ",
"_____no_output_____"
]
],
[
[
"## Supplemental figures ",
"_____no_output_____"
],
[
"### Figure S8 - multiple runs of the inference procedure with different number seeds",
"_____no_output_____"
]
],
[
[
"for i in range(bcnn_post.shape[0]):\n posterior_snaps(run_idx=i,save=False) ",
"_____no_output_____"
]
],
[
[
"### Figure S5 - impact of # of classes (bins$^2$)",
"_____no_output_____"
]
],
[
[
"bcnn3 = np.load('bnn_res_3_5round_8gen_theta_thresh.npy')\nbcnn4 = np.load('bnn_res_4_5round_8gen_theta_thresh.npy')\nbcnn5 = np.load('bnn_res_5_5round_8gen_theta_thresh.npy')",
"_____no_output_____"
],
[
"bcnn3_mse = ((theta_ - bcnn3)**2).mean(axis=(2,3))\nbcnn4_mse = ((theta_ - bcnn4)**2).mean(axis=(2,3))\nbcnn5_mse = ((theta_ - bcnn5)**2).mean(axis=(2,3))",
"_____no_output_____"
],
[
"sns.set_theme()\n\nfont_size = 8\nsns.set_context(\"paper\", rc={\"font.size\":font_size,\"axes.titlesize\":font_size,\"axes.labelsize\":font_size, \"axis.legendsize\":font_size })\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n\n\n\nf = plt.figure(figsize=(4.25, 2), constrained_layout=True)\ngs = f.add_gridspec(1, 1)\n\n#Theta 1,2,3 mse(<post>)\nax = f.add_subplot(gs[0, 0])\nax.errorbar(x=np.arange(9)+1, y=bcnn3_mse.mean(axis=0)[:], \n yerr=bcnn3_mse.std(axis=0)[:], \n capsize=5, color='C0', label='9 classes')\nax.errorbar(x=np.arange(9)+1, y=bcnn4_mse.mean(axis=0)[:], \n yerr=bcnn4_mse.std(axis=0)[:],\n capsize=5, color='C2', label='16 classes')\nax.errorbar(x=np.arange(9)+1, y=bcnn5_mse.mean(axis=0)[:], \n yerr=bcnn5_mse.std(axis=0)[:],\n capsize=5, color='C1', label='25 classes')\nax.set_ylabel('MSE')\nax.set_xlabel('Round')\nplt.yscale('log')\nplt.legend(loc='upper right')\n\n#plt.savefig('lv_bins.pdf',dpi=350, bbox_inches = 'tight', pad_inches = 0)",
"_____no_output_____"
]
],
[
[
"### Figure S6 - threshold or not",
"_____no_output_____"
]
],
[
[
"bcnn5_no = np.load('bnn_res_5_5round_8gen_theta.npy')\nbcnn5_no_mse = ((theta_ - bcnn5_no)**2).mean(axis=(2,3))",
"_____no_output_____"
],
[
"sns.set_theme()\n\n\nfont_size = 8\nsns.set_context(\"paper\", rc={\"font.size\":font_size,\"axes.titlesize\":font_size,\"axes.labelsize\":font_size, \"axis.legendsize\":font_size })\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n\n\n\nf = plt.figure(figsize=(4.25, 2), constrained_layout=True)\ngs = f.add_gridspec(1, 1)\n\n#Theta 1,2,3 mse(<post>)\nax = f.add_subplot(gs[0, 0])\nax.errorbar(x=np.arange(9)+1, y=bcnn5_no_mse.mean(axis=0)[:], \n yerr=bcnn5_no_mse.std(axis=0)[:],\n capsize=5, color='C0', label='$\\delta = 0.0$')\nax.errorbar(x=np.arange(9)+1, y=bcnn5_mse.mean(axis=0)[:], \n yerr=bcnn5_mse.std(axis=0)[:],\n capsize=5, color='C1', label='$\\delta = 0.05$')\nax.set_ylabel('MSE')\nax.set_xlabel('Round')\nplt.yscale('log')\nplt.legend(loc='upper right')\n\n#plt.savefig('lv_thresh.pdf',dpi=350, bbox_inches = 'tight', pad_inches = 0)",
"_____no_output_____"
]
],
[
[
"### Figure S7 - Elapsed time",
"_____no_output_____"
]
],
[
[
"sns.set_theme()\nsns.set_context(\"paper\", font_scale=1.5, rc={\"lines.linewidth\": 1.5})\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n\nmpl.rcParams['ps.useafm'] = True\nmpl.rcParams['pdf.use14corefonts'] = True\nmpl.rcParams['text.usetex'] = True\n\nsns.set_theme()\nfont_size = 8\nsns.set_context(\"paper\", rc={\"font.size\":font_size,\"axes.titlesize\":font_size,\"axes.labelsize\":font_size, \"axis.legendsize\":font_size })\nsns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n\n\nf = plt.figure(figsize=(4.25, 2))\ngs = f.add_gridspec(1, 2)\n\n#Theta 1 E(<post>)\nax = f.add_subplot(gs[0, 0])\n\nax.errorbar(x=np.arange(8)+1, y=bcnn_time.mean(axis=0)/60, \n yerr=bcnn_time.std(axis=0)/60, \n capsize=5, color='C0', label='BCNN')\nax.errorbar(x=np.arange(8)+1, y=sbi_time.mean(axis=0)/60, \n yerr=sbi_time.std(axis=0)/60, \n capsize=5, color='C1', label='SNPE')\nax.set_xlabel(\"Round\")\nax.set_ylabel(\"time/round\")\nax.set_xticks(np.arange(8)+1)\n\nax = f.add_subplot(gs[0, 1])\n\nbcnn_cumsum = np.cumsum(bcnn_time, axis=1)\nsbi_cumsum = np.cumsum(sbi_time, axis=1)\nax.errorbar(x=np.arange(8)+1, y=bcnn_cumsum.mean(axis=0)/60, \n yerr=bcnn_cumsum.std(axis=0)/60, \n capsize=5, color='C0', label='BCNN')\nax.errorbar(x=np.arange(8)+1, y=sbi_cumsum.mean(axis=0)/60, \n yerr=sbi_cumsum.std(axis=0)/60, \n capsize=5, color='C1', label='SNPE-C')\n\nax.set_xlabel(\"Round\")\nax.set_ylabel(\"cumsum(time)\")\nax.set_xticks(np.arange(8)+1)\n\n\nplt.legend()\nplt.tight_layout()\n\n#plt.savefig('lv_time.pdf',dpi=350, bbox_inches = 'tight', pad_inches = 0)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
c5284da9331be62a085d326eac78d925fdf8cad6
| 1,022,665 |
ipynb
|
Jupyter Notebook
|
docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb
|
phlippe/notebook_test
|
2ec9803490ea85e1f2f95731e2bfe52fa7581d50
|
[
"MIT"
] | 2 |
2020-09-30T07:26:29.000Z
|
2020-10-09T14:51:09.000Z
|
docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb
|
phlippe/notebook_test
|
2ec9803490ea85e1f2f95731e2bfe52fa7581d50
|
[
"MIT"
] | null | null | null |
docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb
|
phlippe/notebook_test
|
2ec9803490ea85e1f2f95731e2bfe52fa7581d50
|
[
"MIT"
] | null | null | null | 66.03377 | 35,810 | 0.669843 |
[
[
[
"# Tutorial 11: Normalizing Flows for image modeling\n\n\n\n**Filled notebook:** \n[](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb)\n[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb) \n**Pre-trained models:** \n[](https://github.com/phlippe/saved_models/tree/main/tutorial11)\n[](https://drive.google.com/drive/folders/1gttZ5DSrpKwn9g3RcizqA5qG7NFLMgvv?usp=sharing) \n**Recordings:** \n[](https://youtu.be/U1fwesIusbg)\n[](https://youtu.be/qMoGcRhVrF8)\n[](https://youtu.be/YoAWiaEt41Y)\n[](https://youtu.be/nTyDvn-ADJ4) \n**Author:** Phillip Lippe",
"_____no_output_____"
],
[
"In this tutorial, we will take a closer look at complex, deep normalizing flows. The most popular, current application of deep normalizing flows is to model datasets of images. As for other generative models, images are a good domain to start working on because (1) CNNs are widely studied and strong models exist, (2) images are high-dimensional and complex, and (3) images are discrete integers. In this tutorial, we will review current advances in normalizing flows for image modeling, and get hands-on experience on coding normalizing flows. Note that normalizing flows are commonly parameter heavy and therefore computationally expensive. We will use relatively simple and shallow flows to save computational cost and allow you to run the notebook on CPU, but keep in mind that a simple way to improve the scores of the flows we study here is to make them deeper. \n\nThroughout this notebook, we make use of [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/latest/). The first cell imports our usual libraries.",
"_____no_output_____"
]
],
[
[
"## Standard libraries\nimport os\nimport math\nimport time\nimport numpy as np \n\n## Imports for plotting\nimport matplotlib.pyplot as plt\n%matplotlib inline \nfrom IPython.display import set_matplotlib_formats\nset_matplotlib_formats('svg', 'pdf') # For export\nfrom matplotlib.colors import to_rgb\nimport matplotlib\nmatplotlib.rcParams['lines.linewidth'] = 2.0\nimport seaborn as sns\nsns.reset_orig()\n\n## Progress bar\nfrom tqdm.notebook import tqdm\n\n## PyTorch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torch.optim as optim\n# Torchvision\nimport torchvision\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms\n# PyTorch Lightning\ntry:\n import pytorch_lightning as pl\nexcept ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary\n !pip install --quiet pytorch-lightning>=1.4\n import pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\n\n# Path to the folder where the datasets are/should be downloaded (e.g. MNIST)\nDATASET_PATH = \"../data\"\n# Path to the folder where the pretrained models are saved\nCHECKPOINT_PATH = \"../saved_models/tutorial11\"\n\n# Setting the seed\npl.seed_everything(42)\n\n# Ensure that all operations are deterministic on GPU (if used) for reproducibility\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n# Fetching the device that will be used throughout this notebook\ndevice = torch.device(\"cpu\") if not torch.cuda.is_available() else torch.device(\"cuda:0\")\nprint(\"Using device\", device)",
"Using device cuda:0\n"
]
],
[
[
"Again, we have a few pretrained models. We download them below to the specified path above.",
"_____no_output_____"
]
],
[
[
"import urllib.request\nfrom urllib.error import HTTPError\n# Github URL where saved models are stored for this tutorial\nbase_url = \"https://raw.githubusercontent.com/phlippe/saved_models/main/tutorial11/\"\n# Files to download\npretrained_files = [\"MNISTFlow_simple.ckpt\", \"MNISTFlow_vardeq.ckpt\", \"MNISTFlow_multiscale.ckpt\"]\n# Create checkpoint path if it doesn't exist yet\nos.makedirs(CHECKPOINT_PATH, exist_ok=True)\n\n# For each file, check whether it already exists. If not, try downloading it.\nfor file_name in pretrained_files:\n file_path = os.path.join(CHECKPOINT_PATH, file_name)\n if not os.path.isfile(file_path):\n file_url = base_url + file_name\n print(f\"Downloading {file_url}...\")\n try:\n urllib.request.urlretrieve(file_url, file_path)\n except HTTPError as e:\n print(\"Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\\n\", e)",
"_____no_output_____"
]
],
[
[
"We will use the MNIST dataset in this notebook. MNIST constitutes, despite its simplicity, a challenge for small generative models as it requires the global understanding of an image. At the same time, we can easily judge whether generated images come from the same distribution as the dataset (i.e. represent real digits), or not.\n\nTo deal better with the discrete nature of the images, we transform them from a range of 0-1 to a range of 0-255 as integers.",
"_____no_output_____"
]
],
[
[
"# Convert images from 0-1 to 0-255 (integers)\ndef discretize(sample):\n return (sample * 255).to(torch.int32)\n\n# Transformations applied on each image => make them a tensor and discretize\ntransform = transforms.Compose([transforms.ToTensor(),\n discretize])\n\n# Loading the training dataset. We need to split it into a training and validation part\ntrain_dataset = MNIST(root=DATASET_PATH, train=True, transform=transform, download=True)\npl.seed_everything(42)\ntrain_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000])\n\n# Loading the test set\ntest_set = MNIST(root=DATASET_PATH, train=False, transform=transform, download=True)\n\n# We define a set of data loaders that we can use for various purposes later.\n# Note that for actually training a model, we will use different data loaders\n# with a lower batch size.\ntrain_loader = data.DataLoader(train_set, batch_size=256, shuffle=False, drop_last=False)\nval_loader = data.DataLoader(val_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4)\ntest_loader = data.DataLoader(test_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4)",
"_____no_output_____"
]
],
[
[
"In addition, we will define below a function to simplify the visualization of images/samples. Some training examples of the MNIST dataset is shown below.",
"_____no_output_____"
]
],
[
[
"def show_imgs(imgs, title=None, row_size=4):\n # Form a grid of pictures (we use max. 8 columns)\n num_imgs = imgs.shape[0] if isinstance(imgs, torch.Tensor) else len(imgs)\n is_int = imgs.dtype==torch.int32 if isinstance(imgs, torch.Tensor) else imgs[0].dtype==torch.int32\n nrow = min(num_imgs, row_size)\n ncol = int(math.ceil(num_imgs/nrow))\n imgs = torchvision.utils.make_grid(imgs, nrow=nrow, pad_value=128 if is_int else 0.5)\n np_imgs = imgs.cpu().numpy()\n # Plot the grid\n plt.figure(figsize=(1.5*nrow, 1.5*ncol))\n plt.imshow(np.transpose(np_imgs, (1,2,0)), interpolation='nearest')\n plt.axis('off')\n if title is not None:\n plt.title(title)\n plt.show()\n plt.close()\n\nshow_imgs([train_set[i][0] for i in range(8)])",
"_____no_output_____"
]
],
[
[
"## Normalizing Flows as generative model\n\nIn the previous lectures, we have seen Energy-based models, Variational Autoencoders (VAEs) and Generative Adversarial Networks (GANs) as example of generative models. However, none of them explicitly learn the probability density function $p(x)$ of the real input data. While VAEs model a lower bound, energy-based models only implicitly learn the probability density. GANs on the other hand provide us a sampling mechanism for generating new data, without offering a likelihood estimate. The generative model we will look at here, called Normalizing Flows, actually models the true data distribution $p(x)$ and provides us with an exact likelihood estimate. Below, we can visually compare VAEs, GANs and Flows\n(figure credit - [Lilian Weng](https://lilianweng.github.io/lil-log/2018/10/13/flow-based-deep-generative-models.html)):\n\n<center width=\"100%\"><img src=\"comparison_GAN_VAE_NF.png\" width=\"600px\"></center>\n\nThe major difference compared to VAEs is that flows use *invertible* functions $f$ to map the input data $x$ to a latent representation $z$. To realize this, $z$ must be of the same shape as $x$. This is in contrast to VAEs where $z$ is usually much lower dimensional than the original input data. However, an invertible mapping also means that for every data point $x$, we have a corresponding latent representation $z$ which allows us to perform lossless reconstruction ($z$ to $x$). In the visualization above, this means that $x=x'$ for flows, no matter what invertible function $f$ and input $x$ we choose. \n\nNonetheless, how are normalizing flows modeling a probability density with an invertible function? The answer to this question is the rule for change of variables. Specifically, given a prior density $p_z(z)$ (e.g. Gaussian) and an invertible function $f$, we can determine $p_x(x)$ as follows:\n\n$$\n\\begin{split}\n \\int p_x(x) dx & = \\int p_z(z) dz = 1 \\hspace{1cm}\\text{(by definition of a probability distribution)}\\\\\n \\Leftrightarrow p_x(x) & = p_z(z) \\left|\\frac{dz}{dx}\\right| = p_z(f(x)) \\left|\\frac{df(x)}{dx}\\right|\n\\end{split}\n$$\n\nHence, in order to determine the probability of $x$, we only need to determine its probability in latent space, and get the derivate of $f$. Note that this is for a univariate distribution, and $f$ is required to be invertible and smooth. For a multivariate case, the derivative becomes a Jacobian of which we need to take the determinant. As we usually use the log-likelihood as objective, we write the multivariate term with logarithms below:\n\n$$\n\\log p_x(\\mathbf{x}) = \\log p_z(f(\\mathbf{x})) + \\log{} \\left|\\det \\frac{df(\\mathbf{x})}{d\\mathbf{x}}\\right|\n$$\n\nAlthough we now know how a normalizing flow obtains its likelihood, it might not be clear what a normalizing flow does intuitively. For this, we should look from the inverse perspective of the flow starting with the prior probability density $p_z(z)$. If we apply an invertible function on it, we effectively \"transform\" its probability density. For instance, if $f^{-1}(z)=z+1$, we shift the density by one while still remaining a valid probability distribution, and being invertible. We can also apply more complex transformations, like scaling: $f^{-1}(z)=2z+1$, but there you might see a difference. 
When you scale, you also change the volume of the probability density, as for example on uniform distributions (figure credit - [Eric Jang](https://blog.evjang.com/2018/01/nf1.html)):\n\n<center width=\"100%\"><img src=\"uniform_flow.png\" width=\"300px\"></center>\n\nYou can see that the height of $p(y)$ should be lower than $p(x)$ after scaling. This change in volume represents $\\left|\\frac{df(x)}{dx}\\right|$ in our equation above, and ensures that even after scaling, we still have a valid probability distribution. We can go on with making our function $f$ more complex. However, the more complex $f$ becomes, the harder it will be to find the inverse $f^{-1}$ of it, and to calculate the log-determinant of the Jacobian $\\log{} \\left|\\det \\frac{df(\\mathbf{x})}{d\\mathbf{x}}\\right|$. An easier trick is to stack multiple invertible functions $f_{1,...,K}$ after each other, as together they still represent a single, invertible function. Using multiple, learnable invertible functions, a normalizing flow attempts to transform $p_z(z)$ slowly into a more complex distribution which should finally match $p_x(x)$. We visualize the idea below\n(figure credit - [Lilian Weng](https://lilianweng.github.io/lil-log/2018/10/13/flow-based-deep-generative-models.html)):\n\n<center width=\"100%\"><img src=\"normalizing_flow_layout.png\" width=\"700px\"></center>\n\nStarting from $z_0$, which follows the prior Gaussian distribution, we sequentially apply the invertible functions $f_1,f_2,...,f_K$, until $z_K$ represents $x$. Note that in the figure above, the functions $f$ represent the inverted functions of the $f$ we had above (here: $f:Z\\to X$, above: $f:X\\to Z$). This is just a different notation and has no impact on the actual flow design, because all $f$ need to be invertible anyway. When we estimate the log likelihood of a data point $x$ as in the equations above, we run the flows in the opposite direction to the one visualized above. Multiple flow layers have been proposed that use a neural network as learnable parameters, such as the planar and radial flow. However, we will focus here on flows that are commonly used in image modeling, and will discuss them in the rest of the notebook along with the details of how to train a normalizing flow.",
"_____no_output_____"
],
[
"## Normalizing Flows on images\n\nTo become familiar with normalizing flows, especially for the application of image modeling, it is best to discuss the different elements in a flow along with the implementation. As a general concept, we want to build a normalizing flow that maps an input image (here MNIST) to an equally sized latent space:\n\n<center width=\"100%\" style=\"padding: 10px\"><img src=\"image_to_gaussian.svg\" width=\"450px\"></center>\n\nAs a first step, we will implement a template of a normalizing flow in PyTorch Lightning. During training and validation, a normalizing flow performs density estimation in the forward direction. For this, we apply a series of flow transformations on the input $x$ and estimate the probability of the input by determining the probability of the transformed point $z$ given a prior, and the change of volume caused by the transformations. During inference, we can do both density estimation and sampling new points by inverting the flow transformations. Therefore, we define a function `_get_likelihood` which performs density estimation, and `sample` to generate new examples. The functions `training_step`, `validation_step` and `test_step` all make use of `_get_likelihood`. \n\nThe standard metric used in generative models, and in particular normalizing flows, is bits per dimensions (bpd). Bpd is motivated from an information theory perspective and describes how many bits we would need to encode a particular example in our modeled distribution. The less bits we need, the more likely the example in our distribution. When we test for the bits per dimension of our test dataset, we can judge whether our model generalizes to new samples of the dataset and didn't memorize the training dataset. In order to calculate the bits per dimension score, we can rely on the negative log-likelihood and change the log base (as bits are binary while NLL is usually exponential):\n\n$$\\text{bpd} = \\text{nll} \\cdot \\log_2\\left(\\exp(1)\\right) \\cdot \\left(\\prod d_i\\right)^{-1}$$\n\nwhere $d_1,...,d_K$ are the dimensions of the input. For images, this would be the height, width and channel number. We divide the log likelihood by these extra dimensions to have a metric which we can compare for different image resolutions. In the original image space, MNIST examples have a bits per dimension score of 8 (we need 8 bits to encode each pixel as there are 256 possible values). ",
"_____no_output_____"
]
],
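[
[
"Before implementing the full model, it can help to sanity check the change-of-variables formula numerically. A minimal sketch (not part of the tutorial's pipeline): for the affine flow $x = 2z + 1$ with $z\\sim\\mathcal{N}(0,1)$, we have $f(x)=(x-1)/2$ and $\\left|\\frac{df}{dx}\\right|=1/2$, so the formula must reproduce the density of $\\mathcal{N}(1,2)$:\n\n```python\nimport torch\n\nprior = torch.distributions.Normal(0.0, 1.0) # p_z\ntarget = torch.distributions.Normal(1.0, 2.0) # analytic p_x for x = 2z + 1\nx = torch.linspace(-3.0, 5.0, 7)\nlog_px = prior.log_prob((x - 1) / 2) + torch.log(torch.tensor(0.5))\nprint(torch.allclose(log_px, target.log_prob(x))) # True\n```",
"_____no_output_____"
]
],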
[
[
"class ImageFlow(pl.LightningModule):\n \n def __init__(self, flows, import_samples=8):\n \"\"\"\n Inputs:\n flows - A list of flows (each a nn.Module) that should be applied on the images. \n import_samples - Number of importance samples to use during testing (see explanation below). Can be changed at any time\n \"\"\"\n super().__init__()\n self.flows = nn.ModuleList(flows)\n self.import_samples = import_samples\n # Create prior distribution for final latent space\n self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)\n # Example input for visualizing the graph\n self.example_input_array = train_set[0][0].unsqueeze(dim=0)\n\n def forward(self, imgs):\n # The forward function is only used for visualizing the graph\n return self._get_likelihood(imgs)\n\n def encode(self, imgs):\n # Given a batch of images, return the latent representation z and ldj of the transformations\n z, ldj = imgs, torch.zeros(imgs.shape[0], device=self.device)\n for flow in self.flows:\n z, ldj = flow(z, ldj, reverse=False)\n return z, ldj\n\n def _get_likelihood(self, imgs, return_ll=False):\n \"\"\"\n Given a batch of images, return the likelihood of those. \n If return_ll is True, this function returns the log likelihood of the input.\n Otherwise, the ouptut metric is bits per dimension (scaled negative log likelihood)\n \"\"\"\n z, ldj = self.encode(imgs)\n log_pz = self.prior.log_prob(z).sum(dim=[1,2,3])\n log_px = ldj + log_pz\n nll = -log_px\n # Calculating bits per dimension\n bpd = nll * np.log2(np.exp(1)) / np.prod(imgs.shape[1:])\n return bpd.mean() if not return_ll else log_px\n\n @torch.no_grad()\n def sample(self, img_shape, z_init=None):\n \"\"\"\n Sample a batch of images from the flow.\n \"\"\"\n # Sample latent representation from prior\n if z_init is None:\n z = self.prior.sample(sample_shape=img_shape).to(device)\n else:\n z = z_init.to(device)\n \n # Transform z to x by inverting the flows\n ldj = torch.zeros(img_shape[0], device=device)\n for flow in reversed(self.flows):\n z, ldj = flow(z, ldj, reverse=True)\n return z\n\n def configure_optimizers(self):\n optimizer = optim.Adam(self.parameters(), lr=1e-3)\n # An scheduler is optional, but can help in flows to get the last bpd improvement\n scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.99)\n return [optimizer], [scheduler]\n\n def training_step(self, batch, batch_idx):\n # Normalizing flows are trained by maximum likelihood => return bpd\n loss = self._get_likelihood(batch[0]) \n self.log('train_bpd', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss = self._get_likelihood(batch[0])\n self.log('val_bpd', loss)\n\n def test_step(self, batch, batch_idx):\n # Perform importance sampling during testing => estimate likelihood M times for each image\n samples = []\n for _ in range(self.import_samples):\n img_ll = self._get_likelihood(batch[0], return_ll=True)\n samples.append(img_ll)\n img_ll = torch.stack(samples, dim=-1)\n \n # To average the probabilities, we need to go from log-space to exp, and back to log.\n # Logsumexp provides us a stable implementation for this\n img_ll = torch.logsumexp(img_ll, dim=-1) - np.log(self.import_samples)\n \n # Calculate final bpd\n bpd = -img_ll * np.log2(np.exp(1)) / np.prod(batch[0].shape[1:])\n bpd = bpd.mean()\n \n self.log('test_bpd', bpd)",
"_____no_output_____"
]
],
[
[
"The `test_step` function differs from the training and validation step in that it makes use of importance sampling. We will discuss the motiviation and details behind this after understanding how flows model discrete images in continuous space. ",
"_____no_output_____"
],
[
"### Dequantization\n\nNormalizing flows rely on the rule of change of variables, which is naturally defined in continuous space. Applying flows directly on discrete data leads to undesired density models where arbitrarly high likelihood are placed on a few, particular values. See the illustration below: \n\n<center><img src=\"dequantization_issue.svg\" width=\"40%\"/></center>\n\nThe black points represent the discrete points, and the green volume the density modeled by a normalizing flow in continuous space. The flow would continue to increase the likelihood for $x=0,1,2,3$ while having no volume on any other point. Remember that in continuous space, we have the constraint that the overall volume of the probability density must be 1 ($\\int p(x)dx=1$). Otherwise, we don't model a probability distribution anymore. However, the discrete points $x=0,1,2,3$ represent delta peaks with no width in continuous space. This is why the flow can place an infinite high likelihood on these few points while still representing a distribution in continuous space. Nonetheless, the learned density does not tell us anything about the distribution among the discrete points, as in discrete space, the likelihoods of those four points would have to sum to 1, not to infinity. \n\nTo prevent such degenerated solutions, a common solution is to add a small amount of noise to each discrete value, which is also referred to as dequantization. Considering $x$ as an integer (as it is the case for images), the dequantized representation $v$ can be formulated as $v=x+u$ where $u\\in[0,1)^D$. Thus, the discrete value $1$ is modeled by a distribution over the interval $[1.0, 2.0)$, the value $2$ by an volume over $[2.0, 3.0)$, etc. Our objective of modeling $p(x)$ becomes:\n\n$$ p(x) = \\int p(x+u)du = \\int \\frac{q(u|x)}{q(u|x)}p(x+u)du = \\mathbb{E}_{u\\sim q(u|x)}\\left[\\frac{p(x+u)}{q(u|x)} \\right]$$\n\nwith $q(u|x)$ being the noise distribution. For now, we assume it to be uniform, which can also be written as $p(x)=\\mathbb{E}_{u\\sim U(0,1)^D}\\left[p(x+u) \\right]$.\n\nIn the following, we will implement Dequantization as a flow transformation itself. After adding noise to the discrete values, we additionally transform the volume into a Gaussian-like shape. This is done by scaling $x+u$ between $0$ and $1$, and applying the invert of the sigmoid function $\\sigma(z)^{-1} = \\log z - \\log 1-z$. If we would not do this, we would face two problems: \n\n1. The input is scaled between 0 and 256 while the prior distribution is a Gaussian with mean $0$ and standard deviation $1$. In the first iterations after initializing the parameters of the flow, we would have extremely low likelihoods for large values like $256$. This would cause the training to diverge instantaneously.\n2. As the output distribution is a Gaussian, it is beneficial for the flow to have a similarly shaped input distribution. This will reduce the modeling complexity that is required by the flow.\n\nOverall, we can implement dequantization as follows:",
"_____no_output_____"
]
],
[
[
"class Dequantization(nn.Module):\n \n def __init__(self, alpha=1e-5, quants=256):\n \"\"\"\n Inputs:\n alpha - small constant that is used to scale the original input. \n Prevents dealing with values very close to 0 and 1 when inverting the sigmoid\n quants - Number of possible discrete values (usually 256 for 8-bit image)\n \"\"\"\n super().__init__()\n self.alpha = alpha\n self.quants = quants \n \n def forward(self, z, ldj, reverse=False):\n if not reverse:\n z, ldj = self.dequant(z, ldj)\n z, ldj = self.sigmoid(z, ldj, reverse=True)\n else:\n z, ldj = self.sigmoid(z, ldj, reverse=False)\n z = z * self.quants\n ldj += np.log(self.quants) * np.prod(z.shape[1:])\n z = torch.floor(z).clamp(min=0, max=self.quants-1).to(torch.int32)\n return z, ldj\n \n def sigmoid(self, z, ldj, reverse=False):\n # Applies an invertible sigmoid transformation\n if not reverse:\n ldj += (-z-2*F.softplus(-z)).sum(dim=[1,2,3])\n z = torch.sigmoid(z)\n else:\n z = z * (1 - self.alpha) + 0.5 * self.alpha # Scale to prevent boundaries 0 and 1\n ldj += np.log(1 - self.alpha) * np.prod(z.shape[1:])\n ldj += (-torch.log(z) - torch.log(1-z)).sum(dim=[1,2,3])\n z = torch.log(z) - torch.log(1-z)\n return z, ldj\n \n def dequant(self, z, ldj):\n # Transform discrete values to continuous volumes\n z = z.to(torch.float32)\n z = z + torch.rand_like(z).detach()\n z = z / self.quants\n ldj -= np.log(self.quants) * np.prod(z.shape[1:])\n return z, ldj",
"_____no_output_____"
]
],
[
[
"A good check whether a flow is correctly implemented or not, is to verify that it is invertible. Hence, we will dequantize a randomly chosen training image, and then quantize it again. We would expect that we would get the exact same image out:",
"_____no_output_____"
]
],
[
[
"## Testing invertibility of dequantization layer\npl.seed_everything(42)\norig_img = train_set[0][0].unsqueeze(dim=0)\nldj = torch.zeros(1,)\ndequant_module = Dequantization()\ndeq_img, ldj = dequant_module(orig_img, ldj, reverse=False)\nreconst_img, ldj = dequant_module(deq_img, ldj, reverse=True)\n\nd1, d2 = torch.where(orig_img.squeeze() != reconst_img.squeeze())\nif len(d1) != 0:\n print(\"Dequantization was not invertible.\")\n for i in range(d1.shape[0]):\n print(\"Original value:\", orig_img[0,0,d1[i], d2[i]].item())\n print(\"Reconstructed value:\", reconst_img[0,0,d1[i], d2[i]].item())\nelse:\n print(\"Successfully inverted dequantization\")\n\n# Layer is not strictly invertible due to float precision constraints\n# assert (orig_img == reconst_img).all().item()",
"Dequantization was not invertible.\nOriginal value: 0\nReconstructed value: 1\n"
]
],
[
[
"In contrast to our expectation, the test fails. However, this is no reason to doubt our implementation here as only one single value is not equal to the original. This is caused due to numerical inaccuracies in the sigmoid invert. While the input space to the inverted sigmoid is scaled between 0 and 1, the output space is between $-\\infty$ and $\\infty$. And as we use 32 bits to represent the numbers (in addition to applying logs over and over again), such inaccuries can occur and should not be worrisome. Nevertheless, it is good to be aware of them, and can be improved by using a double tensor (float64). \n\nFinally, we can take our dequantization and actually visualize the distribution it transforms the discrete values into:",
"_____no_output_____"
]
],
[
[
"def visualize_dequantization(quants, prior=None):\n \"\"\"\n Function for visualizing the dequantization values of discrete values in continuous space\n \"\"\"\n # Prior over discrete values. If not given, a uniform is assumed\n if prior is None:\n prior = np.ones(quants, dtype=np.float32) / quants\n prior = prior / prior.sum() * quants # In the following, we assume 1 for each value means uniform distribution\n\n inp = torch.arange(-4, 4, 0.01).view(-1, 1, 1, 1) # Possible continuous values we want to consider\n ldj = torch.zeros(inp.shape[0])\n dequant_module = Dequantization(quants=quants)\n # Invert dequantization on continuous values to find corresponding discrete value\n out, ldj = dequant_module.forward(inp, ldj, reverse=True)\n inp, out, prob = inp.squeeze().numpy(), out.squeeze().numpy(), ldj.exp().numpy()\n prob = prob * prior[out] # Probability scaled by categorical prior\n \n # Plot volumes and continuous distribution\n sns.set_style(\"white\")\n fig = plt.figure(figsize=(6,3))\n x_ticks = []\n for v in np.unique(out):\n indices = np.where(out==v)\n color = to_rgb(f\"C{v}\")\n plt.fill_between(inp[indices], prob[indices], np.zeros(indices[0].shape[0]), color=color+(0.5,), label=str(v))\n plt.plot([inp[indices[0][0]]]*2, [0, prob[indices[0][0]]], color=color)\n plt.plot([inp[indices[0][-1]]]*2, [0, prob[indices[0][-1]]], color=color)\n x_ticks.append(inp[indices[0][0]])\n x_ticks.append(inp.max())\n plt.xticks(x_ticks, [f\"{x:.1f}\" for x in x_ticks])\n plt.plot(inp,prob, color=(0.0,0.0,0.0))\n # Set final plot properties\n plt.ylim(0, prob.max()*1.1)\n plt.xlim(inp.min(), inp.max())\n plt.xlabel(\"z\")\n plt.ylabel(\"Probability\")\n plt.title(f\"Dequantization distribution for {quants} discrete values\")\n plt.legend()\n plt.show()\n plt.close()\n \nvisualize_dequantization(quants=8)",
"_____no_output_____"
]
],
[
[
"The visualized distribution show the sub-volumes that are assigned to the different discrete values. The value $0$ has its volume between $[-\\infty, -1.9)$, the value $1$ is represented by the interval $[-1.9, -1.1)$, etc. The volume for each discrete value has the same probability mass. That's why the volumes close to the center (e.g. 3 and 4) have a smaller area on the z-axis as others ($z$ is being used to denote the output of the whole dequantization flow).\n\nEffectively, the consecutive normalizing flow models discrete images by the following objective:\n\n$$\\log p(x) = \\log \\mathbb{E}_{u\\sim q(u|x)}\\left[\\frac{p(x+u)}{q(u|x)} \\right] \\geq \\mathbb{E}_{u}\\left[\\log \\frac{p(x+u)}{q(u|x)} \\right]$$\n\nAlthough normalizing flows are exact in likelihood, we have a lower bound. Specifically, this is an example of the Jensen inequality because we need to move the log into the expectation so we can use Monte-carlo estimates. In general, this bound is considerably smaller than the ELBO in variational autoencoders. Actually, we can reduce the bound ourselves by estimating the expectation not by one, but by $M$ samples. In other words, we can apply importance sampling which leads to the following inequality:\n\n$$\\log p(x) = \\log \\mathbb{E}_{u\\sim q(u|x)}\\left[\\frac{p(x+u)}{q(u|x)} \\right] \\geq \\mathbb{E}_{u}\\left[\\log \\frac{1}{M} \\sum_{m=1}^{M} \\frac{p(x+u_m)}{q(u_m|x)} \\right] \\geq \\mathbb{E}_{u}\\left[\\log \\frac{p(x+u)}{q(u|x)} \\right]$$\n\nThe importance sampling $\\frac{1}{M} \\sum_{m=1}^{M} \\frac{p(x+u_m)}{q(u_m|x)}$ becomes $\\mathbb{E}_{u\\sim q(u|x)}\\left[\\frac{p(x+u)}{q(u|x)} \\right]$ if $M\\to \\infty$, so that the more samples we use, the tighter the bound is. During testing, we can make use of this property and have it implemented in `test_step` in `ImageFlow`. In theory, we could also use this tighter bound during training. However, related work has shown that this does not necessarily lead to an improvement given the additional computational cost, and it is more efficient to stick with a single estimate [5].",
"_____no_output_____"
],
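[
"The tightening effect of using more samples can be illustrated on a toy example where the true value is known. A small sketch (independent of the flow itself): for $w=\\exp(g)$ with $g\\sim\\mathcal{N}(0,1)$ we have $\\log \\mathbb{E}[w]=0.5$ exactly, and the $M$-sample logsumexp estimate approaches this value from below:\n\n```python\nimport torch\n\ng = torch.randn(100000, 64)\nfor M in [1, 8, 64]:\n    est = torch.logsumexp(g[:, :M], dim=1) - torch.log(torch.tensor(float(M)))\n    print(M, est.mean().item()) # increases toward 0.5 as M grows\n```",
"_____no_output_____"
],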
[
"### Variational Dequantization\n\nDequantization uses a uniform distribution for the noise $u$ which effectively leads to images being represented as hypercubes (cube in high dimensions) with sharp borders. However, modeling such sharp borders is not easy for a flow as it uses smooth transformations to convert it into a Gaussian distribution. \n\nAnother way of looking at it is if we change the prior distribution in the previous visualization. Imagine we have independent Gaussian noise on pixels which is commonly the case for any real-world taken picture. Therefore, the flow would have to model a distribution as above, but with the individual volumes scaled as follows:",
"_____no_output_____"
]
],
[
[
"visualize_dequantization(quants=8, prior=np.array([0.075, 0.2, 0.4, 0.2, 0.075, 0.025, 0.0125, 0.0125]))",
"_____no_output_____"
]
],
[
[
"Transforming such a probability into a Gaussian is a difficult task, especially with such hard borders. Dequantization has therefore been extended to more sophisticated, learnable distributions beyond uniform in a variational framework. In particular, if we remember the learning objective $\\log p(x) = \\log \\mathbb{E}_{u}\\left[\\frac{p(x+u)}{q(u|x)} \\right]$, the uniform distribution can be replaced by a learned distribution $q_{\\theta}(u|x)$ with support over $u\\in[0,1)^D$. This approach is called Variational Dequantization and has been proposed by Ho et al. [3]. How can we learn such a distribution? We can use a second normalizing flow that takes $x$ as external input and learns a flexible distribution over $u$. To ensure a support over $[0,1)^D$, we can apply a sigmoid activation function as final flow transformation. \n\nInheriting the original dequantization class, we can implement variational dequantization as follows: ",
"_____no_output_____"
]
],
[
[
"class VariationalDequantization(Dequantization):\n \n def __init__(self, var_flows, alpha=1e-5):\n \"\"\"\n Inputs: \n var_flows - A list of flow transformations to use for modeling q(u|x)\n alpha - Small constant, see Dequantization for details\n \"\"\"\n super().__init__(alpha=alpha)\n self.flows = nn.ModuleList(var_flows)\n \n def dequant(self, z, ldj):\n z = z.to(torch.float32)\n img = (z / 255.0) * 2 - 1 # We condition the flows on x, i.e. the original image\n \n # Prior of u is a uniform distribution as before\n # As most flow transformations are defined on [-infinity,+infinity], we apply an inverse sigmoid first.\n deq_noise = torch.rand_like(z).detach()\n deq_noise, ldj = self.sigmoid(deq_noise, ldj, reverse=True)\n for flow in self.flows:\n deq_noise, ldj = flow(deq_noise, ldj, reverse=False, orig_img=img)\n deq_noise, ldj = self.sigmoid(deq_noise, ldj, reverse=False)\n \n # After the flows, apply u as in standard dequantization\n z = (z + deq_noise) / 256.0\n ldj -= np.log(256.0) * np.prod(z.shape[1:])\n return z, ldj",
"_____no_output_____"
]
],
[
[
"Variational dequantization can be used as a substitute for dequantization. We will compare dequantization and variational dequantization in later experiments. ",
"_____no_output_____"
],
[
"### Coupling layers\n\nNext, we look at possible transformations to apply inside the flow. A recent popular flow layer, which works well in combination with deep neural networks, is the coupling layer introduced by Dinh et al. [1]. The input $z$ is arbitrarily split into two parts, $z_{1:j}$ and $z_{j+1:d}$, of which the first remains unchanged by the flow. Yet, $z_{1:j}$ is used to parameterize the transformation for the second part, $z_{j+1:d}$. Various transformations have been proposed in recent time [3,4], but here we will settle for the simplest and most efficient one: affine coupling. In this coupling layer, we apply an affine transformation by shifting the input by a bias $\\mu$ and scale it by $\\sigma$. In other words, our transformation looks as follows:\n\n$$z'_{j+1:d} = \\mu_{\\theta}(z_{1:j}) + \\sigma_{\\theta}(z_{1:j}) \\odot z_{j+1:d}$$\n\nThe functions $\\mu$ and $\\sigma$ are implemented as a shared neural network, and the sum and multiplication are performed element-wise. The LDJ is thereby the sum of the logs of the scaling factors: $\\sum_i \\left[\\log \\sigma_{\\theta}(z_{1:j})\\right]_i$. Inverting the layer can as simply be done as subtracting the bias and dividing by the scale: \n\n$$z_{j+1:d} = \\left(z'_{j+1:d} - \\mu_{\\theta}(z_{1:j})\\right) / \\sigma_{\\theta}(z_{1:j})$$\n\nWe can also visualize the coupling layer in form of a computation graph, where $z_1$ represents $z_{1:j}$, and $z_2$ represents $z_{j+1:d}$:\n\n<center width=\"100%\" style=\"padding: 10px\"><img src=\"coupling_flow.svg\" width=\"450px\"></center>\n\nIn our implementation, we will realize the splitting of variables as masking. The variables to be transformed, $z_{j+1:d}$, are masked when passing $z$ to the shared network to predict the transformation parameters. When applying the transformation, we mask the parameters for $z_{1:j}$ so that we have an identity operation for those variables:",
"_____no_output_____"
]
],
[
[
"class CouplingLayer(nn.Module):\n \n def __init__(self, network, mask, c_in):\n \"\"\"\n Coupling layer inside a normalizing flow.\n Inputs:\n network - A PyTorch nn.Module constituting the deep neural network for mu and sigma.\n Output shape should be twice the channel size as the input.\n mask - Binary mask (0 or 1) where 0 denotes that the element should be transformed,\n while 1 means the latent will be used as input to the NN.\n c_in - Number of input channels\n \"\"\"\n super().__init__()\n self.network = network\n self.scaling_factor = nn.Parameter(torch.zeros(c_in))\n # Register mask as buffer as it is a tensor which is not a parameter, \n # but should be part of the modules state.\n self.register_buffer('mask', mask)\n \n def forward(self, z, ldj, reverse=False, orig_img=None):\n \"\"\"\n Inputs:\n z - Latent input to the flow\n ldj - The current ldj of the previous flows. \n The ldj of this layer will be added to this tensor.\n reverse - If True, we apply the inverse of the layer.\n orig_img (optional) - Only needed in VarDeq. Allows external\n input to condition the flow on (e.g. original image)\n \"\"\"\n # Apply network to masked input\n z_in = z * self.mask\n if orig_img is None:\n nn_out = self.network(z_in)\n else:\n nn_out = self.network(torch.cat([z_in, orig_img], dim=1))\n s, t = nn_out.chunk(2, dim=1)\n \n # Stabilize scaling output\n s_fac = self.scaling_factor.exp().view(1, -1, 1, 1)\n s = torch.tanh(s / s_fac) * s_fac\n \n # Mask outputs (only transform the second part)\n s = s * (1 - self.mask)\n t = t * (1 - self.mask)\n \n # Affine transformation\n if not reverse:\n # Whether we first shift and then scale, or the other way round,\n # is a design choice, and usually does not have a big impact\n z = (z + t) * torch.exp(s)\n ldj += s.sum(dim=[1,2,3])\n else:\n z = (z * torch.exp(-s)) - t\n ldj -= s.sum(dim=[1,2,3])\n \n return z, ldj",
"_____no_output_____"
]
],
[
[
"For stabilization purposes, we apply a $\\tanh$ activation function on the scaling output. This prevents sudden large output values for the scaling that can destabilize training. To still allow scaling factors smaller or larger than -1 and 1 respectively, we have a learnable parameter per dimension, called `scaling_factor`. This scales the tanh to different limits. Below, we visualize the effect of the scaling factor on the output activation of the scaling terms:",
"_____no_output_____"
]
],
[
[
"with torch.no_grad():\n x = torch.arange(-5,5,0.01)\n scaling_factors = [0.5, 1, 2]\n sns.set()\n fig, ax = plt.subplots(1, 3, figsize=(12,3))\n for i, scale in enumerate(scaling_factors):\n y = torch.tanh(x / scale) * scale\n ax[i].plot(x.numpy(), y.numpy())\n ax[i].set_title(\"Scaling factor: \" + str(scale))\n ax[i].set_ylim(-3, 3)\n plt.subplots_adjust(wspace=0.4)\n sns.reset_orig()\n plt.show()",
"_____no_output_____"
]
],
[
[
"Coupling layers generalize to any masking technique we could think of. However, the most common approach for images is to split the input $z$ in half, using a checkerboard mask or channel mask. A checkerboard mask splits the variables across the height and width dimensions and assigns each other pixel to $z_{j+1:d}$. Thereby, the mask is shared across channels. In contrast, the channel mask assigns half of the channels to $z_{j+1:d}$, and the other half to $z_{1:j+1}$. Note that when we apply multiple coupling layers, we invert the masking for each other layer so that each variable is transformed a similar amount of times. \n\nLet's implement a function that creates a checkerboard mask and a channel mask for us:",
"_____no_output_____"
]
],
[
[
"def create_checkerboard_mask(h, w, invert=False):\n x, y = torch.arange(h, dtype=torch.int32), torch.arange(w, dtype=torch.int32)\n xx, yy = torch.meshgrid(x, y)\n mask = torch.fmod(xx + yy, 2)\n mask = mask.to(torch.float32).view(1, 1, h, w)\n if invert:\n mask = 1 - mask\n return mask\n\ndef create_channel_mask(c_in, invert=False):\n mask = torch.cat([torch.ones(c_in//2, dtype=torch.float32), \n torch.zeros(c_in-c_in//2, dtype=torch.float32)])\n mask = mask.view(1, c_in, 1, 1)\n if invert:\n mask = 1 - mask\n return mask",
"_____no_output_____"
]
],
[
[
"We can also visualize the corresponding masks for an image of size $8\\times 8\\times 2$ (2 channels):",
"_____no_output_____"
]
],
[
[
"checkerboard_mask = create_checkerboard_mask(h=8, w=8).expand(-1,2,-1,-1)\nchannel_mask = create_channel_mask(c_in=2).expand(-1,-1,8,8)\n\nshow_imgs(checkerboard_mask.transpose(0,1), \"Checkerboard mask\")\nshow_imgs(channel_mask.transpose(0,1), \"Channel mask\")",
"_____no_output_____"
]
],
[
[
"As a last aspect of coupling layers, we need to decide for the deep neural network we want to apply in the coupling layers. The input to the layers is an image, and hence we stick with a CNN. Because the input to a transformation depends on all transformations before, it is crucial to ensure a good gradient flow through the CNN back to the input, which can be optimally achieved by a ResNet-like architecture. Specifically, we use a Gated ResNet that adds a $\\sigma$-gate to the skip connection, similarly to the input gate in LSTMs. The details are not necessarily important here, and the network is strongly inspired from Flow++ [3] in case you are interested in building even stronger models.",
"_____no_output_____"
]
],
[
[
"class ConcatELU(nn.Module):\n \"\"\"\n Activation function that applies ELU in both direction (inverted and plain). \n Allows non-linearity while providing strong gradients for any input (important for final convolution)\n \"\"\"\n \n def forward(self, x):\n return torch.cat([F.elu(x), F.elu(-x)], dim=1)\n\n \nclass LayerNormChannels(nn.Module):\n \n def __init__(self, c_in):\n \"\"\"\n This module applies layer norm across channels in an image. Has been shown to work well with ResNet connections.\n Inputs: \n c_in - Number of channels of the input\n \"\"\"\n super().__init__()\n self.layer_norm = nn.LayerNorm(c_in)\n \n def forward(self, x):\n x = x.permute(0, 2, 3, 1)\n x = self.layer_norm(x)\n x = x.permute(0, 3, 1, 2)\n return x\n\n \nclass GatedConv(nn.Module):\n \n def __init__(self, c_in, c_hidden):\n \"\"\"\n This module applies a two-layer convolutional ResNet block with input gate\n Inputs:\n c_in - Number of channels of the input\n c_hidden - Number of hidden dimensions we want to model (usually similar to c_in)\n \"\"\"\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(c_in, c_hidden, kernel_size=3, padding=1),\n ConcatELU(),\n nn.Conv2d(2*c_hidden, 2*c_in, kernel_size=1)\n )\n \n def forward(self, x):\n out = self.net(x)\n val, gate = out.chunk(2, dim=1)\n return x + val * torch.sigmoid(gate)\n\n \nclass GatedConvNet(nn.Module):\n \n def __init__(self, c_in, c_hidden=32, c_out=-1, num_layers=3):\n \"\"\"\n Module that summarizes the previous blocks to a full convolutional neural network.\n Inputs:\n c_in - Number of input channels\n c_hidden - Number of hidden dimensions to use within the network\n c_out - Number of output channels. If -1, 2 times the input channels are used (affine coupling)\n num_layers - Number of gated ResNet blocks to apply\n \"\"\"\n super().__init__()\n c_out = c_out if c_out > 0 else 2 * c_in\n layers = []\n layers += [nn.Conv2d(c_in, c_hidden, kernel_size=3, padding=1)]\n for layer_index in range(num_layers):\n layers += [GatedConv(c_hidden, c_hidden),\n LayerNormChannels(c_hidden)]\n layers += [ConcatELU(),\n nn.Conv2d(2*c_hidden, c_out, kernel_size=3, padding=1)]\n self.nn = nn.Sequential(*layers)\n \n self.nn[-1].weight.data.zero_()\n self.nn[-1].bias.data.zero_()\n \n def forward(self, x):\n return self.nn(x)",
"_____no_output_____"
]
],
[
[
"### Training loop\n\nFinally, we can add Dequantization, Variational Dequantization and Coupling Layers together to build our full normalizing flow on MNIST images. We apply 8 coupling layers in the main flow, and 4 for variational dequantization if applied. We apply a checkerboard mask throughout the network as with a single channel (black-white images), we cannot apply channel mask. The overall architecture is visualized below.\n\n\n<center width=\"100%\" style=\"padding: 20px\"><img src=\"vanilla_flow.svg\" width=\"900px\"></center>",
"_____no_output_____"
]
],
[
[
"def create_simple_flow(use_vardeq=True):\n flow_layers = []\n if use_vardeq:\n vardeq_layers = [CouplingLayer(network=GatedConvNet(c_in=2, c_out=2, c_hidden=16),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),\n c_in=1) for i in range(4)]\n flow_layers += [VariationalDequantization(var_flows=vardeq_layers)]\n else:\n flow_layers += [Dequantization()]\n \n for i in range(8):\n flow_layers += [CouplingLayer(network=GatedConvNet(c_in=1, c_hidden=32),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),\n c_in=1)]\n \n flow_model = ImageFlow(flow_layers).to(device)\n return flow_model",
"_____no_output_____"
]
],
[
[
"For implementing the training loop, we use the framework of PyTorch Lightning and reduce the code overhead. If interested, you can take a look at the generated tensorboard file, in particularly the graph to see an overview of flow transformations that are applied. Note that we again provide pre-trained models (see later on in the notebook) as normalizing flows are particularly expensive to train. We have also run validation and testing as this can take some time as well with the added importance sampling.",
"_____no_output_____"
]
],
[
[
"def train_flow(flow, model_name=\"MNISTFlow\"):\n # Create a PyTorch Lightning trainer\n trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, model_name), \n gpus=1 if torch.cuda.is_available() else 0, \n max_epochs=200, \n gradient_clip_val=1.0,\n callbacks=[ModelCheckpoint(save_weights_only=True, mode=\"min\", monitor=\"val_bpd\"),\n LearningRateMonitor(\"epoch\")])\n trainer.logger._log_graph = True\n trainer.logger._default_hp_metric = None # Optional logging argument that we don't need\n \n train_data_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=8)\n result = None\n \n # Check whether pretrained model exists. If yes, load it and skip training\n pretrained_filename = os.path.join(CHECKPOINT_PATH, model_name + \".ckpt\")\n if os.path.isfile(pretrained_filename):\n print(\"Found pretrained model, loading...\")\n ckpt = torch.load(pretrained_filename)\n flow.load_state_dict(ckpt['state_dict'])\n result = ckpt.get(\"result\", None)\n else:\n print(\"Start training\", model_name)\n trainer.fit(flow, train_data_loader, val_loader)\n \n # Test best model on validation and test set if no result has been found\n # Testing can be expensive due to the importance sampling.\n if result is None:\n val_result = trainer.test(flow, val_loader, verbose=False)\n start_time = time.time()\n test_result = trainer.test(flow, test_loader, verbose=False)\n duration = time.time() - start_time\n result = {\"test\": test_result, \"val\": val_result, \"time\": duration / len(test_loader) / flow.import_samples}\n \n return flow, result",
"_____no_output_____"
]
],
[
[
"## Multi-scale architecture\n\nOne disadvantage of normalizing flows is that they operate on the exact same dimensions as the input. If the input is high-dimensional, so is the latent space, which requires larger computational cost to learn suitable transformations. However, particularly in the image domain, many pixels contain less information in the sense that we could remove them without loosing the semantical information of the image. \n\nBased on this intuition, deep normalizing flows on images commonly apply a multi-scale architecture [1]. After the first $N$ flow transformations, we split off half of the latent dimensions and directly evaluate them on the prior. The other half is run through $N$ more flow transformations, and depending on the size of the input, we split it again in half or stop overall at this position. The two operations involved in this setup is `Squeeze` and `Split` which we will review more closely and implement below. ",
"_____no_output_____"
],
[
"### Squeeze and Split\n\nWhen we want to remove half of the pixels in an image, we have the problem of deciding which variables to cut, and how to rearrange the image. Thus, the squeezing operation is commonly used before split, which divides the image into subsquares of shape $2\\times 2\\times C$, and reshapes them into $1\\times 1\\times 4C$ blocks. Effectively, we reduce the height and width of the image by a factor of 2 while scaling the number of channels by 4. Afterwards, we can perform the split operation over channels without the need of rearranging the pixels. The smaller scale also makes the overall architecture more efficient. Visually, the squeeze operation should transform the input as follows:\n\n<center><img src=\"Squeeze_operation.svg\" width=\"40%\"/></center>\n\nThe input of $4\\times 4\\times 1$ is scaled to $2\\times 2\\times 4$ following the idea of grouping the pixels in $2\\times 2\\times 1$ subsquares. Next, let's try to implement this layer:",
"_____no_output_____"
]
],
[
[
"class SqueezeFlow(nn.Module):\n \n def forward(self, z, ldj, reverse=False):\n B, C, H, W = z.shape\n if not reverse: \n # Forward direction: H x W x C => H/2 x W/2 x 4C\n z = z.reshape(B, C, H//2, 2, W//2, 2)\n z = z.permute(0, 1, 3, 5, 2, 4)\n z = z.reshape(B, 4*C, H//2, W//2)\n else: \n # Reverse direction: H/2 x W/2 x 4C => H x W x C\n z = z.reshape(B, C//4, 2, 2, H, W)\n z = z.permute(0, 1, 4, 2, 5, 3)\n z = z.reshape(B, C//4, H*2, W*2)\n return z, ldj",
"_____no_output_____"
]
],
[
[
"Before moving on, we can verify our implementation by comparing our output with the example figure above:",
"_____no_output_____"
]
],
[
[
"sq_flow = SqueezeFlow()\nrand_img = torch.arange(1,17).view(1, 1, 4, 4)\nprint(\"Image (before)\\n\", rand_img)\nforward_img, _ = sq_flow(rand_img, ldj=None, reverse=False)\nprint(\"\\nImage (forward)\\n\", forward_img.permute(0,2,3,1)) # Permute for readability\nreconst_img, _ = sq_flow(forward_img, ldj=None, reverse=True)\nprint(\"\\nImage (reverse)\\n\", reconst_img)",
"Image (before)\n tensor([[[[ 1, 2, 3, 4],\n [ 5, 6, 7, 8],\n [ 9, 10, 11, 12],\n [13, 14, 15, 16]]]])\n\nImage (forward)\n tensor([[[[ 1, 2, 5, 6],\n [ 3, 4, 7, 8]],\n\n [[ 9, 10, 13, 14],\n [11, 12, 15, 16]]]])\n\nImage (reverse)\n tensor([[[[ 1, 2, 3, 4],\n [ 5, 6, 7, 8],\n [ 9, 10, 11, 12],\n [13, 14, 15, 16]]]])\n"
]
],
[
[
"The split operation divides the input into two parts, and evaluates one part directly on the prior. So that our flow operation fits to the implementation of the previous layers, we will return the prior probability of the first part as the log determinant jacobian of the layer. It has the same effect as if we would combine all variable splits at the end of the flow, and evaluate them together on the prior. ",
"_____no_output_____"
]
],
[
[
"class SplitFlow(nn.Module):\n \n def __init__(self):\n super().__init__()\n self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)\n \n def forward(self, z, ldj, reverse=False):\n if not reverse:\n z, z_split = z.chunk(2, dim=1)\n ldj += self.prior.log_prob(z_split).sum(dim=[1,2,3])\n else:\n z_split = self.prior.sample(sample_shape=z.shape).to(device)\n z = torch.cat([z, z_split], dim=1)\n ldj -= self.prior.log_prob(z_split).sum(dim=[1,2,3])\n return z, ldj",
"_____no_output_____"
]
],
[
[
"### Building a multi-scale flow\n\nAfter defining the squeeze and split operation, we are finally able to build our own multi-scale flow. Deep normalizing flows such as Glow and Flow++ [2,3] often apply a split operation directly after squeezing. However, with shallow flows, we need to be more thoughtful about where to place the split operation as we need at least a minimum amount of transformations on each variable. Our setup is inspired by the original RealNVP architecture [1] which is shallower than other, more recent state-of-the-art architectures. \n\nHence, for the MNIST dataset, we will apply the first squeeze operation after two coupling layers, but don't apply a split operation yet. Because we have only used two coupling layers and each the variable has been only transformed once, a split operation would be too early. We apply two more coupling layers before finally applying a split flow and squeeze again. The last four coupling layers operate on a scale of $7\\times 7\\times 8$. The full flow architecture is shown below.\n\n<center width=\"100%\" style=\"padding: 20px\"><img src=\"multiscale_flow.svg\" width=\"1100px\"></center>\n\nNote that while the feature maps inside the coupling layers reduce with the height and width of the input, the increased number of channels is not directly considered. To counteract this, we increase the hidden dimensions for the coupling layers on the squeezed input. The dimensions are often scaled by 2 as this approximately increases the computation cost by 4 canceling with the squeezing operation. However, we will choose the hidden dimensionalities $32, 48, 64$ for the three scales respectively to keep the number of parameters reasonable and show the efficiency of multi-scale architectures. ",
"_____no_output_____"
]
],
[
[
"def create_multiscale_flow():\n flow_layers = []\n \n vardeq_layers = [CouplingLayer(network=GatedConvNet(c_in=2, c_out=2, c_hidden=16),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),\n c_in=1) for i in range(4)]\n flow_layers += [VariationalDequantization(vardeq_layers)]\n \n flow_layers += [CouplingLayer(network=GatedConvNet(c_in=1, c_hidden=32),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),\n c_in=1) for i in range(2)]\n flow_layers += [SqueezeFlow()]\n for i in range(2):\n flow_layers += [CouplingLayer(network=GatedConvNet(c_in=4, c_hidden=48),\n mask=create_channel_mask(c_in=4, invert=(i%2==1)),\n c_in=4)]\n flow_layers += [SplitFlow(),\n SqueezeFlow()]\n for i in range(4):\n flow_layers += [CouplingLayer(network=GatedConvNet(c_in=8, c_hidden=64),\n mask=create_channel_mask(c_in=8, invert=(i%2==1)),\n c_in=8)]\n\n flow_model = ImageFlow(flow_layers).to(device)\n return flow_model",
"_____no_output_____"
]
],
[
[
"We can show the difference in number of parameters below:",
"_____no_output_____"
]
],
[
[
"def print_num_params(model):\n num_params = sum([np.prod(p.shape) for p in model.parameters()])\n print(\"Number of parameters: {:,}\".format(num_params))\n\nprint_num_params(create_simple_flow(use_vardeq=False))\nprint_num_params(create_simple_flow(use_vardeq=True))\nprint_num_params(create_multiscale_flow())",
"Number of parameters: 335,128\nNumber of parameters: 379,556\nNumber of parameters: 1,062,090\n"
]
],
[
[
"Although the multi-scale flow has almost 3 times the parameters of the single scale flow, it is not necessarily more computationally expensive than its counterpart. We will compare the runtime in the following experiments as well. ",
"_____no_output_____"
],
[
"## Analysing the flows\n\nIn the last part of the notebook, we will train all the models we have implemented above, and try to analyze the effect of the multi-scale architecture and variational dequantization.\n\n### Training flow variants\n\nBefore we can analyse the flow models, we need to train them first. We provide pre-trained models that contain the validation and test performance, and run-time information. As flow models are computationally expensive, we advice you to rely on those pretrained models for a first run through the notebook.",
"_____no_output_____"
]
],
[
[
"flow_dict = {\"simple\": {}, \"vardeq\": {}, \"multiscale\": {}}\nflow_dict[\"simple\"][\"model\"], flow_dict[\"simple\"][\"result\"] = train_flow(create_simple_flow(use_vardeq=False), model_name=\"MNISTFlow_simple\")\nflow_dict[\"vardeq\"][\"model\"], flow_dict[\"vardeq\"][\"result\"] = train_flow(create_simple_flow(use_vardeq=True), model_name=\"MNISTFlow_vardeq\")\nflow_dict[\"multiscale\"][\"model\"], flow_dict[\"multiscale\"][\"result\"] = train_flow(create_multiscale_flow(), model_name=\"MNISTFlow_multiscale\")",
"GPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\nGPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\nGPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nLOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n"
]
],
[
[
"### Density modeling and sampling\n\nFirstly, we can compare the models on their quantitative results. The following table shows all important statistics. The inference time specifies the time needed to determine the probability for a batch of 64 images for each model, and the sampling time the duration it took to sample a batch of 64 images.",
"_____no_output_____"
]
],
[
[
"%%html\n<!-- Some HTML code to increase font size in the following table -->\n<style>\nth {font-size: 120%;}\ntd {font-size: 120%;}\n</style>",
"_____no_output_____"
],
[
"import tabulate\nfrom IPython.display import display, HTML\n\ntable = [[key, \n \"%4.3f bpd\" % flow_dict[key][\"result\"][\"val\"][0][\"test_bpd\"], \n \"%4.3f bpd\" % flow_dict[key][\"result\"][\"test\"][0][\"test_bpd\"], \n \"%2.0f ms\" % (1000 * flow_dict[key][\"result\"][\"time\"]),\n \"%2.0f ms\" % (1000 * flow_dict[key][\"result\"].get(\"samp_time\", 0)),\n \"{:,}\".format(sum([np.prod(p.shape) for p in flow_dict[key][\"model\"].parameters()]))] \n for key in flow_dict]\ndisplay(HTML(tabulate.tabulate(table, tablefmt='html', headers=[\"Model\", \"Validation Bpd\", \"Test Bpd\", \"Inference time\", \"Sampling time\", \"Num Parameters\"])))",
"_____no_output_____"
]
],
[
[
"As we have intially expected, using variational dequantization improves upon standard dequantization in terms of bits per dimension. Although the difference with 0.04bpd doesn't seem impressive first, it is a considerably step for generative models (most state-of-the-art models improve upon previous models in a range of 0.02-0.1bpd on CIFAR with three times as high bpd). While it takes longer to evaluate the probability of an image due to the variational dequantization, which also leads to a longer training time, it does not have an effect on the sampling time. This is because inverting variational dequantization is the same as dequantization: finding the next lower integer.\n\nWhen we compare the two models to multi-scale architecture, we can see that the bits per dimension score again dropped by about 0.04bpd. Additionally, the inference time and sampling time improved notably despite having more parameters. Thus, we see that the multi-scale flow is not only stronger for density modeling, but also more efficient. \n\nNext, we can test the sampling quality of the models. We should note that the samples for variational dequantization and standard dequantization are very similar, and hence we visualize here only the ones for variational dequantization and the multi-scale model. However, feel free to also test out the `\"simple\"` model. The seeds are set to obtain reproducable generations and are not cherry picked.",
"_____no_output_____"
]
],
[
[
"pl.seed_everything(44)\nsamples = flow_dict[\"vardeq\"][\"model\"].sample(img_shape=[16,1,28,28])\nshow_imgs(samples.cpu())",
"_____no_output_____"
],
[
"pl.seed_everything(44)\nsamples = flow_dict[\"multiscale\"][\"model\"].sample(img_shape=[16,8,7,7])\nshow_imgs(samples.cpu())",
"/home/phillip/anaconda3/envs/nlp1/lib/python3.7/site-packages/ipykernel_launcher.py:51: UserWarning: Mixed memory format inputs detected while calling the operator. The operator will output contiguous tensor even if some of the inputs are in channels_last format. (Triggered internally at /pytorch/aten/src/ATen/native/TensorIterator.cpp:918.)\n"
]
],
[
[
"From the few samples, we can see a clear difference between the simple and the multi-scale model. The single-scale model has only learned local, small correlations while the multi-scale model was able to learn full, global relations that form digits. This show-cases another benefit of the multi-scale model. In contrast to VAEs, the outputs are sharp as normalizing flows can naturally model complex, multi-modal distributions while VAEs have the independent decoder output noise. Nevertheless, the samples from this flow are far from perfect as not all samples show true digits. ",
"_____no_output_____"
],
[
"### Interpolation in latent space\n\nAnother popular test for the smoothness of the latent space of generative models is to interpolate between two training examples. As normalizing flows are strictly invertible, we can guarantee that any image is represented in the latent space. We again compare the variational dequantization model with the multi-scale model below.",
"_____no_output_____"
]
],
[
[
"@torch.no_grad()\ndef interpolate(model, img1, img2, num_steps=8):\n \"\"\"\n Inputs:\n model - object of ImageFlow class that represents the (trained) flow model\n img1, img2 - Image tensors of shape [1, 28, 28]. Images between which should be interpolated.\n num_steps - Number of interpolation steps. 8 interpolation steps mean 6 intermediate pictures besides img1 and img2\n \"\"\"\n imgs = torch.stack([img1, img2], dim=0).to(model.device)\n z, _ = model.encode(imgs)\n alpha = torch.linspace(0, 1, steps=num_steps, device=z.device).view(-1, 1, 1, 1)\n interpolations = z[0:1] * alpha + z[1:2] * (1 - alpha)\n interp_imgs = model.sample(interpolations.shape[:1] + imgs.shape[1:], z_init=interpolations)\n show_imgs(interp_imgs, row_size=8)\n\nexmp_imgs, _ = next(iter(train_loader))",
"_____no_output_____"
],
[
"pl.seed_everything(42)\nfor i in range(2):\n interpolate(flow_dict[\"vardeq\"][\"model\"], exmp_imgs[2*i], exmp_imgs[2*i+1])",
"_____no_output_____"
],
[
"pl.seed_everything(42)\nfor i in range(2):\n interpolate(flow_dict[\"multiscale\"][\"model\"], exmp_imgs[2*i], exmp_imgs[2*i+1])",
"_____no_output_____"
]
],
[
[
"The interpolations of the multi-scale model result in more realistic digits (first row $7\\leftrightarrow 8\\leftrightarrow 6$, second row $9\\leftrightarrow 4\\leftrightarrow 6$), while the variational dequantization model focuses on local patterns that globally do not form a digit. For the multi-scale model, we actually did not do the \"true\" interpolation between the two images as we did not consider the variables that were split along the flow (they have been sampled randomly for all samples). However, as we will see in the next experiment, the early variables do not effect the overall image much.",
"_____no_output_____"
],
[
"### Visualization of latents in different levels of multi-scale\n\nIn the following we will focus more on the multi-scale flow. We want to analyse what information is being stored in the variables split at early layers, and what information for the final variables. For this, we sample 8 images where each of them share the same final latent variables, but differ in the other part of the latent variables. Below we visualize three examples of this:",
"_____no_output_____"
]
],
[
[
"pl.seed_everything(44)\nfor _ in range(3):\n z_init = flow_dict[\"multiscale\"][\"model\"].prior.sample(sample_shape=[1,8,7,7])\n z_init = z_init.expand(8, -1, -1, -1)\n samples = flow_dict[\"multiscale\"][\"model\"].sample(img_shape=z_init.shape, z_init=z_init)\n show_imgs(samples.cpu())",
"_____no_output_____"
]
],
[
[
"We see that the early split variables indeed have a smaller effect on the image. Still, small differences can be spot when we look carefully at the borders of the digits. For instance, the hole at the top of the 8 changes for different samples although all of them represent the same coarse structure. This shows that the flow indeed learns to separate the higher-level information in the final variables, while the early split ones contain local noise patterns.",
"_____no_output_____"
],
[
"### Visualizing Dequantization\n\nAs a final part of this notebook, we will look at the effect of variational dequantization. We have motivated variational dequantization by the issue of sharp edges/boarders being difficult to model, and a flow would rather prefer smooth, prior-like distributions. To check how what noise distribution $q(u|x)$ the flows in the variational dequantization module have learned, we can plot a histogram of output values from the dequantization and variational dequantization module. ",
"_____no_output_____"
]
],
[
[
"def visualize_dequant_distribution(model : ImageFlow, imgs : torch.Tensor, title:str=None):\n \"\"\"\n Inputs:\n model - The flow of which we want to visualize the dequantization distribution\n imgs - Example training images of which we want to visualize the dequantization distribution \n \"\"\"\n imgs = imgs.to(device)\n ldj = torch.zeros(imgs.shape[0], dtype=torch.float32).to(device)\n with torch.no_grad():\n dequant_vals = []\n for _ in tqdm(range(8), leave=False):\n d, _ = model.flows[0](imgs, ldj, reverse=False)\n dequant_vals.append(d)\n dequant_vals = torch.cat(dequant_vals, dim=0)\n dequant_vals = dequant_vals.view(-1).cpu().numpy()\n sns.set()\n plt.figure(figsize=(10,3))\n plt.hist(dequant_vals, bins=256, color=to_rgb(\"C0\")+(0.5,), edgecolor=\"C0\", density=True)\n if title is not None:\n plt.title(title)\n plt.show()\n plt.close()\n \nsample_imgs, _ = next(iter(train_loader))",
"_____no_output_____"
],
[
"visualize_dequant_distribution(flow_dict[\"simple\"][\"model\"], sample_imgs, title=\"Dequantization\")",
"_____no_output_____"
],
[
"visualize_dequant_distribution(flow_dict[\"vardeq\"][\"model\"], sample_imgs, title=\"Variational dequantization\")",
"_____no_output_____"
]
],
[
[
"The dequantization distribution in the first plot shows that the MNIST images have a strong bias towards 0 (black), and the distribution of them have a sharp border as mentioned before. The variational dequantization module has indeed learned a much smoother distribution with a Gaussian-like curve which can be modeled much better. For the other values, we would need to visualize the distribution $q(u|x)$ on a deeper level, depending on $x$. However, as all $u$'s interact and depend on each other, we would need to visualize a distribution in 784 dimensions, which is not that intuitive anymore.",
"_____no_output_____"
],
[
"## Conclusion\n\nIn conclusion, we have seen how to implement our own normalizing flow, and what difficulties arise if we want to apply them on images. Dequantization is a crucial step in mapping the discrete images into continuous space to prevent underisable delta-peak solutions. While dequantization creates hypercubes with hard border, variational dequantization allows us to fit a flow much better on the data. This allows us to obtain a lower bits per dimension score, while not affecting the sampling speed. The most common flow element, the coupling layer, is simple to implement, and yet effective. Furthermore, multi-scale architectures help to capture the global image context while allowing us to efficiently scale up the flow. Normalizing flows are an interesting alternative to VAEs as they allow an exact likelihood estimate in continuous space, and we have the guarantee that every possible input $x$ has a corresponding latent vector $z$. However, even beyond continuous inputs and images, flows can be applied and allow us to exploit the data structure in latent space, as e.g. on graphs for the task of molecule generation [6]. Recent advances in [Neural ODEs](https://arxiv.org/pdf/1806.07366.pdf) allow a flow with infinite number of layers, called Continuous Normalizing Flows, whose potential is yet to fully explore. Overall, normalizing flows are an exciting research area which will continue over the next couple of years.",
"_____no_output_____"
],
[
"## References\n\n[1] Dinh, L., Sohl-Dickstein, J., and Bengio, S. (2017). “Density estimation using Real NVP,” In: 5th International Conference on Learning Representations, ICLR 2017. [Link](https://arxiv.org/abs/1605.08803)\n\n[2] Kingma, D. P., and Dhariwal, P. (2018). “Glow: Generative Flow with Invertible 1x1 Convolutions,” In: Advances in Neural Information Processing Systems, vol. 31, pp. 10215--10224. [Link](http://papers.nips.cc/paper/8224-glow-generative-flow-with-invertible-1x1-convolutions.pdf)\n\n[3] Ho, J., Chen, X., Srinivas, A., Duan, Y., and Abbeel, P. (2019). “Flow++: Improving Flow-Based Generative Models with Variational Dequantization and Architecture Design,” in Proceedings of the 36th International Conference on Machine Learning, vol. 97, pp. 2722–2730. [Link](https://arxiv.org/abs/1902.00275)\n\n[4] Durkan, C., Bekasov, A., Murray, I., and Papamakarios, G. (2019). “Neural Spline Flows,” In: Advances in Neural Information Processing Systems, pp. 7509–7520. [Link](http://papers.neurips.cc/paper/8969-neural-spline-flows.pdf)\n\n[5] Hoogeboom, E., Cohen, T. S., and Tomczak, J. M. (2020). “Learning Discrete Distributions by Dequantization,” arXiv preprint arXiv2001.11235v1. [Link](https://arxiv.org/abs/2001.11235)\n\n[6] Lippe, P., and Gavves, E. (2021). “Categorical Normalizing Flows via Continuous Transformations,” In: International Conference on Learning Representations, ICLR 2021. [Link](https://openreview.net/pdf?id=-GLNZeVDuik)",
"_____no_output_____"
],
[
"---\n\n[](https://github.com/phlippe/uvadlc_notebooks/) If you found this tutorial helpful, consider ⭐-ing our repository. \n[](https://github.com/phlippe/uvadlc_notebooks/issues) For any questions, typos, or bugs that you found, please raise an issue on GitHub. \n\n---",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c52879ab441428f69646eefb64eff203ba205013
| 47,915 |
ipynb
|
Jupyter Notebook
|
tutorials/mantisshrimp_data/2.fasterrcnn_voc.ipynb
|
ramaneswaran/mantisshrimp
|
d30c056f1f9f26a2ce42da73cfb32d591321f426
|
[
"Apache-2.0"
] | null | null | null |
tutorials/mantisshrimp_data/2.fasterrcnn_voc.ipynb
|
ramaneswaran/mantisshrimp
|
d30c056f1f9f26a2ce42da73cfb32d591321f426
|
[
"Apache-2.0"
] | 8 |
2020-06-16T18:06:42.000Z
|
2020-09-15T22:35:56.000Z
|
tutorials/mantisshrimp_data/2.fasterrcnn_voc.ipynb
|
ramaneswaran/mantisshrimp
|
d30c056f1f9f26a2ce42da73cfb32d591321f426
|
[
"Apache-2.0"
] | null | null | null | 50.172775 | 15,450 | 0.637838 |
[
[
[
"<a href=\"https://colab.research.google.com/github/lgvaz/mantisshrimp/blob/master/tutorials/fasterrcnn_voc.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# 2. VOC Parser with FasterRCNN",
"_____no_output_____"
]
],
[
[
"!pip install -q git+git://github.com/fastai/fastai2.git --upgrade\n!pip install -q git+git://github.com/lgvaz/mantisshrimp.git\n!pip install -q -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\n!pip install -q albumentations --upgrade",
"_____no_output_____"
],
[
"from mantisshrimp import *\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.hub.voc import *\nimport albumentations as A",
"_____no_output_____"
],
[
"source = get_voc_data()",
"_____no_output_____"
],
[
"parser = VOCAnnotationParser(\n annotations_dir=source / \"Annotations\",\n images_dir=source / \"JPEGImages\",\n categories=VOC_CATEGORIES,\n)",
"_____no_output_____"
],
[
"splitter = RandomSplitter([0.8, 0.2])\ntrain_records, valid_records = parser.parse(splitter)",
"_____no_output_____"
],
[
"train_transforms = AlbuTransform([A.HorizontalFlip()])",
"_____no_output_____"
],
[
"train_ds = Dataset(train_records, train_transforms)\nvalid_ds = Dataset(valid_records)",
"_____no_output_____"
],
[
"model = MantisFasterRCNN(num_classes=len(VOC_CATEGORIES)+1)",
"_____no_output_____"
],
[
"def get_dataloader(model, dataset, shuffle=False):\n return model.dataloader(dataset, batch_size=2, num_workers=2, shuffle=shuffle)\n\ntrain_dl = get_dataloader(model, train_ds, shuffle=True)\nvalid_dl = get_dataloader(model, valid_ds)",
"_____no_output_____"
],
[
"metrics = [COCOMetric(valid_records, bbox=True, mask=False)]",
"_____no_output_____"
],
[
"from mantisshrimp.engines.fastai import *",
"_____no_output_____"
],
[
"learn = rcnn_learner(dls=[train_dl, valid_dl], model=model, metrics=metrics)",
"/usr/local/lib/python3.6/dist-packages/fastai2/callback/core.py:29: UserWarning: You are setting an attribute (loss) that also exists in the learner. Please be advised that you're not setting it in the learner but in the callback. Use `self.learn.loss` if you would like to change it in the learner.\n warn(f\"You are setting an attribute ({name}) that also exists in the learner. Please be advised that you're not setting it in the learner but in the callback. Use `self.learn.{name}` if you would like to change it in the learner.\")\n"
],
[
"learn.lr_find(show_plot=False)\nlearn.recorder.plot_lr_find(skip_end=1)",
"_____no_output_____"
],
[
"learn.fine_tune(3, 5e-4)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52879c8ac331ea3481f2505aff60c92687f34d3
| 6,362 |
ipynb
|
Jupyter Notebook
|
notebooks/figures/discrete_obs/convert_jkm_to_csv.ipynb
|
marbl-ecosys/cesm2-marbl-book
|
daf50f593b6c21a4e065e4317f158bfffb81725c
|
[
"Apache-2.0"
] | null | null | null |
notebooks/figures/discrete_obs/convert_jkm_to_csv.ipynb
|
marbl-ecosys/cesm2-marbl-book
|
daf50f593b6c21a4e065e4317f158bfffb81725c
|
[
"Apache-2.0"
] | 4 |
2021-06-10T15:22:33.000Z
|
2021-06-21T19:29:03.000Z
|
notebooks/figures/discrete_obs/convert_jkm_to_csv.ipynb
|
marbl-ecosys/cesm2-marbl-book
|
daf50f593b6c21a4e065e4317f158bfffb81725c
|
[
"Apache-2.0"
] | 1 |
2021-05-18T18:41:57.000Z
|
2021-05-18T18:41:57.000Z
| 28.657658 | 115 | 0.38227 |
[
[
[
"import pandas as pd\nimport numpy as np\n\nfrom datetime import datetime, timezone",
"_____no_output_____"
],
[
"filename = 'master_Fe_gx3v7.txt'\n\ncolumns = ['month', 'lon', 'lat', 'depth', 'dFe_obs', 'gridx', 'gridy', 'gridlevel', 'inregion']\ndtypes = {k: 'int32' for k in ['month', 'gridx', 'gridy', 'gridlevel', 'inregion']}\n\nwith open(filename) as fid:\n lines = []\n part = 0\n for line in fid:\n if part == 0:\n values = [float(s) for s in line.split()]\n part = 1\n else: \n values.extend([float(s) for s in line.split()])\n values = np.array(values)\n values[values == -1e10] = np.nan\n lines.append({k: v for k, v in zip(columns, values)})\n part = 0\n\ndf = pd.DataFrame(lines).astype(dtypes).drop(['month', 'gridx', 'gridy', 'gridlevel', 'inregion'], axis=1)\ndf",
"_____no_output_____"
],
[
"datestamp = datetime.now(timezone.utc).strftime(\"%Y-%m-%d\")\ndf.to_csv(f'dFe-database-{datestamp}.csv', index=False, na_rep=-999.)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
c52884c839155feb80c9214861d3f51f5ae90fe0
| 12,701 |
ipynb
|
Jupyter Notebook
|
hanban_crawler.ipynb
|
morningD/crawl-hanban-script
|
c174d124d721d446683da8c2403789aedcb78956
|
[
"MIT"
] | 2 |
2022-02-16T06:46:40.000Z
|
2022-02-16T06:49:56.000Z
|
hanban_crawler.ipynb
|
morningD/crawl-hanban
|
c174d124d721d446683da8c2403789aedcb78956
|
[
"MIT"
] | null | null | null |
hanban_crawler.ipynb
|
morningD/crawl-hanban
|
c174d124d721d446683da8c2403789aedcb78956
|
[
"MIT"
] | null | null | null | 34.513587 | 172 | 0.452327 |
[
[
[
"!pip install beautifulsoup4",
"Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.6/dist-packages (4.6.3)\n"
],
[
"import urllib.request\nimport pandas as pd\nimport numpy as np\nfrom bs4 import BeautifulSoup,Comment\nimport re",
"_____no_output_____"
],
[
"url = \"http://www.hanban.org/hanbancn/template/ciotab_cn1.htm?v1\"\nresponse = urllib.request.urlopen(url)\n#webContent = response.read().decode(response.headers.get_content_charset())\nwebContent = response.read().decode(\"utf-8\")\nprint(len(webContent))",
"233918\n"
],
[
"# info_dict->{name:{'country', 'city', 'type', 'date', 'status', 'url'}}\ninfo_dict = {}\n\ntabContent = BeautifulSoup(webContent).find(\"div\", class_=\"tabContent\")\ncontinents = tabContent.find_all(\"div\", class_=\"tcon\")\n\nfor con in continents:\n # 得到此大洲国家名的box\n nations = con.find(\"div\", class_=re.compile(r\"nation\\d*\"))\n \n # 去掉被注释的国家\n for comment_country in nations(text=lambda text: isinstance(text, Comment)):\n comment_country.extract()\n # 得到此大洲国家列表\n counties = [c.string for c in nations.find_all(\"a\")]\n \n # 得到此大洲的学校box\n schools = con.find(\"div\", class_=re.compile(\"tcon_nationBox\\d*\"))\n # 得到此大洲里各个国家的学校tab\n # find_all()会忽视被注释的tab,不需要再去掉注释\n schools_nation = schools.find_all(\"div\", class_=\"tcon_nation\")\n \n # 确认国家列表与学校列表是对应的\n if len(schools_nation) != len(counties):\n print(\"ERROR: schools tab no match the country.\")\n break\n \n # 处理各个国家的学校\n for idx, sc in enumerate(counties):\n # 处理孔子学院\n kys = schools_nation[idx].find(\"div\", class_=\"KY\")\n # 检查是否有被注释的学院\n comment_kys = kys.find_all(string=lambda text: isinstance(text, Comment))\n # 处理被注释的学院\n if comment_kys:\n for ckys in comment_kys:\n # 由于注释没有建树,所以需要在创建一个BeautifulSoup进行解析\n ckys_bs = BeautifulSoup(ckys)\n for cky in ckys_bs.find_all(\"a\"):\n ky_name = cky.string\n if ky_name:\n ky_name = ky_name.strip()\n ky_url = cky.get(\"href\") or \"NaN\"\n info_dict[ky_name] = {'type':\"孔子学院\", 'country':counties[idx], 'status': 'hide', 'url':ky_url}\n\n # 处理没有被注释的学院, 如果名字相同会覆盖\n kys = kys.find_all(\"a\")\n # 处理每个学院\n for ky in kys:\n ky_name = ky.string\n if ky_name:\n ky_name = ky_name.strip()\n ky_url = ky.get(\"href\") or \"NaN\"\n #ky_id = re.findall(r'\\d+', ky_url.split('/')[-1])[0]\n # 将信息保存到汇总字典中\n info_dict[ky_name] = {'type':\"孔子学院\", 'country':counties[idx], 'status': 'show', 'url':ky_url}\n \n # 处理孔子课堂\n coures = schools_nation[idx].find(\"div\", class_=\"coures\")\n # 检查是否有被注释的课堂\n comment_coures = coures.find_all(string=lambda text: isinstance(text, Comment))\n # 处理被注释的课堂\n if comment_coures:\n for ccoures in comment_coures:\n # 由于注释没有建树,所以需要在创建一个BeautifulSoup进行解析\n ccoures_bs = BeautifulSoup(ccoures)\n for ccoure in ccoures_bs.find_all(\"a\"):\n coure_name = ccoure.string\n if coure_name:\n coure_name = coure_name.strip()\n coure_url = ccoure.get(\"href\") or \"NaN\"\n info_dict[coure_name] = {'type':\"孔子课堂\", 'country':counties[idx], 'status': 'hide', 'url':coure_url}\n\n # 处理没有被注释的课题\n coures = coures.find_all(\"a\")\n # 处理每个课堂\n for coure in coures:\n coure_name = coure.string\n if coure_name:\n coure_name = coure_name.strip()\n coure_url = coure.get(\"href\") or \"NaN\"\n #coure_id = re.findall(r'\\d+', coure_url.split('/')[-1])[0]\n info_dict[coure_name] = {'type':\"孔子课堂\", 'country':counties[idx], 'status': 'show', 'url':coure_url}\n\nprint(len(info_dict))",
"1356\n"
],
[
"print(info_dict[\"伊利诺伊大学香槟分校孔子学院\"])\nprint(info_dict[\"北佛罗里达大学孔子学院\"])",
"{'type': '孔子学院', 'country': '美国', 'status': 'hide', 'url': 'http://www.hanban.org/confuciousinstitutes/node_40583.htm'}\n{'type': '孔子学院', 'country': '美国', 'status': 'hide', 'url': 'http://www.hanban.org/confuciousinstitutes/node_45557.htm '}\n"
],
[
"# 爬取所有子页面内容,保存到一个字典中\nsubsite_dict = {}",
"_____no_output_____"
],
[
"def is_url(string_url):\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string_url)\n return urls\n\nfor idx, name in enumerate(info_dict.keys()):\n if is_url(info_dict[name]['url']) and name not in subsite_dict:\n # 爬取页面,如果中断则继续\n print(idx, \"\\tHandling:\", name, \"\\turl:\", info_dict[name]['url'])\n response = urllib.request.urlopen(info_dict[name]['url'])\n subwebContent = response.read().decode(\"utf-8\")\n subsite_dict[name] = subwebContent",
"_____no_output_____"
],
[
"# 解析各个孔子学院和孔子课题url中的\"城市\"和\"创立时间\"\nfor idx, name in enumerate(info_dict.keys()):\n \n print(idx, \"\\tHandling:\", name)\n\n # 默认NaN\n info_dict[name]['city'] = \"NaN\"\n info_dict[name]['date'] = \"NaN\"\n \n # 处理没有爬取到内容的情况\n if not subsite_dict[name]:\n print(\"Skip1\", name)\n continue\n\n # 创建解析器\n bs = BeautifulSoup(subsite_dict[name])\n \n # 有两种格式:<p>和<tbody>\n if bs.find(\"table\"):\n all_info = bs.find(\"div\", class_=\"main_leftCon\").find_all(\"table\")\n else:\n all_info = bs.find(\"div\", class_=\"main_leftCon\").find_all(\"p\")\n \n # 如果网页没有目标内容,跳过\n if not all_info:\n print(\"Skip2\", name)\n continue\n \n # 逐条解析\n for line in all_info:\n info = [word for word in line.stripped_strings]\n if not info:\n continue\n if info[0].find(\"城市\") != -1:\n if len(info) >= 2:\n # 确认城市名存在\n info_dict[name]['city'] = info[1]\n \n if info[0].find(\"时间\") != -1:\n # 匹配时间,格式****年**月**日\n date_string = re.findall(r'\\d{4}[-/.|年]\\d{1,2}[-\\/.|月]\\d{1,2}[-/.|日]*', info[-1])\n # debug\n # print(info)\n # 确认日期存在\n if date_string:\n # 去掉中文,转成标准格式为 ****-**-**\n date_list = re.findall(r'\\d+',date_string[0])\n date = '-'.join(date_list)\n info_dict[name]['date'] = date",
"_____no_output_____"
],
[
"print(info_dict[\"伊利诺伊大学香槟分校孔子学院\"])\nprint(info_dict[\"北佛罗里达大学孔子学院\"])\nprint(info_dict[\"南太平洋大学孔子学院\"])\nprint(info_dict[\"斯科奇•欧克伯恩学院孔子课堂\"])",
"{'type': '孔子学院', 'country': '美国', 'status': 'hide', 'url': 'http://www.hanban.org/confuciousinstitutes/node_40583.htm', 'city': 'NaN', 'date': 'NaN'}\n{'type': '孔子学院', 'country': '美国', 'status': 'hide', 'url': 'http://www.hanban.org/confuciousinstitutes/node_45557.htm ', 'city': 'NaN', 'date': 'NaN'}\n{'type': '孔子学院', 'country': '斐济', 'status': 'show', 'url': 'http://www.hanban.org/confuciousinstitutes/node_38667.htm', 'city': '苏瓦', 'date': '2011-02-18'}\n{'type': '孔子课堂', 'country': '澳大利亚', 'status': 'show', 'url': 'http://zhuanti.hanban.org/videolist/?cat=98&tag=cn', 'city': '朗赛斯顿', 'date': '2015-09-15'}\n"
],
[
"# Save the information to file\ndf = pd.DataFrame.from_dict(info_dict, orient='index')\ndf.to_excel(\"./hanban.xlsx\", encoding='utf-8')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52889d0cae6fbfcdc2211e8fdf2a84df495f059
| 7,210 |
ipynb
|
Jupyter Notebook
|
results/distributional_dqn.ipynb
|
pluebcke/dqn_experiments
|
bbfade7ef7d514917dc3f2e3ad6af70906516942
|
[
"MIT"
] | 4 |
2020-09-05T01:28:46.000Z
|
2021-08-07T14:42:42.000Z
|
results/distributional_dqn.ipynb
|
pluebcke/dqn_experiments
|
bbfade7ef7d514917dc3f2e3ad6af70906516942
|
[
"MIT"
] | null | null | null |
results/distributional_dqn.ipynb
|
pluebcke/dqn_experiments
|
bbfade7ef7d514917dc3f2e3ad6af70906516942
|
[
"MIT"
] | null | null | null | 66.759259 | 713 | 0.722053 |
[
[
[
"# Distributional DQN\nThe final improvement to the DQN agent [1] is using distributions instead of simple average values for learning the q value function. This algorithm was presented by Bellemare et al. (2018) [2]. In their math heavy manuscript, the authors introduce the distributional Belman operator and show that it defines a contraction for the policy evaluation case. Bellemare et al. suggest that using distributions leads to a \"significantly better behaved\" [2] reinforcement learning and underline their theoretical findings with much better results on the Arcade Learning Environment [3]. Distributional DQN is also one of the improvements that seem to have one of the biggest impact in the Rainbow manuscript [4].",
"_____no_output_____"
],
[
"## Implementation\nThe distributional part of the algorithm is maybe the most influenced by the Deep Reinforcement Learning Hands-On book [5]. To get a good understanding of both, the distributional Bellman operator and how to use it in the DQN agent, I thoroughly studied the book's implementation as a starting point. I later implemented the distributional operator in a way that appears slightly more elegant to me, even though I still did not manage to get rid of the special treatment for the last step of an episode. \nSome of the functionality for distributions was implemented in a DistributionalNetHelper class. \nThis way other neural network architectures can just inherit this functionality, even though at this point I only implemented DuelingDQN with distrbutions.\n\n## Results\n\nI start by comparing the Distributional DQN agent with two other agents. The different agents I compare are:\n\n1. run024, which is the DQN agent with DDQN, Dueling DQN, n-step-reward and Prioritized Experience Replay. This agent so far had the most convincing results over the entire set of experiments.\n2. run028, which uses all of the improvements mentioned above as wel as using Noisy Networks\n3. run029 as run028 but using distributions instead of simple averages. \n\nThe values for the support in the distributional agent were chosen similar to the manuscript [2]; the number of atoms was 51 with values between -10 and 10. These values are not optimal for some of the experiments, as will become evident later.\n\nOn the radar plot, it appears that the distributional agent has the worst performance of all three agents. However, the barplot reveals that the DistributionalDQN agent shows good results on bandit, catch and catch_scale.\n\n\n\n\nThe problem that the DistributionalDQN agent faces is its use of a fixed support while the different experiments have very different reward scales. In the bandit experiment the final reward (and thus the q-value) varies between 0 and 1, while the cartpole experiment, for example, has q-values somewhere between 0 and 1001.\n\nTo investigate if using more appropriate vmin and vmax values yields better performance, I ran four of the experiments one more time with slightly different settings:\n4. run030 uses the same settings as run029 but vmin = -1000 and vmax = 0, these settings were used for mountaincar and mountaincar_scale\n5. run032 uses the same settings as run29 but vmin = 0, vmax = 1000, these settings were used for cartpole and cartpole_scale.\n\nThe results are shown below. It is apparent that the performance greatly improves when an appropriate scale is chosen. \n\n\nIn the scaled versions of the experiments, one can observe that the fine-tuned agent (run031) only performed well for the scale of 1.0, while the first set of parameters was better for smaller scales. This shows how strongly the choice of the support for the distribution influences the results.\n\n",
"_____no_output_____"
],
[
"## Discussion\n\nThe results above show that the DistributionalDQN agent can learn good policies very well if an appropriate scale for the support is chosen. However, this is also the obvious problem of the approach in the form presented in [2]. \nWhen the support is not chosen in an appropriate way, using a simple average value is probably more robust than using distributions.\n\nIn [2], distributions were used to improve the convergence properties of the DQN algorithms. \nUsing distributions has even more potential. One could use the distributions to improve action selection, for example in the case of multimodal distributions with very different (non-deterministic) rewards and probabilities.\nConsider one action that certainly gives a reward of +1 and another action that gives a reward of +100 with a probability of 1\\%. Even though both actions yield an average reward of 1 they certainly have very different risk profiles, and this could be assesed when the whole distributional information is available.",
"_____no_output_____"
],
[
"## References\n\nThe figures here were produced by the analysis Jupyter Notebook from [the BSuite code repository](https://github.com/deepmind/bsuite) and [6].\n\n[1] Mnih, Volodymyr, et al. Human-level control through deep reinforcement learning. Nature, 2015. \n[2] Bellemare, Marc G., Will Dabney, and Rémi Munos. \"A distributional perspective on reinforcement learning.\" Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017. \n[3] Bellemare, Marc G., et al. \"The arcade learning environment: An evaluation platform for general agents.\" Journal of Artificial Intelligence Research 47 (2013): 253-279. \n[4] Hessel, Matteo, et al. Rainbow: Combining improvements in deep reinforcement learning. In: Thirty-Second AAAI Conference on Artificial Intelligence. 2018. \n[5] Lapan, Maxim. Deep Reinforcement Learning Hands-On, Packt Publishing Ltd, 2018.",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c528900c62ca61486a5f02dd367b0ab89873195c
| 94,907 |
ipynb
|
Jupyter Notebook
|
Wk1/Regularization/Regularization.ipynb
|
ANKITPODDER2000/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-Optimization-Week-1
|
09e6c76e87b0709cab95ce6c81810c6acca26292
|
[
"MIT"
] | 125 |
2021-01-02T03:37:27.000Z
|
2022-03-23T21:58:13.000Z
|
Wk1/Regularization/Regularization.ipynb
|
ANKITPODDER2000/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-Optimization-Week-1
|
09e6c76e87b0709cab95ce6c81810c6acca26292
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
Wk1/Regularization/Regularization.ipynb
|
ANKITPODDER2000/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-Optimization-Week-1
|
09e6c76e87b0709cab95ce6c81810c6acca26292
|
[
"MIT"
] | 150 |
2021-01-02T00:27:46.000Z
|
2022-03-30T03:42:27.000Z
| 98.145812 | 56,104 | 0.791185 |
[
[
[
"# Regularization\n\nWelcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!\n\n**You will learn to:** Use regularization in your deep learning models.\n\nLet's first import the packages you are going to use.",
"_____no_output_____"
]
],
[
[
"# import packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec\nfrom reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters\nimport sklearn\nimport sklearn.datasets\nimport scipy.io\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'",
"_____no_output_____"
]
],
[
[
"**Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. \n\n<img src=\"images/field_kiank.png\" style=\"width:600px;height:350px;\">\n<caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>\n\n\nThey give you the following 2D dataset from France's past 10 games.",
"_____no_output_____"
]
],
[
[
"train_X, train_Y, test_X, test_Y = load_2D_dataset()",
"_____no_output_____"
]
],
[
[
"Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.\n- If the dot is blue, it means the French player managed to hit the ball with his/her head\n- If the dot is red, it means the other team's player hit the ball with their head\n\n**Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.",
"_____no_output_____"
],
[
"**Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. \n\nYou will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. ",
"_____no_output_____"
],
[
"## 1 - Non-regularized model\n\nYou will use the following neural network (already implemented for you below). This model can be used:\n- in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use \"`lambd`\" instead of \"`lambda`\" because \"`lambda`\" is a reserved keyword in Python. \n- in *dropout mode* -- by setting the `keep_prob` to a value less than one\n\nYou will first try the model without any regularization. Then, you will implement:\n- *L2 regularization* -- functions: \"`compute_cost_with_regularization()`\" and \"`backward_propagation_with_regularization()`\"\n- *Dropout* -- functions: \"`forward_propagation_with_dropout()`\" and \"`backward_propagation_with_dropout()`\"\n\nIn each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.",
"_____no_output_____"
]
],
[
[
"def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)\n learning_rate -- learning rate of the optimization\n num_iterations -- number of iterations of the optimization loop\n print_cost -- If True, print the cost every 10000 iterations\n lambd -- regularization hyperparameter, scalar\n keep_prob - probability of keeping a neuron active during drop-out, scalar.\n \n Returns:\n parameters -- parameters learned by the model. They can then be used to predict.\n \"\"\"\n \n grads = {}\n costs = [] # to keep track of the cost\n m = X.shape[1] # number of examples\n layers_dims = [X.shape[0], 20, 3, 1]\n \n # Initialize parameters dictionary.\n parameters = initialize_parameters(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n if keep_prob == 1:\n a3, cache = forward_propagation(X, parameters)\n elif keep_prob < 1:\n a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)\n \n # Cost function\n if lambd == 0:\n cost = compute_cost(a3, Y)\n else:\n cost = compute_cost_with_regularization(a3, Y, parameters, lambd)\n \n # Backward propagation.\n assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout, \n # but this assignment will only explore one at a time\n if lambd == 0 and keep_prob == 1:\n grads = backward_propagation(X, Y, cache)\n elif lambd != 0:\n grads = backward_propagation_with_regularization(X, Y, cache, lambd)\n elif keep_prob < 1:\n grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)\n \n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # Print the loss every 10000 iterations\n if print_cost and i % 10000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n if print_cost and i % 1000 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (x1,000)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters",
"_____no_output_____"
]
],
[
[
"Let's train the model without any regularization, and observe the accuracy on the train/test sets.",
"_____no_output_____"
]
],
[
[
"parameters = model(train_X, train_Y)\nprint (\"On the training set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"_____no_output_____"
]
],
[
[
"The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.",
"_____no_output_____"
]
],
[
[
"plt.title(\"Model without regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"_____no_output_____"
]
],
[
[
"The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting.",
"_____no_output_____"
],
[
"## 2 - L2 Regularization\n\nThe standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:\n$$J = -\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} \\tag{1}$$\nTo:\n$$J_{regularized} = \\small \\underbrace{-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} }_\\text{cross-entropy cost} + \\underbrace{\\frac{1}{m} \\frac{\\lambda}{2} \\sum\\limits_l\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2} }_\\text{L2 regularization cost} \\tag{2}$$\n\nLet's modify your cost and observe the consequences.\n\n**Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2}$ , use :\n```python\nnp.sum(np.square(Wl))\n```\nNote that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \\frac{1}{m} \\frac{\\lambda}{2} $.",
"_____no_output_____"
]
],
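To make formula (2) concrete before the graded function, here is a minimal standalone NumPy sketch of the L2 term alone; the batch size, `lambd`, and weight shapes are made-up values for illustration:

```python
import numpy as np

np.random.seed(0)
m, lambd = 5, 0.7                                  # hypothetical batch size and regularization strength
W1, W2, W3 = np.random.randn(3, 2), np.random.randn(2, 3), np.random.randn(1, 2)

# L2 term of formula (2): (1/m) * (lambda/2) * sum of squared entries over all weight matrices
l2_term = lambd / (2 * m) * sum(np.sum(np.square(W)) for W in (W1, W2, W3))
print(l2_term)
```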
[
[
"# GRADED FUNCTION: compute_cost_with_regularization\n\ndef compute_cost_with_regularization(A3, Y, parameters, lambd):\n \"\"\"\n Implement the cost function with L2 regularization. See formula (2) above.\n \n Arguments:\n A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n parameters -- python dictionary containing parameters of the model\n \n Returns:\n cost - value of the regularized loss function (formula (2))\n \"\"\"\n m = Y.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n W3 = parameters[\"W3\"]\n \n cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost\n \n ### START CODE HERE ### (approx. 1 line)\n L2_regularization_cost = None\n ### END CODER HERE ###\n \n cost = cross_entropy_cost + L2_regularization_cost\n \n return cost",
"_____no_output_____"
],
[
"A3, Y_assess, parameters = compute_cost_with_regularization_test_case()\n\nprint(\"cost = \" + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr>\n <td>\n **cost**\n </td>\n <td>\n 1.78648594516\n </td>\n \n </tr>\n\n</table> ",
"_____no_output_____"
],
[
"Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. \n\n**Exercise**: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\\frac{d}{dW} ( \\frac{1}{2}\\frac{\\lambda}{m} W^2) = \\frac{\\lambda}{m} W$).",
"_____no_output_____"
]
],
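As a sanity check on the stated gradient, a quick finite-difference sketch (independent of the assignment's helper files; all numbers are made up) confirms that perturbing one entry of $W$ changes $\frac{\lambda}{2m}\|W\|^2$ at the rate $\frac{\lambda}{m} W$:

```python
import numpy as np

m, lambd, eps = 4, 0.7, 1e-6
W = np.array([[0.5, -1.2], [2.0, 0.3]])

def reg(W):
    # The L2 penalty term: (lambda / 2m) * sum of squared weights
    return lambd / (2 * m) * np.sum(np.square(W))

Wp, Wm = W.copy(), W.copy()
Wp[0, 0] += eps
Wm[0, 0] -= eps
numerical = (reg(Wp) - reg(Wm)) / (2 * eps)   # centered difference
analytic = lambd / m * W[0, 0]
print(numerical, analytic)                    # both ~0.0875
```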
[
[
"# GRADED FUNCTION: backward_propagation_with_regularization\n\ndef backward_propagation_with_regularization(X, Y, cache, lambd):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added an L2 regularization.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation()\n lambd -- regularization hyperparameter, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n \n ### START CODE HERE ### (approx. 1 line)\n dW3 = 1./m * np.dot(dZ3, A2.T) + None\n ### END CODE HERE ###\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW2 = 1./m * np.dot(dZ2, A1.T) + None\n ### END CODE HERE ###\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW1 = 1./m * np.dot(dZ1, X.T) + None\n ### END CODE HERE ###\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients",
"_____no_output_____"
],
[
"X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()\n\ngrads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"dW2 = \"+ str(grads[\"dW2\"]))\nprint (\"dW3 = \"+ str(grads[\"dW3\"]))",
"_____no_output_____"
]
],
[
[
"**Expected Output**:\n\n<table> \n <tr>\n <td>\n **dW1**\n </td>\n <td>\n [[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\n </td>\n </tr>\n <tr>\n <td>\n **dW2**\n </td>\n <td>\n [[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\n </td>\n </tr>\n <tr>\n <td>\n **dW3**\n </td>\n <td>\n [[-1.77691347 -0.11832879 -0.09397446]]\n </td>\n </tr>\n</table> ",
"_____no_output_____"
],
[
"Let's now run the model with L2 regularization $(\\lambda = 0.7)$. The `model()` function will call: \n- `compute_cost_with_regularization` instead of `compute_cost`\n- `backward_propagation_with_regularization` instead of `backward_propagation`",
"_____no_output_____"
]
],
[
[
"parameters = model(train_X, train_Y, lambd = 0.7)\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"_____no_output_____"
]
],
[
[
"Congrats, the test set accuracy increased to 93%. You have saved the French football team!\n\nYou are not overfitting the training data anymore. Let's plot the decision boundary.",
"_____no_output_____"
]
],
[
[
"plt.title(\"Model with L2-regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"_____no_output_____"
]
],
[
[
"**Observations**:\n- The value of $\\lambda$ is a hyperparameter that you can tune using a dev set.\n- L2 regularization makes your decision boundary smoother. If $\\lambda$ is too large, it is also possible to \"oversmooth\", resulting in a model with high bias.\n\n**What is L2-regularization actually doing?**:\n\nL2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. \n\n<font color='blue'>\n**What you should remember** -- the implications of L2-regularization on:\n- The cost computation:\n - A regularization term is added to the cost\n- The backpropagation function:\n - There are extra terms in the gradients with respect to weight matrices\n- Weights end up smaller (\"weight decay\"): \n - Weights are pushed to smaller values.",
"_____no_output_____"
],
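The weight-decay effect follows directly from the update rule: with the extra gradient term, $W \leftarrow W - \alpha\,(\mathrm{d}W + \frac{\lambda}{m} W) = (1 - \frac{\alpha\lambda}{m})\,W - \alpha\,\mathrm{d}W$, so every step multiplicatively shrinks $W$. A tiny sketch of the shrinkage factor alone, with made-up hyperparameters and the data-dependent $\mathrm{d}W$ omitted:

```python
import numpy as np

alpha, lambd, m = 0.3, 0.7, 200
W = np.array([1.0, -2.0, 0.5])
for _ in range(100):
    W = (1 - alpha * lambd / m) * W   # decay factor applied by each L2-regularized step
print(W)                              # every entry shrunk by the same ~10% after 100 steps
```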
[
"## 3 - Dropout\n\nFinally, **dropout** is a widely used regularization technique that is specific to deep learning. \n**It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means!\n\n<!--\nTo understand drop-out, consider this conversation with a friend:\n- Friend: \"Why do you need all these neurons to train your network and classify images?\". \n- You: \"Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!\"\n- Friend: \"I see, but are you sure that your neurons are learning different features and not all the same features?\"\n- You: \"Good point... Neurons in the same layer actually don't talk to each other. It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution.\"\n!--> \n\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout1_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n<br>\n<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\\_prob$ or keep it with probability $keep\\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout2_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>\n\n\nWhen you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. \n\n### 3.1 - Forward propagation with dropout\n\n**Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. \n\n**Instructions**:\nYou would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:\n1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$.\n2. Set each entry of $D^{[1]}$ to be 0 with probability (`1-keep_prob`) or 1 with probability (`keep_prob`), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: `X = (X < 0.5)`. Note that 0 and 1 are respectively equivalent to False and True.\n3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.\n4. Divide $A^{[1]}$ by `keep_prob`. 
By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)",
"_____no_output_____"
]
],
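The four steps above can be rehearsed in isolation on a toy activation matrix before touching the graded function; this sketch is illustrative only, with arbitrary shapes and `keep_prob`:

```python
import numpy as np

np.random.seed(1)
keep_prob = 0.8
A1 = np.random.randn(3, 4)                        # toy activations: (neurons, examples)

D1 = np.random.rand(A1.shape[0], A1.shape[1])     # Step 1: random matrix shaped like A1
D1 = (D1 < keep_prob).astype(int)                 # Step 2: 1 with probability keep_prob, else 0
A1 = A1 * D1                                      # Step 3: shut down the masked neurons
A1 = A1 / keep_prob                               # Step 4: inverted-dropout rescaling
print(D1)
print(A1)
```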
[
[
"# GRADED FUNCTION: forward_propagation_with_dropout\n\ndef forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):\n \"\"\"\n Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape (20, 2)\n b1 -- bias vector of shape (20, 1)\n W2 -- weight matrix of shape (3, 20)\n b2 -- bias vector of shape (3, 1)\n W3 -- weight matrix of shape (1, 3)\n b3 -- bias vector of shape (1, 1)\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n A3 -- last activation value, output of the forward propagation, of shape (1,1)\n cache -- tuple, information stored for computing the backward propagation\n \"\"\"\n \n np.random.seed(1)\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n ### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above. \n D1 = None # Step 1: initialize matrix D1 = np.random.rand(..., ...)\n D1 = None # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)\n A1 = None # Step 3: shut down some neurons of A1\n A1 = None # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n ### START CODE HERE ### (approx. 4 lines)\n D2 = None # Step 1: initialize matrix D2 = np.random.rand(..., ...)\n D2 = None # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)\n A2 = None # Step 3: shut down some neurons of A2\n A2 = None # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache",
"_____no_output_____"
],
[
"X_assess, parameters = forward_propagation_with_dropout_test_case()\n\nA3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)\nprint (\"A3 = \" + str(A3))",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr>\n <td>\n **A3**\n </td>\n <td>\n [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n </td>\n \n </tr>\n\n</table> ",
"_____no_output_____"
],
[
"### 3.2 - Backward propagation with dropout\n\n**Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. \n\n**Instruction**:\nBackpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:\n1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. \n2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`).\n",
"_____no_output_____"
]
],
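A short numeric illustration of these two steps on toy upstream gradients (the mask and `keep_prob` here are made up; in the assignment the mask comes from the cache):

```python
import numpy as np

np.random.seed(3)
keep_prob = 0.8
dA1 = np.random.randn(3, 4)                          # toy upstream gradients
D1 = (np.random.rand(3, 4) < keep_prob).astype(int)  # mask saved from the forward pass

dA1 = dA1 * D1          # Step 1: zero the gradients of the dropped neurons
dA1 = dA1 / keep_prob   # Step 2: undo the forward-pass scaling
print(dA1)
```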
[
[
"# GRADED FUNCTION: backward_propagation_with_dropout\n\ndef backward_propagation_with_dropout(X, Y, cache, keep_prob):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added dropout.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation_with_dropout()\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n dA2 = np.dot(W3.T, dZ3)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA2 = None # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation\n dA2 = None # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA1 = None # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation\n dA1 = None # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients",
"_____no_output_____"
],
[
"X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()\n\ngradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)\n\nprint (\"dA1 = \" + str(gradients[\"dA1\"]))\nprint (\"dA2 = \" + str(gradients[\"dA2\"]))",
"_____no_output_____"
]
],
[
[
"**Expected Output**: \n\n<table> \n <tr>\n <td>\n **dA1**\n </td>\n <td>\n [[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\n </td>\n \n </tr>\n <tr>\n <td>\n **dA2**\n </td>\n <td>\n [[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n </td>\n \n </tr>\n</table> ",
"_____no_output_____"
],
[
"Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neurons of layer 1 and 2 with 24% probability. The function `model()` will now call:\n- `forward_propagation_with_dropout` instead of `forward_propagation`.\n- `backward_propagation_with_dropout` instead of `backward_propagation`.",
"_____no_output_____"
]
],
[
[
"parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)\n\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)",
"_____no_output_____"
]
],
[
[
"Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! \n\nRun the code below to plot the decision boundary.",
"_____no_output_____"
]
],
[
[
"plt.title(\"Model with dropout\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"_____no_output_____"
]
],
[
[
"**Note**:\n- A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training. \n- Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks.\n\n<font color='blue'>\n**What you should remember about dropout:**\n- Dropout is a regularization technique.\n- You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time.\n- Apply dropout both during forward and backward propagation.\n- During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob is other values than 0.5. ",
"_____no_output_____"
],
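The expected-value point in the last bullet is easy to verify empirically; in this sketch (arbitrary `keep_prob` and toy activations), the mean of the masked-and-rescaled array matches the mean of the original:

```python
import numpy as np

np.random.seed(2)
keep_prob = 0.5
a = np.random.rand(1000, 1000) + 1.0             # strictly positive toy activations

mask = np.random.rand(*a.shape) < keep_prob
a_dropout = a * mask / keep_prob                 # inverted dropout
print(a.mean(), a_dropout.mean())                # both ~1.5
```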
[
"## 4 - Conclusions",
"_____no_output_____"
],
[
"**Here are the results of our three models**: \n\n<table> \n <tr>\n <td>\n **model**\n </td>\n <td>\n **train accuracy**\n </td>\n <td>\n **test accuracy**\n </td>\n\n </tr>\n <td>\n 3-layer NN without regularization\n </td>\n <td>\n 95%\n </td>\n <td>\n 91.5%\n </td>\n <tr>\n <td>\n 3-layer NN with L2-regularization\n </td>\n <td>\n 94%\n </td>\n <td>\n 93%\n </td>\n </tr>\n <tr>\n <td>\n 3-layer NN with dropout\n </td>\n <td>\n 93%\n </td>\n <td>\n 95%\n </td>\n </tr>\n</table> ",
"_____no_output_____"
],
[
"Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system. ",
"_____no_output_____"
],
[
"Congratulations for finishing this assignment! And also for revolutionizing French football. :-) ",
"_____no_output_____"
],
[
"<font color='blue'>\n**What we want you to remember from this notebook**:\n- Regularization will help you reduce overfitting.\n- Regularization will drive your weights to lower values.\n- L2 regularization and Dropout are two very effective regularization techniques.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c5289e2ac2c38031d48d29648faa2edd1e2f8727
| 28,869 |
ipynb
|
Jupyter Notebook
|
assignment3/StyleTransfer-TensorFlow.ipynb
|
ishanbhandari-19/CS231n-Assignment-solutions
|
a02429b4af5e693d0793e8de968f96fc2351d909
|
[
"MIT"
] | 419 |
2019-09-02T08:25:31.000Z
|
2022-03-28T06:09:47.000Z
|
assignment3/StyleTransfer-TensorFlow.ipynb
|
ishanbhandari-19/CS231n-Assignment-solutions
|
a02429b4af5e693d0793e8de968f96fc2351d909
|
[
"MIT"
] | 32 |
2020-09-17T19:43:53.000Z
|
2022-03-12T00:55:26.000Z
|
assignment3/StyleTransfer-TensorFlow.ipynb
|
ishanbhandari-19/CS231n-Assignment-solutions
|
a02429b4af5e693d0793e8de968f96fc2351d909
|
[
"MIT"
] | 164 |
2019-09-20T07:58:04.000Z
|
2022-03-15T11:31:25.000Z
| 41.538129 | 1,105 | 0.600714 |
[
[
[
"# Style Transfer\nIn this notebook we will implement the style transfer technique from [\"Image Style Transfer Using Convolutional Neural Networks\" (Gatys et al., CVPR 2015)](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Gatys_Image_Style_Transfer_CVPR_2016_paper.pdf).\n\nThe general idea is to take two images, and produce a new image that reflects the content of one but the artistic \"style\" of the other. We will do this by first formulating a loss function that matches the content and style of each respective image in the feature space of a deep network, and then performing gradient descent on the pixels of the image itself.\n\nThe deep network we use as a feature extractor is [SqueezeNet](https://arxiv.org/abs/1602.07360), a small model that has been trained on ImageNet. You could use any network, but we chose SqueezeNet here for its small size and efficiency.\n\nHere's an example of the images you'll be able to produce by the end of this notebook:\n\n\n\n",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nfrom scipy.misc import imread, imresize\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n# Helper functions to deal with image preprocessing\nfrom cs231n.image_utils import load_image, preprocess_image, deprocess_image\nfrom cs231n.classifiers.squeezenet import SqueezeNet\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x,y):\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n\n# Older versions of scipy.misc.imresize yield different results\n# from newer versions, so we check to make sure scipy is up to date.\ndef check_scipy():\n import scipy\n version = scipy.__version__.split('.')\n if int(version[0]) < 1:\n assert int(version[1]) >= 16, \"You must install SciPy >= 0.16.0 to complete this notebook.\"\n\ncheck_scipy()",
"_____no_output_____"
]
],
[
[
"Load the pretrained SqueezeNet model. This model has been ported from PyTorch, see `cs231n/classifiers/squeezenet.py` for the model architecture. \n\nTo use SqueezeNet, you will need to first **download the weights** by descending into the `cs231n/datasets` directory and running `get_squeezenet_tf.sh` . Note that if you ran `get_assignment3_data.sh` then SqueezeNet will already be downloaded.",
"_____no_output_____"
]
],
[
[
"# Load pretrained SqueezeNet model\nSAVE_PATH = 'cs231n/datasets/squeezenet.ckpt'\nif not os.path.exists(SAVE_PATH + \".index\"):\n raise ValueError(\"You need to download SqueezeNet!\")",
"_____no_output_____"
],
[
"model=SqueezeNet()\nmodel.load_weights(SAVE_PATH)\nmodel.trainable=False\n\n# Load data for testing\ncontent_img_test = preprocess_image(load_image('styles/tubingen.jpg', size=192))[None]\nstyle_img_test = preprocess_image(load_image('styles/starry_night.jpg', size=192))[None]\nanswers = np.load('style-transfer-checks-tf.npz')\n",
"_____no_output_____"
]
],
[
[
"## Computing Loss\n\nWe're going to compute the three components of our loss function now. The loss function is a weighted sum of three terms: content loss + style loss + total variation loss. You'll fill in the functions that compute these weighted terms below.",
"_____no_output_____"
],
[
"## Content loss\nWe can generate an image that reflects the content of one image and the style of another by incorporating both in our loss function. We want to penalize deviations from the content of the content image and deviations from the style of the style image. We can then use this hybrid loss function to perform gradient descent **not on the parameters** of the model, but instead **on the pixel values** of our original image.\n\nLet's first write the content loss function. Content loss measures how much the feature map of the generated image differs from the feature map of the source image. We only care about the content representation of one layer of the network (say, layer $\\ell$), that has feature maps $A^\\ell \\in \\mathbb{R}^{1 \\times H_\\ell \\times W_\\ell \\times C_\\ell}$. $C_\\ell$ is the number of filters/channels in layer $\\ell$, $H_\\ell$ and $W_\\ell$ are the height and width. We will work with reshaped versions of these feature maps that combine all spatial positions into one dimension. Let $F^\\ell \\in \\mathbb{R}^{M_\\ell \\times C_\\ell}$ be the feature map for the current image and $P^\\ell \\in \\mathbb{R}^{M_\\ell \\times C_\\ell}$ be the feature map for the content source image where $M_\\ell=H_\\ell\\times W_\\ell$ is the number of elements in each feature map. Each row of $F^\\ell$ or $P^\\ell$ represents the vectorized activations of a particular filter, convolved over all positions of the image. Finally, let $w_c$ be the weight of the content loss term in the loss function.\n\nThen the content loss is given by:\n\n$L_c = w_c \\times \\sum_{i,j} (F_{ij}^{\\ell} - P_{ij}^{\\ell})^2$",
"_____no_output_____"
]
],
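As a quick sanity check of the formula, independent of SqueezeNet: the content loss of a feature map against a uniformly shifted copy of itself is just the weight times the number of elements times the squared shift. The shapes here are made up:

```python
import tensorflow as tf

F = tf.random.uniform((1, 4, 4, 3))   # toy "current" feature map
P = F + 0.1                           # toy "target": shifted by 0.1 everywhere
w_c = 6e-2
L_c = w_c * tf.reduce_sum(tf.square(F - P))
print(float(L_c))                     # 6e-2 * 48 * 0.01 = 0.0288
```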
[
[
"def content_loss(content_weight, content_current, content_original):\n \"\"\"\n Compute the content loss for style transfer.\n \n Inputs:\n - content_weight: scalar constant we multiply the content_loss by.\n - content_current: features of the current image, Tensor with shape [1, height, width, channels]\n - content_target: features of the content image, Tensor with shape [1, height, width, channels]\n \n Returns:\n - scalar content loss\n \"\"\"\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n",
"_____no_output_____"
],
[
"# We provide this helper code which takes an image, a model (cnn), and returns a list of\n# feature maps, one per layer.\ndef extract_features(x, cnn):\n \"\"\"\n Use the CNN to extract features from the input image x.\n \n Inputs:\n - x: A Tensor of shape (N, H, W, C) holding a minibatch of images that\n will be fed to the CNN.\n - cnn: A Tensorflow model that we will use to extract features.\n \n Returns:\n - features: A list of feature for the input images x extracted using the cnn model.\n features[i] is a Tensor of shape (N, H_i, W_i, C_i); recall that features\n from different layers of the network may have different numbers of channels (C_i) and\n spatial dimensions (H_i, W_i).\n \"\"\"\n features = []\n prev_feat = x\n for i, layer in enumerate(cnn.net.layers[:-2]):\n next_feat = layer(prev_feat)\n features.append(next_feat)\n prev_feat = next_feat\n return features",
"_____no_output_____"
]
],
[
[
"Test your content loss. The error should be less than 1e-8.",
"_____no_output_____"
]
],
[
[
"def content_loss_test(correct):\n content_layer = 2\n content_weight = 6e-2\n c_feats = extract_features(content_img_test, model)[content_layer]\n bad_img = tf.zeros(content_img_test.shape)\n feats = extract_features(bad_img, model)[content_layer]\n student_output = content_loss(content_weight, c_feats, feats)\n error = rel_error(correct, student_output)\n print('Maximum error is {:.3f}'.format(error))\n\ncontent_loss_test(answers['cl_out'])",
"_____no_output_____"
]
],
[
[
"## Style loss\nNow we can tackle the style loss. For a given layer $\\ell$, the style loss is defined as follows:\n\nFirst, compute the Gram matrix G which represents the correlations between the responses of each filter, where F is as above. The Gram matrix is an approximation to the covariance matrix -- we want the activation statistics of our generated image to match the activation statistics of our style image, and matching the (approximate) covariance is one way to do that. There are a variety of ways you could do this, but the Gram matrix is nice because it's easy to compute and in practice shows good results.\n\nGiven a feature map $F^\\ell$ of shape $(M_\\ell, C_\\ell)$, the Gram matrix has shape $(C_\\ell, C_\\ell)$ and its elements are given by:\n\n$$G_{ij}^\\ell = \\sum_k F^{\\ell}_{ki} F^{\\ell}_{kj}$$\n\nAssuming $G^\\ell$ is the Gram matrix from the feature map of the current image, $A^\\ell$ is the Gram Matrix from the feature map of the source style image, and $w_\\ell$ a scalar weight term, then the style loss for the layer $\\ell$ is simply the weighted Euclidean distance between the two Gram matrices:\n\n$$L_s^\\ell = w_\\ell \\sum_{i, j} \\left(G^\\ell_{ij} - A^\\ell_{ij}\\right)^2$$\n\nIn practice we usually compute the style loss at a set of layers $\\mathcal{L}$ rather than just a single layer $\\ell$; then the total style loss is the sum of style losses at each layer:\n\n$$L_s = \\sum_{\\ell \\in \\mathcal{L}} L_s^\\ell$$\n\nBegin by implementing the Gram matrix computation below:",
"_____no_output_____"
]
],
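One way to convince yourself of the definition is to compare a matmul-based Gram matrix against the element-wise sum $G_{ij} = \sum_k F_{ki} F_{kj}$ on a tiny random feature map; this sketch assumes nothing from the assignment code:

```python
import tensorflow as tf

feats = tf.random.uniform((1, 3, 3, 2))           # toy feature map with C = 2
F = tf.reshape(feats, (-1, 2))                    # flatten spatial positions: (H*W, C)
G = tf.matmul(F, F, transpose_a=True)             # (C, C) Gram matrix
G_check = tf.einsum('ki,kj->ij', F, F)            # element-wise definition
print(float(tf.reduce_max(tf.abs(G - G_check))))  # ~0.0
```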
[
[
"def gram_matrix(features, normalize=True):\n \"\"\"\n Compute the Gram matrix from features.\n \n Inputs:\n - features: Tensor of shape (1, H, W, C) giving features for\n a single image.\n - normalize: optional, whether to normalize the Gram matrix\n If True, divide the Gram matrix by the number of neurons (H * W * C)\n \n Returns:\n - gram: Tensor of shape (C, C) giving the (optionally normalized)\n Gram matrices for the input image.\n \"\"\"\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n",
"_____no_output_____"
]
],
[
[
"Test your Gram matrix code. You should see errors less than 0.001.",
"_____no_output_____"
]
],
[
[
"def gram_matrix_test(correct):\n gram = gram_matrix(extract_features(style_img_test, model)[4]) ### 4 instead of 5 - second MaxPooling layer\n error = rel_error(correct, gram)\n print('Maximum error is {:.3f}'.format(error))\n\ngram_matrix_test(answers['gm_out'])",
"_____no_output_____"
]
],
[
[
"Next, implement the style loss:",
"_____no_output_____"
]
],
[
[
"def style_loss(feats, style_layers, style_targets, style_weights):\n \"\"\"\n Computes the style loss at a set of layers.\n \n Inputs:\n - feats: list of the features at every layer of the current image, as produced by\n the extract_features function.\n - style_layers: List of layer indices into feats giving the layers to include in the\n style loss.\n - style_targets: List of the same length as style_layers, where style_targets[i] is\n a Tensor giving the Gram matrix of the source style image computed at\n layer style_layers[i].\n - style_weights: List of the same length as style_layers, where style_weights[i]\n is a scalar giving the weight for the style loss at layer style_layers[i].\n \n Returns:\n - style_loss: A Tensor containing the scalar style loss.\n \"\"\"\n # Hint: you can do this with one for loop over the style layers, and should\n # not be short code (~5 lines). You will need to use your gram_matrix function.\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n",
"_____no_output_____"
]
],
[
[
"Test your style loss implementation. The error should be less than 0.001.",
"_____no_output_____"
]
],
[
[
"def style_loss_test(correct):\n style_layers = [0, 3, 5, 6]\n style_weights = [300000, 1000, 15, 3]\n \n c_feats = extract_features(content_img_test, model)\n feats = extract_features(style_img_test, model)\n style_targets = []\n for idx in style_layers:\n style_targets.append(gram_matrix(feats[idx]))\n \n s_loss = style_loss(c_feats, style_layers, style_targets, style_weights)\n error = rel_error(correct, s_loss)\n print('Error is {:.3f}'.format(error))\n\nstyle_loss_test(answers['sl_out'])",
"_____no_output_____"
]
],
[
[
"## Total-variation regularization\nIt turns out that it's helpful to also encourage smoothness in the image. We can do this by adding another term to our loss that penalizes wiggles or \"total variation\" in the pixel values. \n\nYou can compute the \"total variation\" as the sum of the squares of differences in the pixel values for all pairs of pixels that are next to each other (horizontally or vertically). Here we sum the total-variation regualarization for each of the 3 input channels (RGB), and weight the total summed loss by the total variation weight, $w_t$:\n\n$L_{tv} = w_t \\times \\left(\\sum_{c=1}^3\\sum_{i=1}^{H-1}\\sum_{j=1}^{W} (x_{i+1,j,c} - x_{i,j,c})^2 + \\sum_{c=1}^3\\sum_{i=1}^{H}\\sum_{j=1}^{W - 1} (x_{i,j+1,c} - x_{i,j,c})^2\\right)$\n\nIn the next cell, fill in the definition for the TV loss term. To receive full credit, your implementation should not have any loops.",
"_____no_output_____"
]
],
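The loop-free requirement boils down to differencing two shifted slices of the image tensor, once along height and once along width; a minimal sketch of that idea on a random image with arbitrary shape:

```python
import tensorflow as tf

img = tf.random.uniform((1, 4, 5, 3))
w_t = 2e-2
down = tf.reduce_sum(tf.square(img[:, 1:, :, :] - img[:, :-1, :, :]))   # vertical neighbors
right = tf.reduce_sum(tf.square(img[:, :, 1:, :] - img[:, :, :-1, :]))  # horizontal neighbors
print(float(w_t * (down + right)))
```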
[
[
"def tv_loss(img, tv_weight):\n \"\"\"\n Compute total variation loss.\n \n Inputs:\n - img: Tensor of shape (1, H, W, 3) holding an input image.\n - tv_weight: Scalar giving the weight w_t to use for the TV loss.\n \n Returns:\n - loss: Tensor holding a scalar giving the total variation loss\n for img weighted by tv_weight.\n \"\"\"\n # Your implementation should be vectorized and not require any loops!\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n",
"_____no_output_____"
]
],
[
[
"Test your TV loss implementation. Error should be less than 0.001.",
"_____no_output_____"
]
],
[
[
"def tv_loss_test(correct):\n tv_weight = 2e-2\n t_loss = tv_loss(content_img_test, tv_weight)\n error = rel_error(correct, t_loss)\n print('Error is {:.3f}'.format(error))\n\ntv_loss_test(answers['tv_out'])",
"_____no_output_____"
]
],
[
[
"## Style Transfer",
"_____no_output_____"
],
[
"Lets put it all together and make some beautiful images! The `style_transfer` function below combines all the losses you coded up above and optimizes for an image that minimizes the total loss.",
"_____no_output_____"
]
],
[
[
"def style_transfer(content_image, style_image, image_size, style_size, content_layer, content_weight,\n style_layers, style_weights, tv_weight, init_random = False):\n \"\"\"Run style transfer!\n \n Inputs:\n - content_image: filename of content image\n - style_image: filename of style image\n - image_size: size of smallest image dimension (used for content loss and generated image)\n - style_size: size of smallest style image dimension\n - content_layer: layer to use for content loss\n - content_weight: weighting on content loss\n - style_layers: list of layers to use for style loss\n - style_weights: list of weights to use for each layer in style_layers\n - tv_weight: weight of total variation regularization term\n - init_random: initialize the starting image to uniform random noise\n \"\"\"\n # Extract features from the content image\n content_img = preprocess_image(load_image(content_image, size=image_size))\n feats = extract_features(content_img[None], model)\n content_target = feats[content_layer]\n \n # Extract features from the style image\n style_img = preprocess_image(load_image(style_image, size=style_size))\n s_feats = extract_features(style_img[None], model)\n style_targets = []\n # Compute list of TensorFlow Gram matrices\n for idx in style_layers:\n style_targets.append(gram_matrix(s_feats[idx]))\n \n # Set up optimization hyperparameters\n initial_lr = 3.0\n decayed_lr = 0.1\n decay_lr_at = 180\n max_iter = 200\n \n step = tf.Variable(0, trainable=False)\n boundaries = [decay_lr_at]\n values = [initial_lr, decayed_lr]\n learning_rate_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)\n\n # Later, whenever we perform an optimization step, we pass in the step.\n learning_rate = learning_rate_fn(step)\n\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n \n # Initialize the generated image and optimization variables\n \n f, axarr = plt.subplots(1,2)\n axarr[0].axis('off')\n axarr[1].axis('off')\n axarr[0].set_title('Content Source Img.')\n axarr[1].set_title('Style Source Img.')\n axarr[0].imshow(deprocess_image(content_img))\n axarr[1].imshow(deprocess_image(style_img))\n plt.show()\n plt.figure()\n \n # Initialize generated image to content image\n if init_random:\n initializer = tf.random_uniform_initializer(0, 1)\n img = initializer(shape=content_img[None].shape)\n img_var = tf.Variable(img)\n print(\"Intializing randomly.\")\n else:\n img_var = tf.Variable(content_img[None])\n print(\"Initializing with content image.\")\n \n for t in range(max_iter):\n with tf.GradientTape() as tape:\n tape.watch(img_var)\n feats = extract_features(img_var, model)\n # Compute loss\n c_loss = content_loss(content_weight, feats[content_layer], content_target)\n s_loss = style_loss(feats, style_layers, style_targets, style_weights)\n t_loss = tv_loss(img_var, tv_weight)\n loss = c_loss + s_loss + t_loss\n # Compute gradient\n grad = tape.gradient(loss, img_var)\n optimizer.apply_gradients([(grad, img_var)])\n \n img_var.assign(tf.clip_by_value(img_var, -1.5, 1.5))\n \n if t % 100 == 0:\n print('Iteration {}'.format(t))\n plt.imshow(deprocess_image(img_var[0].numpy(), rescale=True))\n plt.axis('off')\n plt.show()\n print('Iteration {}'.format(t)) \n plt.imshow(deprocess_image(img_var[0].numpy(), rescale=True))\n plt.axis('off')\n plt.show()",
"_____no_output_____"
]
],
[
[
"## Generate some pretty pictures!\n\nTry out `style_transfer` on the three different parameter sets below. Make sure to run all three cells. Feel free to add your own, but make sure to include the results of style transfer on the third parameter set (starry night) in your submitted notebook.\n\n* The `content_image` is the filename of content image.\n* The `style_image` is the filename of style image.\n* The `image_size` is the size of smallest image dimension of the content image (used for content loss and generated image).\n* The `style_size` is the size of smallest style image dimension.\n* The `content_layer` specifies which layer to use for content loss.\n* The `content_weight` gives weighting on content loss in the overall loss function. Increasing the value of this parameter will make the final image look more realistic (closer to the original content).\n* `style_layers` specifies a list of which layers to use for style loss. \n* `style_weights` specifies a list of weights to use for each layer in style_layers (each of which will contribute a term to the overall style loss). We generally use higher weights for the earlier style layers because they describe more local/smaller scale features, which are more important to texture than features over larger receptive fields. In general, increasing these weights will make the resulting image look less like the original content and more distorted towards the appearance of the style image.\n* `tv_weight` specifies the weighting of total variation regularization in the overall loss function. Increasing this value makes the resulting image look smoother and less jagged, at the cost of lower fidelity to style and content. \n\nBelow the next three cells of code (in which you shouldn't change the hyperparameters), feel free to copy and paste the parameters to play around them and see how the resulting image changes. ",
"_____no_output_____"
]
],
[
[
"# Composition VII + Tubingen\nparams1 = {\n 'content_image' : 'styles/tubingen.jpg',\n 'style_image' : 'styles/composition_vii.jpg',\n 'image_size' : 192,\n 'style_size' : 512,\n 'content_layer' : 2,\n 'content_weight' : 5e-2, \n 'style_layers' : (0, 3, 5, 6),\n 'style_weights' : (20000, 500, 12, 1),\n 'tv_weight' : 5e-2\n}\n\nstyle_transfer(**params1)",
"_____no_output_____"
],
[
"# Scream + Tubingen\nparams2 = {\n 'content_image':'styles/tubingen.jpg',\n 'style_image':'styles/the_scream.jpg',\n 'image_size':192,\n 'style_size':224,\n 'content_layer':2,\n 'content_weight':3e-2,\n 'style_layers':[0, 3, 5, 6],\n 'style_weights':[200000, 800, 12, 1],\n 'tv_weight':2e-2\n}\n\nstyle_transfer(**params2)",
"_____no_output_____"
],
[
"# Starry Night + Tubingen\nparams3 = {\n 'content_image' : 'styles/tubingen.jpg',\n 'style_image' : 'styles/starry_night.jpg',\n 'image_size' : 192,\n 'style_size' : 192,\n 'content_layer' : 2,\n 'content_weight' : 6e-2,\n 'style_layers' : [0, 3, 5, 6],\n 'style_weights' : [300000, 1000, 15, 3],\n 'tv_weight' : 2e-2\n}\n\nstyle_transfer(**params3)",
"_____no_output_____"
]
],
[
[
"## Feature Inversion\n\nThe code you've written can do another cool thing. In an attempt to understand the types of features that convolutional networks learn to recognize, a recent paper [1] attempts to reconstruct an image from its feature representation. We can easily implement this idea using image gradients from the pretrained network, which is exactly what we did above (but with two different feature representations).\n\nNow, if you set the style weights to all be 0 and initialize the starting image to random noise instead of the content source image, you'll reconstruct an image from the feature representation of the content source image. You're starting with total noise, but you should end up with something that looks quite a bit like your original image.\n\n(Similarly, you could do \"texture synthesis\" from scratch if you set the content weight to 0 and initialize the starting image to random noise, but we won't ask you to do that here.) \n\nRun the following cell to try out feature inversion.\n\n[1] Aravindh Mahendran, Andrea Vedaldi, \"Understanding Deep Image Representations by Inverting them\", CVPR 2015\n",
"_____no_output_____"
]
],
[
[
"# Feature Inversion -- Starry Night + Tubingen\nparams_inv = {\n 'content_image' : 'styles/tubingen.jpg',\n 'style_image' : 'styles/starry_night.jpg',\n 'image_size' : 192,\n 'style_size' : 192,\n 'content_layer' : 2,\n 'content_weight' : 6e-2,\n 'style_layers' : [0, 3, 5, 6],\n 'style_weights' : [0, 0, 0, 0], # we discard any contributions from style to the loss\n 'tv_weight' : 2e-2,\n 'init_random': True # we want to initialize our image to be random\n}\n\nstyle_transfer(**params_inv)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
c5289fe3c5edb8187fbdcd301cec0dde021f1718
| 247,387 |
ipynb
|
Jupyter Notebook
|
QuasarValue.ipynb
|
fjaviersanchez/OSULymanAlpha
|
d3a62266246553eb370e40934f5f6167ba0d70eb
|
[
"MIT"
] | null | null | null |
QuasarValue.ipynb
|
fjaviersanchez/OSULymanAlpha
|
d3a62266246553eb370e40934f5f6167ba0d70eb
|
[
"MIT"
] | null | null | null |
QuasarValue.ipynb
|
fjaviersanchez/OSULymanAlpha
|
d3a62266246553eb370e40934f5f6167ba0d70eb
|
[
"MIT"
] | null | null | null | 444.940647 | 134,728 | 0.922114 |
[
[
[
"# Lya Quasar Weighting",
"_____no_output_____"
]
],
[
[
"%pylab inline",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"import astropy.table",
"_____no_output_____"
]
],
[
[
"## Quasar Value",
"_____no_output_____"
],
[
"Read quasar values $V(r, z)$ tabulated on a grid of r-mag and redshift. For details see [here](https://desi.lbl.gov/trac/wiki/ValueQSO):",
"_____no_output_____"
]
],
[
[
"def load_weights():\n\n table = astropy.table.Table.read('quasarvalue.txt', format='ascii')\n z_col, r_col, w_col = table.columns[0], table.columns[1], table.columns[2]\n\n z_vec = np.unique(z_col)\n z_edges = np.linspace(2.025, 4.025, len(z_vec) + 1)\n assert np.allclose(z_vec, 0.5 * (z_edges[1:] + z_edges[:-1]))\n\n r_vec = np.unique(r_col)\n r_edges = np.linspace(18.05, 23.05, len(r_vec) + 1)\n assert np.allclose(r_vec, 0.5 * (r_edges[1:] + r_edges[:-1]))\n \n W = np.empty((len(r_vec), len(z_vec)))\n k = 0\n for j in range(len(z_vec)):\n for i in range(len(r_vec))[::-1]:\n assert r_col[k] == r_vec[i]\n assert z_col[k] == z_vec[j]\n W[i, j] = w_col[k]\n k += 1\n return W, r_edges, r_vec, z_edges, z_vec\n \nW, r_edges, r_vec, z_edges, z_vec = load_weights()",
"_____no_output_____"
],
[
"def plot_weights(W=W):\n plt.pcolormesh(r_edges, z_edges, W.T, cmap='magma')\n plt.colorbar().set_label('QSO Value [arb. units]')\n plt.contour(r_vec, z_vec, W.T, colors='w', alpha=0.5)\n plt.xlabel('QSO r-band magnitude')\n plt.ylabel('QSO redshift')\n\nplot_weights()",
"_____no_output_____"
]
],
[
[
"## Quasar Luminosity Function Prior",
"_____no_output_____"
],
[
"Copied from [this notebook](https://github.com/dkirkby/ArgonneLymanAlpha/blob/master/notebooks/SampleProperties.ipynb):",
"_____no_output_____"
]
],
[
[
"table2015a = np.array([\n 15.75, 30, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 16.25, 60, 8, 4, 5, 5, 4, 4, 4, 4, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 16.75, 117, 29, 17, 19, 18, 17, 16, 16, 15, 12, 8, 6, 3, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 17.25, 216, 101, 62, 70, 69, 64, 61, 62, 59, 45, 32, 22, 13, 7, 4, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 17.75, 358, 312, 224, 255, 253, 235, 227, 231, 224, 171, 121, 82, 47, 25, 13, 7, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 18.25, 525, 788, 722, 855, 869, 819, 803, 824, 811, 630, 452, 309, 171, 88, 46, 22, 9, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 18.75, 703, 1563, 1890, 2393, 2544, 2493, 2507, 2612, 2622, 2112, 1572, 1096, 603, 309, 157, 76, 28, 8, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 19.25, 898, 2490, 3740, 5086, 5758, 5971, 6214, 6580, 6745, 5779, 4613, 3369, 1913, 1004, 516, 249, 93, 26, 10, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 19.75, 1125, 3445, 5827, 8319, 9913, 10805, 11590, 12422, 12937, 11839, 10261, 8011, 4902, 2771, 1499, 753, 289, 78, 31, 12, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n 20.25, 1399, 4456, 7930, 11585, 14183, 15895, 17350, 18718, 19660, 18783, 17275, 14270, 9517, 5936, 3513, 1919, 804, 228, 91, 34, 10, 2, 1, 0, 0, 0, 0, 0, 0, 0,\n 20.75, 1734, 5616, 10195, 15029, 18589, 21065, 23176, 25094, 26479, 25795, 24410, 20801, 14695, 9899, 6391, 3856, 1851, 599, 248, 94, 27, 6, 1, 1, 0, 0, 0, 0, 0, 0,\n 21.25, 2141, 7016, 12842, 18997, 23563, 26793, 29584, 32124, 34027, 33395, 31948, 27600, 20026, 14047, 9572, 6217, 3399, 1325, 598, 241, 73, 17, 4, 1, 1, 1, 0, 0, 0, 0,\n 21.75, 2631, 8738, 16067, 23807, 29528, 33591, 37170, 40481, 43047, 42378, 40701, 35383, 25928, 18498, 12931, 8731, 5198, 2395, 1211, 541, 182, 45, 10, 3, 2, 2, 1, 1, 0, 0,\n 22.25, 3211, 10871, 20058, 29754, 36864, 41912, 46457, 50760, 54210, 53457, 51424, 44870, 32982, 23683, 16738, 11500, 7140, 3651, 2036, 1022, 394, 110, 25, 8, 6, 4, 2, 2, 1, 1,\n 22.75, 3875, 13520, 25026, 37157, 45968, 52212, 57971, 63564, 68202, 67344, 64840, 56742, 41732, 30041, 21339, 14774, 9334, 5003, 2969, 1636, 730, 239, 60, 21, 13, 8, 5, 3, 2, 1,\n 23.25, 4591, 16812, 31220, 46395, 57302, 65011, 72306, 79586, 85821, 84853, 81750, 71739, 52744, 38010, 27078, 18823, 11969, 6500, 3980, 2322, 1159, 450, 133, 48, 30, 19, 12, 7, 5, 3,\n 23.75, 5270, 20905, 38950, 57934, 71426, 80937, 90180, 99667, 108052, 106983, 103130, 90753, 66677, 48080, 34331, 23929, 15247, 8253, 5120, 3076, 1645, 733, 256, 102, 63, 39, 24, 15, 9, 6,\n 24.25, 5713, 25993, 48598, 72353, 89037, 100762, 112480, 124858, 136130, 134989, 130200, 114903, 84351, 60850, 43543, 30420, 19392, 10379, 6468, 3939, 2184, 1061, 432, 192, 119, 73, 45, 27, 17, 10,\n 24.75, 5464, 32318, 60643, 90374, 110997, 125444, 140309, 156474, 171625, 170467, 164509, 145614, 106798, 77072, 55275, 38702, 24667, 13006, 8110, 4969, 2803, 1428, 648, 317, 199, 123, 76, 46, 28, 17\n ]).reshape(19, 31)",
"_____no_output_____"
],
[
"table2015b = np.array([\n 15.75, 23, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 16.25, 49, 4, 2, 2, 2, 2, 2, 2, 2, 1, 1, 16, 10, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 16.75, 99, 16, 8, 10, 10, 9, 9, 8, 8, 5, 3, 40, 25, 12, 6, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 17.25, 190, 69, 38, 44, 45, 41, 39, 39, 36, 24, 15, 104, 65, 32, 15, 7, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 17.75, 326, 248, 165, 196, 199, 185, 177, 176, 163, 113, 69, 268, 167, 82, 39, 17, 6, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 18.25, 488, 699, 628, 775, 805, 763, 744, 751, 709, 501, 314, 679, 422, 211, 102, 46, 16, 5, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 18.75, 664, 1453, 1808, 2389, 2615, 2602, 2624, 2702, 2629, 1968, 1308, 1650, 1027, 532, 262, 119, 42, 12, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 19.25, 866, 2337, 3638, 5131, 5991, 6356, 6674, 7031, 7076, 5840, 4349, 3696, 2334, 1283, 657, 307, 111, 30, 11, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 19.75, 1113, 3252, 5609, 8203, 9995, 11093, 11997, 12825, 13218, 11932, 10033, 7168, 4740, 2850, 1566, 769, 288, 77, 27, 9, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 20.25, 1423, 4274, 7646, 11336, 14053, 15897, 17425, 18758, 19553, 18488, 16711, 11689, 8346, 5597, 3399, 1817, 723, 195, 70, 23, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0 ,\n 20.75, 1814, 5517, 9994, 14889, 18542, 21093, 23243, 25116, 26334, 25315, 23523, 16660, 12773, 9487, 6456, 3873, 1709, 482, 177, 58, 15, 3, 1, 0, 0, 0, 0, 0, 0, 0 ,\n 21.25, 2303, 7084, 12906, 19262, 23994, 27320, 30181, 32721, 34466, 33311, 31222, 21941, 17567, 14054, 10566, 7166, 3647, 1143, 436, 148, 38, 7, 1, 0, 0, 0, 0, 0, 0, 0 ,\n 21.75, 2911, 9082, 16610, 24818, 30879, 35139, 38892, 42310, 44774, 43370, 40764, 27761, 22625, 18918, 15230, 11423, 6772, 2503, 1034, 367, 96, 19, 3, 1, 0, 0, 0, 0, 0, 0 ,\n 22.25, 3652, 11639, 21358, 31945, 39682, 45108, 50015, 54614, 58082, 56344, 53020, 34486, 28144, 24081, 20162, 16146, 10868, 4879, 2277, 876, 241, 49, 9, 2, 1, 1, 0, 0, 0, 0 ,\n 22.75, 4527, 14913, 27461, 41112, 50977, 57875, 64289, 70486, 75363, 73207, 68942, 42521, 34433, 29793, 25456, 21136, 15480, 8290, 4489, 1955, 585, 122, 22, 5, 3, 1, 1, 0, 0, 0 ,\n 23.25, 5504, 19106, 35311, 52915, 65485, 74247, 82634, 90995, 97849, 95188, 89701, 52304, 41818, 36375, 31400, 26555, 20387, 12378, 7728, 3931, 1345, 302, 55, 14, 7, 3, 2, 1, 0, 0,\n 23.75, 6479, 24477, 45409, 68119, 84128, 95249, 106225, 117520, 127141, 123880, 116812, 64335, 50637, 44159, 38330, 32717, 25709, 16775, 11668, 6924, 2830, 719, 137, 35, 18, 9, 4, 2, 1, 0 ,\n 24.25, 7195, 31358, 58404, 87705, 108088, 122196, 136567, 151843, 165336, 161372, 152261, 79215, 61261, 53494, 46590, 39969, 31740, 21404, 15927, 10682, 5278, 1609, 337, 89, 45, 22, 11, 5, 3, 1,\n 24.75, 7043, 40171, 75127, 112945, 138885, 156770, 175600, 196278, 215178, 210413, 198658, 97685, 74113, 64767, 56549, 48670, 38821, 26430, 20396, 14815, 8615, 3269, 793, 220, 112, 56, 28, 13, 6, 3\n ]).reshape(19, 31)",
"_____no_output_____"
],
[
"def bin_index(bin_centers, low_edge):\n \"\"\"Find the index of the bin with the specified low edge, where bins is an array of equally-spaced bin centers.\n \"\"\"\n delta = bin_centers[1] - bin_centers[0]\n min_value = bin_centers[0] - 0.5 * delta\n index = int(round((low_edge - min_value) / delta))\n if abs((low_edge - min_value) / delta - index) > 1e-5:\n raise ValueError('low_edge = {} is not aligned with specified bins.'.format(low_edge))\n return index",
"_____no_output_____"
],
[
"def luminosity_function(data, z_max=6.0, area_sq_deg=10000.):\n \"\"\"Transform a data array from Nathalie into a tuple gbin, zbin, nqso.\n \"\"\"\n ng, nz = data.shape\n # g-band magnitude bin centers are in the first column.\n gbin = data[:, 0]\n nz = nz - 1\n # Check that g-band bins are equally spaced.\n assert np.allclose(np.diff(gbin), gbin[1] - gbin[0])\n # redshift bins are equally spaced from 0 up to z_max.\n zbin = z_max * (0.5 + np.arange(nz)) / nz\n # The remaining columns give predicted numbers of QSO in a 10,000 sq.deg. sample.\n # Normalize to densisities per sq.deg.\n nqso = data[:, 1:].reshape((ng, nz)) / area_sq_deg\n return gbin, zbin, nqso",
"_____no_output_____"
],
[
"lumi_table = luminosity_function(0.5 * (table2015a + table2015b))",
"_____no_output_____"
],
[
"def lumi_plot(magbin, zbin, nqso, mag_min=18, mag_max=23, z_min=1, z_max=4):\n z_min_cut = bin_index(zbin, z_min)\n z_max_cut = bin_index(zbin, z_max)\n mag_min_cut = bin_index(magbin, mag_min)\n mag_max_cut = bin_index(magbin, mag_max)\n #\n plt.figure(figsize=(8,5))\n plt.imshow(nqso[mag_min_cut:mag_max_cut, z_min_cut:z_max_cut].T,\n origin='lower', interpolation='bicubic', cmap='magma',\n aspect='auto', extent=(mag_min, mag_max, z_min, z_max))\n plt.ylim(z_min, z_max)\n plt.xlim(mag_min, mag_max)\n plt.ylabel('QSO redshift')\n plt.xlabel('QSO g~r magnitude')\n plt.colorbar().set_label(\n 'N(z) / sq.deg. / $(\\Delta z = {:.1f})$ / $(\\Delta g = {:.1f})$'\n .format(zbin[1] - zbin[0], magbin[1] - magbin[0]))\n plt.contour(r_vec, z_vec, W.T, colors='w', alpha=0.5)\n #plt.grid(c='w')",
"_____no_output_____"
],
[
"lumi_plot(*lumi_table)",
"_____no_output_____"
]
],
[
[
"## Binning",
"_____no_output_____"
],
[
"Rebin weights to the redshift ranges of neural network output categories:",
"_____no_output_____"
],
[
"Given probability density $p(z|T)$ for a target $T$ to be a quasar at redshift $z$, calculate the \"value\" of re-observing $T$ as:\n$$\nt(r, z) = \\int dz\\, p(z|T, r) V(r, z)\n$$\nWrite:\n$$\np(z|T, r) = \\frac{P(T|z) P(z, r)}{P(T)}\n$$",
"_____no_output_____"
]
],
[
[
"def rebin_weights(new_z_edges=[2.0, 2.5, 3.0, 4.0]):\n n_z = len(new_z_edges) - 1\n W2 = np.empty((len(r_vec), n_z))\n for i in range(len(r_vec)):\n W2[i] = np.histogram(z_vec, bins=new_z_edges, weights=W[i])[0]\n return W2, new_z_edges\n \nW2, new_z_edges = rebin_weights()",
"_____no_output_____"
],
[
"def plot_reweights():\n plt.pcolormesh(r_edges, new_z_edges, W2.T, cmap='magma')\n plt.colorbar().set_label('QSO Value [arb. units]')\n plt.contour(r_vec, z_vec, W.T, colors='w', alpha=1)\n plt.xlabel('QSO r-band magnitude')\n plt.ylabel('QSO redshift')\n\nplot_reweights()",
"_____no_output_____"
]
],
[
[
"Calculate overall values for a quasar target based on its absolute probabilities of being a quasar in different redshift bins:",
"_____no_output_____"
]
],
[
[
"def get_values(r_mag, z_prob):\n \n r_mag = np.asarray(r_mag)\n z_prob = np.asarray(z_prob)\n \n assert np.all((r_edges[0] <= r_mag) & (r_mag < r_edges[-1]))\n assert z_prob.shape[-1] == len(new_z_edges) - 1\n assert z_prob.shape[:-1] == r_mag.shape\n assert np.all(z_prob.sum(axis=-1) <= 1.)\n\n r_index = np.digitize(r_mag, r_edges)\n \n return (W2[r_index] * z_prob).sum(axis=-1)",
"_____no_output_____"
],
[
"get_values([19, 19, 19], [[1,0,0],[0,1,0],[0,0,1]])",
"_____no_output_____"
]
],
[
[
"## Neural Network Results",
"_____no_output_____"
]
],
[
[
"def get_nn():\n t = astropy.table.Table.read('quassifier_results_dense_prob.fits', hdu=1)\n print t.colnames\n print t['Prob'].shape\n return t\n \nnn = get_nn()",
"['TARGETID', 'MAG', 'Prob']\n(4000, 5)\n"
],
[
"def plot_nn(i=8, save=None):\n x = np.arange(5)\n bins = np.linspace(-0.5, 4.5, 6)\n plt.hist(x, bins=bins, weights=nn['Prob'][i], histtype='stepfilled', color='r')\n plt.gca().get_yaxis().set_ticks([])\n plt.ylabel('Relative Probability')\n plt.gca().set_xticklabels(['', 'ELG', 'Galaxy', 'LyaQSO', 'TracerQSO', 'Star'])\n plt.xlim(bins[0], bins[-1])\n plt.tight_layout()\n if save:\n plt.savefig(save)",
"_____no_output_____"
],
[
"plot_nn(0, 'lyaprob.pdf')",
"_____no_output_____"
],
[
"plot_nn(1, 'bgprob.pdf')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c528a11a6af74aaa785d603208895dd68bfb0ef0
| 149,987 |
ipynb
|
Jupyter Notebook
|
fit_gender.ipynb
|
yiori-s/37880905
|
a2da9a21106ddb54ec2c5b5af2343139789ebf91
|
[
"MIT"
] | 13 |
2015-03-27T08:07:47.000Z
|
2018-11-13T02:28:08.000Z
|
fit_gender.ipynb
|
yiori-s/37880905
|
a2da9a21106ddb54ec2c5b5af2343139789ebf91
|
[
"MIT"
] | null | null | null |
fit_gender.ipynb
|
yiori-s/37880905
|
a2da9a21106ddb54ec2c5b5af2343139789ebf91
|
[
"MIT"
] | 2 |
2015-03-28T09:28:56.000Z
|
2018-04-13T17:42:18.000Z
| 37.619012 | 182 | 0.413769 |
[
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nfrom sklearn.cross_validation import train_test_split, cross_val_score, KFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn.grid_search import GridSearchCV\nfrom IPython.display import Image\npd.set_option('chained_assignment', None)\nplt.style.use('ggplot')\nplt.rc('xtick.major', size=0)\nplt.rc('ytick.major', size=0)",
"_____no_output_____"
],
[
"user_tags = pd.read_csv(\"user_tags_merge.csv\")",
"_____no_output_____"
],
[
"user_tags",
"_____no_output_____"
],
[
"X = user_tags[['nail', 'person', 'sport', 'food','hair', 'wedding']]\nX['mix'] = X['hair'] + X['nail'] + X['wedding']\nX.tail()",
"_____no_output_____"
],
[
"y = user_tags['gender_male']",
"_____no_output_____"
],
[
"X=X.drop(['nail', 'sport','hair', 'wedding','mix'], axis=1)",
"_____no_output_____"
],
[
"X.tail()",
"_____no_output_____"
],
[
"np.random.seed = 0\n\nxmin, xmax = -2, 12\nymin, ymax = -2, 17\n\nindex_male = y[y==1].index\nindex_female = y[y==0].index\n\nfig, ax = plt.subplots()\ncm = plt.cm.RdBu\ncm_bright = ListedColormap(['#FF0000', '#0000FF'])\nsc = ax.scatter(X.loc[index_male, 'food'],\n X.loc[index_male, 'person']+(np.random.rand(len(index_male))-0.5)*0.1,\n color='b', label='male', alpha=0.3)\nsc = ax.scatter(X.loc[index_female, 'food'],\n X.loc[index_female, 'person']+(np.random.rand(len(index_female))-0.5)*0.1,\n color='r', label='female', alpha=0.3)\nax.set_xlabel('food')\nax.set_ylabel('person')\nax.set_xlim(xmin, xmax)\nax.set_ylim(ymin, ymax)\nax.legend(bbox_to_anchor=(1.4, 1.03))\nplt.show()",
"_____no_output_____"
],
[
"X = user_tags[['nail', 'person', 'sport', 'food','coffee','cake','beer','sky']]\ny = user_tags[\"gender_male\"]",
"_____no_output_____"
],
[
"clf = LogisticRegression()\ndef cross_val(clf, X, y, K, random_state=0):\n cv = KFold(len(y), K, shuffle=True, random_state=random_state)\n scores = cross_val_score(clf, X, y, cv=cv)\n return scores\nscores = cross_val(clf, X, y, 6)\nprint('Scores:', scores)\nprint('Mean Score: {0:.3f} (+/-{1:.3f})'.format(scores.mean(), scores.std()*2))",
"Scores: [ 0.3 0.6 0.6 0.2 0.7 0.9]\nMean Score: 0.550 (+/-0.473)\n"
],
[
"clf = DecisionTreeClassifier(criterion='entropy', max_depth=2, min_samples_leaf=2)\nscores = cross_val(clf, X, y, 5)\nprint('Scores:', scores)\nprint('Mean Score: {0:.3f} (+/-{1:.3f})'.format(scores.mean(), scores.std()*2))",
"Scores: [ 0.33333333 0.5 0.58333333 0.25 0.66666667]\nMean Score: 0.467 (+/-0.309)\n"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)",
"_____no_output_____"
],
[
"tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],\n 'C': [1, 10, 100, 1000]},\n {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]",
"_____no_output_____"
],
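[
"# The tuned_parameters grid above is defined but never searched in the\n# original notebook. A minimal sketch of how it could be used with\n# GridSearchCV (cv=3 is an arbitrary choice, not taken from the original\n# analysis):\ngrid = GridSearchCV(SVC(), tuned_parameters, cv=3)\ngrid.fit(X_train, y_train)\nprint(grid.best_params_)\nprint(grid.best_score_)",
"_____no_output_____"
],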
[
"clf = SVC(kernel='rbf', C=100)\nX = user_tags[['nail', 'person', 'sport', 'food','coffee','wedding','cake','beer']]\ny = user_tags[\"gender_male\"]\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.5, random_state=1)\nclf.fit(X_train, y_train)",
"_____no_output_____"
],
[
"clf.predict(X_val)",
"_____no_output_____"
],
[
"y_val\nclf.score(X_val, y_val) ",
"_____no_output_____"
],
[
"XX = user_tags\nXX.tail()",
"_____no_output_____"
],
[
"XX=XX.drop(['user_id', 'user_name','gender_male'], axis=1)",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"XX_train, XX_val, y_train, y_val = train_test_split(XX, y, train_size=0.5, random_state=1)",
"_____no_output_____"
],
[
"clf = LogisticRegression()\ndef cross_val(clf, XX, y, K, random_state=0):\n cv = KFold(len(y), K, shuffle=True, random_state=random_state)\n scores = cross_val_score(clf, XX, y, cv=cv)\n return scores\nscores = cross_val(clf, XX, y, 3)\nprint('Scores:', scores)\nprint('Mean Score: {0:.3f} (+/-{1:.3f})'.format(scores.mean(), scores.std()*2))",
"Scores: [ 0.55 0.7 0.8 ]\nMean Score: 0.683 (+/-0.205)\n"
],
[
"X = user_tags[['nail','hair', 'person', 'sport', 'food','night','coffee','wedding','cake','beer', 'dog', 'animal', 'tree','blossom','cat', 'flower','sky','nature','cherry']]\ny = user_tags[\"gender_male\"]",
"_____no_output_____"
],
[
"X['animal']=X['animal']+X['dog']+X['cat']\nX['cosme']=X['hair']+X['nail']\nX['nature']=X['nature']+X['sky']+X['flower']+X['tree']+X['blossom']+X['cherry']\nX = X.drop(['nail','hair', 'dog', 'cat', 'sky','flower','tree','blossom','cherry'],axis=1)",
"_____no_output_____"
],
[
"X.tail()",
"_____no_output_____"
],
[
"clf = LogisticRegression()\ndef cross_val(clf, X, y, K, random_state=0):\n cv = KFold(len(y), K, shuffle=True, random_state=random_state)\n scores = cross_val_score(clf, X, y, cv=cv)\n return scores\nfor i in range(2,12):\n scores = cross_val(clf, X, y, i)\n print(i)\n print('Scores:', scores)\n print('Mean Score: {0:.3f} (+/-{1:.3f})'.format(scores.mean(), scores.std()*2))",
"2\nScores: [ 0.6 0.5]\nMean Score: 0.550 (+/-0.100)\n3\nScores: [ 0.6 0.65 0.75]\nMean Score: 0.667 (+/-0.125)\n4\nScores: [ 0.66666667 0.6 0.33333333 0.8 ]\nMean Score: 0.600 (+/-0.340)\n5\nScores: [ 0.58333333 0.58333333 0.58333333 0.41666667 0.83333333]\nMean Score: 0.600 (+/-0.267)\n6\nScores: [ 0.5 0.5 0.6 0.2 0.8 0.8]\nMean Score: 0.567 (+/-0.411)\n7\nScores: [ 0.55555556 0.66666667 0.33333333 0.66666667 0.875 0.75 0.75 ]\nMean Score: 0.657 (+/-0.321)\n8\nScores: [ 0.625 0.625 0.5 0.625 0.57142857 0.85714286\n 0.71428571 0.71428571]\nMean Score: 0.654 (+/-0.202)\n9\nScores: [ 0.57142857 0.71428571 0.42857143 0.57142857 0.57142857 0.71428571\n 0.66666667 1. 0.66666667]\nMean Score: 0.656 (+/-0.297)\n10\nScores: [ 0.5 0.66666667 0.83333333 0.33333333 0.66666667 0.33333333\n 0.83333333 0.66666667 1. 0.66666667]\nMean Score: 0.650 (+/-0.407)\n11\nScores: [ 0.5 0.66666667 0.83333333 0.33333333 0.66666667 0.4 0.6\n 1. 0.6 0.8 0.8 ]\nMean Score: 0.655 (+/-0.377)\n"
],
[
"clf = SVC(kernel='rbf', C=1000)\nfor i in range(2,12):\n scores = cross_val(clf, X, y, i)\n print(i)\n print('Scores:', scores)\n print('Mean Score: {0:.3f} (+/-{1:.3f})'.format(scores.mean(), scores.std()*2))",
"2\nScores: [ 0.46666667 0.46666667]\nMean Score: 0.467 (+/-0.000)\n3\nScores: [ 0.45 0.5 0.55]\nMean Score: 0.500 (+/-0.082)\n4\nScores: [ 0.4 0.4 0.26666667 0.53333333]\nMean Score: 0.400 (+/-0.189)\n5\nScores: [ 0.25 0.58333333 0.41666667 0.41666667 0.75 ]\nMean Score: 0.483 (+/-0.340)\n6\nScores: [ 0.2 0.6 0.5 0.5 0.7 0.7]\nMean Score: 0.533 (+/-0.340)\n7\nScores: [ 0.22222222 0.55555556 0.44444444 0.55555556 0.625 0.5 0.625 ]\nMean Score: 0.504 (+/-0.259)\n8\nScores: [ 0.25 0.5 0.5 0.5 0.57142857 0.71428571\n 0.57142857 0.57142857]\nMean Score: 0.522 (+/-0.245)\n9\nScores: [ 0.28571429 0.42857143 0.42857143 0.28571429 0.57142857 0.42857143\n 0.5 0.83333333 0.5 ]\nMean Score: 0.474 (+/-0.311)\n10\nScores: [ 0.16666667 0.33333333 0.66666667 0.5 0.33333333 0.5 0.5\n 0.5 0.83333333 0.5 ]\nMean Score: 0.483 (+/-0.348)\n11\nScores: [ 0.16666667 0.33333333 0.66666667 0.5 0.33333333 0.6 0.4\n 0.8 0.6 0.6 0.6 ]\nMean Score: 0.509 (+/-0.348)\n"
],
[
"clf = DecisionTreeClassifier(criterion='entropy', max_depth=5, min_samples_leaf=2)\nfor i in range(2,12):\n scores = cross_val(clf, X, y, i)\n print(i)\n print('Scores:', scores)\n print('Mean Score: {0:.3f} (+/-{1:.3f})'.format(scores.mean(), scores.std()*2))",
"2\nScores: [ 0.6 0.4]\nMean Score: 0.500 (+/-0.200)\n3\nScores: [ 0.5 0.45 0.4 ]\nMean Score: 0.450 (+/-0.082)\n4\nScores: [ 0.46666667 0.46666667 0.33333333 0.53333333]\nMean Score: 0.450 (+/-0.145)\n5\nScores: [ 0.33333333 0.41666667 0.58333333 0.41666667 0.58333333]\nMean Score: 0.467 (+/-0.200)\n6\nScores: [ 0.4 0.6 0.4 0.4 0.7 0.6]\nMean Score: 0.517 (+/-0.243)\n7\nScores: [ 0.44444444 0.55555556 0.44444444 0.55555556 0.25 0.75 0.625 ]\nMean Score: 0.518 (+/-0.293)\n8\nScores: [ 0.5 0.625 0.25 0.625 0.85714286 0.42857143\n 0.85714286 0.57142857]\nMean Score: 0.589 (+/-0.384)\n9\nScores: [ 0.42857143 0.71428571 0.28571429 0.57142857 0.71428571 0.28571429\n 0.83333333 0.83333333 0.66666667]\nMean Score: 0.593 (+/-0.404)\n10\nScores: [ 0.33333333 0.66666667 0.83333333 0.33333333 0.83333333 0.5 0.5\n 0.83333333 0.5 0.66666667]\nMean Score: 0.600 (+/-0.371)\n11\nScores: [ 0.33333333 0.66666667 0.83333333 0.33333333 0.83333333 0.8 0.6\n 0.4 0.8 0.2 0.8 ]\nMean Score: 0.600 (+/-0.457)\n"
],
[
"from sklearn.externals import joblib",
"_____no_output_____"
],
[
"clf = LogisticRegression()\nclf.fit(X,y)",
"_____no_output_____"
],
[
"joblib.dump(clf, 'clf.pkl') ",
"_____no_output_____"
],
[
"X.tail()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c528a7085104630384044c5ee42574b38164cf8e
| 831,839 |
ipynb
|
Jupyter Notebook
|
python/Polygon Worlds Testing.ipynb
|
pkicki/bench-mr
|
db22f75062aff47cf0800c8a2189db1f674a93cc
|
[
"Apache-2.0",
"MIT"
] | 41 |
2021-02-10T08:40:34.000Z
|
2022-03-20T18:33:20.000Z
|
python/Polygon Worlds Testing.ipynb
|
pkicki/bench-mr
|
db22f75062aff47cf0800c8a2189db1f674a93cc
|
[
"Apache-2.0",
"MIT"
] | 5 |
2021-06-15T14:27:53.000Z
|
2022-02-20T08:16:55.000Z
|
python/Polygon Worlds Testing.ipynb
|
pkicki/bench-mr
|
db22f75062aff47cf0800c8a2189db1f674a93cc
|
[
"Apache-2.0",
"MIT"
] | 23 |
2021-04-07T08:09:49.000Z
|
2022-03-20T18:33:53.000Z
| 1,751.24 | 480,004 | 0.958466 |
[
[
[
"## Polygon Environment Building\nDevising scenarios for the polygon-based environments.",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\nfrom mpb import MPB, MultipleMPB\nfrom plot_stats import plot_planner_stats, plot_smoother_stats\nfrom utils import latexify\nfrom table import latex_table\nfrom definitions import *\nimport matplotlib as mpl\nimport sys, os\nmpl.rcParams['mathtext.fontset'] = 'cm'\n# make sure to not use Level-3 fonts\nmpl.rcParams['pdf.fonttype'] = 42\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy\n%config InlineBackend.figure_format='retina'",
"_____no_output_____"
]
],
[
[
"### Polygon Environments",
"_____no_output_____"
]
],
[
[
"def visualize(scenario: str, start: {str: float}, goal: {str: float}, robot_model: str = None):\n m = MPB()\n m[\"max_planning_time\"] = 60\n m[\"env.start\"] = start\n m[\"env.goal\"] = goal\n m[\"env.type\"] = \"polygon\"\n m[\"env.polygon.source\"] = \"polygon_mazes/%s.svg\" % scenario\n if robot_model:\n print(\"Using robot model %s.\" % robot_model)\n m[\"env.collision.robot_shape_source\"] = robot_model\n m.set_planners(['informed_rrt_star'])\n m.set_planners(['bfmt'])\n m[\"steer.car_turning_radius\"] = 2\n# m.set_planners([\"sbpl_mha\"])\n m[\"sbpl.scaling\"] = 1\n if m.run(id=\"test_%s\" % scenario, runs=1) == 0:\n m.visualize_trajectories(draw_start_goal_thetas=True, plot_every_nth_polygon=10, silence=True, save_file=\"plots/%s.pdf\" % scenario)\n m.print_info()\n \n# visualize(\"parking2\",\n# {\"theta\": -1.57, \"x\": 12.3, \"y\": -2.73},\n# {\"theta\": 0, \"x\": 2.5, \"y\": -7.27})\n# visualize(\"parking2\",\n# {\"theta\": -1.57, \"x\": 12.3, \"y\": -2.73},\n# {\"theta\": 3.14, \"x\": 2.5, \"y\": -7.27})",
"_____no_output_____"
],
[
"scenarios = [\n (\"parking1\", {\"theta\": 0, \"x\": 2, \"y\": -7.27}, {\"theta\": -1.58, \"x\": 9, \"y\": -11.72}),\n (\"parking2\", {\"theta\": 0, \"x\": 2.5, \"y\": -7.27}, {\"theta\": -1.57, \"x\": 12, \"y\": -3}),\n (\"parking3\", {\"theta\": 0, \"x\": 3.82, \"y\": -13}, {\"theta\": 0, \"x\": 29, \"y\": -15.5}),\n (\"warehouse\", {\"theta\": -1.58, \"x\": 7.5, \"y\": -10}, {\"theta\": 1.58, \"x\": 76.5, \"y\": -10}, \"polygon_mazes/warehouse_robot.svg\"),\n (\"warehouse2\", {\"theta\": -1.58, \"x\": 7.5, \"y\": -10}, {\"theta\": -1.58, \"x\": 116, \"y\": -70}, \"polygon_mazes/warehouse_robot.svg\")\n]\n\nlist(map(lambda x: visualize(*x), scenarios));",
"Running MPB with ID test_parking1 (log file at test_parking1.log)...\n"
]
],
[
[
"# Figurehead\nFigure 1 to showcase Bench-MR.",
"_____no_output_____"
]
],
[
[
"m = MPB()\nscenario = \"warehouse\"\nm[\"max_planning_time\"] = 30\nm[\"env.start\"] = {\"theta\": -1.58, \"x\": 7.5, \"y\": -10}\nm[\"env.goal\"] = {\"theta\": 1.58, \"x\": 76.5, \"y\": -10}\nm[\"env.type\"] = \"polygon\"\nm[\"env.polygon.source\"] = \"polygon_mazes/%s.svg\" % scenario\nm[\"env.collision.robot_shape_source\"] = \"polygon_mazes/warehouse_robot.svg\"\nm.set_planners([])\nm.set_planners(['bfmt', 'cforest', 'prm', 'prm_star', 'informed_rrt_star', 'sbpl_mha'])\nm[\"steer.car_turning_radius\"] = 2\nm[\"sbpl.scaling\"] = 1\nm.run(id=\"test_%s\" % scenario, runs=1) \nm.print_info()",
"Running MPB with ID test_warehouse (log file at test_warehouse.log)...\n"
],
[
"m.visualize_trajectories(ignore_planners='cforest, bfmt',\n draw_start_goal_thetas=True,\n plot_every_nth_polygon=8,\n fig_width=8,\n fig_height=8,\n silence=True,\n save_file=\"plots/%s.pdf\" % scenario,\n num_colors=10)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
c528b27ea3f7a0c357285eaa3d6cfc10ce9e13b9
| 12,064 |
ipynb
|
Jupyter Notebook
|
content/ch-ex/ex1.ipynb
|
NunoEdgarGFlowHub/qiskit-textbook
|
52a9f15d5217064e3885a277fd8a77d83e8a87a9
|
[
"Apache-2.0"
] | 1 |
2020-05-14T12:15:02.000Z
|
2020-05-14T12:15:02.000Z
|
content/ch-ex/ex1.ipynb
|
NunoEdgarGFlowHub/qiskit-textbook
|
52a9f15d5217064e3885a277fd8a77d83e8a87a9
|
[
"Apache-2.0"
] | null | null | null |
content/ch-ex/ex1.ipynb
|
NunoEdgarGFlowHub/qiskit-textbook
|
52a9f15d5217064e3885a277fd8a77d83e8a87a9
|
[
"Apache-2.0"
] | null | null | null | 27.110112 | 216 | 0.520723 |
[
[
[
"# Classical Logic Gates with Quantum Circuits",
"_____no_output_____"
]
],
[
[
"from qiskit import *\nfrom qiskit.tools.visualization import plot_histogram\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"Using the NOT gate (expressed as `x` in Qiskit), the CNOT gate (expressed as `cx` in Qiskit) and the Toffoli gate (expressed as `ccx` in Qiskit) create functions to implement the XOR, AND, NAND and OR gates.\n\nAn implementation of the NOT gate is provided as an example.",
"_____no_output_____"
],
[
"## NOT gate",
"_____no_output_____"
],
[
"This function takes a binary string input (`'0'` or `'1'`) and returns the opposite binary output'.",
"_____no_output_____"
]
],
[
[
"def NOT(input):\n\n q = QuantumRegister(1) # a qubit in which to encode and manipulate the input\n c = ClassicalRegister(1) # a bit to store the output\n qc = QuantumCircuit(q, c) # this is where the quantum program goes\n \n # We encode '0' as the qubit state |0⟩, and '1' as |1⟩\n # Since the qubit is initially |0⟩, we don't need to do anything for an input of '0'\n # For an input of '1', we do an x to rotate the |0⟩ to |1⟩\n if input=='1':\n qc.x( q[0] )\n \n # Now we've encoded the input, we can do a NOT on it using x\n qc.x( q[0] )\n \n # Finally, we extract the |0⟩/|1⟩ output of the qubit and encode it in the bit c[0]\n qc.measure( q[0], c[0] )\n \n # We'll run the program on a simulator\n backend = Aer.get_backend('qasm_simulator')\n # Since the output will be deterministic, we can use just a single shot to get it\n job = execute(qc,backend,shots=1)\n output = next(iter(job.result().get_counts()))\n \n return output",
"_____no_output_____"
]
],
[
[
"## XOR gate",
"_____no_output_____"
],
[
"Takes two binary strings as input and gives one as output.\n\nThe output is `'0'` when the inputs are equal and `'1'` otherwise.",
"_____no_output_____"
]
],
[
[
"def XOR(input1,input2):\n \n q = QuantumRegister(2) # two qubits in which to encode and manipulate the input\n c = ClassicalRegister(1) # a bit to store the output\n qc = QuantumCircuit(q, c) # this is where the quantum program goes\n \n # YOUR QUANTUM PROGRAM GOES HERE \n qc.measure(q[1],c[0]) # YOU CAN CHANGE THIS IF YOU WANT TO\n \n # We'll run the program on a simulator\n backend = Aer.get_backend('qasm_simulator')\n # Since the output will be deterministic, we can use just a single shot to get it\n job = execute(qc,backend,shots=1,memory=True)\n output = job.result().get_memory()[0]\n \n return output",
"_____no_output_____"
]
],
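[
[
"One possible solution for the XOR exercise above (a spoiler, so skip this cell if you want to work it out yourself). A CNOT writes the XOR of its control and target onto the target qubit, so encoding the two inputs and applying a single `cx` is enough. The name `XOR_solution` is our addition, used so that the exercise cell above stays untouched.",
"_____no_output_____"
]
],
[
[
"def XOR_solution(input1,input2):\n    \n    q = QuantumRegister(2) # two qubits in which to encode and manipulate the input\n    c = ClassicalRegister(1) # a bit to store the output\n    qc = QuantumCircuit(q, c)\n    \n    # Encode each input: '0' as |0⟩ and '1' as |1⟩\n    if input1=='1':\n        qc.x( q[0] )\n    if input2=='1':\n        qc.x( q[1] )\n    \n    # The CNOT leaves q[1] in the state input1 XOR input2\n    qc.cx( q[0], q[1] )\n    qc.measure( q[1], c[0] )\n    \n    # Deterministic output, so a single shot is enough\n    backend = Aer.get_backend('qasm_simulator')\n    job = execute(qc,backend,shots=1,memory=True)\n    output = job.result().get_memory()[0]\n    \n    return output",
"_____no_output_____"
]
],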
[
[
"## AND gate",
"_____no_output_____"
],
[
"Takes two binary strings as input and gives one as output.\n\nThe output is `'1'` only when both the inputs are `'1'`.",
"_____no_output_____"
]
],
[
[
"def AND(input1,input2):\n \n q = QuantumRegister(3) # two qubits in which to encode the input, and one for the output\n c = ClassicalRegister(1) # a bit to store the output\n qc = QuantumCircuit(q, c) # this is where the quantum program goes\n \n # YOUR QUANTUM PROGRAM GOES HERE\n qc.measure(q[2],c[0]) # YOU CAN CHANGE THIS IF YOU WANT TO\n \n # We'll run the program on a simulator\n backend = Aer.get_backend('qasm_simulator')\n # Since the output will be deterministic, we can use just a single shot to get it\n job = execute(qc,backend,shots=1,memory=True)\n output = job.result().get_memory()[0]\n \n return output",
"_____no_output_____"
]
],
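[
[
"One possible solution sketch for the AND exercise (spoiler): the Toffoli gate flips the output qubit only when both input qubits are `|1⟩`, which is exactly the AND truth table. `AND_solution` is our name, added so the exercise cell above is preserved.",
"_____no_output_____"
]
],
[
[
"def AND_solution(input1,input2):\n    \n    q = QuantumRegister(3) # two qubits for the inputs, one for the output\n    c = ClassicalRegister(1)\n    qc = QuantumCircuit(q, c)\n    \n    if input1=='1':\n        qc.x( q[0] )\n    if input2=='1':\n        qc.x( q[1] )\n    \n    # The Toffoli flips q[2] only when both q[0] and q[1] are |1⟩\n    qc.ccx( q[0], q[1], q[2] )\n    qc.measure( q[2], c[0] )\n    \n    backend = Aer.get_backend('qasm_simulator')\n    job = execute(qc,backend,shots=1,memory=True)\n    output = job.result().get_memory()[0]\n    \n    return output",
"_____no_output_____"
]
],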
[
[
"## NAND gate",
"_____no_output_____"
],
[
"Takes two binary strings as input and gives one as output.\n\nThe output is `'0'` only when both the inputs are `'1'`.",
"_____no_output_____"
]
],
[
[
"def NAND(input1,input2):\n \n q = QuantumRegister(3) # two qubits in which to encode the input, and one for the output\n c = ClassicalRegister(1) # a bit to store the output\n qc = QuantumCircuit(q, c) # this is where the quantum program goes\n \n # YOUR QUANTUM PROGRAM GOES HERE\n qc.measure(q[2],c[0]) # YOU CAN CHANGE THIS IF YOU WANT TO\n \n # We'll run the program on a simulator\n backend = Aer.get_backend('qasm_simulator')\n # Since the output will be deterministic, we can use just a single shot to get it\n job = execute(qc,backend,shots=1,memory=True)\n output = job.result().get_memory()[0]\n \n return output",
"_____no_output_____"
]
],
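[
[
"A possible NAND sketch (spoiler): NAND is NOT(AND), so a Toffoli followed by an `x` on the output qubit does the job. The `NAND_solution` name is ours.",
"_____no_output_____"
]
],
[
[
"def NAND_solution(input1,input2):\n    \n    q = QuantumRegister(3) # two qubits for the inputs, one for the output\n    c = ClassicalRegister(1)\n    qc = QuantumCircuit(q, c)\n    \n    if input1=='1':\n        qc.x( q[0] )\n    if input2=='1':\n        qc.x( q[1] )\n    \n    # AND via the Toffoli, then invert the result to get NAND\n    qc.ccx( q[0], q[1], q[2] )\n    qc.x( q[2] )\n    qc.measure( q[2], c[0] )\n    \n    backend = Aer.get_backend('qasm_simulator')\n    job = execute(qc,backend,shots=1,memory=True)\n    output = job.result().get_memory()[0]\n    \n    return output",
"_____no_output_____"
]
],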
[
[
"## OR gate",
"_____no_output_____"
],
[
"Takes two binary strings as input and gives one as output.\n\nThe output is `'1'` if either input is `'1'`.",
"_____no_output_____"
]
],
[
[
"def OR(input1,input2):\n \n q = QuantumRegister(3) # two qubits in which to encode the input, and one for the output\n c = ClassicalRegister(1) # a bit to store the output\n qc = QuantumCircuit(q, c) # this is where the quantum program goes\n \n # YOUR QUANTUM PROGRAM GOES HERE\n qc.measure(q[2],c[0]) # YOU CAN CHANGE THIS IF YOU WANT TO\n \n # We'll run the program on a simulator\n backend = Aer.get_backend('qasm_simulator')\n # Since the output will be deterministic, we can use just a single shot to get it\n job = execute(qc,backend,shots=1,memory=True)\n output = job.result().get_memory()[0]\n \n return output",
"_____no_output_____"
]
],
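[
[
"A possible OR sketch (spoiler): by De Morgan's law, `a OR b = NOT((NOT a) AND (NOT b))`, so we invert both inputs, apply a Toffoli, and invert the result. The `OR_solution` name is ours.",
"_____no_output_____"
]
],
[
[
"def OR_solution(input1,input2):\n    \n    q = QuantumRegister(3) # two qubits for the inputs, one for the output\n    c = ClassicalRegister(1)\n    qc = QuantumCircuit(q, c)\n    \n    if input1=='1':\n        qc.x( q[0] )\n    if input2=='1':\n        qc.x( q[1] )\n    \n    # De Morgan: invert the inputs, AND them, then invert the result\n    qc.x( q[0] )\n    qc.x( q[1] )\n    qc.ccx( q[0], q[1], q[2] )\n    qc.x( q[2] )\n    qc.measure( q[2], c[0] )\n    \n    backend = Aer.get_backend('qasm_simulator')\n    job = execute(qc,backend,shots=1,memory=True)\n    output = job.result().get_memory()[0]\n    \n    return output",
"_____no_output_____"
]
],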
[
[
"## Tests",
"_____no_output_____"
],
[
"The following code runs the functions above for all possible inputs, so that you can check whether they work.",
"_____no_output_____"
]
],
[
[
"print('\\nResults for the NOT gate')\nfor input in ['0','1']:\n print(' Input',input,'gives output',NOT(input))\n \nprint('\\nResults for the XOR gate')\nfor input1 in ['0','1']:\n for input2 in ['0','1']:\n print(' Inputs',input1,input2,'give output',XOR(input1,input2))\n\nprint('\\nResults for the AND gate')\nfor input1 in ['0','1']:\n for input2 in ['0','1']:\n print(' Inputs',input1,input2,'give output',AND(input1,input2))\n\nprint('\\nResults for the NAND gate')\nfor input1 in ['0','1']:\n for input2 in ['0','1']:\n print(' Inputs',input1,input2,'give output',NAND(input1,input2))\n\nprint('\\nResults for the OR gate')\nfor input1 in ['0','1']:\n for input2 in ['0','1']:\n print(' Inputs',input1,input2,'give output',OR(input1,input2))",
"\nResults for the NOT gate\n Input 0 gives output 1\n Input 1 gives output 0\n\nResults for the XOR gate\n Inputs 0 0 give output 0\n Inputs 0 1 give output 0\n Inputs 1 0 give output 0\n Inputs 1 1 give output 0\n\nResults for the AND gate\n Inputs 0 0 give output 0\n Inputs 0 1 give output 0\n Inputs 1 0 give output 0\n Inputs 1 1 give output 0\n\nResults for the NAND gate\n Inputs 0 0 give output 0\n Inputs 0 1 give output 0\n Inputs 1 0 give output 0\n Inputs 1 1 give output 0\n\nResults for the OR gate\n Inputs 0 0 give output 0\n Inputs 0 1 give output 0\n Inputs 1 0 give output 0\n Inputs 1 1 give output 0\n"
],
[
"import qiskit\nqiskit.__qiskit_version__",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
c528babfec679e2c6c3cdbc00fc8f3864726820e
| 111,082 |
ipynb
|
Jupyter Notebook
|
ED4/Analise dos Dados.ipynb
|
FabianaFerreira/topicos-matematica-avancada
|
71e0aef768b2d87511f698ab811faa49c172fa42
|
[
"MIT"
] | null | null | null |
ED4/Analise dos Dados.ipynb
|
FabianaFerreira/topicos-matematica-avancada
|
71e0aef768b2d87511f698ab811faa49c172fa42
|
[
"MIT"
] | null | null | null |
ED4/Analise dos Dados.ipynb
|
FabianaFerreira/topicos-matematica-avancada
|
71e0aef768b2d87511f698ab811faa49c172fa42
|
[
"MIT"
] | null | null | null | 544.519608 | 38,304 | 0.943816 |
[
[
[
"#-----------------------------------------\n# Fabiana Ferreira Fonseca\n# Universidade Federal do Rio de Janeiro\n# DRE: 115037241\n# ----------------------------------------\n\n# Obs.: Nao fiz nenhum tratamento dos dados,\n# apesar de perceber que ha algumas inconsistencias\n# nos valores possiveis de sexo e UF",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"h = ['ANO', 'UF', 'CIDADE', 'ZONA', 'SECAO', 'SEXO', 'FAIXA_ETARIA', 'GRAU_DE_INSTRUCAO', 'QNT_ELEITORES']",
"_____no_output_____"
],
[
"df = pd.read_csv('perfil_eleitorado_ATUAL.txt',delimiter=';',encoding = 'latin-1', names=h)",
"_____no_output_____"
],
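[
"# Quick sanity check of the inconsistencies mentioned in the header comment:\n# a sketch (not part of the original analysis) listing the distinct values of\n# SEXO and UF before any cleaning.\nprint(df['SEXO'].unique())\nprint(df['UF'].unique())",
"_____no_output_____"
],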
[
"groupedByAge = df.groupby(['FAIXA_ETARIA', 'SEXO'])['FAIXA_ETARIA'].count().unstack(1)\ngroupedByUF = df.groupby(['UF', 'SEXO'])['UF'].count().unstack(1)\ngroupedByDeg = df.groupby(['GRAU_DE_INSTRUCAO', 'SEXO'])['GRAU_DE_INSTRUCAO'].count().unstack(1)",
"_____no_output_____"
],
[
"groupedByAge.plot.bar(title='Perfil do eleitorado por faixa etaria', figsize=(16, 8), rot=0)",
"_____no_output_____"
],
[
"groupedByUF.plot.bar(title='Perfil do eleitorado por UF', figsize=(16, 8), rot=0)",
"_____no_output_____"
],
[
"#Para o caso do groupby do UF, acho que pode ser mais interessante usar \n#os graficos com stacked em True\ngroupedByUF.plot.bar(title='Perfil do eleitorado por UF', figsize=(16, 8), rot=0, stacked=True)\n",
"_____no_output_____"
],
[
"groupedByDeg.plot.bar(title='Perfil do eleitorado por grau de escolaridade', figsize=(16, 8))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c528bdae76e5952adaeb47a032b620e18b91a4fa
| 12,114 |
ipynb
|
Jupyter Notebook
|
notebooks/logistic_regression_non_linear.ipynb
|
mehrdad-dev/scikit-learn-mooc
|
9c03fb14784ab447a2477039c07f8e8a0d191742
|
[
"CC-BY-4.0"
] | null | null | null |
notebooks/logistic_regression_non_linear.ipynb
|
mehrdad-dev/scikit-learn-mooc
|
9c03fb14784ab447a2477039c07f8e8a0d191742
|
[
"CC-BY-4.0"
] | null | null | null |
notebooks/logistic_regression_non_linear.ipynb
|
mehrdad-dev/scikit-learn-mooc
|
9c03fb14784ab447a2477039c07f8e8a0d191742
|
[
"CC-BY-4.0"
] | null | null | null | 34.710602 | 122 | 0.611111 |
[
[
[
"# Beyond linear separation in classification\n\nAs we saw in the regression section, the linear classification model\nexpects the data to be linearly separable. When this assumption does not\nhold, the model is not expressive enough to properly fit the data.\nTherefore, we need to apply the same tricks as in regression: feature\naugmentation (potentially using expert-knowledge) or using a\nkernel-based method.\n\nWe will provide examples where we will use a kernel support vector machine\nto perform classification on some toy-datasets where it is impossible to\nfind a perfect linear separation.\n\nFirst, we redefine our plotting utility to show the decision boundary of a\nclassifier.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot_decision_function(fitted_classifier, range_features, ax=None):\n \"\"\"Plot the boundary of the decision function of a classifier.\"\"\"\n from sklearn.preprocessing import LabelEncoder\n\n feature_names = list(range_features.keys())\n # create a grid to evaluate all possible samples\n plot_step = 0.02\n xx, yy = np.meshgrid(\n np.arange(*range_features[feature_names[0]], plot_step),\n np.arange(*range_features[feature_names[1]], plot_step),\n )\n\n # compute the associated prediction\n Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = LabelEncoder().fit_transform(Z)\n Z = Z.reshape(xx.shape)\n\n # make the plot of the boundary and the data samples\n if ax is None:\n _, ax = plt.subplots()\n ax.contourf(xx, yy, Z, alpha=0.4, cmap=\"RdBu\")\n\n return ax",
"_____no_output_____"
]
],
[
[
"We will generate a first dataset where the data are represented as two\ninterlaced half circle. This dataset is generated using the function\n[`sklearn.datasets.make_moons`](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html).",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom sklearn.datasets import make_moons\n\nfeature_names = [\"Feature #0\", \"Features #1\"]\ntarget_name = \"class\"\n\nX, y = make_moons(n_samples=100, noise=0.13, random_state=42)\n\n# We store both the data and target in a dataframe to ease plotting\nmoons = pd.DataFrame(np.concatenate([X, y[:, np.newaxis]], axis=1),\n columns=feature_names + [target_name])\ndata_moons, target_moons = moons[feature_names], moons[target_name]\n\nrange_features_moons = {\"Feature #0\": (-2, 2.5), \"Feature #1\": (-2, 2)}",
"_____no_output_____"
]
],
[
[
"Since the dataset contains only two features, we can make a scatter plot to\nhave a look at it.",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\n\nsns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],\n hue=target_moons, palette=[\"tab:red\", \"tab:blue\"])\n_ = plt.title(\"Illustration of the moons dataset\")",
"_____no_output_____"
]
],
[
[
"From the intuitions that we got by studying linear model, it should be\nobvious that a linear classifier will not be able to find a perfect decision\nfunction to separate the two classes.\n\nLet's try to see what is the decision boundary of such a linear classifier.\nWe will create a predictive model by standardizing the dataset followed by\na linear support vector machine classifier.",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\n\nlinear_model = make_pipeline(StandardScaler(), SVC(kernel=\"linear\"))\nlinear_model.fit(data_moons, target_moons)",
"_____no_output_____"
]
],
[
[
"<div class=\"admonition warning alert alert-danger\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Warning</p>\n<p class=\"last\">Be aware that we fit and will check the boundary decision of the classifier\non the same dataset without splitting the dataset into a training set and a\ntesting set. While this is a bad practice, we use it for the sake of\nsimplicity to depict the model behavior. Always use cross-validation when\nyou want to assess the statistical performance of a machine-learning model.</p>\n</div>",
"_____no_output_____"
],
[
"Let's check the decision boundary of such a linear model on this dataset.",
"_____no_output_____"
]
],
[
[
"ax = sns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],\n hue=target_moons, palette=[\"tab:red\", \"tab:blue\"])\nplot_decision_function(linear_model, range_features_moons, ax=ax)\n_ = plt.title(\"Decision boundary of a linear model\")",
"_____no_output_____"
]
],
[
[
"As expected, a linear decision boundary is not enough flexible to split the\ntwo classes.\n\nTo push this example to the limit, we will create another dataset where\nsamples of a class will be surrounded by samples from the other class.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_gaussian_quantiles\n\nfeature_names = [\"Feature #0\", \"Features #1\"]\ntarget_name = \"class\"\n\nX, y = make_gaussian_quantiles(\n n_samples=100, n_features=2, n_classes=2, random_state=42)\ngauss = pd.DataFrame(np.concatenate([X, y[:, np.newaxis]], axis=1),\n columns=feature_names + [target_name])\ndata_gauss, target_gauss = gauss[feature_names], gauss[target_name]\n\nrange_features_gauss = {\"Feature #0\": (-4, 4), \"Feature #1\": (-4, 4)}",
"_____no_output_____"
],
[
"ax = sns.scatterplot(data=gauss, x=feature_names[0], y=feature_names[1],\n hue=target_gauss, palette=[\"tab:red\", \"tab:blue\"])\n_ = plt.title(\"Illustration of the Gaussian quantiles dataset\")",
"_____no_output_____"
]
],
[
[
"Here, this is even more obvious that a linear decision function is not\nadapted. We can check what decision function, a linear support vector machine\nwill find.",
"_____no_output_____"
]
],
[
[
"linear_model.fit(data_gauss, target_gauss)\nax = sns.scatterplot(data=gauss, x=feature_names[0], y=feature_names[1],\n hue=target_gauss, palette=[\"tab:red\", \"tab:blue\"])\nplot_decision_function(linear_model, range_features_gauss, ax=ax)\n_ = plt.title(\"Decision boundary of a linear model\")",
"_____no_output_____"
]
],
[
[
"As expected, a linear separation cannot be used to separate the classes\nproperly: the model will under-fit as it will make errors even on\nthe training set.\n\nIn the section about linear regression, we saw that we could use several\ntricks to make a linear model more flexible by augmenting features or\nusing a kernel. Here, we will use the later solution by using a radial basis\nfunction (RBF) kernel together with a support vector machine classifier.\n\nWe will repeat the two previous experiments and check the obtained decision\nfunction.",
"_____no_output_____"
]
],
[
[
"kernel_model = make_pipeline(StandardScaler(), SVC(kernel=\"rbf\", gamma=5))",
"_____no_output_____"
],
[
"kernel_model.fit(data_moons, target_moons)\nax = sns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],\n hue=target_moons, palette=[\"tab:red\", \"tab:blue\"])\nplot_decision_function(kernel_model, range_features_moons, ax=ax)\n_ = plt.title(\"Decision boundary with a model using an RBF kernel\")",
"_____no_output_____"
]
],
[
[
"We see that the decision boundary is not anymore a straight line. Indeed,\nan area is defined around the red samples and we could imagine that this\nclassifier should be able to generalize on unseen data.\n\nLet's check the decision function on the second dataset.",
"_____no_output_____"
]
],
[
[
"kernel_model.fit(data_gauss, target_gauss)\nax = sns.scatterplot(data=gauss, x=feature_names[0], y=feature_names[1],\n hue=target_gauss, palette=[\"tab:red\", \"tab:blue\"])\nplot_decision_function(kernel_model, range_features_gauss, ax=ax)\n_ = plt.title(\"Decision boundary with a model using an RBF kernel\")",
"_____no_output_____"
]
],
[
[
"We observe something similar than in the previous case. The decision function\nis more flexible and does not underfit anymore.\n\nThus, kernel trick or feature expansion are the tricks to make a linear\nclassifier more expressive, exactly as we saw in regression.\n\nKeep in mind that adding flexibility to a model can also risk increasing\noverfitting by making the decision function to sensitive to individual\n(possibly noisy) data points of the training set. Here we can observe that\nthe decision functions remain smooth enough to preserve good generalization.\nIf you are curious, you can try repeated the above experiment with\n`gamma=100` and look at the decision functions.",
"_____no_output_____"
]
]
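,
[
[
"# A sketch of the experiment suggested above: refit the kernel model with\n# gamma=100 (the value mentioned in the text) and plot both decision\n# functions. Expect much more irregular, likely overfitted boundaries.\noverfit_model = make_pipeline(StandardScaler(), SVC(kernel=\"rbf\", gamma=100))\nfor data, target, frame, ranges in [\n        (data_moons, target_moons, moons, range_features_moons),\n        (data_gauss, target_gauss, gauss, range_features_gauss)]:\n    overfit_model.fit(data, target)\n    ax = sns.scatterplot(data=frame, x=feature_names[0], y=feature_names[1],\n                         hue=target, palette=[\"tab:red\", \"tab:blue\"])\n    plot_decision_function(overfit_model, ranges, ax=ax)\n    _ = plt.title(\"Decision boundary with gamma=100\")\n    plt.show()",
"_____no_output_____"
]
]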
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
c528dd269c212c2f076e78c404085e706b6746d1
| 108,930 |
ipynb
|
Jupyter Notebook
|
PyTorch/LogisticRegressionMNIST.ipynb
|
vcasadei/deep-learning-facens-2018
|
7ea283bf33085c13ba01825022cf03e9922f06ec
|
[
"MIT"
] | null | null | null |
PyTorch/LogisticRegressionMNIST.ipynb
|
vcasadei/deep-learning-facens-2018
|
7ea283bf33085c13ba01825022cf03e9922f06ec
|
[
"MIT"
] | null | null | null |
PyTorch/LogisticRegressionMNIST.ipynb
|
vcasadei/deep-learning-facens-2018
|
7ea283bf33085c13ba01825022cf03e9922f06ec
|
[
"MIT"
] | null | null | null | 92.943686 | 23,524 | 0.817718 |
[
[
[
"# Regressão Softmax com dados do MNIST",
"_____no_output_____"
],
[
"## Objetivo",
"_____no_output_____"
],
[
"O objetivo deste notebook é ilustrar o uso de praticamente a mesma rede desenvolvida para a classificação das flores Íris, porém agora com o problema de classificação de dígitos manuscritos utilizando o dataset MNIST.\nAs principais diferenças são:\n- tipo do dado, agora imagem com muito atributos: 28 x 28 pixels\n- número de amostras, muito maior, 60 mil\nNeste exercício será possível a interpretação do significado dos parâmetros treinados",
"_____no_output_____"
],
[
"## Importação das bibliotecas",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport torch\nfrom torch.autograd import Variable\n\nimport torchvision\nfrom torchvision.datasets import MNIST",
"_____no_output_____"
]
],
[
[
"## Carregamento dos dados do MNIST",
"_____no_output_____"
]
],
[
[
"dataset_dir = 'data/datasets/MNIST/'\n\nMNIST(dataset_dir, train=False, transform=None, target_transform=None, download=True)",
"_____no_output_____"
],
[
"x_train, y_train = torch.load(dataset_dir + 'processed/training.pt')\n\nprint(\"Amostras de treinamento:\", x_train.size(0))\n\nprint(\"\\nDimensões dos dados das imagens: \", x_train.size())\nprint(\"Valores mínimo e máximo dos pixels:\", torch.min(x_train), torch.max(x_train))\nprint(\"Tipo dos dados das imagens: \", type(x_train))\nprint(\"Tipo das classes das imagens: \", type(y_train))",
"Amostras de treinamento: 60000\n\nDimensões dos dados das imagens: torch.Size([60000, 28, 28])\nValores mínimo e máximo dos pixels: 0 255\nTipo dos dados das imagens: <class 'torch.ByteTensor'>\nTipo das classes das imagens: <class 'torch.LongTensor'>\n"
]
],
[
[
"### Carregamento, normalização e seleção dos dados do MNIST\n\nNeste exemplo utilizaremos apenas 1000 amostras de treinamento.",
"_____no_output_____"
]
],
[
[
"x_train = x_train.float()\n\nx_train = x_train / 255.\n\nif True:\n n_samples_train = 1000\n\n x_train = x_train[:n_samples_train]\n y_train = y_train[:n_samples_train]\n\nprint(\"Amostras de treinamento:\", x_train.size(0))\n\nprint(\"\\nDimensões dos dados das imagens: \", x_train.size())\nprint(\"Valores mínimo e máximo dos pixels:\", torch.min(x_train), torch.max(x_train))\nprint(\"Tipo dos dados das imagens: \", type(x_train))\nprint(\"Tipo das classes das imagens: \", type(y_train))",
"Amostras de treinamento: 1000\n\nDimensões dos dados das imagens: torch.Size([1000, 28, 28])\nValores mínimo e máximo dos pixels: 0.0 1.0\nTipo dos dados das imagens: <class 'torch.FloatTensor'>\nTipo das classes das imagens: <class 'torch.LongTensor'>\n"
]
],
[
[
"### Visualizando os dados",
"_____no_output_____"
]
],
[
[
"n_samples = 24\n\n# cria um grid com as imagens\ngrid = torchvision.utils.make_grid(x_train[:n_samples].unsqueeze(1), pad_value=1.0, padding=1)\n\nplt.figure(figsize=(15, 10))\nplt.imshow(grid.numpy().transpose(1, 2, 0))\nplt.axis('off')",
"_____no_output_____"
]
],
[
[
"### Visualizando uma imagem com o matplotlib",
"_____no_output_____"
]
],
[
[
"image = x_train[2]\ntarget = y_train[2]\n\nplt.imshow(image.numpy().reshape(28,28), cmap='gray')\nprint('class:', target)",
"class: 4\n"
]
],
[
[
"## Modelo",
"_____no_output_____"
]
],
[
[
"model = torch.nn.Linear(28*28, 10) # 28*28 atributos de entrada e 10 neurônios na sáida",
"_____no_output_____"
]
],
[
[
"### Testando um predict com poucas amostras",
"_____no_output_____"
],
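[
"# A minimal sketch for this section, whose code cell is missing in the\n# original: run the (still untrained) model on a few samples and inspect\n# the raw outputs.\noutputs = model(Variable(x_train[:5].view(-1, 28*28)))\nprint(outputs.size())\nprint(outputs.data[0])",
"_____no_output_____"
],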
[
"## Treinamento",
"_____no_output_____"
],
[
"### Inicialização dos parâmetros",
"_____no_output_____"
]
],
[
[
"epochs = 100\nlearningRate = 0.5\n\n# Utilizaremos CrossEntropyLoss como função de perda\ncriterion = torch.nn.CrossEntropyLoss()\n\n# Gradiente descendente\noptimizer = torch.optim.SGD(model.parameters(), lr=learningRate)",
"_____no_output_____"
]
],
[
[
"### Visualização do grafo computacional da perda (loss)",
"_____no_output_____"
]
],
[
[
"y_pred = model(Variable(x_train.view(-1,28*28)))\nloss = criterion(y_pred, Variable(y_train))\nfrom lib.pytorch_visualize import make_dot\np = make_dot(loss, dict(model.named_parameters()))\np",
"['', '/opt/conda/envs/jupyterhub/lib/python36.zip', '/opt/conda/envs/jupyterhub/lib/python3.6', '/opt/conda/envs/jupyterhub/lib/python3.6/lib-dynload', '/opt/conda/envs/jupyterhub/lib/python3.6/site-packages', '/opt/conda/envs/jupyterhub/lib/python3.6/site-packages/Mako-1.0.7-py3.6.egg', '/opt/conda/envs/jupyterhub/lib/python3.6/site-packages/cycler-0.10.0-py3.6.egg', '/opt/conda/envs/jupyterhub/lib/python3.6/site-packages/torchvision-0.1.9-py3.6.egg', '/opt/conda/envs/jupyterhub/lib/python3.6/site-packages/IPython/extensions', '/home/vitorcasadei/.ipython']\n"
]
],
[
[
"### Laço de treinamento dos pesos",
"_____no_output_____"
]
],
[
[
"losses = []\n\nfor i in range(epochs):\n # Transforma a entrada para uma dimensão\n inputs = Variable(x_train.view(-1, 28 * 28))\n # Predict da rede\n outputs = model(inputs)\n\n # calcula a perda\n loss = criterion(outputs, Variable(y_train))\n\n # zero, backpropagation, ajusta parâmetros pelo gradiente descendente\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n losses.append(loss.data[0])",
"_____no_output_____"
],
[
"print('Final loss:', loss.data[0])",
"Final loss: 0.08770870417356491\n"
]
],
[
[
"### Visualizando gráfico de perda durante o treinamento",
"_____no_output_____"
]
],
[
[
"plt.plot(losses)",
"_____no_output_____"
]
],
[
[
"## Avaliação",
"_____no_output_____"
],
[
"### Acurácia tanto no conjunto de treinamento como no conjunto de testes",
"_____no_output_____"
]
],
[
[
"def predict(model, input_data):\n outputs = model(Variable(input_data))\n _, predicts = torch.max(outputs, 1)\n \n return predicts.data\n\ny_pred = predict(model, x_train.view(-1, 28*28))\naccuracy = (y_pred.numpy() == y_train.numpy()).mean()\nprint('Accuracy:', accuracy)",
"Accuracy: 0.996\n"
]
],
[
[
"### Matriz de confusão com dados de treinamento e teste",
"_____no_output_____"
]
],
[
[
"print('Matriz de confusão:')\npd.crosstab(y_pred.numpy(), y_train.numpy())",
"Matriz de confusão:\n"
]
],
[
[
"## Visualizando a matriz de pesos treinados",
"_____no_output_____"
],
[
"Observe que a matriz de peso treinado para cada classe mostra a importância dos pesos associados aos caracteres de cada classe.",
"_____no_output_____"
]
],
[
[
"weights = model.state_dict()['weight']\nprint('weights:', weights.shape)\n\nbias = model.state_dict()['bias']\nprint('bias: ', bias.shape)\n\n# Visualizando pesos da classe 3\nplt.imshow(weights[8, :].numpy().reshape((28,28)),cmap = 'gray')\nplt.show()",
"weights: torch.Size([10, 784])\nbias: torch.Size([10])\n"
]
],
[
[
"### Visualizando os pesos de todas as classes",
"_____no_output_____"
]
],
[
[
"# cria um grid com as imagens\ngrid = torchvision.utils.make_grid(weights.view(-1, 1, 28, 28), normalize=True, pad_value=1.0, padding=1, nrow=10)\n\nplt.figure(figsize=(15, 10))\nplt.imshow(grid.numpy().transpose(1, 2, 0))\nplt.axis('off');",
"_____no_output_____"
]
],
[
[
"### Diagrama da regressão softmax com visualização dos pesos W",
"_____no_output_____"
],
[
"<img src=\"../figures/RegressaoSoftmaxArgmaxNMIST.png\",width = 400>",
"_____no_output_____"
],
[
"# Atividades",
"_____no_output_____"
],
[
"## Exercícios",
"_____no_output_____"
],
[
"- 1) Na configuração da figura acima, mostre os valores de z0 até z9, os valores das probabilidades y_hat, após o softmax, quando a rede recebe como entrada a nona amostra que contém o manuscrito do dígito '4':",
"_____no_output_____"
]
],
[
[
"image = x_train[9]\ntarget = y_train[9]\n\nplt.imshow(image.numpy().reshape(28,28), cmap='gray')\nprint('class:', target)",
"class: 4\n"
]
],
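[
[
"A hedged solution sketch for exercise 1 (skip this cell if you want to solve it yourself). The names `z` and `y_hat` below are our own; `torch.nn.functional.softmax` converts the logits into probabilities.",
"_____no_output_____"
]
],
[
[
"import torch.nn.functional as F\n\n# Logits z0..z9 for sample index 9 (the handwritten '4' shown above)\nz = model(Variable(x_train[9].view(-1, 28*28)))\nprint('z:    ', z.data)\n\n# Probabilities y_hat after the softmax\ny_hat = F.softmax(z, dim=1)\nprint('y_hat:', y_hat.data)",
"_____no_output_____"
]
],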
[
[
"- 2) Insira código no laço do treinamento para que no final de cada época, \n seja impresso: o número da época e a perda e a acurácia",
"_____no_output_____"
],
[
"- 3) Insira código no laço do treinamento para visualização dos valores dos gradientes referentes à classe do dígito 4, no final de cada época.",
"_____no_output_____"
],
[
"## Perguntas",
"_____no_output_____"
],
[
"1. Qual é o shape da matriz de entrada na rede?\n2. Qual é o shape da saída da rede?\n3. Qual é o número total de parâmetros da rede, incluindo o bias?",
"_____no_output_____"
],
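[
"# A sketch answering question 3 above: count every trainable parameter,\n# including the bias (28*28*10 weights + 10 biases = 7850).\ntotal_params = sum(p.numel() for p in model.parameters())\nprint('Total parameters:', total_params)",
"_____no_output_____"
],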
[
"# Aprendizados\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c528ea160983c68d317f3f72a170065da56f1eaf
| 11,403 |
ipynb
|
Jupyter Notebook
|
tensorflow/lite/micro/examples/micro_speech/train_speech_model.ipynb
|
gimyeongmin/tensorflow
|
07ae96ee309e3474108059736814ffae2bfd4a8e
|
[
"Apache-2.0"
] | 57 |
2017-09-03T07:08:31.000Z
|
2022-02-28T04:33:42.000Z
|
tensorflow/lite/micro/examples/micro_speech/train_speech_model.ipynb
|
gimyeongmin/tensorflow
|
07ae96ee309e3474108059736814ffae2bfd4a8e
|
[
"Apache-2.0"
] | 58 |
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
tensorflow/lite/micro/examples/micro_speech/train_speech_model.ipynb
|
gimyeongmin/tensorflow
|
07ae96ee309e3474108059736814ffae2bfd4a8e
|
[
"Apache-2.0"
] | 66 |
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
| 34.87156 | 434 | 0.553802 |
[
[
[
"# Train a Simple Audio Recognition model for microcontroller use",
"_____no_output_____"
],
[
"This notebook demonstrates how to train a 20kb [Simple Audio Recognition](https://www.tensorflow.org/tutorials/sequences/audio_recognition) model for [TensorFlow Lite for Microcontrollers](https://tensorflow.org/lite/microcontrollers/overview). It will produce the same model used in the [micro_speech](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech) example application.\n\nThe model is designed to be used with [Google Colaboratory](https://colab.research.google.com).\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/train_speech_model.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/examples/micro_speech/train_speech_model.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>\n",
"_____no_output_____"
],
[
"The notebook runs Python scripts to train and freeze the model, and uses the TensorFlow Lite converter to convert it for use with TensorFlow Lite for Microcontrollers.\n\n**Training is much faster using GPU acceleration.** Before you proceed, ensure you are using a GPU runtime by going to **Runtime -> Change runtime type** and selecting **GPU**. Training 18,000 iterations will take 1.5-2 hours on a GPU runtime.\n\n## Configure training\n\nThe following `os.environ` lines can be customized to set the words that will be trained for, and the steps and learning rate of the training. The default values will result in the same model that is used in the micro_speech example. Run the cell to set the configuration:",
"_____no_output_____"
]
],
[
[
"import os\n\n# A comma-delimited list of the words you want to train for.\n# The options are: yes,no,up,down,left,right,on,off,stop,go\n# All other words will be used to train an \"unknown\" category.\nos.environ[\"WANTED_WORDS\"] = \"yes,no\"\n\n# The number of steps and learning rates can be specified as comma-separated\n# lists to define the rate at each stage. For example,\n# TRAINING_STEPS=15000,3000 and LEARNING_RATE=0.001,0.0001\n# will run 18,000 training loops in total, with a rate of 0.001 for the first\n# 15,000, and 0.0001 for the final 3,000.\nos.environ[\"TRAINING_STEPS\"]=\"15000,3000\"\nos.environ[\"LEARNING_RATE\"]=\"0.001,0.0001\"\n\n# Calculate the total number of steps, which is used to identify the checkpoint\n# file name.\ntotal_steps = sum(map(lambda string: int(string),\n os.environ[\"TRAINING_STEPS\"].split(\",\")))\nos.environ[\"TOTAL_STEPS\"] = str(total_steps)\n\n# Print the configuration to confirm it\n!echo \"Training these words: ${WANTED_WORDS}\"\n!echo \"Training steps in each stage: ${TRAINING_STEPS}\"\n!echo \"Learning rate in each stage: ${LEARNING_RATE}\"\n!echo \"Total number of training steps: ${TOTAL_STEPS}\"\n",
"_____no_output_____"
]
],
[
[
"## Install dependencies\n\nNext, we'll install a GPU build of TensorFlow, so we can use GPU acceleration for training.",
"_____no_output_____"
]
],
[
[
"# Replace Colab's default TensorFlow install with a more recent\n# build that contains the operations that are needed for training\n!pip uninstall -y tensorflow tensorflow_estimator tensorboard\n!pip install -q tf-estimator-nightly==1.14.0.dev2019072901 tf-nightly-gpu==1.15.0.dev20190729",
"_____no_output_____"
]
],
[
[
"We'll also clone the TensorFlow repository, which contains the scripts that train and freeze the model.",
"_____no_output_____"
]
],
[
[
"# Clone the repository from GitHub\n!git clone -q https://github.com/tensorflow/tensorflow\n# Check out a commit that has been tested to work\n# with the build of TensorFlow we're using\n!git -c advice.detachedHead=false -C tensorflow checkout 17ce384df70",
"_____no_output_____"
]
],
[
[
"## Load TensorBoard\n\nNow, set up TensorBoard so that we can graph our accuracy and loss as training proceeds.",
"_____no_output_____"
]
],
[
[
"# Delete any old logs from previous runs\n!rm -rf /content/retrain_logs\n# Load TensorBoard\n%load_ext tensorboard\n%tensorboard --logdir /content/retrain_logs",
"_____no_output_____"
]
],
[
[
"## Begin training\n\nNext, run the following script to begin training. The script will first download the training data:",
"_____no_output_____"
]
],
[
[
"!python tensorflow/tensorflow/examples/speech_commands/train.py \\\n--model_architecture=tiny_conv --window_stride=20 --preprocess=micro \\\n--wanted_words=${WANTED_WORDS} --silence_percentage=25 --unknown_percentage=25 \\\n--quantize=1 --verbosity=WARN --how_many_training_steps=${TRAINING_STEPS} \\\n--learning_rate=${LEARNING_RATE} --summaries_dir=/content/retrain_logs \\\n--data_dir=/content/speech_dataset --train_dir=/content/speech_commands_train \\\n",
"_____no_output_____"
]
],
[
[
"## Freeze the graph\n\nOnce training is complete, run the following cell to freeze the graph.",
"_____no_output_____"
]
],
[
[
"!python tensorflow/tensorflow/examples/speech_commands/freeze.py \\\n--model_architecture=tiny_conv --window_stride=20 --preprocess=micro \\\n--wanted_words=${WANTED_WORDS} --quantize=1 --output_file=/content/tiny_conv.pb \\\n--start_checkpoint=/content/speech_commands_train/tiny_conv.ckpt-${TOTAL_STEPS}",
"_____no_output_____"
]
],
[
[
"## Convert the model\n\nRun this cell to use the TensorFlow Lite converter to convert the frozen graph into the TensorFlow Lite format, fully quantized for use with embedded devices.",
"_____no_output_____"
]
],
[
[
"!toco \\\n--graph_def_file=/content/tiny_conv.pb --output_file=/content/tiny_conv.tflite \\\n--input_shapes=1,49,40,1 --input_arrays=Reshape_2 --output_arrays='labels_softmax' \\\n--inference_type=QUANTIZED_UINT8 --mean_values=0 --std_dev_values=9.8077",
"_____no_output_____"
]
],
[
[
"The following cell will print the model size, which will be under 20 kilobytes.",
"_____no_output_____"
]
],
[
[
"import os\nmodel_size = os.path.getsize(\"/content/tiny_conv.tflite\")\nprint(\"Model is %d bytes\" % model_size)",
"_____no_output_____"
]
],
[
[
"Finally, we use xxd to transform the model into a source file that can be included in a C++ project and loaded by TensorFlow Lite for Microcontrollers.",
"_____no_output_____"
]
],
[
[
"# Install xxd if it is not available\n!apt-get -qq install xxd\n# Save the file as a C source file\n!xxd -i /content/tiny_conv.tflite > /content/tiny_conv.cc\n# Print the source file\n!cat /content/tiny_conv.cc",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c528fdbc3105f75335f4273363d5b14b6cd293ec
| 6,875 |
ipynb
|
Jupyter Notebook
|
notebooks/16-Curso_de_creacion_de_GUIs_con_Qt5_y_Python-Widgets_en_Designer_(VI).ipynb
|
kikocorreoso/pyboqt
|
174a4c7ce7da5f6ec97809a660286a84af573536
|
[
"MIT"
] | 2 |
2020-02-27T23:44:40.000Z
|
2021-03-27T07:40:01.000Z
|
notebooks/16-Curso_de_creacion_de_GUIs_con_Qt5_y_Python-Widgets_en_Designer_(VI).ipynb
|
Jimmy-INL/pyboqt
|
174a4c7ce7da5f6ec97809a660286a84af573536
|
[
"MIT"
] | null | null | null |
notebooks/16-Curso_de_creacion_de_GUIs_con_Qt5_y_Python-Widgets_en_Designer_(VI).ipynb
|
Jimmy-INL/pyboqt
|
174a4c7ce7da5f6ec97809a660286a84af573536
|
[
"MIT"
] | 4 |
2020-02-26T18:54:33.000Z
|
2022-01-08T15:03:53.000Z
| 50.925926 | 1,129 | 0.681018 |
[
[
[
"En el mundo Qt tenemos una herramienta [RAD (Rapid Application Development)](https://es.wikipedia.org/wiki/Desarrollo_r%C3%A1pido_de_aplicaciones). Esta herramienta se llama Qt DesigneEste nuevo capítulo es el último en los que enumeramos los widgets disponibles dentro de Designer, en este caso le toca el turno a los *display widgets* o widgets que nos permiten mostrar información en distintos \"formatos\".**",
"_____no_output_____"
],
[
"Índice:\n\n* [Instalación de lo que vamos a necesitar](https://pybonacci.org/2019/11/12/curso-de-creacion-de-guis-con-qt5-y-python-capitulo-00-instalacion/).\n* [Qt, versiones y diferencias](https://pybonacci.org/2019/11/21/curso-de-creacion-de-guis-con-qt5-y-python-capitulo-01-qt-versiones-y-bindings/).\n* [Hola, Mundo](https://pybonacci.org/2019/11/26/curso-de-creacion-de-guis-con-qt5-y-python-capitulo-02-hola-mundo/).\n* [Módulos en Qt](https://pybonacci.org/2019/12/02/curso-de-creacion-de-guis-con-qt5-y-python-capitulo-03-modulos-qt/).\n* [Añadimos icono a la ventana principal](https://pybonacci.org/2019/12/26/curso-de-creacion-de-guis-con-qt5-y-python-capitulo-04-icono-de-la-ventana/).\n* [Tipos de ventana en un GUI](https://pybonacci.org/2020/01/31/curso-de-creacion-de-guis-con-qt-capitulo-05-ventanas-principales-diferencias/).\n* [Ventana inicial de carga o Splashscreen](https://pybonacci.org/2020/02/26/curso-de-creacion-de-guis-con-qt-capitulo-06-splash-screen/)\n* [Menu principal. Introducción](https://pybonacci.org/2020/03/18/curso-de-creacion-de-guis-con-qt-capitulo-07-menu/).\n* [Mejorando algunas cosas vistas](https://pybonacci.org/2020/03/26/curso-de-creacion-de-guis-con-qt-capitulo-08-mejorando-lo-visto/).\n* [Gestión de eventos o Acción y reacción](https://pybonacci.org/2020/03/27/curso-de-creacion-de-guis-con-qt-capitulo-09-signals-y-slots/).\n* [Introducción a Designer](https://pybonacci.org/2020/04/14/curso-de-creacion-de-guis-con-qt-capitulo-10-introduccion-a-designer/).\n* [Los Widgets vistos a través de Designer: Primera parte](https://pybonacci.org/2020/05/01/curso-de-creacion-de-guis-con-qt-capitulo-11-widgets-en-designer-i/).\n* [Los Widgets vistos a través de Designer: Segunda parte](https://pybonacci.org/2020/05/02/curso-de-creacion-de-guis-con-qt-capitulo-12:-widgets-en-designer-(ii)/).\n* [Los Widgets vistos a través de Designer: Tercera parte](https://pybonacci.org/2020/05/03/curso-de-creacion-de-guis-con-qt-capitulo-13-widgets-en-designer-iii/).\n* [Los Widgets vistos a través de Designer: Cuarta parte](https://pybonacci.org/2020/05/04/curso-de-creacion-de-guis-con-qt-capitulo-14-widgets-en-designer-iv/).\n* [Los Widgets vistos a través de Designer: Quinta parte](https://pybonacci.org/2020/05/05/curso-de-creacion-de-guis-con-qt-capitulo-15-widgets-en-designer-v/).\n* [Los Widgets vistos a través de Designer: Sexta parte](https://pybonacci.org/2020/05/06/curso-de-creacion-de-guis-con-qt-capitulo-16:-widgets-en-designer-(vi)/) (este capítulo).\n* TBD… (lo actualizaré cuando tenga más claro los siguientes pasos).",
"_____no_output_____"
],
[
"[Los materiales para este capítulo los podéis descargar de [aquí](https://github.com/kikocorreoso/pyboqt/tree/chapter16)]",
"_____no_output_____"
],
[
"**[INSTALACIÓN] Si todavía no has pasado por el [inicio del curso, donde explico cómo poner a punto todo](https://pybonacci.org/2019/11/12/curso-de-creacion-de-guis-con-qt-capitulo-00:-instalacion/), ahora es un buen momento para hacerlo y después podrás seguir con esta nueva receta.**",
"_____no_output_____"
],
[
"En este último vídeo donde vemos los widgets que tenemos en Designer le damos un repaso a widgets que permiten mostrar información en distintas formas.",
"_____no_output_____"
],
[
"El vídeo está a continuación:",
"_____no_output_____"
]
],
[
[
"from IPython.display import Video\n\nVideo(\"https://libre.video/download/videos/9ee5e77e-5eac-4359-b75b-569060091a3b-360.mp4\")",
"_____no_output_____"
]
],
[
[
"Transcripción del vídeo a continuación:",
"_____no_output_____"
],
[
"*Y ya solo nos queda una sección, la de los Widgets de exhibir cosas o de display. Tenemos, por ejemplo, las etiquetas que podemos usar en múltiples sitios. Luego tenemos el textbrowser que es como una etiqueta grande. Extiende al TextEdit que vimos anteriormente, en los widgets de input, pero es de solo lectura. Se puede usar para mostrar texto que el usuario no pueda editar. Tenemos un GraphicView que nos puede valer para meter, por ejemplo, gráficos de Matplotlib. Tenemos un CalendarWidget para seleccionar fechas. Podemos usar un LCD para enseñar números, similar al de una calculadora de mano. Podemos usar barras de progreso. Líneas horizontales o verticales que nos permitan separar cosas dentro de la ventana. Un Widget para meter gráficos OpenGL. El QQuickWidget no lo voy a comentar ahora, quizá en el futuro hagamos algo con esto pero a día de hoy no lo tengo todavía muy claro. Por último, tenemos el QWebEngineView que nos permite visualizar documentos web. Por ejemplo, le puedo pedir que me muestre una url y podemos ver el curso de creación de GUIs con Qt dentro del GUI con el que estamos trasteando.*",
"_____no_output_____"
],
[
"Y con todo esto creo que ya es suficiente. En el próximo capítulo más.",
"_____no_output_____"
]
]
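,
[
[
"As a small illustration (added here, not part of the original post), a minimal PyQt5 sketch that puts a few of these display widgets (a QLabel, a QLCDNumber and a QProgressBar) into a window:",
"_____no_output_____"
]
],
[
[
"# Minimal sketch (assumes PyQt5 is installed) showing a few display widgets\nimport sys\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QVBoxLayout,\n                             QLabel, QLCDNumber, QProgressBar)\n\napp = QApplication(sys.argv)\nwindow = QWidget()\nlayout = QVBoxLayout(window)\n\nlabel = QLabel('A QLabel showing read-only text')\nlcd = QLCDNumber()\nlcd.display(42)        # show a number, calculator style\nprogress = QProgressBar()\nprogress.setValue(75)  # a 75% progress bar\n\nfor w in (label, lcd, progress):\n    layout.addWidget(w)\n\nwindow.show()\nsys.exit(app.exec_())",
"_____no_output_____"
]
]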
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
c5291ada29f28e0ee7493a6ae09c1a605dbaa57f
| 7,225 |
ipynb
|
Jupyter Notebook
|
ScriptBuilder.ipynb
|
NYU-CS6313-SPRING2016/Group-1-INET-Twitter-Human-Rights
|
ec90454c5ee4a5bf84a09cf13609b65083fa2b28
|
[
"MIT"
] | null | null | null |
ScriptBuilder.ipynb
|
NYU-CS6313-SPRING2016/Group-1-INET-Twitter-Human-Rights
|
ec90454c5ee4a5bf84a09cf13609b65083fa2b28
|
[
"MIT"
] | null | null | null |
ScriptBuilder.ipynb
|
NYU-CS6313-SPRING2016/Group-1-INET-Twitter-Human-Rights
|
ec90454c5ee4a5bf84a09cf13609b65083fa2b28
|
[
"MIT"
] | null | null | null | 25.989209 | 91 | 0.461315 |
[
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"# read in the file\n# the main body\ndata = pd.read_csv('output/twitterDB_all.csv',header=None)\ndata.columns = ['tweet', 'city']\n# dropna\ndata_clean = data.dropna()\n# create a new column\ndata_clean.loc[:, 'senti_score'] = np.nan\n# select the tweet content before the http\nimport re\nregex = '(\\shttp[s]:\\\\\\\\)'\ndata_clean.loc[:,'tweet_content'] = data_clean.tweet \\\n .apply(lambda x:\n re.split(regex, x)[0])\n# select\nregex2 = '\\s@.+\\:\\s'\ndata_clean.loc[:, 'tweet_content'] = data_clean.tweet_content \\\n .apply(lambda x:\n re.split(regex2, x)[-1])",
"_____no_output_____"
],
[
"# sentimental analysis\nfrom textblob import TextBlob\ndef sentiAnalyze(x):\n return TextBlob(x).sentiment[0]\ndata_clean.loc[:, 'senti_score'] = data_clean.tweet_content \\\n .apply(lambda x: sentiAnalyze(x))",
"_____no_output_____"
],
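[
"# Illustrative check (not in the original): a quick sanity test of the helper\n# above; the first call should be positive, the second negative\nsentiAnalyze('I love this'), sentiAnalyze('I hate this')",
"_____no_output_____"
],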
[
"# dataframe with sentimental score and city names\ndata_city = data_clean[['city', 'senti_score', 'tweet_content']]\ndata_city.reset_index(drop=True, inplace=True)",
"_____no_output_____"
],
[
"# change city name to country name\nimport os\nimport requests\n\n\ngoogle_api_key = os.getenv('GOOGLE_MAPS_API_KEY')\n# country = {}\ndef getCountry(city):\n url = \"https://maps.googleapis.com/maps/api/geocode/json?\"\n params = {'address': city,\n 'key': google_api_key}\n data_json = requests.get(url, params).json()\n if data_json:\n for entry in data_json['results']:\n return entry['formatted_address'].split(', ')[-1]\n\n\ndata_city.loc[:, 'country'] = data_city.loc[:, 'city'].apply(lambda x: getCountry(x))",
"_____no_output_____"
],
[
"data_country = data_city[['tweet_content', 'senti_score', 'country']]",
"_____no_output_____"
],
[
"twitter = data_country[['country', 'tweet_content', 'senti_score']]",
"_____no_output_____"
],
[
"# wordcount\nfrom string import punctuation\n\n\nj = reduce(lambda x, y: x + y, twitters.groupby('country_list'))\ndic = {}\nfor n in range(len(j)/2):\n dic[str(j[2*n])] = j[2*n+1]\ndic2 = {}\nfor p in dic.iterkeys():\n dic2[p] = reduce(lambda x, y: x+y, dic[p]['tweet_content'])",
"_____no_output_____"
],
[
"unit = twitter.head(5)\nunit",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52921381909409fa7a1c668c0eef7039cdfd509
| 13,844 |
ipynb
|
Jupyter Notebook
|
1.Defining_a_model_problems.ipynb
|
jeroenvanbaar/Computational_modeling_tutorial
|
7967e4f7ba7ebebc5eac65b97b3b37314d458623
|
[
"MIT"
] | 8 |
2020-01-23T19:16:18.000Z
|
2021-12-14T06:02:01.000Z
|
1.Defining_a_model_problems.ipynb
|
psychNerdJae/Computational_modeling_tutorial
|
497e873a2e8906c25a23b49c0ff040b3b8051755
|
[
"MIT"
] | 5 |
2020-04-03T15:34:58.000Z
|
2020-04-17T14:23:10.000Z
|
1.Defining_a_model_problems.ipynb
|
psychNerdJae/Computational_modeling_tutorial
|
497e873a2e8906c25a23b49c0ff040b3b8051755
|
[
"MIT"
] | 4 |
2020-01-28T18:20:45.000Z
|
2021-11-25T22:05:24.000Z
| 24.076522 | 409 | 0.393961 |
[
[
[
"import os, sys, glob, scipy\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
]
],
[
[
"## Plan",
"_____no_output_____"
],
[
"1. Describe the task\n2. Make the simplest visualization you can think of that contains:\n - the Dependent Variable, i.e. the behavior of the participants that you're trying to model/predict/explain/account for/etc\n - the Independent Variable(s), i.e. the features of the trial that you think might influence behavior\n - draw each trial as a point on this graph\n3. Think of possible models that would generate similar values for the DV given the observed values for the IV",
"_____no_output_____"
],
[
"## 2. Make a visualization",
"_____no_output_____"
],
[
"##### Load some data",
"_____no_output_____"
]
],
[
[
"base_dir = os.path.realpath('') \ndata_dir = base_dir + '/Data'",
"_____no_output_____"
],
[
"data = pd.read_csv(data_dir + '/Study1_UG.csv')\ndata = data[['sub','trial','unfairness','choice']]\ndata['offer'] = 100 - data['unfairness']\ndata.head()",
"_____no_output_____"
]
],
[
[
"##### Make a simple plot",
"_____no_output_____"
]
],
[
[
"sub = 2\nsub_data = data.query('sub == 2')\nsub_data.head()",
"_____no_output_____"
]
],
[
[
"##### Problem 1. Plot each trial independently, use transparency to visualize overlap",
"_____no_output_____"
],
[
"##### Problem 2. Plot the average over trials with the same offer",
"_____no_output_____"
],
[
"## 3. Think of a model that can recreate this plot",
"_____no_output_____"
],
[
"###### Problem 3. Define the following models\n- Model 1: always accept.\n- Model 2: always reject.\n- Model 3: act randomly.\n- Model 4: maximize payoff ('greed').\n- Model 5: minimize payoff ('inverse greed').\n- Model 6: unfairness punisher (reject with a probability P proportional to the unfairness of the offer).\n- Model 7: inequity aversion.",
"_____no_output_____"
]
],
[
[
"# Always accept\ndef model_1(offer):\n \n \n \n return choice",
"_____no_output_____"
],
[
"# Always reject\ndef model_2(offer):\n \n \n \n return choice",
"_____no_output_____"
],
[
"# Act random\ndef model_3(offer):\n \n \n \n return choice",
"_____no_output_____"
],
[
"# Maximize payoff\ndef model_4(offer):\n \n \n \n \n return choice",
"_____no_output_____"
],
[
"# Minimize payoff\ndef model_5(offer):\n \n \n \n \n return choice",
"_____no_output_____"
],
[
"# Unfairness punisher\ndef model_6(offer):\n \n \n \n \n return choice",
"_____no_output_____"
],
[
"# Inequity aversion\ndef model_7(offer):\n \n \n \n \n return choice",
"_____no_output_____"
]
],
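[
[
"*(Not in the original notebook: a hedged reference sketch, kept separate so the stubs above remain exercises.)* One way models 1 and 4 could look, assuming choices are coded as 1 = accept and 0 = reject:",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch only (assumes choice coding: 1 = accept, 0 = reject)\n\n# Model 1: always accept, whatever the offer\ndef model_1_example(offer):\n    choice = 1\n    return choice\n\n# Model 4: maximize payoff ('greed'): accepting any positive offer pays\n# more than rejecting, which yields 0\ndef model_4_example(offer):\n    choice = 1 if offer > 0 else 0\n    return choice",
"_____no_output_____"
]
],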
[
[
"## 4. Simulating task data",
"_____no_output_____"
]
],
[
[
"simulated_sub_data = sub_data[['trial','offer','choice']].copy()\nsimulated_sub_data['choice'] = np.nan\nsimulated_sub_data.head()",
"_____no_output_____"
]
],
[
[
"##### Problem 4. Simulate task data using a model\nUse one of the models you have defined above to simulate choices for the simulated_sub_data dataframe. ",
"_____no_output_____"
],
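[
"# Hedged illustration for Problem 4, using one of the sketch models above\n# (model_1_example is our illustrative helper, not part of the assignment)\nsimulated_sub_data['choice'] = simulated_sub_data['offer'].apply(model_1_example)\nsimulated_sub_data.head()",
"_____no_output_____"
],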
[
"So here we have a dataset – basically a list of trials that together constitute an experiment – with simulated task data! We've basically generated a pseudo-subject based on one of the models we defined. In the next steps, we will compare such simulated datasets to our actually observed subject data. The more similar a model's simulation is to observed task data, the better the model 'fits' the data.",
"_____no_output_____"
],
[
"## For next time",
"_____no_output_____"
],
[
"- Get Joey's data from GitHub\n- Try to code models 5, 6, and 7\n- Simulate data from each model",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c52924f229df044d651c0e1c1f87aacdb4fe5793
| 21,679 |
ipynb
|
Jupyter Notebook
|
tutorials/High_performance_graph_algorithms.ipynb
|
caufieldjh/grape
|
e4169aad575231db8e4a262bd1759191048a72ee
|
[
"MIT"
] | 6 |
2021-09-22T17:40:01.000Z
|
2022-03-24T04:28:00.000Z
|
tutorials/High_performance_graph_algorithms.ipynb
|
caufieldjh/grape
|
e4169aad575231db8e4a262bd1759191048a72ee
|
[
"MIT"
] | 5 |
2021-10-14T10:48:27.000Z
|
2022-03-23T11:03:05.000Z
|
tutorials/High_performance_graph_algorithms.ipynb
|
caufieldjh/grape
|
e4169aad575231db8e4a262bd1759191048a72ee
|
[
"MIT"
] | 2 |
2021-09-13T16:24:08.000Z
|
2021-09-24T16:23:35.000Z
| 46.421842 | 4,488 | 0.607731 |
[
[
[
"<a href=\"https://colab.research.google.com/github/AnacletoLAB/grape/blob/main/tutorials/High_performance_graph_algorithms.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# High performance graph algorithms\nA number of high performance algorithms have been implemented in Ensmallen, a considerable portion of which is an implementation of algorithms described in the literature by [David Bader](https://davidbader.net/), who we thank for his contribution to the field of graph algorithms.\n\nSee below for the algorithms available in Ensmallen.\n\nNote that all of these algorithms are highly parallel implementations, and these benchmarks are being run on COLAB which typically provides virtual machines with a very small number of cores: on a machine with a reasonable number of cores they will execute much faster.\n\nTo install the GraPE library run:\n\n```bash\npip install grape\n```\n\nTo install exclusively the Ensmallen module, which may be useful when the TensorFlow dependency causes problems, do run:\n\n```bash\npip install ensmallen\n```",
"_____no_output_____"
]
],
[
[
"! pip install -q ensmallen",
"_____no_output_____"
]
],
[
[
"## Retrieving a graph to run the sampling on\nIn this tutorial we will run samples on one of the graph from the ones available from the automatic graph retrieval of Ensmallen, namely the [Homo Sapiens graph from STRING](https://string-db.org/cgi/organisms). If you want to load a graph from an edge list, just follow the examples provided from the [add reference to tutorial].",
"_____no_output_____"
]
],
[
[
"from ensmallen.datasets.string import HomoSapiens",
"_____no_output_____"
]
],
[
[
"Retrieving and loading the graph",
"_____no_output_____"
]
],
[
[
"graph = HomoSapiens()",
"_____no_output_____"
]
],
[
[
"We compute the graph report:",
"_____no_output_____"
]
],
[
[
"graph",
"_____no_output_____"
]
],
[
[
"Enable the speedups",
"_____no_output_____"
]
],
[
[
"graph.enable()",
"_____no_output_____"
]
],
[
[
"## Random Spanning arborescence\nThe spanning arborescence algorithm computes a set of edges, an [Arborescence](https://en.wikipedia.org/wiki/Arborescence_(graph_theory)), that is spanning, i.e cover all the nodes in the graph.\n\nThis is an implementation of [A fast, parallel spanning tree algorithm for symmetric multiprocessors\n(SMPs)](https://davidbader.net/publication/2005-bc/2005-bc.pdf).",
"_____no_output_____"
]
],
[
[
"%%time\nspanning_arborescence_edges = graph.spanning_arborescence()",
"CPU times: user 132 ms, sys: 211 µs, total: 132 ms\nWall time: 73.5 ms\n"
]
],
[
[
"## Connected components\nThe [connected components](https://en.wikipedia.org/wiki/Component_(graph_theory)) of a graph are the set of nodes connected one another by edges.",
"_____no_output_____"
]
],
[
[
"%%time\n(\n connected_component_ids,\n number_of_connected_components,\n minimum_component_size,\n maximum_component_size\n) = graph.connected_components()",
"CPU times: user 240 ms, sys: 123 µs, total: 240 ms\nWall time: 127 ms\n"
]
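,
[
"# Quick look at the returned values (illustrative; not in the original)\nprint('components:', number_of_connected_components)\nprint('smallest / largest component size:',\n      minimum_component_size, '/', maximum_component_size)",
"_____no_output_____"
]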
],
[
[
"## Diameter\nThe following is an implementation of [On computing the diameter of real-world undirected graphs](https://who.rocq.inria.fr/Laurent.Viennot/road/papers/ifub.pdf).",
"_____no_output_____"
]
],
[
[
"%%time\ndiameter = graph.get_diameter(ignore_infinity=True)",
"CPU times: user 7.61 s, sys: 16.2 ms, total: 7.63 s\nWall time: 3.99 s\n"
]
],
[
[
"Note that most properties that boil down to a single value once computed are stored in a cache structure, so recomputing the diameter once it is done takes a significant smaller time.",
"_____no_output_____"
]
],
[
[
"%%time\ndiameter = graph.get_diameter(ignore_infinity=True)",
"CPU times: user 13 µs, sys: 1 µs, total: 14 µs\nWall time: 16.5 µs\n"
]
],
[
[
"## Clustering coefficient and triangles\nThis is an implementation of [Faster Clustering Coefficient Using Vertex Covers](https://davidbader.net/publication/2013-g-ba/2013-g-ba.pdf), proving the average clustering coefficient, the total number of triangles and the number of triangles per node.",
"_____no_output_____"
]
],
[
[
"%%time\ngraph.get_number_of_triangles()",
"CPU times: user 6min 10s, sys: 1 s, total: 6min 11s\nWall time: 3min 8s\n"
],
[
"%%time\ngraph.get_number_of_triangles_per_node()",
"CPU times: user 6min 26s, sys: 1.07 s, total: 6min 27s\nWall time: 3min 16s\n"
],
[
"%%time\ngraph.get_average_clustering_coefficient()",
"CPU times: user 6min 27s, sys: 1.1 s, total: 6min 28s\nWall time: 3min 16s\n"
],
[
"%%time\ngraph.get_clustering_coefficient_per_node()",
"CPU times: user 6min 26s, sys: 1.1 s, total: 6min 27s\nWall time: 3min 16s\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c5292dbe2e6c18b18b833929cd9228a9babc57e8
| 39,568 |
ipynb
|
Jupyter Notebook
|
day4.ipynb
|
onyks/dw_matrix_car
|
6b734bec4491bd829389b31097430ef678102761
|
[
"MIT"
] | null | null | null |
day4.ipynb
|
onyks/dw_matrix_car
|
6b734bec4491bd829389b31097430ef678102761
|
[
"MIT"
] | null | null | null |
day4.ipynb
|
onyks/dw_matrix_car
|
6b734bec4491bd829389b31097430ef678102761
|
[
"MIT"
] | null | null | null | 39,568 | 39,568 | 0.626466 |
[
[
[
"!pip install --upgrade tables\n!pip install eli5\n!pip install xgboost",
"Collecting tables\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ed/c3/8fd9e3bb21872f9d69eb93b3014c86479864cca94e625fd03713ccacec80/tables-3.6.1-cp36-cp36m-manylinux1_x86_64.whl (4.3MB)\n\u001b[K |████████████████████████████████| 4.3MB 2.7MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nInstalling collected packages: tables\n Found existing installation: tables 3.4.4\n Uninstalling tables-3.4.4:\n Successfully uninstalled tables-3.4.4\nSuccessfully installed tables-3.6.1\nRequirement already satisfied: eli5 in /usr/local/lib/python3.6/dist-packages (0.10.1)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.2)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nRequirement already satisfied: xgboost in /usr/local/lib/python3.6/dist-packages (0.90)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.17.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.4.1)\n"
],
[
"import pandas as pd\nimport numpy as np\n\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\nimport xgboost as xgb\n\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.model_selection import cross_val_score, KFold\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance",
"/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.scorer module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.\n warnings.warn(message, FutureWarning)\n/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.feature_selection.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.feature_selection. Anything that cannot be imported from sklearn.feature_selection is now part of the private API.\n warnings.warn(message, FutureWarning)\nUsing TensorFlow backend.\n"
],
[
"cd \"/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car/\"",
"/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car\n"
],
[
"df = pd.read_hdf('data/car.h5')",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"#feat=dynamiczna nazwa poszczególnej kolumny\nSUFFIX_CAT = '__cat'\nfor feat in df.columns:\n if isinstance(df[feat][0],list):continue #jeśli wartości jesst listą to pomiń\\\n\n factorized_values=df[feat].factorize()[0]\n if SUFFIX_CAT in feat:\n df[feat]=factorized_values\n else:\n df[feat + SUFFIX_CAT] = factorized_values",
"_____no_output_____"
],
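[
"# Illustrative aside (not in the original): factorize() encodes each unique\n# value as an integer label, e.g.\npd.Series(['a', 'b', 'a']).factorize()[0]  # -> array([0, 1, 0])",
"_____no_output_____"
],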
[
"cat_feats=[x for x in df.columns if SUFFIX_CAT in x]\ncat_feats=[x for x in cat_feats if 'price' not in x]\nlen(cat_feats)",
"_____no_output_____"
],
[
"x = df[cat_feats].values\ny=df['price_value'].values\n\nmodel= DecisionTreeRegressor(max_depth=5)\nscores = cross_val_score(model, x, y, cv=3, scoring='neg_mean_absolute_error')\nnp.mean(scores), np.std(scores)",
"_____no_output_____"
],
[
"def run_model(model, feats):\n x = df[feats].values\n y=df['price_value'].values\n\n\n scores = cross_val_score(model, x, y, cv=3, scoring='neg_mean_absolute_error')\n return np.mean(scores), np.std(scores)",
"_____no_output_____"
],
[
"run_model( DecisionTreeRegressor(max_depth=5), cat_feats)",
"_____no_output_____"
]
],
[
[
"## random forest",
"_____no_output_____"
]
],
[
[
"model= RandomForestRegressor(max_depth = 5, n_estimators=50, random_state=0 )\nrun_model(model, cat_feats)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"##xgboost",
"_____no_output_____"
]
],
[
[
"xgb_param={\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nmodel = xgb.XGBRegressor(**xgb_param)\nrun_model(model, cat_feats)",
"[20:59:20] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[20:59:41] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:00:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"xgb_param={\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nm = xgb.XGBRegressor(**xgb_param)\nm.fit(x,y)\nimp=PermutationImportance(m, random_state=0).fit(x,y)\neli5.show_weights(imp, feature_names=cat_feats)",
"[21:02:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"feats=['param_napęd__cat',\n'param_rok-produkcji__cat',\n'param_stan__cat',\n'param_skrzynia-biegów__cat',\n'param_faktura-vat__cat',\n'param_moc__cat',\n'param_marka-pojazdu__cat',\n'feature_kamera-cofania__cat',\n'param_typ__cat',\n'param_pojemność-skokowa__cat',\n'seller_name__cat',\n'feature_wspomaganie-kierownicy__cat',\n'param_model-pojazdu__cat',\n'param_wersja__cat',\n'param_kod-silnika__cat',\n'feature_system-start-stop__cat',\n'feature_asystent-pasa-ruchu__cat',\n'feature_czujniki-parkowania-przednie__cat',\n'feature_łopatki-zmiany-biegów__cat',\n'feature_regulowane-zawieszenie__cat']\nlen(feats)",
"_____no_output_____"
],
[
"\nxgb_param={\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nmodel = xgb.XGBRegressor(**xgb_param)\nrun_model(model, feats)",
"_____no_output_____"
],
[
"df['param_rok-produkcji']=df['param_rok-produkcji'].map(lambda x: -1 if str(x) =='None' else int(x))\n\nfeats=['param_napęd__cat',\n'param_rok-produkcji',\n'param_stan__cat',\n'param_skrzynia-biegów__cat',\n'param_faktura-vat__cat',\n'param_moc__cat',\n'param_marka-pojazdu__cat',\n'feature_kamera-cofania__cat',\n'param_typ__cat',\n'param_pojemność-skokowa__cat',\n'seller_name__cat',\n'feature_wspomaganie-kierownicy__cat',\n'param_model-pojazdu__cat',\n'param_wersja__cat',\n'param_kod-silnika__cat',\n'feature_system-start-stop__cat',\n'feature_asystent-pasa-ruchu__cat',\n'feature_czujniki-parkowania-przednie__cat',\n'feature_łopatki-zmiany-biegów__cat',\n'feature_regulowane-zawieszenie__cat']\n\nxgb_param={\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nmodel = xgb.XGBRegressor(**xgb_param)\nrun_model(model, feats)",
"[21:30:06] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:30:10] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:30:14] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"",
"_____no_output_____"
],
[
"df['param_moc']=df['param_moc'].map(lambda x: -1 if str(x) =='None' else int(x.split(' ')[0]))\ndf['param_rok-produkcji']=df['param_rok-produkcji'].map(lambda x: -1 if str(x) =='None' else int(x))\n\n\nfeats=['param_napęd__cat',\n'param_rok-produkcji',\n'param_stan__cat',\n'param_skrzynia-biegów__cat',\n'param_faktura-vat__cat',\n'param_moc',\n'param_marka-pojazdu__cat',\n'feature_kamera-cofania__cat',\n'param_typ__cat',\n'param_pojemność-skokowa__cat',\n'seller_name__cat',\n'feature_wspomaganie-kierownicy__cat',\n'param_model-pojazdu__cat',\n'param_wersja__cat',\n'param_kod-silnika__cat',\n'feature_system-start-stop__cat',\n'feature_asystent-pasa-ruchu__cat',\n'feature_czujniki-parkowania-przednie__cat',\n'feature_łopatki-zmiany-biegów__cat',\n'feature_regulowane-zawieszenie__cat']\n\nxgb_param={\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nmodel = xgb.XGBRegressor(**xgb_param)\nrun_model(model, feats)",
"[21:35:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:35:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:35:28] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"df['param_pojemność-skokowa']=df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) =='None' else int(str(x).split('cm')[0].replace(' ', '')) )\n\n\nfeats=['param_napęd__cat',\n'param_rok-produkcji',\n'param_stan__cat',\n'param_skrzynia-biegów__cat',\n'param_faktura-vat__cat',\n'param_moc',\n'param_marka-pojazdu__cat',\n'feature_kamera-cofania__cat',\n'param_typ__cat',\n'param_pojemność-skokowa',\n'seller_name__cat',\n'feature_wspomaganie-kierownicy__cat',\n'param_model-pojazdu__cat',\n'param_wersja__cat',\n'param_kod-silnika__cat',\n'feature_system-start-stop__cat',\n'feature_asystent-pasa-ruchu__cat',\n'feature_czujniki-parkowania-przednie__cat',\n'feature_łopatki-zmiany-biegów__cat',\n'feature_regulowane-zawieszenie__cat']\n\nxgb_param={\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nmodel = xgb.XGBRegressor(**xgb_param)\nrun_model(model, feats)",
"[21:46:37] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:46:42] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[21:46:46] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c5292fa8d4285234158b12c0db905e765e7efa6b
| 165,710 |
ipynb
|
Jupyter Notebook
|
Power Transformation.ipynb
|
DSC-KIIT/Detection-of-Diabetic-Retinopathy
|
3575f16af88bc0e923a615de33da75605a125c9e
|
[
"MIT"
] | null | null | null |
Power Transformation.ipynb
|
DSC-KIIT/Detection-of-Diabetic-Retinopathy
|
3575f16af88bc0e923a615de33da75605a125c9e
|
[
"MIT"
] | null | null | null |
Power Transformation.ipynb
|
DSC-KIIT/Detection-of-Diabetic-Retinopathy
|
3575f16af88bc0e923a615de33da75605a125c9e
|
[
"MIT"
] | null | null | null | 654.980237 | 139,097 | 0.95185 |
[
[
[
"# Power Law Transformation",
"_____no_output_____"
],
[
"Normally the quality of an image is improved by enhancing contrast and sharpness.\n",
"_____no_output_____"
],
[
"Power law transformations or piece-wise linear transformation functions require lot of user input. In the former case one has to choose the exponent\nappearing in the transformation function, while in the latter case one has to choose the slopes\nand ranges of the straight lines which form the transformation function.\nThe power-law transformation is usually defined as\n\ns = cr^γ\n\nwhere s and r are the gray levels of the pixels in the output and the input images, respectively and\nc is a constant.",
"_____no_output_____"
],
[
"Maximum contrast stretching occurs by choosing the\nvalue of γ for which the transformation function has the maximum slope at r = rmax.\nThat is, if m is the slope of the transformation function, then we should find the value of γ ,\nwhich results in the maximum value of m at r = rmax.",
"_____no_output_____"
],
[
"Given the value of rmax, which corresponds to the peak in the histogram, we can determine the corresponding value of the exponent which would maximize the extent of the contrast\nstretching.",
"_____no_output_____"
]
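,
[
"A quick derivation (added for clarity; a sketch assuming gray levels are normalized to $(0, 1)$): the slope of $s = c r^{γ}$ is $m(γ) = c γ r^{γ-1}$. Differentiating $m$ with respect to γ at $r = r_{max}$ and setting the result to zero gives $c r_{max}^{γ-1} (1 + γ ln(r_{max})) = 0$, so the slope-maximizing exponent is $γ = -1 / ln(r_{max})$.",
"_____no_output_____"
]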
],
[
[
"from IPython.display import Image\nPATH = \"/Users/KIIT/Downloads/\"\nImage(filename = PATH + \"powerlaw.png\", width=400, height=400)",
"_____no_output_____"
],
[
"import cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom PIL import Image",
"_____no_output_____"
],
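[
"# Hedged illustration (not in the original notebook): estimate the\n# slope-maximizing gamma from the histogram peak of a normalized image,\n# following the derivation above\ndef optimal_gamma(gray_norm):\n    # gray_norm: float array with gray levels in (0, 1)\n    hist, bin_edges = np.histogram(gray_norm, bins=256, range=(0, 1))\n    r_max = bin_edges[hist.argmax()] + 0.5/256  # center of the peak bin\n    r_max = max(r_max, 1e-6)                    # guard against log(0)\n    return -1.0/np.log(r_max)",
"_____no_output_____"
],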
[
"files = glob.glob (\"C:/DATA/gaussian_filtered_images/gaussian_filtered_images/Mild/*.png\") #reading files from te directory\nfor myFile in files:\n img = cv2.imread(myFile,1)\n img = img.astype('float32')\n img = img/255.0\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.pow(img,1.5) # Power law Trnasformation with γ=1.5\n img = img*255.0\n img = img.astype('uint8')\n img = Image.fromarray(img)\n img.save(\"C:/DATA1/gaussian_filtered_images/gaussian_filtered_images/Mild/\"+str(i)+\".png\") #saving the transformed image to anther directory\n i=i+1\nplt.imshow(img)\n",
"_____no_output_____"
],
[
"\nfiles = glob.glob (\"C:/DATA/gaussian_filtered_images/gaussian_filtered_images/Moderate/*.png\")\nfor myFile in files:\n img = cv2.imread(myFile,1)\n img = img.astype('float32')\n img = img/255.0\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.pow(img,1.5)\n img = img*255.0\n img = img.astype('uint8')\n img = Image.fromarray(img)\n img.save(\"C:/DATA1/gaussian_filtered_images/gaussian_filtered_images/Moderate/\"+str(i)+\".png\")\n i=i+1\n \n",
"Mild_data shape: (0,)\n"
],
[
"\nfiles = glob.glob (\"C:/DATA/gaussian_filtered_images/gaussian_filtered_images/No_DR/*.png\")\nfor myFile in files:\n img = cv2.imread(myFile,1)\n img = img.astype('float32')\n img = img/255.0\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.pow(img,1.5)\n img = img*255.0\n img = img.astype('uint8')\n img = Image.fromarray(img)\n img.save(\"C:/DATA1/gaussian_filtered_images/gaussian_filtered_images/No_DR/\"+str(i)+\".png\")\n i=i+1",
"_____no_output_____"
],
[
"\nfiles = glob.glob (\"C:/DATA/gaussian_filtered_images/gaussian_filtered_images/Proliferate_DR/*.png\")\nfor myFile in files:\n img = cv2.imread(myFile,1)\n img = img.astype('float32')\n img = img/255.0\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.pow(img,1.5)\n img = img*255.0\n img = img.astype('uint8')\n img = Image.fromarray(img)\n img.save(\"C:/DATA1/gaussian_filtered_images/gaussian_filtered_images/Proliferate_DR/\"+str(i)+\".png\")\n i=i+1",
"_____no_output_____"
],
[
"\nfiles = glob.glob (\"C:/DATA/gaussian_filtered_images/gaussian_filtered_images/Severe/*.png\")\nfor myFile in files:\n img = cv2.imread(myFile,1)\n img = img.astype('float32')\n img = img/255.0\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.pow(img,1.5)\n img = img*255.0\n img = img.astype('uint8')\n img = Image.fromarray(img)\n img.save(\"C:/DATA1/gaussian_filtered_images/gaussian_filtered_images/Severe/\"+str(i)+\".png\")\n i=i+1",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52958e57e8ef9295e522737ba8a6cd10ce9df20
| 4,633 |
ipynb
|
Jupyter Notebook
|
WorkingWithKey-ValuePairs.ipynb
|
bhchen1008/SparkJupyterNotebook
|
1a90ecdc768c13bbae5d4edae7fec49fe424618e
|
[
"MIT"
] | 1 |
2016-11-30T05:53:45.000Z
|
2016-11-30T05:53:45.000Z
|
WorkingWithKey-ValuePairs.ipynb
|
bhchen1008/SparkJupyterNotebook
|
1a90ecdc768c13bbae5d4edae7fec49fe424618e
|
[
"MIT"
] | null | null | null |
WorkingWithKey-ValuePairs.ipynb
|
bhchen1008/SparkJupyterNotebook
|
1a90ecdc768c13bbae5d4edae7fec49fe424618e
|
[
"MIT"
] | null | null | null | 22.490291 | 83 | 0.389812 |
[
[
[
"from pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName(\"Key-ValuePairs\").setMaster(\"local[*]\")\nsc = SparkContext(conf=conf)",
"_____no_output_____"
],
[
"lines = sc.textFile(\"/data/key-valueExample.txt\")\nlines.collect()",
"_____no_output_____"
],
[
"pairs = lines.map(lambda s: (s, 1))",
"_____no_output_____"
],
[
"pairs.collect()",
"_____no_output_____"
],
[
"counts = pairs.reduceByKey(lambda a, b: a + b)",
"_____no_output_____"
],
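[
"# Illustrative extra step (not in the original): reduceByKey merges the values\n# for each key with the given function; sortByKey orders the resulting pairs\ncounts.sortByKey().collect()",
"_____no_output_____"
],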
[
"counts.collect()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c5296a77962fc4f73e6658375e90b4d4914db429
| 11,955 |
ipynb
|
Jupyter Notebook
|
notebooks/downscaling_pipeline/prototype_global_bias_correction_scaling.ipynb
|
brews/downscaleCMIP6
|
7ce377f50c5a1b9d554668efeb30e969dd6ede18
|
[
"MIT"
] | 19 |
2020-10-31T11:37:10.000Z
|
2022-03-30T22:44:43.000Z
|
notebooks/downscaling_pipeline/prototype_global_bias_correction_scaling.ipynb
|
brews/downscaleCMIP6
|
7ce377f50c5a1b9d554668efeb30e969dd6ede18
|
[
"MIT"
] | 413 |
2020-09-18T00:11:42.000Z
|
2022-03-30T22:42:49.000Z
|
notebooks/downscaling_pipeline/prototype_global_bias_correction_scaling.ipynb
|
brews/downscaleCMIP6
|
7ce377f50c5a1b9d554668efeb30e969dd6ede18
|
[
"MIT"
] | 11 |
2021-01-28T01:05:10.000Z
|
2022-03-31T02:57:20.000Z
| 27.997658 | 414 | 0.57591 |
[
[
[
"%matplotlib inline \nimport xarray as xr\nimport os \nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"import skdownscale",
"_____no_output_____"
],
[
"import dask\nimport dask.array as da\nimport dask.distributed as dd\nimport rhg_compute_tools.kubernetes as rhgk\n\nfrom utils import _convert_lons, _remove_leap_days, _convert_ds_longitude\nfrom regridding import apply_weights\n\nimport intake\nimport xesmf as xe",
"_____no_output_____"
],
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"# output directory\nwrite_direc = '/gcs/rhg-data/climate/downscaled/workdir'",
"_____no_output_____"
]
],
[
[
"This notebook implements a scaling test for bias correction, using the BCSDTemperature model from `scikit-downscale`, with the daily BCSD bias correction method as implemented in the NASA-NEX dataset. \n\nDatasets used include a CMIP6 model from a historical run (`GISS-E2-1-G` from NASA) and GMFD (obs). Historical/training period is taken as 1980-1982, and the future/predict period is 1990-1991. \n\nGMFD is coarsened to the NASA `GISS-E2-1-G` grid for this bias correction test. \n\nNote that the purpose of this notebook is intended to allow us to get a better estimate of timing for global daily bias correction. Future work will build on this notebook to: \n- replace GMFD with ERA5\n- combine this notebook with `SD_prototype.ipynb`, along with NASA-NEX data and a corresponding CMIP5 model, and over a limited domain, to test our implementation of BCSD against NASA-NEX for a limited domain. That notebook will be used as a prototype for our downscaling pipeline and can be modified to become a system test for the pipeline (1-3 gridcells for CI/CD, limited domain for science testing). \n\nThis notebook was also used as a resource and checkpoint for this workflow: https://github.com/jhamman/scikit-downscale/blob/ecahm2020/examples/2020ECAHM-scikit-downscale.ipynb",
"_____no_output_____"
]
],
[
[
"# client, cluster = rhgk.get_standard_cluster(extra_pip_packages=\"git+https://github.com/dgergel/xsd.git@feature/implement_daily_bcsd\")\nclient, cluster = rhgk.get_standard_cluster(extra_pip_packages=\"git+https://github.com/jhamman/scikit-downscale.git\")",
"_____no_output_____"
],
[
"cluster",
"_____no_output_____"
],
[
"cluster.scale(40)",
"_____no_output_____"
],
[
"'''a = da.ones((1000, 1000, 1000))\na.mean().compute()'''",
"_____no_output_____"
],
[
"from skdownscale.pointwise_models import PointWiseDownscaler, BcsdTemperature",
"_____no_output_____"
],
[
"train_slice = slice('1980', '1989') # train time range\nholdout_slice = slice('1990', '2000') # prediction time range",
"_____no_output_____"
],
[
"# client.get_versions(check=True)",
"_____no_output_____"
],
[
"# use GMFD as standin for ERA-5\ntmax_obs = xr.open_mfdataset(os.path.join('/gcs/rhg-data/climate/source_data/GMFD/tmax', \n 'tmax_0p25_daily_198*'), concat_dim='time', combine='nested',\n parallel=True).squeeze(drop=True).rename({'latitude': 'lat', 'longitude': 'lon'})\n\n'''tmax_obs = xr.open_dataset(os.path.join('/gcs/rhg-data/climate/source_data/GMFD/tmax', \n 'tmax_0p25_daily_1980-1980.nc')).rename({'latitude': 'lat', 'longitude': 'lon'})'''\n\n# standardize longitudes \ntmax_obs = _convert_ds_longitude(tmax_obs, lon_name='lon')\n\n# remove leap days \ntmax_obs = _remove_leap_days(tmax_obs)\n\nobs_subset = tmax_obs.sel(time=train_slice)",
"_____no_output_____"
]
],
[
[
"get some CMIP6 data ",
"_____no_output_____"
]
],
[
[
"# search the cmip6 catalog\ncol = intake.open_esm_datastore(\"https://storage.googleapis.com/cmip6/pangeo-cmip6.json\")\ncat = col.search(experiment_id=['historical', 'ssp585'], table_id='day', variable_id='tasmax',\n grid_label='gn')",
"_____no_output_____"
],
[
"# cat['CMIP.NASA-GISS.GISS-E2-1-G.historical.day.gn']",
"_____no_output_____"
],
[
"# access the data and do some cleanup\nds_model = cat['CMIP.NASA-GISS.GISS-E2-1-G.historical.day.gn'].to_dask(\n ).isel(member_id=0).squeeze(drop=True).drop(['height', 'lat_bnds', 'lon_bnds', 'time_bnds', \n 'member_id'])\n\nds_model.lon.values[ds_model.lon.values > 180] -= 360\nds_model = ds_model.roll(lon=72, roll_coords=True)",
"_____no_output_____"
]
],
[
[
"regrid obs to model resolution ",
"_____no_output_____"
]
],
[
[
"# first rechunk in space for xESMF \nchunks = {'lat': len(obs_subset.lat), 'lon': len(obs_subset.lon), 'time': 100}\nobs_subset = obs_subset.chunk(chunks)",
"_____no_output_____"
],
[
"%%time\nobs_to_mod_weights = os.path.join(write_direc, 'bias_correction_bilinear_weights_new.nc')\n\nregridder_obs_to_mod = xe.Regridder(obs_subset.isel(time=0, drop=True), \n ds_model.isel(time=0, drop=True), \n 'bilinear', \n filename=obs_to_mod_weights, \n reuse_weights=True)\n\nobs_subset_modres_lazy = xr.map_blocks(apply_weights, regridder_obs_to_mod, \n args=[tmax_obs['tmax']])\n\nobs_subset_modres = obs_subset_modres_lazy.compute()",
"_____no_output_____"
]
],
[
[
"### subset datasets to get ready for bias correcting ",
"_____no_output_____"
]
],
[
[
"chunks = {'lat': 10, 'lon': 10, 'time': -1}\n\ntrain_subset = ds_model['tasmax'].sel(time=train_slice)\ntrain_subset['time'] = train_subset.indexes['time'].to_datetimeindex()\ntrain_subset = train_subset.resample(time='1d').mean().load(scheduler='threads').chunk(chunks)\n\n\nholdout_subset = ds_model['tasmax'].sel(time=holdout_slice)\nholdout_subset['time'] = holdout_subset.indexes['time'].to_datetimeindex()\nholdout_subset = holdout_subset.resample(time='1d').mean().load(scheduler='threads').chunk(chunks)",
"_____no_output_____"
]
],
[
[
"### fit BcsdTemperature models at each x/y point in domain using the `PointwiseDownscaler` with the `daily_nasa-nex` option",
"_____no_output_____"
]
],
[
[
"%%time\n# model = PointWiseDownscaler(BcsdTemperature(return_anoms=False, time_grouper='daily_nasa-nex'))\nmodel = PointWiseDownscaler(BcsdTemperature(return_anoms=False))",
"_____no_output_____"
],
[
"model = BcsdTemperature(return_anoms=False)",
"_____no_output_____"
],
[
"# remove leap days from model data\ntrain_subset_noleap = _remove_leap_days(train_subset)\n\nholdout_subset_noleap = _remove_leap_days(holdout_subset)",
"_____no_output_____"
],
[
"# chunk datasets \ntrain_subset_noleap = train_subset_noleap.chunk(chunks)\nholdout_subset_noleap = holdout_subset_noleap.chunk(chunks)\nobs_subset_modres = obs_subset_modres.chunk(chunks)",
"_____no_output_____"
],
[
"%%time\nmodel.fit(train_subset_noleap, obs_subset_modres)",
"_____no_output_____"
],
[
"model",
"_____no_output_____"
],
[
"display(model, model._models)",
"_____no_output_____"
],
[
"%%time\npredicted = model.predict(holdout_subset_noleap).load()",
"_____no_output_____"
],
[
"predicted.isel(time=0).plot(vmin=250, vmax=325)",
"_____no_output_____"
],
[
"predicted.sel(lat=47, lon=-122, method='nearest').plot()",
"_____no_output_____"
]
],
[
[
"### save data",
"_____no_output_____"
]
],
[
[
"ds_predicted = predicted.to_dataset(name='tmax')\n\nds_new_attrs = {\"file description\": \"daily bias correction test for 1980s, output from global bias correction scaling test\",\n \"author\": \"Diana Gergel\", \"contact\": \"[email protected]\"}\nds_predicted.attrs.update(ds_new_attrs)\nds_predicted.to_netcdf(os.path.join(write_direc, 'global_bias_corrected_tenyears.nc'))",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
c5297027bd8bdbfbff40d585a65e961989fe676f
| 2,983 |
ipynb
|
Jupyter Notebook
|
Internet Speed Test analysis.ipynb
|
kevdever/InternetSpeedLogger
|
2c77b2003e4c04b2e180ae581964e56ab2893757
|
[
"Apache-2.0"
] | null | null | null |
Internet Speed Test analysis.ipynb
|
kevdever/InternetSpeedLogger
|
2c77b2003e4c04b2e180ae581964e56ab2893757
|
[
"Apache-2.0"
] | null | null | null |
Internet Speed Test analysis.ipynb
|
kevdever/InternetSpeedLogger
|
2c77b2003e4c04b2e180ae581964e56ab2893757
|
[
"Apache-2.0"
] | null | null | null | 23.674603 | 198 | 0.537714 |
[
[
[
"import pandas as pd\nimport plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom datetime import datetime\nfrom dateutil import tz",
"_____no_output_____"
],
[
"def get_sql_connection():\n '''imports the necessary odbc package and returns a connection to the db'''\n import platform\n os = platform.system()\n if os == 'Windows':\n import pypyodbc\n return pypyodbc.connect('<REDACTED connection string; see string from app.config>')\n elif os == 'Darwin':\n import pyodbc\n return pyodbc.connect(driver='/usr/local/lib/libtdsodbc.so', TDS_Version='8.0', server='<REDACTED host>',port=1433, database='InternetSpeedLogger',uid='<USERNAME>',pwd='<PASSWORD>')\n else:\n raise ValueError('Unsupported OS detected (%s)' % os)",
"_____no_output_____"
],
[
"connection = get_sql_connection()\ncmd = \"select* from internetspeedlogger.dbo.results\"\ndata = pd.read_sql(cmd,connection)",
"_____no_output_____"
],
[
"data['dlmbit'] = data.Download/1000000\ndata.dlmbit.describe()",
"_____no_output_____"
],
[
"data['localtime'] = pd.DatetimeIndex(pd.to_datetime(data['Timestamp'])).tz_localize('UTC').tz_convert('US/Pacific')\n",
"_____no_output_____"
],
[
"data['time'] = data['localtime'].dt.strftime('%H:%M')",
"_____no_output_____"
],
[
"data = data.sort_values('time')",
"_____no_output_____"
],
[
"trace = go.Scatter(\n x=data['time'],\n y=data['dlmbit'],\n mode = 'markers'\n)\n\npltData = [trace]\n\nfig = go.Figure(data=pltData)\nplotly.offline.plot(fig)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52984f9552d0d922b4833ccb1a9cb182cf96a34
| 79,626 |
ipynb
|
Jupyter Notebook
|
jupyter/e5-neutrinos.ipynb
|
cosmicrays/hermes-examples
|
b2b7c1b3776acadeab43223f0be8d17363f88f66
|
[
"MIT"
] | 4 |
2020-02-18T15:25:41.000Z
|
2020-10-29T07:59:43.000Z
|
jupyter/e5-neutrinos.ipynb
|
tospines/hermes-examples
|
5db874ac6132feb73b4fa20dc6bb5af099ce1fd0
|
[
"MIT"
] | 2 |
2021-03-22T14:58:13.000Z
|
2021-05-27T12:44:08.000Z
|
jupyter/e5-neutrinos.ipynb
|
tospines/hermes-examples
|
5db874ac6132feb73b4fa20dc6bb5af099ce1fd0
|
[
"MIT"
] | 1 |
2021-02-22T16:10:48.000Z
|
2021-02-22T16:10:48.000Z
| 307.436293 | 45,404 | 0.928252 |
[
[
[
"# Jupyter Example 5 for HERMES: Neutrinos",
"_____no_output_____"
]
],
[
[
"from pyhermes import *\nfrom pyhermes.units import PeV, TeV, GeV, mbarn, kpc, pc, deg, rad\n\nimport astropy.units as u\n\nimport numpy as np\nimport healpy\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"HEMRES has available two cross-section modules for $pp \\rightarrow \\nu$:\n * one built on top of cparamlib: Kamae et al. 2006\n * one based on Kelner-Aharonian parametrization",
"_____no_output_____"
]
],
[
[
"kamae06 = interactions.Kamae06Neutrino()\nkelahar = interactions.KelnerAharonianNeutrino()",
"_____no_output_____"
],
[
"E_neutrino_range = np.logspace(0,6,100)*GeV\nE_proton_list = [10*GeV, 100*GeV, 1*TeV, 100*TeV, 1*PeV]\n\ndiff_sigma = lambda model, E_proton: [\n E_neutrino*model.getDiffCrossSection(E_proton, E_neutrino)/mbarn\n for E_neutrino in E_neutrino_range\n]\ndiff_sigma_kamae06 = lambda E_proton: diff_sigma(kamae06, E_proton)\ndiff_sigma_kelahar = lambda E_proton: diff_sigma(kelahar, E_proton)",
"_____no_output_____"
],
[
"colors = ['tab:brown', 'tab:red', 'tab:green', 'tab:blue', 'tab:orange']\nfor E_proton, c in zip(E_proton_list, colors):\n plt.loglog(E_neutrino_range/GeV, diff_sigma_kamae06(E_proton),\n ls='-', color=c, label=\"{}\".format(E_proton.toAstroPy().to('TeV').round(2)))\n plt.loglog(E_neutrino_range/GeV, diff_sigma_kelahar(E_proton),\n ls='--', color=c)\nplt.ylim(top=1e3, bottom=1e-2)\nplt.title(\"Kamae06 (solid) and K&A (dashed) for a list of $E_p$\")\nplt.xlabel(r\"$E_\\nu$ / GeV\")\nplt.ylabel(r\"$E_\\nu\\, \\mathrm{d}\\sigma_{pp \\rightarrow \\nu} / \\mathrm{d} E_\\nu$ [mbarn]\")\n_ = plt.legend(loc=\"upper right\", frameon=False)",
"_____no_output_____"
],
[
"def integrate_template(integrator, nside):\n \n integrator.setupCacheTable(60, 60, 12)\n sun_pos = Vector3QLength(8.0*kpc, 0*pc, 0*pc)\n integrator.setSunPosition(sun_pos)\n \n mask_edges = ([5*deg, 0*deg], [-5*deg, 180*deg])\n mask = RectangularWindow(*mask_edges)\n \n skymap_range = GammaSkymapRange(nside, 0.05*TeV, 1e4*TeV, 20)\n skymap_range.setIntegrator(integrator)\n skymap_range.setMask(mask)\n \n skymap_range.compute()\n \n return skymap_range\n\ndef integrate_neutrino(cosmicrays, gas, crosssection):\n nside = 256\n integrator = PiZeroIntegrator(cosmicrays, gas, crosssection)\n return integrate_template(integrator, nside)",
"_____no_output_____"
],
[
"neutral_gas_HI = neutralgas.RingModel(neutralgas.RingType.HI)\nproton = cosmicrays.Dragon2D(Proton)",
"_____no_output_____"
],
[
"skymap_range_neutrino_HI_kamae06 = integrate_neutrino(proton, neutral_gas_HI, kamae06)\nskymap_range_neutrino_HI_kelahar = integrate_neutrino(proton, neutral_gas_HI, kelahar)",
"_____no_output_____"
],
[
"#use_units = skymap_range_HI[0].getUnits() # default units for GammaSkymap (GeV^-1 m^-2 s^-1 sr^-1)\nuse_units = \"GeV^-1 cm^-2 s^-1 sr^-1\" # override default\nskymap_units = u.Quantity(1, use_units)\nbase_units = skymap_units.unit.si.scale",
"_____no_output_____"
],
[
"def calc_mean_flux(skymap_range):\n energies = np.array([float(s.getEnergy()/GeV) for s in skymap_range])\n fluxes = np.array([s.getMean() for s in skymap_range]) / base_units\n return energies, fluxes",
"_____no_output_____"
],
[
"def plot_spectrum(skymap_range, label, style):\n energies, fluxes = calc_mean_flux(skymap_range)\n plt.plot(energies, fluxes*energies**2, style, label=label)\n \ndef plot_total_spectrum(list_of_skymap_range, label, style):\n fluxes = QDifferentialIntensity(0)\n for skymap_range in list_of_skymap_range:\n energies, fluxes_i = calc_mean_flux(skymap_range)\n fluxes = fluxes + fluxes_i\n plt.plot(energies, fluxes*energies**2, style, label=label)\n\nfig, ax = plt.subplots()\n\nplot_spectrum(skymap_range_neutrino_HI_kamae06, r'$\\nu $ @ p + HI (Kamae06)', '-')\nplot_spectrum(skymap_range_neutrino_HI_kelahar, r'$\\nu $ @ p + HI (K&A)', '--')\n\nplt.title(\"Neutrinos from diffuse emission (Fornieri20, Remy18)\\n $|b| < 5^\\degree$, $0^\\degree \\leq l \\leq 180^\\degree$\")\nplt.legend(loc=\"lower left\")\n\nplt.xlabel(r\"$E_\\nu$ / GeV\")\nplt.ylabel(r\"$E_\\nu\\, \\mathrm{d}\\Phi_\\gamma / \\mathrm{d} E_\\gamma$ / \" + (skymap_units*u.GeV**2).unit.to_string(format='latex_inline'))\n\nax.tick_params(which='minor', direction='in', axis='both', bottom=True, top=True, left=True, right=True, length=3)\nax.tick_params(which='major', direction='in', axis='both', bottom=True, top=True, left=True, right=True, length=5)\nplt.xscale(\"log\")\nplt.yscale(\"log\")\n\nplt.ylim(10**(-9), 10**(-6))\nplt.xlim(10**(2), 10**(6))\n\n#plt.savefig(\"img/neutrinos-from-diffuse-emission-spectrum-180.pdf\", dpi=150)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52991b6bc1f1477404b40da647042813e48f724
| 32,100 |
ipynb
|
Jupyter Notebook
|
downloads/wisconsin/wisconsin_cmmt.ipynb
|
DamienIrving/climate-analysis
|
4b9911101f85bde30ac6e7764119e6410d7f633d
|
[
"MIT"
] | 37 |
2016-04-24T09:05:06.000Z
|
2021-11-08T08:47:40.000Z
|
downloads/wisconsin/wisconsin_cmmt.ipynb
|
YonSci/climate-analysis
|
4b9911101f85bde30ac6e7764119e6410d7f633d
|
[
"MIT"
] | 35 |
2015-02-18T23:42:34.000Z
|
2017-03-12T01:33:48.000Z
|
downloads/wisconsin/wisconsin_cmmt.ipynb
|
YonSci/climate-analysis
|
4b9911101f85bde30ac6e7764119e6410d7f633d
|
[
"MIT"
] | 16 |
2015-02-18T23:38:01.000Z
|
2022-02-17T08:39:29.000Z
| 40.996169 | 252 | 0.428162 |
[
[
[
"## Date list",
"_____no_output_____"
],
[
"#### Find start and end dates",
"_____no_output_____"
]
],
[
[
"import pandas",
"_____no_output_____"
],
[
"dates_df = pandas.read_csv('enderby_CMMTdatetimes.csv', header=1)\nprint dates_df",
" Start YYYY-MM-DD Start Time UTC End YYYY-MM-DD End Time UTC Unnamed: 4\n0 1994-07-06 03:00 1994-07-09 00:00 NaN\n1 1994-11-04 03:00 1994-11-05 15:00 skirt\n2 1995-04-19 18:00 1995-04-21 03:00 skirt\n3 1996-09-18 18:00 1996-09-20 21:00 skirt\n4 1996-10-07 06:00 1996-10-08 21:00 skirt\n5 1996-10-27 00:00 1996-10-28 06:00 skirt\n6 1997-03-04 15:00 1997-03-06 12:00 skirt\n7 1997-04-26 12:00 1997-04-27 15:00 skirt\n8 1997-06-17 18:00 1997-06-18 21:00 skirt\n9 1998-05-13 21:00 1998-05-15 06:00 skirt\n10 1998-07-20 00:00 1998-07-22 00:00 skirt\n11 1998-07-25 18:00 1998-07-28 03:00 skirt\n12 1999-01-07 03:00 1999-01-10 21:00 NaN\n13 1999-02-04 18:00 1999-02-07 21:00 skirt\n14 1999-02-15 12:00 1999-02-16 21:00 skirt\n15 1999-03-04 18:00 1999-03-06 18:00 skirt\n16 1999-04-12 18:00 1999-04-17 21:00 NaN\n17 1999-05-10 18:00 1999-05-12 00:00 skirt\n18 1999-06-10 21:00 1999-06-13 15:00 NaN\n19 1999-06-24 21:00 1999-06-27 06:00 NaN\n20 1999-07-02 06:00 1999-07-03 21:00 skirt\n21 1999-09-07 18:00 1999-09-09 16:00 skirt\n22 1999-09-11 06:00 1999-09-12 09:00 skirt\n23 1999-10-05 03:00 1999-10-06 00:00 skirt\n24 1999-10-26 03:00 1999-10-27 06:00 skirt\n25 1999-11-04 18:00 1999-11-07 06:00 skirt\n26 1999-12-20 12:00 1999-12-22 09:00 skirt\n27 2000-01-09 06:00 2000-01-11 18:00 NaN\n28 2000-05-08 21:00 2000-05-09 21:00 skirt\n29 2000-05-11 12:00 2000-05-16 03:00 NaN\n.. ... ... ... ... ...\n117 2009-10-09 19:00 2009-10-13 12:00 NaN\n118 2009-10-15 09:00 2009-10-16 10:00 skirt\n119 2009-10-18 21:00 2009-10-20 12:00 skirt\n120 2009-10-23 07:00 2009-10-27 12:00 NaN\n121 2009-11-20 07:00 2009-11-23 10:00 NaN\n122 2009-12-22 15:00 2009-12-24 18:00 NaN\n123 2009-12-29 09:00 2010-01-01 01:00 NaN\n124 2010-01-15 08:00 2010-01-18 00:00 NaN\n125 2010-04-13 01:00 2010-04-15 03:00 NaN\n126 2010-10-11 19:00 2010-10-13 10:00 skirt\n127 2010-10-16 06:00 2010-10-18 16:00 NaN\n128 2010-10-28 01:00 2010-10-29 23:00 skirt\n129 2010-11-04 01:00 2010-11-09 00:00 NaN\n130 2010-11-13 10:00 2010-11-17 18:00 NaN\n131 2010-12-08 20:00 2010-12-11 10:00 NaN\n132 2011-01-02 19:00 2011-01-07 02:00 NaN\n133 2011-01-15 15:00 2011-01-19 00:00 NaN\n134 2011-02-15 21:00 2011-02-17 00:00 skirt\n135 2011-03-14 15:00 2011-03-16 04:00 skirt\n136 2011-04-02 12:00 2011-04-04 14:00 NaN\n137 2011-04-05 09:00 2011-04-07 00:00 NaN\n138 2011-04-29 01:00 2011-04-30 08:00 skirt\n139 2011-05-24 21:00 2011-05-26 10:00 skirt\n140 2011-06-20 04:00 2011-06-21 11:00 skirt\n141 2011-06-24 17:00 2011-06-26 07:00 skirt\n142 2012-04-16 18:00 2012-04-17 17:00 skirt\n143 2012-06-11 23:00 2012-06-15 22:00 NaN\n144 2012-08-03 05:00 2012-08-04 10:00 skirt\n145 2012-08-24 00:00 2012-08-27 11:00 NaN\n146 2012-10-28 13:00 2012-10-30 20:00 skirt\n\n[147 rows x 5 columns]\n"
],
[
"for index, row in dates_df[['Start YYYY-MM-DD', 'End YYYY-MM-DD']].iterrows():\n start, end = row\n print start, end",
"1994-07-06 1994-07-09\n1994-11-04 1994-11-05\n1995-04-19 1995-04-21\n1996-09-18 1996-09-20\n1996-10-07 1996-10-08\n1996-10-27 1996-10-28\n1997-03-04 1997-03-06\n1997-04-26 1997-04-27\n1997-06-17 1997-06-18\n1998-05-13 1998-05-15\n1998-07-20 1998-07-22\n1998-07-25 1998-07-28\n1999-01-07 1999-01-10\n1999-02-04 1999-02-07\n1999-02-15 1999-02-16\n1999-03-04 1999-03-06\n1999-04-12 1999-04-17\n1999-05-10 1999-05-12\n1999-06-10 1999-06-13\n1999-06-24 1999-06-27\n1999-07-02 1999-07-03\n1999-09-07 1999-09-09\n1999-09-11 1999-09-12\n1999-10-05 1999-10-06\n1999-10-26 1999-10-27\n1999-11-04 1999-11-07\n1999-12-20 1999-12-22\n2000-01-09 2000-01-11\n2000-05-08 2000-05-09\n2000-05-11 2000-05-16\n2000-06-11 2000-06-16\n2000-07-10 2000-07-11\n2000-08-11 2000-08-12\n2000-11-07 2000-11-09\n2000-12-26 2000-12-29\n2001-01-12 2001-01-13\n2001-02-13 2001-02-15\n2001-03-18 2001-03-19\n2001-05-06 2001-05-08\n2001-06-11 2001-06-13\n2001-06-18 2001-06-19\n2001-06-26 2001-06-27\n2001-07-10 2001-07-12\n2001-07-20 2001-07-22\n2001-08-08 2001-08-09\n2001-10-05 2001-10-08\n2001-10-27 2001-10-28\n2002-01-22 2002-01-24\n2002-02-16 2002-02-20\n2002-03-09 2002-03-12\n2002-04-09 2002-04-11\n2002-04-20 2002-04-22\n2002-05-02 2002-05-04\n2002-07-01 2002-07-02\n2002-07-15 2002-07-16\n2002-10-05 2002-10-09\n2002-12-17 2002-12-20\n2003-01-21 2003-01-24\n2003-01-27 2003-01-29\n2003-04-10 2003-04-13\n2003-06-13 2003-06-15\n2003-07-18 2003-07-20\n2003-10-17 2003-10-19\n2003-10-31 2003-11-02\n2003-11-10 2003-11-11\n2003-11-17 2003-11-19\n2003-12-28 2003-12-29\n2004-01-01 2004-01-03\n2004-01-16 2004-01-19\n2004-02-02 2004-02-04\n2004-02-15 2004-02-20\n2004-04-07 2004-04-08\n2004-04-10 2004-04-12\n2004-08-15 2004-08-17\n2004-10-08 2004-10-11\n2004-10-22 2004-10-23\n2004-11-19 2004-11-21\n2004-11-25 2004-11-26\n2004-11-30 2004-12-02\n2005-02-06 2005-02-08\n2005-02-16 2005-02-18\n2005-02-24 2005-02-27\n2005-03-10 2005-03-12\n2005-04-24 2005-04-26\n2005-05-13 2005-05-15\n2005-08-03 2005-08-06\n2005-10-22 2005-10-27\n2005-11-27 2005-11-29\n2005-12-07 2005-12-08\n2006-01-15 2006-01-18\n2006-02-18 2006-02-20\n2006-08-08 2006-08-09\n2006-10-08 2006-10-09\n2007-01-13 2007-01-15\n2007-01-21 2007-01-24\n2007-04-15 2007-04-17\n2007-06-19 2007-06-21\n2007-10-01 2007-10-03\n2007-11-14 2007-11-18\n2007-12-06 2007-12-07\n2008-04-06 2008-04-08\n2008-07-25 2008-07-26\n2008-12-04 2008-12-05\n2009-01-01 2009-01-02\n2009-01-10 2009-01-13\n2009-01-27 2009-01-31\n2009-02-02 2009-02-04\n2009-02-19 2009-02-21\n2009-02-22 2009-02-24\n2009-03-13 2009-03-16\n2009-03-16 2009-03-17\n2009-05-17 2009-05-18\n2009-06-09 2009-06-12\n2009-06-24 2009-06-27\n2009-07-02 2009-07-04\n2009-09-08 2009-09-12\n2009-10-06 2009-10-07\n2009-10-09 2009-10-13\n2009-10-15 2009-10-16\n2009-10-18 2009-10-20\n2009-10-23 2009-10-27\n2009-11-20 2009-11-23\n2009-12-22 2009-12-24\n2009-12-29 2010-01-01\n2010-01-15 2010-01-18\n2010-04-13 2010-04-15\n2010-10-11 2010-10-13\n2010-10-16 2010-10-18\n2010-10-28 2010-10-29\n2010-11-04 2010-11-09\n2010-11-13 2010-11-17\n2010-12-08 2010-12-11\n2011-01-02 2011-01-07\n2011-01-15 2011-01-19\n2011-02-15 2011-02-17\n2011-03-14 2011-03-16\n2011-04-02 2011-04-04\n2011-04-05 2011-04-07\n2011-04-29 2011-04-30\n2011-05-24 2011-05-26\n2011-06-20 2011-06-21\n2011-06-24 2011-06-26\n2012-04-16 2012-04-17\n2012-06-11 2012-06-15\n2012-08-03 2012-08-04\n2012-08-24 2012-08-27\n2012-10-28 2012-10-30\n"
]
],
[
[
"#### Create a list of dates spanning a time period",
"_____no_output_____"
]
],
[
[
"import dateutil",
"_____no_output_____"
],
[
"from dateutil import rrule, parser\n\ndate1 = '2011-05-03'\ndate2 = '2011-05-10'\n\ndates = list(rrule.rrule(rrule.DAILY,\n dtstart=parser.parse(date1),\n until=parser.parse(date2)))\n\ndate_list = map(lambda x: x.strftime('%Y-%m-%d'), dates)\n\nprint date_list",
"['2011-05-03', '2011-05-04', '2011-05-05', '2011-05-06', '2011-05-07', '2011-05-08', '2011-05-09', '2011-05-10']\n"
],
[
"test = ['2003-12-31', '2005-06-07']\ntest2 = ['1996-12-31', '1985-06-07']\n\ntest.extend(test2)\n\nprint test",
"['2003-12-31', '2005-06-07', '1996-12-31', '1985-06-07']\n"
]
],
[
[
"Then `gio.write_dates(outfile, date_list)`",
"_____no_output_____"
],
[
"## Pre-processing",
"_____no_output_____"
]
],
[
[
"infile = '/g/data/ub4/erai/netcdf/6hr/atmos/oper_an_pl/v01/ua/ua_6hrs_ERAI_historical_an-pl_20000401_20000430.nc'",
"_____no_output_____"
],
[
"import iris\nimport iris.coord_categorisation",
"_____no_output_____"
],
[
"level_constraint = iris.Constraint(air_pressure=50000)",
"_____no_output_____"
],
[
"cube = iris.load_cube(infile, level_constraint)",
"_____no_output_____"
],
[
"print cube",
"eastward_wind / (m s**-1) (time: 120; latitude: 241; longitude: 480)\n Dimension coordinates:\n time x - -\n latitude - x -\n longitude - - x\n Scalar coordinates:\n air_pressure: 50000.0 Pa\n Attributes:\n CDI: Climate Data Interface version 1.6.4 (http://code.zmaw.de/projects/cdi...\n CDO: Climate Data Operators version 1.6.4 (http://code.zmaw.de/projects/cdo...\n Conventions: CF-1.4\n MD5: 3ec3b5f919729c5c7ddc59ff5146603b\n NCO: 4.3.8\n code: 131\n history: Tue Jan 20 21:26:31 2015: ncks -O --md5_wrt_att -v ua U_2000_04.nc -o ua_6hrs_ERAI_historical_an-pl_20000401_20000430.nc\nTue...\n institution: ARCCSS ARC Centre of Excellence for Climate System Science www.climate...\n references: Please acknowledge both ECMWF for original files and the ARCCSS for conversion...\n source: Original grib files obtained from http://apps.ecmwf.int/datasets/data/interim_full_daily/...\n table: 128\n title: ERA-Interim U velocity [m s**-1] analysis on pressure (global 0.75X0.75...\n"
],
[
"cube.coord('longitude')",
"_____no_output_____"
],
[
"cube.coord('latitude')",
"_____no_output_____"
]
],
[
[
"Will need to flip the latitude axis and make longitude 0 - 360 offline, as I can't figure out how to do it with iris. The command would be:\n```\n$ cdo invertlat -sellonlatbox 0,359.9,-90,90\n```",
"_____no_output_____"
]
],
[
[
"cube.coord('time')",
"_____no_output_____"
],
[
"iris.coord_categorisation.add_day_of_year(cube, 'time')\niris.coord_categorisation.add_year(cube, 'time')\ncube = cube.aggregated_by(['day_of_year', 'year'], iris.analysis.MEAN)\ncube.remove_coord('day_of_year')\ncube.remove_coord('year')",
"_____no_output_____"
],
[
"print(cube)",
"eastward_wind / (m s**-1) (time: 30; latitude: 241; longitude: 480)\n Dimension coordinates:\n time x - -\n latitude - x -\n longitude - - x\n Scalar coordinates:\n air_pressure: 50000.0 Pa\n Attributes:\n CDI: Climate Data Interface version 1.6.4 (http://code.zmaw.de/projects/cdi...\n CDO: Climate Data Operators version 1.6.4 (http://code.zmaw.de/projects/cdo...\n Conventions: CF-1.4\n MD5: 3ec3b5f919729c5c7ddc59ff5146603b\n NCO: 4.3.8\n code: 131\n history: Tue Jan 20 21:26:31 2015: ncks -O --md5_wrt_att -v ua U_2000_04.nc -o ua_6hrs_ERAI_historical_an-pl_20000401_20000430.nc\nTue...\n institution: ARCCSS ARC Centre of Excellence for Climate System Science www.climate...\n references: Please acknowledge both ECMWF for original files and the ARCCSS for conversion...\n source: Original grib files obtained from http://apps.ecmwf.int/datasets/data/interim_full_daily/...\n table: 128\n title: ERA-Interim U velocity [m s**-1] analysis on pressure (global 0.75X0.75...\n Cell methods:\n mean: day_of_year, year\n"
],
[
"cube.coord('time')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c5299c9baf91580dae23421ad0cfade59dfe92a2
| 5,306 |
ipynb
|
Jupyter Notebook
|
chapter2/homework/computer/3-29/201611580487-3.29.ipynb
|
hpishacker/python_tutorial
|
9005f0db9dae10bdc1d1c3e9e5cf2268036cd5bd
|
[
"MIT"
] | 76 |
2017-09-26T01:07:26.000Z
|
2021-02-23T03:06:25.000Z
|
chapter2/homework/computer/3-29/201611580487-3.29.ipynb
|
hpishacker/python_tutorial
|
9005f0db9dae10bdc1d1c3e9e5cf2268036cd5bd
|
[
"MIT"
] | 5 |
2017-12-10T08:40:11.000Z
|
2020-01-10T03:39:21.000Z
|
chapter2/homework/computer/3-29/201611580487-3.29.ipynb
|
hacker-14/python_tutorial
|
4a110b12aaab1313ded253f5207ff263d85e1b56
|
[
"MIT"
] | 112 |
2017-09-26T01:07:30.000Z
|
2021-11-25T19:46:51.000Z
| 23.900901 | 103 | 0.437618 |
[
[
[
"def fact (end):\n i=1\n j=1\n while i<end :\n i=i+1 \n j=i*j\n return j\nn = int(input('请输入第1个整数,以回车结束。'))\nm = int(input('请输入第2个整数,以回车结束。'))\nk = int(input('请输入第3个整数,以回车结束。'))\n\nprint('最终的和是:', fact(m) + fact(n) + fact(k))",
"请输入第1个整数,以回车结束。2\n请输入第2个整数,以回车结束。2\n请输入第3个整数,以回车结束。2\n最终的和是: 6\n"
],
[
"def fun (end):\n i=1\n j=1\n m=1\n p=0\n sum=0\n while p<end:\n m=j/i\n i=i+2\n j=-1*j\n sum=sum+m\n p=p+1\n return sum\nn=int(input(\"请输入一个整数\"))\nprint(\"最终的和为:\",4*fun(n))\nn=1000\nprint(\"最终的和为:\",4*fun(n))\nn=100000\nprint(\"最终的和为:\",4*fun(n))",
"请输入一个整数2\n最终的和为: 2.666666666666667\n最终的和为: 3.140592653839794\n最终的和为: 3.1415826535897198\n"
],
[
"name=input (\"请输入你的名字\\n\")\nm=int(input(\"请输入你出生月份\\n\"))\nd=int(input(\"请输入你的出生日期\\n\"))\nconstell(name,m,d)\ndef constell (name,m,d):\n if m==3 and d>20 or (m==4 and d<20):\n print(name,\"你是白羊座\")\n elif m==4 and d>19 or (m==5 and d<21):\n print(name,\"你是金牛座\")\n elif m==5 and d>20 or (m==6 and d<22):\n print(name,\"你是双子座\")\n elif m==6 and d>21 or (m==7 and d<23):\n print(name,\"你是巨蟹座\")\n elif m==7 and d>22 or (m==8 and d<23):\n print(name,\"你是狮子座\")\n elif m==8 and d>22 or (m==9 and d<23):\n print(name,\"你是处女座\")\n elif m==9 and d>22 or(m==10 and d<24):\n print(name,\"你是天秤座\")\n elif m==10 and d>23 or (m==11 and d<23):\n print(name,\"你是天蝎座\")\n elif m==11 and d>22 or (m==12 and d<22):\n print(name,\"你是射手座\")\n elif m==12 and d>21 or (m==1 and d<18):\n print(name,\"你是摩羯座\")\n elif m==1 and d>19 or (m==2 and d<19):\n print(name,\"你是水瓶座\")\n elif m==2 and d>18 or(m==3 and d<21):\n print (name,\"你是双鱼座\")\n elif m>12 or m<1 or d>31 or d <1 :\n print(\"非人类\")\n \n",
"请输入你的名字\npp\n请输入你出生月份\n13\n请输入你的出生日期\n1\n"
],
[
"def plrule (word):\n if word.endswith('s') or word.endswith('x') or word.endswith('sh') or word.endswith('ch'):\n word+='es'\n print(word)\n elif word.endswith('y'):\n print(\"You can change the last letter into i plus es\")\n print(word,\"->\",word,\"ies\",sep='')\n else:\n word+='s'\n print(word)\nw=input(\"Please enter a countable noun\\n\")\nplrule(w)",
"Please enter a countable noun\nbus\nbuses\n"
],
[
"def total (m,n,k):\n sum=0\n while(m<=n):\n sum=sum+m \n m=m+k\n \n return sum\n\na=int(input(\"plz enter an positive interger\\n\"))\nb=int(input(\"plz enter an positive interger and bigger than a\\n\"))\nc=int(input(\"plz enter an positive interger\\n\"))\nprint(\"The sum is\",total (a,b,c))",
"plz enter an positive interger\n8\nplz enter an positive interger and bigger than a\n5\nplz enter an positive interger\n2\nThe sum is 0\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
c529bf416410f2dd3aa79b1ee87f2fcdae06e5f9
| 38,843 |
ipynb
|
Jupyter Notebook
|
02_machine_learning_regression/week_1/quiz.ipynb
|
juanhenao21/machine_learning
|
a0611d3edc6833ee40b1fd0160543276ec5b627e
|
[
"MIT"
] | null | null | null |
02_machine_learning_regression/week_1/quiz.ipynb
|
juanhenao21/machine_learning
|
a0611d3edc6833ee40b1fd0160543276ec5b627e
|
[
"MIT"
] | null | null | null |
02_machine_learning_regression/week_1/quiz.ipynb
|
juanhenao21/machine_learning
|
a0611d3edc6833ee40b1fd0160543276ec5b627e
|
[
"MIT"
] | null | null | null | 46.131829 | 321 | 0.563679 |
[
[
[
"# Regression Week 1: Simple Linear Regression",
"_____no_output_____"
],
[
"In this notebook we will use data on house sales in King County to predict house prices using simple (one input) linear regression. You will:\n* Use Turi Create SArray and SFrame functions to compute important summary statistics\n* Write a function to compute the Simple Linear Regression weights using the closed form solution\n* Write a function to make predictions of the output given the input feature\n* Turn the regression around to predict the input given the output\n* Compare two different models for predicting house prices\n\nIn this notebook you will be provided with some already complete code as well as some code that you should complete yourself in order to answer quiz questions. The code we provide to complte is optional and is there to assist you with solving the problems but feel free to ignore the helper code and write your own.",
"_____no_output_____"
],
[
"# Fire up Turi Create",
"_____no_output_____"
]
],
[
[
"import turicreate as tc",
"_____no_output_____"
]
],
[
[
"# Load house sales data\n\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.",
"_____no_output_____"
]
],
[
[
"sales = tc.SFrame('home_data.sframe/')",
"_____no_output_____"
],
[
"sales.head(5)",
"_____no_output_____"
]
],
[
[
"# Split data into training and testing",
"_____no_output_____"
],
[
"We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let Turi Create pick a random seed for you). ",
"_____no_output_____"
]
],
[
[
"train_data,test_data = sales.random_split(0.8, seed=0)",
"_____no_output_____"
]
],
[
[
"# Useful SFrame summary functions",
"_____no_output_____"
],
[
"In order to make use of the closed form solution as well as take advantage of turi create's built in functions we will review some important ones. In particular:\n* Computing the sum of an SArray\n* Computing the arithmetic average (mean) of an SArray\n* multiplying SArrays by constants\n* multiplying SArrays by other SArrays",
"_____no_output_____"
]
],
[
[
"# Let's compute the mean of the House Prices in King County in 2 different ways.\nprices = sales['price'] # extract the price column of the sales SFrame -- this is now an SArray\n\n# recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses:\nsum_prices = prices.sum()\nnum_houses = len(prices) # when prices is an SArray len() returns its length\navg_price_1 = sum_prices/num_houses\navg_price_2 = prices.mean() # if you just want the average, the .mean() function\nprint(f'average price via method 1: {avg_price_1:.2f}')\nprint(f'average price via method 2: {avg_price_2:.2f}')",
"average price via method 1: 540088.14\naverage price via method 2: 540088.14\n"
]
],
[
[
"As we see we get the same answer both ways",
"_____no_output_____"
]
],
[
[
"# if we want to multiply every price by 0.5 it's a simple as:\nhalf_prices = 0.5 * prices\n# Let's compute the sum of squares of price. We can multiply two SArrays of the same length elementwise also with *\nprices_squared = prices * prices\nsum_prices_squared = prices_squared.sum() # price_squared is an SArray of the squares and we want to add them up.\nprint(f'the sum of price squared is: {sum_prices_squared}')",
"the sum of price squared is: 9217325133550736.0\n"
]
],
[
[
"Aside: The python notation x.xxe+yy means x.xx \\* 10^(yy). e.g 100 = 10^2 = 1*10^2 = 1e2 ",
"_____no_output_____"
],
[
"# Build a generic simple linear regression function ",
"_____no_output_____"
],
[
"Armed with these SArray functions we can use the closed form solution found from lecture to compute the slope and intercept for a simple linear regression on observations stored as SArrays: input_feature, output.\n\nComplete the following function (or write your own) to compute the simple linear regression slope and intercept:",
"_____no_output_____"
]
],
[
[
"def simple_linear_regression(input_feature, output):\n # compute the sum of input_feature and output\n sum_input = input_feature.sum()\n sum_output = output.sum()\n \n # compute the product of the output and the input_feature and its sum\n prod_input_output = input_feature * output\n sum_prod_input_output = prod_input_output.sum()\n \n # compute the squared value of the input_feature and its sum\n squared_input_feature = input_feature * input_feature\n sum_squared_input_feature = squared_input_feature.sum()\n \n # use the formula for the slope\n slope = (sum_prod_input_output - (sum_input * sum_output) / len(output)) \\\n / (sum_squared_input_feature - (sum_input * sum_input) / len(output))\n \n # use the formula for the intercept\n intercept = sum_output / len(output) - slope * sum_input / len(output)\n \n return (intercept, slope)",
"_____no_output_____"
]
],
[
[
"We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line: output = 1 + 1\\*input_feature then we know both our slope and intercept should be 1",
"_____no_output_____"
]
],
[
[
"test_feature = tc.SArray(range(5))\ntest_output = tc.SArray(1 + 1 * test_feature)\ntest_intercept, test_slope = simple_linear_regression(test_feature, test_output)\nprint(f'Intercept: {test_intercept}')\nprint(f'Slope: {test_slope}')",
"Intercept: 1.0\nSlope: 1.0\n"
]
],
[
[
"Now that we know it works let's build a regression model for predicting price based on sqft_living. Rembember that we train on train_data!",
"_____no_output_____"
]
],
[
[
"sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price'])\n\nprint(f'Intercept: {sqft_intercept}')\nprint(f'Slope: {sqft_slope}')",
"Intercept: -47116.07657494\nSlope: 281.9588385676974\n"
]
],
[
[
"# Predicting Values",
"_____no_output_____"
],
[
"Now that we have the model parameters: intercept & slope we can make predictions. Using SArrays it's easy to multiply an SArray by a constant and add a constant value. Complete the following function to return the predicted output given the input_feature, slope and intercept:",
"_____no_output_____"
]
],
[
[
"def get_regression_predictions(input_feature, intercept, slope):\n # calculate the predicted values:\n predicted_values = slope * input_feature + intercept\n \n return predicted_values",
"_____no_output_____"
]
],
[
[
"Now that we can calculate a prediction given the slope and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estiamted above.\n\n**Quiz Question: Using your Slope and Intercept from (4), What is the predicted price for a house with 2650 sqft?**",
"_____no_output_____"
]
],
[
[
"my_house_sqft = 2650\nestimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope)\nprint(f'The estimated price for a house with {my_house_sqft} squarefeet is ${estimated_price:.2f}')",
"The estimated price for a house with 2650 squarefeet is $700074.85\n"
]
],
[
[
"# Residual Sum of Squares",
"_____no_output_____"
],
[
"Now that we have a model and can make predictions let's evaluate our model using Residual Sum of Squares (RSS). Recall that RSS is the sum of the squares of the residuals and the residuals is just a fancy word for the difference between the predicted output and the true output. \n\nComplete the following (or write your own) function to compute the RSS of a simple linear regression model given the input_feature, output, intercept and slope:",
"_____no_output_____"
]
],
[
[
"def get_residual_sum_of_squares(input_feature, output, intercept, slope):\n # First get the predictions\n pred_output = input_feature * slope + intercept\n # then compute the residuals (since we are squaring it doesn't matter which order you subtract)\n residuals = output - pred_output\n\n # square the residuals and add them up\n square_residuals = residuals * residuals\n RSS = square_residuals.sum()\n\n return(RSS)",
"_____no_output_____"
]
],
[
[
"Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero!",
"_____no_output_____"
]
],
[
[
"print(get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope)) # should be 0.0",
"0.0\n"
]
],
[
[
"Now use your function to calculate the RSS on training data from the squarefeet model calculated above.\n\n**Quiz Question: According to this function and the slope and intercept from the squarefeet model What is the RSS for the simple linear regression using squarefeet to predict prices on TRAINING data?**",
"_____no_output_____"
]
],
[
[
"rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'],\n sqft_intercept, sqft_slope)\nprint(f'The RSS of predicting Prices based on Square Feet is : {rss_prices_on_sqft:e}')",
"The RSS of predicting Prices based on Square Feet is : 1.201918e+15\n"
]
],
[
[
"# Predict the squarefeet given price",
"_____no_output_____"
],
[
"What if we want to predict the squarefoot given the price? Since we have an equation y = a + b\\*x we can solve the function for x. So that if we have the intercept (a) and the slope (b) and the price (y) we can solve for the estimated squarefeet (x).\n\nComplete the following function to compute the inverse regression estimate, i.e. predict the input_feature given the output.",
"_____no_output_____"
]
],
[
[
"def inverse_regression_predictions(output, intercept, slope):\n # solve output = intercept + slope*input_feature for input_feature. Use this equation to compute the inverse predictions:\n estimated_feature = (output - intercept) / slope\n\n return estimated_feature",
"_____no_output_____"
]
],
[
[
"Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that costs $800,000 to be.\n\n**Quiz Question: According to this function and the regression slope and intercept from (3) what is the estimated square-feet for a house costing $800,000?**",
"_____no_output_____"
]
],
[
[
"my_house_price = 800000\nestimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope)\nprint(f'The estimated squarefeet for a house worth ${my_house_price} is {estimated_squarefeet:.2f}')",
"The estimated squarefeet for a house worth $800000 is 3004.40\n"
]
],
[
[
"# New Model: estimate prices from bedrooms",
"_____no_output_____"
],
[
"We have made one model for predicting house prices using squarefeet, but there are many other features in the sales SFrame. \nUse your simple linear regression function to estimate the regression parameters from predicting Prices based on number of bedrooms. Use the training data!",
"_____no_output_____"
]
],
[
[
"# Estimate the slope and intercept for predicting 'price' based on 'bedrooms'\nsqft_intercept_bed, sqft_slope_bed = simple_linear_regression(train_data['bedrooms'], train_data['price'])\n\nprint(f'Intercept: {sqft_intercept}')\nprint(f'Slope: {sqft_slope}')",
"Intercept: -47116.07657494\nSlope: 281.9588385676974\n"
]
],
[
[
"# Test your Linear Regression Algorithm",
"_____no_output_____"
],
[
"Now we have two models for predicting the price of a house. How do we know which one is better? Calculate the RSS on the TEST data (remember this data wasn't involved in learning the model). Compute the RSS from predicting prices using bedrooms and from predicting prices using squarefeet.\n\n**Quiz Question: Which model (square feet or bedrooms) has lowest RSS on TEST data? Think about why this might be the case.**",
"_____no_output_____"
]
],
[
[
"# Compute RSS when using bedrooms on TEST data:\nrss_prices_on_bed_test = get_residual_sum_of_squares(test_data['bedrooms'], test_data['price'],\n sqft_intercept, sqft_slope)\nprint(f'The RSS of predicting Prices based on Bedrooms is : {rss_prices_on_bed_test:e}')",
"The RSS of predicting Prices based on Bedrooms is : 2.005069e+15\n"
],
[
"# Compute RSS when using squarefeet on TEST data:\nrss_prices_on_sqft_test = get_residual_sum_of_squares(test_data['sqft_living'], test_data['price'],\n sqft_intercept, sqft_slope)\nprint(f'The RSS of predicting Prices based on Square Feet is : {rss_prices_on_sqft_test:e}')",
"The RSS of predicting Prices based on Square Feet is : 2.754029e+14\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
c529ceb7b73ba039f3b99d162b1497865e5c950c
| 22,737 |
ipynb
|
Jupyter Notebook
|
notebooks/S3/test_modulec_v2.ipynb
|
luissian/opentrons_covid19
|
d5b0332954ea0446f311d19eb905c39b665ed72d
|
[
"MIT"
] | null | null | null |
notebooks/S3/test_modulec_v2.ipynb
|
luissian/opentrons_covid19
|
d5b0332954ea0446f311d19eb905c39b665ed72d
|
[
"MIT"
] | null | null | null |
notebooks/S3/test_modulec_v2.ipynb
|
luissian/opentrons_covid19
|
d5b0332954ea0446f311d19eb905c39b665ed72d
|
[
"MIT"
] | null | null | null | 39.405546 | 477 | 0.539473 |
[
[
[
"from opentrons import simulate\nfrom opentrons.drivers.rpi_drivers import gpio\nimport time\nimport math\n\nctx = simulate.get_protocol_api('2.1')\n\n# Metadata\nmetadata = {\n 'protocolName': 'S3 Station C Version 1',\n 'author': 'Nick <[email protected]>, Sara <[email protected]>, Miguel <[email protected]>',\n 'source': 'Custom Protocol Request',\n 'apiLevel': '2.1'\n}\n\n# Parameters to adapt the protocol\nNUM_SAMPLES = 96\nMM_LABWARE = 'opentrons aluminum block'\nPCR_LABWARE = 'opentrons aluminum nest plate'\nELUTION_LABWARE = 'opentrons aluminum biorad plate'\nMMTUBE_LABWARE = '2ml tubes'\nPREPARE_MASTERMIX = True\nMM_TYPE = 'MM1'\nTRANSFER_MASTERMIX = False\nTRANSFER_SAMPLES = False\n\n# Calculated variables\nif MM_TYPE == 'mm3':\n VOLUME_MMIX = 15\nelse:\n VOLUME_MMIX = 20\n\n# Constants\nMM_LW_DICT = {\n 'opentrons plastic block': 'opentrons_24_tuberack_generic_2ml_screwcap',\n 'opentrons aluminum block': 'opentrons_24_aluminumblock_generic_2ml_screwcap',\n 'covidwarriors aluminum block': 'covidwarriors_aluminumblock_24_screwcap_2000ul'\n}\n\nPCR_LW_DICT = {\n 'opentrons aluminum biorad plate': 'opentrons_96_aluminumblock_biorad_wellplate_200ul',\n 'opentrons aluminum nest plate': 'opentrons_96_aluminumblock_nest_wellplate_100ul',\n 'opentrons aluminum strip short': 'opentrons_aluminumblock_96_pcrstrips_100ul',\n 'covidwarriors aluminum biorad plate': 'covidwarriors_aluminumblock_96_bioradwellplate_200ul',\n 'covidwarriors aluminum biorad strip short': 'covidwarriors_aluminumblock_96_bioradwellplate_pcrstrips_100ul'\n}\n\nEL_LW_DICT = {\n # tubes\n 'opentrons plastic 2ml tubes': 'opentrons_24_tuberack_generic_2ml_screwcap',\n 'opentrons plastic 1.5ml tubes': 'opentrons_24_tuberack_nest_1.5ml_screwcap',\n 'opentrons aluminum 2ml tubes': 'opentrons_24_aluminumblock_generic_2ml_screwcap',\n 'opentrons aluminum 1.5ml tubes': 'opentrons_24_aluminumblock_nest_1.5ml_screwcap',\n 'covidwarriors aluminum 2ml tubes': 'covidwarriors_aluminumblock_24_screwcap_2000ul',\n 'covidwarriors aluminum 1.5ml tubes': 'covidwarriors_aluminumblock_24_screwcap_2000ul',\n # PCR plate\n 'opentrons aluminum biorad plate': 'opentrons_96_aluminumblock_biorad_wellplate_200ul',\n 'opentrons aluminum nest plate': 'opentrons_96_aluminumblock_nest_wellplate_100ul',\n 'covidwarriors aluminum biorad plate': 'covidwarriors_aluminumblock_96_bioradwellplate_200ul',\n # Strips\n #'large strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',\n #'short strips': 'opentrons_96_aluminumblock_generic_pcr_strip_200ul',\n 'opentrons aluminum strip alpha': 'opentrons_aluminumblock_96_pcrstripsalpha_200ul',\n 'opentrons aluminum strip short': 'opentrons_aluminumblock_96_pcrstrips_100ul',\n 'covidwarriors aluminum biorad strip alpha': 'covidwarriors_aluminumblock_96_bioradwellplate_pcrstripsalpha_200ul',\n 'covidwarriors aluminum biorad strip short': 'covidwarriors_aluminumblock_96_bioradwellplate_pcrstrips_100ul'\n}\n\nMMTUBE_LW_DICT = {\n # Radius of each possible tube\n '2ml tubes': 4\n}\n\n# Function definitions\ndef check_door():\n return gpio.read_window_switches()\n\ndef confirm_door_is_closed(ctx):\n #Check if door is opened\n if check_door() == False:\n #Set light color to red and pause\n gpio.set_button_light(1,0,0)\n ctx.pause(f\"Please, close the door\")\n time.sleep(3)\n confirm_door_is_closed(ctx)\n else:\n #Set light color to green\n gpio.set_button_light(0,1,0)\n\ndef finish_run():\n #Set light color to blue\n gpio.set_button_light(0,0,1)\n\ndef get_source_dest_coordinates(ELUTION_LABWARE,source_racks, 
pcr_plate):\n if 'strip' in ELUTION_LABWARE:\n sources = [\n tube\n for i, rack in enumerate(source_racks)\n for col in [\n rack.columns()[c] if i < 2 else rack.columns()[c+1]\n for c in [0, 5, 10]\n ]\n for tube in col\n ][:NUM_SAMPLES]\n dests = pcr_plate.wells()[:NUM_SAMPLES]\n elif 'plate' in ELUTION_LABWARE:\n sources = source_racks.wells()[:NUM_SAMPLES]\n dests = pcr_plate.wells()[:NUM_SAMPLES]\n else:\n sources = [\n tube\n for rack in source_racks for tube in rack.wells()][:NUM_SAMPLES]\n dests = [\n well\n for v_block in range(2)\n for h_block in range(2)\n for col in pcr_plate.columns()[6*v_block:6*(v_block+1)]\n for well in col[4*h_block:4*(h_block+1)]][:NUM_SAMPLES]\n return sources, dests\n\ndef get_mm_hight(volume):\n # depending on the volume in tube, get mm fluid hight\n hight = volume // (3.14 * 3.14 * MMTUBE_LW_DICT[MMTUBE_LABWARE])\n hight -= 10\n if hight < 5:\n return 1\n else:\n return hight\n\ndef homogenize_mm(mm_tube, p300, times=5):\n # homogenize mastermix tube a given number of times\n p300.pick_up_tip()\n volume_hight = get_mm_hight(VOLUME_MMIX)\n #p300.mix(5, 200, mm_tube.bottom(5))\n for i in range(times):\n for j in range(5):\n # depending on the number of samples, start at a different hight and move as it aspires\n aspirate_hight = volume_hight-(3*j)\n if aspirate_hight < 5:\n p300.aspirate(40, mm_tube.bottom(1))\n else:\n p300.aspirate(40, mm_tube.bottom(aspirate_hight))\n # empty pipete\n p300.dispense(200, mm_tube.bottom(volume_hight))\n # clow out before dropping tip\n p300.blow_out(mm_tube.top(-2))\n p300.drop_tip()\n\ndef prepare_mastermix(MM_TYPE, mm_rack, p300, p20):\n # setup mastermix coordinates\n \"\"\" mastermix component maps \"\"\"\n mm1 = {\n tube: vol\n for tube, vol in zip(\n [well for col in mm_rack.columns()[2:5] for well in col][:10],\n [2.85, 12.5, 0.4, 1, 1, 0.25, 0.25, 0.5, 0.25, 1]\n )\n }\n mm2 = {\n tube: vol\n for tube, vol in zip(\n [mm_rack.wells_by_name()[well] for well in ['A3', 'C5', 'D5']],\n [10, 4, 1]\n )\n }\n mm3 = {\n tube: vol\n for tube, vol in zip(\n [mm_rack.wells_by_name()[well] for well in ['A6', 'B6']],\n [13, 2]\n )\n }\n mm_dict = {'MM1': mm1, 'MM2': mm2, 'MM3': mm3}\n\n # create mastermix\n mm_tube = mm_rack.wells()[0]\n for tube, vol in mm_dict[MM_TYPE].items():\n mm_vol = vol*(NUM_SAMPLES+5)\n disp_loc = mm_tube.top(-10)\n pip = p300 if mm_vol > 20 else p20\n pip.pick_up_tip()\n #pip.transfer(mm_vol, tube.bottom(0.5), disp_loc, air_gap=2, touch_tip=True, new_tip='never')\n air_gap_vol = 5\n num_transfers = math.ceil(mm_vol/(200-air_gap_vol))\n for i in range(num_transfers):\n if i == 0:\n transfer_vol = mm_vol % (200-air_gap_vol)\n else:\n transfer_vol = (200-air_gap_vol)\n pip.transfer(transfer_vol, tube.bottom(0.5), disp_loc, air_gap=air_gap_vol, new_tip='never')\n pip.blow_out(disp_loc)\n pip.aspirate(5, mm_tube.top(2))\n pip.drop_tip()\n\n # homogenize mastermix\n homogenize_mm(mm_tube, p300)\n\n return mm_tube\n\ndef transfer_mastermix(mm_tube, dests, VOLUME_MMIX, p300, p20):\n max_trans_per_asp = 8 #230//(VOLUME_MMIX+5)\n split_ind = [ind for ind in range(0, NUM_SAMPLES, max_trans_per_asp)]\n dest_sets = [dests[split_ind[i]:split_ind[i+1]]\n for i in range(len(split_ind)-1)] + [dests[split_ind[-1]:]]\n pip = p300 if VOLUME_MMIX >= 20 else p20\n pip.pick_up_tip()\n # get initial fluid hight to avoid overflowing mm when aspiring\n mm_volume = VOLUME_MMIX * NUM_SAMPLES\n volume_hight = get_mm_hight(mm_volume)\n for set in dest_sets:\n # check hight and if it is low enought, aim for the bottom\n if 
volume_hight < 5:\n disp_loc = mm_tube.bottom(1)\n else:\n disp_loc = mm_tube.bottom(volume_hight)\n # reclaculate volume hight\n mm_volume -= VOLUME_MMIX * max_trans_per_asp\n volume_hight = get_mm_hight(mm_volume)\n pip.aspirate(4, disp_loc)\n pip.distribute(VOLUME_MMIX, disp_loc, [d.bottom(2) for d in set],\n air_gap=1, disposal_volume=0, new_tip='never')\n pip.blow_out(disp_loc)\n pip.drop_tip()\n\ndef transfer_samples(ELUTION_LABWARE, sources, dests, p20):\n # hight for aspiration has to be different depending if you ar useing tubes or wells\n if 'strip' in ELUTION_LABWARE or 'plate' in ELUTION_LABWARE:\n hight = 1.5\n else:\n hight = 1\n # transfer\n for s, d in zip(sources, dests):\n p20.pick_up_tip()\n p20.transfer(7, s.bottom(hight), d.bottom(2), air_gap=2, new_tip='never')\n #p20.mix(1, 10, d.bottom(2))\n #p20.blow_out(d.top(-2))\n p20.aspirate(1, d.top(-2))\n p20.drop_tip()",
"C:\\Users\\Adm\\.opentrons\\deck_calibration.json not found. Loading defaults\nC:\\Users\\Adm\\.opentrons\\robot_settings.json not found. Loading defaults\n"
],
[
" # define tips\n tips20 = [\n ctx.load_labware('opentrons_96_filtertiprack_20ul', slot)\n for slot in ['6', '9', '8', '7']\n ]\n tips300 = [ctx.load_labware('opentrons_96_filtertiprack_200ul', '3')]\n\n # define pipettes\n p20 = ctx.load_instrument('p20_single_gen2', 'right', tip_racks=tips20)\n p300 = ctx.load_instrument('p300_single_gen2', 'left', tip_racks=tips300)\n\n # tempdeck module\n tempdeck = ctx.load_module('tempdeck', '10')\n #tempdeck.set_temperature(4)\n\n # check mastermix labware type\n if MM_LABWARE not in MM_LW_DICT:\n raise Exception('Invalid MM_LABWARE. Must be one of the \\\nfollowing:\\nopentrons plastic block\\nopentrons aluminum block\\ncovidwarriors aluminum block')\n\n # load mastermix labware\n mm_rack = ctx.load_labware(\n MM_LW_DICT[MM_LABWARE], '11',\n MM_LABWARE)\n\n # check pcr plate\n if PCR_LABWARE not in PCR_LW_DICT:\n raise Exception('Invalid PCR_LABWARE. Must be one of the \\\nfollowing:\\nopentrons aluminum biorad plate\\nopentrons aluminum nest plate\\nopentrons aluminum strip short\\ncovidwarriors aluminum biorad plate\\ncovidwarriors aluminum biorad strip short')\n\n # load pcr plate\n pcr_plate = tempdeck.load_labware(\n PCR_LW_DICT[PCR_LABWARE], 'PCR plate')\n\n # check source (elution) labware type\n if ELUTION_LABWARE not in EL_LW_DICT:\n raise Exception('Invalid ELUTION_LABWARE. Must be one of the \\\nfollowing:\\nopentrons plastic 2ml tubes\\nopentrons plastic 1.5ml tubes\\nopentrons aluminum 2ml tubes\\nopentrons aluminum 1.5ml tubes\\ncovidwarriors aluminum 2ml tubes\\ncovidwarriors aluminum 1.5ml tubes\\nopentrons aluminum biorad plate\\nopentrons aluminum nest plate\\ncovidwarriors aluminum biorad plate\\nopentrons aluminum strip alpha\\nopentrons aluminum strip short\\ncovidwarriors aluminum biorad strip alpha\\ncovidwarriors aluminum biorad strip short')\n\n # load elution labware\n if 'plate' in ELUTION_LABWARE:\n source_racks = ctx.load_labware(\n EL_LW_DICT[ELUTION_LABWARE], '1',\n 'RNA elution labware')\n else:\n source_racks = [\n ctx.load_labware(EL_LW_DICT[ELUTION_LABWARE], slot,\n 'RNA elution labware ' + str(i+1))\n for i, slot in enumerate(['4', '1', '5', '2'])\n ]\n\n # setup sample sources and destinations\n sources, dests = get_source_dest_coordinates(ELUTION_LABWARE, source_racks, pcr_plate)\n",
"_____no_output_____"
],
[
"def prepare_mastermix(MM_TYPE, mm_rack, p300, p20):\n # setup mastermix coordinates\n \"\"\" mastermix component maps \"\"\"\n mm1 = {\n tube: vol\n for tube, vol in zip(\n [well for col in mm_rack.columns()[2:5] for well in col][:10],\n [2.85, 12.5, 0.4, 1, 1, 0.25, 0.25, 0.5, 0.25, 1]\n )\n }\n mm2 = {\n tube: vol\n for tube, vol in zip(\n [mm_rack.wells_by_name()[well] for well in ['A3', 'C5', 'D5']],\n [10, 4, 1]\n )\n }\n mm3 = {\n tube: vol\n for tube, vol in zip(\n [mm_rack.wells_by_name()[well] for well in ['A6', 'B6']],\n [13, 2]\n )\n }\n mm_dict = {'MM1': mm1, 'MM2': mm2, 'MM3': mm3}\n\n # create mastermix\n mm_tube = mm_rack.wells()[0]\n mm_tube_vol = 0\n for tube, vol in mm_dict[MM_TYPE].items():\n mm_vol = vol*(NUM_SAMPLES+5)\n #disp_loc = mm_tube.top(-10)\n disp_loc = mm_tube.bottom(5) if mm_vol < 50 else mm_tube.top(-5)\n pip = p300 if mm_vol > 20 else p20\n #pip.pick_up_tip()\n #pip.transfer(mm_vol, tube.bottom(0.5), disp_loc, air_gap=2, touch_tip=True, new_tip='never')\n air_gap_vol = 5\n num_transfers = math.ceil(mm_vol/(200-air_gap_vol))\n #num_transfers = int(mm_vol//(200-air_gap_vol))\n print(tube)\n print(\"mmvol:\" + str(mm_vol) )\n mm_tube_vol = mm_tube_vol + mm_vol\n print(\"Num transfers:\" + str(num_transfers))\n for i in range(num_transfers):\n if(i == 0 ):\n transfer_vol = mm_vol % (200-air_gap_vol)\n else:\n transfer_vol = 200-air_gap_vol\n print(\"Transfer vol:\" + str(transfer_vol))\n #pip.transfer(transfer_vol, tube.bottom(0.5), disp_loc, air_gap=air_gap_vol, new_tip='never')\n #pip.blow_out(disp_loc)\n #pip.aspirate(5, mm_tube.top(2))\n #pip.drop_tip()\n print(\"Total mm vol:\" + str(mm_tube_vol))\n #p300.pick_up_tip()\n #p300.mix(5, 200, mm_tube.bottom(5))\n for i in range(5):\n for j in range(5):\n disp_loc = -10-(3*i)\n #p300.aspirate(40, mm_tube.top(disp_loc))\n #p300.dispense(200, mm_tube.top(-22))\n #p300.drop_tip()\n\n return mm_tube\n\nmm_tube = prepare_mastermix(MM_TYPE, mm_rack, p300, p20)\n",
"A3 of opentrons aluminum block on 11\nmmvol:287.85\nNum transfers:2\nTransfer vol:92.85000000000002\nTransfer vol:195\nTotal mm vol:287.85\nB3 of opentrons aluminum block on 11\nmmvol:1262.5\nNum transfers:7\nTransfer vol:92.5\nTransfer vol:195\nTransfer vol:195\nTransfer vol:195\nTransfer vol:195\nTransfer vol:195\nTransfer vol:195\nTotal mm vol:1550.35\nC3 of opentrons aluminum block on 11\nmmvol:40.400000000000006\nNum transfers:1\nTransfer vol:40.400000000000006\nTotal mm vol:1590.75\nD3 of opentrons aluminum block on 11\nmmvol:101\nNum transfers:1\nTransfer vol:101\nTotal mm vol:1691.75\nA4 of opentrons aluminum block on 11\nmmvol:101\nNum transfers:1\nTransfer vol:101\nTotal mm vol:1792.75\nB4 of opentrons aluminum block on 11\nmmvol:25.25\nNum transfers:1\nTransfer vol:25.25\nTotal mm vol:1818.0\nC4 of opentrons aluminum block on 11\nmmvol:25.25\nNum transfers:1\nTransfer vol:25.25\nTotal mm vol:1843.25\nD4 of opentrons aluminum block on 11\nmmvol:50.5\nNum transfers:1\nTransfer vol:50.5\nTotal mm vol:1893.75\nA5 of opentrons aluminum block on 11\nmmvol:25.25\nNum transfers:1\nTransfer vol:25.25\nTotal mm vol:1919.0\nB5 of opentrons aluminum block on 11\nmmvol:101\nNum transfers:1\nTransfer vol:101\nTotal mm vol:2020.0\n"
],
[
"def homogenize_mm(mm_tube, p300, times=5):\n # homogenize mastermix tube a given number of times\n #p300.pick_up_tip()\n volume_hight = get_mm_hight(VOLUME_MMIX)\n \n for i in range(times):\n for j in range(5):\n # depending on the number of samples, start at a different hight and move as it aspires\n aspirate_hight = volume_hight-(3*j)\n \n if aspirate_hight < 5:\n print(\"if < 5 aspirate_hight:\" + str(aspirate_hight))\n #p300.aspirate(40, mm_tube.bottom(1))\n else:\n print(\"else aspirate_hight:\" + str(aspirate_hight))\n #p300.aspirate(40, mm_tube.bottom(aspirate_hight))\n # empty pipete\n #p300.dispense(200, mm_tube.bottom(volume_hight))\n # clow out before dropping tip\n #p300.blow_out(mm_tube.top(-2))\n #p300.drop_tip()\n\n\n\nhomogenize_mm(mm_tube, p300)",
"if < 5 aspirate_hight:1\nif < 5 aspirate_hight:-2\nif < 5 aspirate_hight:-5\nif < 5 aspirate_hight:-8\nif < 5 aspirate_hight:-11\nif < 5 aspirate_hight:1\nif < 5 aspirate_hight:-2\nif < 5 aspirate_hight:-5\nif < 5 aspirate_hight:-8\nif < 5 aspirate_hight:-11\nif < 5 aspirate_hight:1\nif < 5 aspirate_hight:-2\nif < 5 aspirate_hight:-5\nif < 5 aspirate_hight:-8\nif < 5 aspirate_hight:-11\nif < 5 aspirate_hight:1\nif < 5 aspirate_hight:-2\nif < 5 aspirate_hight:-5\nif < 5 aspirate_hight:-8\nif < 5 aspirate_hight:-11\nif < 5 aspirate_hight:1\nif < 5 aspirate_hight:-2\nif < 5 aspirate_hight:-5\nif < 5 aspirate_hight:-8\nif < 5 aspirate_hight:-11\n"
],
[
"split_ind = [ind for ind in range(0, 24, 3)]\ndest_sets = [dests[split_ind[i]:split_ind[i+1]]\n for i in range(len(split_ind)-1)] + [dests[split_ind[-1]:]]\n\nprint(range(len(split_ind)-1))\nprint(split_ind[-1])\nprint(split_ind)",
"range(0, 7)\n21\n[0, 3, 6, 9, 12, 15, 18, 21]\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
c529dffc4019fcf779afb9f6c1d3d0baf65d1ba6
| 25,532 |
ipynb
|
Jupyter Notebook
|
.ipynb_checkpoints/Usage-checkpoint.ipynb
|
AyrtonB/RED-Electricity-API-Wrapper
|
26b2e8c7fe6c13e979c48c136ef20f52d8bd4733
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/Usage-checkpoint.ipynb
|
AyrtonB/RED-Electricity-API-Wrapper
|
26b2e8c7fe6c13e979c48c136ef20f52d8bd4733
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/Usage-checkpoint.ipynb
|
AyrtonB/RED-Electricity-API-Wrapper
|
26b2e8c7fe6c13e979c48c136ef20f52d8bd4733
|
[
"MIT"
] | null | null | null | 47.992481 | 1,630 | 0.501762 |
[
[
[
"# USing the REData API Wrapper Module\n\n<br>\n\n### Imports",
"_____no_output_____"
]
],
[
[
"from REData import REData",
"_____no_output_____"
]
],
[
[
"<br>\n\n### Querying the API\n\nThere is a standardised API to query any of the data streams and retrieve a dataframe of the results ",
"_____no_output_____"
]
],
[
[
"category = 'balance' \nwidget = 'balance-electrico' \n\nstart_date = '2019-01-01T00:00'\nend_date = '2019-01-12T00:00'\ntime_trunc = 'day'\n\nRED_stream = REData(category, widget)\ndf = RED_stream.query_REData(start_date, end_date, time_trunc)\n\ndf.head()",
"_____no_output_____"
]
],
[
[
"<br>\n\nSometimes you may want to access the raw response so that functionality has been made available as well",
"_____no_output_____"
]
],
[
[
"r = RED_stream.make_request(start_date, end_date, time_trunc)\n\nr",
"_____no_output_____"
]
],
[
[
"<br>\n\nAdditionaly, rather than re-initialising the class each time you want to query a new set of data you could instead simply update the stream info",
"_____no_output_____"
]
],
[
[
"category = 'demanda'\nwidget = 'evolucion'\n\nRED_stream.update_stream(category, widget)\ndf = RED_stream.query_REData(start_date, end_date, time_trunc)\n\ndf.head()",
"_____no_output_____"
]
],
[
[
"<br>\n\n### Error Checking\n\nThe wrapper also exposes any error messages sent by the API",
"_____no_output_____"
]
],
[
[
"start_date = '2018-01-01T00:00'\nend_date = '2019-01-12T00:00'\ntime_trunc = 'day'\n\nRED_stream = REData(category, widget)\ndf = RED_stream.query_REData(start_date, end_date, time_trunc)\n\ndf.head()",
"_____no_output_____"
]
],
[
[
"<br>\n\nAs well as carrying out checks on the input itself",
"_____no_output_____"
]
],
[
[
"category = 'mistyped_category' \nwidget = 'balance-electrico' \n\nstart_date = '2019-01-01T00:00'\nend_date = '2019-01-12T00:00'\ntime_trunc = 'day'\n\nRED_stream = REData(category, widget)\ndf = RED_stream.query_REData(start_date, end_date, time_trunc)\n\ndf.head()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c529e3d257cc50c94d5074ee55c06db6aa3124d4
| 217,976 |
ipynb
|
Jupyter Notebook
|
Code/.ipynb_checkpoints/TwitterScraper-checkpoint.ipynb
|
SamyPal/Project4-UnsupervisedLearning-NLP
|
b48507514cf87cd6bec0da455d8b33e2fd45f6c3
|
[
"FTL"
] | 1 |
2020-03-07T23:13:24.000Z
|
2020-03-07T23:13:24.000Z
|
Code/.ipynb_checkpoints/TwitterScraper-checkpoint.ipynb
|
SamyPal/Project4-UnsupervisedLearning-NLP
|
b48507514cf87cd6bec0da455d8b33e2fd45f6c3
|
[
"FTL"
] | null | null | null |
Code/.ipynb_checkpoints/TwitterScraper-checkpoint.ipynb
|
SamyPal/Project4-UnsupervisedLearning-NLP
|
b48507514cf87cd6bec0da455d8b33e2fd45f6c3
|
[
"FTL"
] | null | null | null | 69.132889 | 357 | 0.562686 |
[
[
[
"import GetOldTweets3 as got\nimport lxml\nimport pyquery\nimport requests\nimport sys\nimport time\nimport pandas as pd\nimport spacy\nimport datetime as dt\n\n\nfrom newsplease.config import CrawlerConfig\nfrom newsplease.config import JsonConfig\nfrom newsplease.helper import Helper\nfrom newspaper import Article\n\nimport nltk\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n# nltk.download('vader_lexicon')\n\nsia = SentimentIntensityAnalyzer()",
"_____no_output_____"
],
[
"tweetCriteria = got.manager.TweetCriteria().setUsername('barackobama').setMaxTweets(1)\ntweet = got.manager.TweetManager.getTweets(tweetCriteria)[0]\n \nprint (tweet.text)",
"After a lifetime of reaching for the stars, today, Katherine Johnson landed among them. She spent decades as a hidden figure, breaking barriers behind the scenes. But by the end of her life, she had become a hero to millions—including Michelle and me.\n"
],
[
"username = 'jack'\ncount = 2000\n# Creation of query object\ntweetCriteria = got.manager.TweetCriteria().setQuerySearch('europe refugees').setSince(\"2015-05-01\").setUntil(\"2015-09-30\").setMaxTweets(1)\n# Creation of list that contains all tweets\ntweets = got.manager.TweetManager.getTweets(tweetCriteria)\n# Creating list of chosen tweet data\nuser_tweets = [[tweet.date, tweet.text] for tweet in tweets]\nuser_tweets",
"_____no_output_____"
],
[
"def get_tweets(keyword, subreddit, end_utc, start_utc):\n comments=[]\n day = 1\n for i in range(start_utc, end_utc+86400, 86400):\n print (day)\n try:\n res_comm = requests.get(f'https://api.pushshift.io/reddit/search/submission/?q={keyword}&subreddit={subreddit}&after={i}&before=1d&sort=desc&sort_type=score&size=5')\n new_comm_data = res_comm.json()['data']\n #next_utc = new_comm_data[-1]['created_utc'] # grabbing the last comment time\n comments = comments + new_comm_data\n time.sleep(0.3)\n except Exception as e:\n print(e)\n retries += 1\n if retries > 3:\n print('errored but we dont care')\n return comments\n raise e\n day+=1\n return comments ",
"_____no_output_____"
],
[
"comments = get_tweets('finance', 'Apple', 1582502400, 1546300800)",
"Days scraped: 0.0\nDays scraped: 5.0\nDays scraped: 10.0\nDays scraped: 15.0\nDays scraped: 20.0\nDays scraped: 25.0\nDays scraped: 30.0\nDays scraped: 35.0\nDays scraped: 40.0\nDays scraped: 45.0\nDays scraped: 50.0\nDays scraped: 55.0\nDays scraped: 60.0\nDays scraped: 65.0\nDays scraped: 70.0\nDays scraped: 75.0\nDays scraped: 80.0\nDays scraped: 85.0\nDays scraped: 90.0\nDays scraped: 95.0\nDays scraped: 100.0\nDays scraped: 105.0\nDays scraped: 110.0\nDays scraped: 115.0\nDays scraped: 120.0\nDays scraped: 125.0\nDays scraped: 130.0\nDays scraped: 135.0\nDays scraped: 140.0\nDays scraped: 145.0\nDays scraped: 150.0\nDays scraped: 155.0\nDays scraped: 160.0\nDays scraped: 165.0\nDays scraped: 170.0\nDays scraped: 175.0\nDays scraped: 180.0\nDays scraped: 185.0\nDays scraped: 190.0\nDays scraped: 195.0\nDays scraped: 200.0\nDays scraped: 205.0\nDays scraped: 210.0\nDays scraped: 215.0\nDays scraped: 220.0\nDays scraped: 225.0\nDays scraped: 230.0\nDays scraped: 235.0\nDays scraped: 240.0\nDays scraped: 245.0\nDays scraped: 250.0\nDays scraped: 255.0\nDays scraped: 260.0\nDays scraped: 265.0\nDays scraped: 270.0\nDays scraped: 275.0\nDays scraped: 280.0\nDays scraped: 285.0\nDays scraped: 290.0\nDays scraped: 295.0\nDays scraped: 300.0\nDays scraped: 305.0\nDays scraped: 310.0\nDays scraped: 315.0\nDays scraped: 320.0\nDays scraped: 325.0\nDays scraped: 330.0\nDays scraped: 335.0\nDays scraped: 340.0\nDays scraped: 345.0\nDays scraped: 350.0\nDays scraped: 355.0\nDays scraped: 360.0\nDays scraped: 365.0\nDays scraped: 370.0\nDays scraped: 375.0\nDays scraped: 380.0\nDays scraped: 385.0\nDays scraped: 390.0\nDays scraped: 395.0\nDays scraped: 400.0\nDays scraped: 405.0\nDays scraped: 410.0\nDays scraped: 415.0\n"
],
[
"len(comments)",
"_____no_output_____"
],
[
"comments = pd.DataFrame(comments)\nprint(comments.shape)\nprint(comments.info)\ncomments.head()",
"(419, 34)\n<bound method DataFrame.info of all_awardings associated_award author \\\n0 [] NaN LastNightOsiris \n1 [] NaN Paraleia \n2 [] NaN LiveLaughLibor \n3 [] NaN LiveLaughLibor \n4 [] NaN LiveLaughLibor \n.. ... ... ... \n414 [] NaN sordfyshe \n415 [] NaN sordfyshe \n416 [] NaN sordfyshe \n417 [] NaN sordfyshe \n418 [] NaN sordfyshe \n\n author_flair_background_color author_flair_css_class \\\n0 None None \n1 None None \n2 None None \n3 None None \n4 None None \n.. ... ... \n414 None None \n415 None None \n416 None None \n417 None None \n418 None None \n\n author_flair_richtext author_flair_template_id author_flair_text \\\n0 [] None None \n1 [] None None \n2 [] None None \n3 [] None None \n4 [] None None \n.. ... ... ... \n414 [] None None \n415 [] None None \n416 [] None None \n417 [] None None \n418 [] None None \n\n author_flair_text_color author_flair_type ... \\\n0 None text ... \n1 None text ... \n2 None text ... \n3 None text ... \n4 None text ... \n.. ... ... ... \n414 None text ... \n415 None text ... \n416 None text ... \n417 None text ... \n418 None text ... \n\n permalink retrieved_on score \\\n0 /r/finance/comments/f8qx84/moronic_monday_febr... 1582568979 2 \n1 /r/finance/comments/f81lut/lunch_with_the_ft_l... 1582430244 12 \n2 /r/finance/comments/f7zolo/bank/fihgetw/ 1582416841 50 \n3 /r/finance/comments/f7zolo/bank/fihgetw/ 1582416841 50 \n4 /r/finance/comments/f7zolo/bank/fihgetw/ 1582416841 50 \n.. ... ... ... \n414 /r/finance/comments/c93agd/wework_isnt_even_cl... 1562252138 586 \n415 /r/finance/comments/c93agd/wework_isnt_even_cl... 1562252138 586 \n416 /r/finance/comments/c93agd/wework_isnt_even_cl... 1562252138 586 \n417 /r/finance/comments/c93agd/wework_isnt_even_cl... 1562252138 586 \n418 /r/finance/comments/c93agd/wework_isnt_even_cl... 1562252138 586 \n\n send_replies stickied subreddit subreddit_id total_awards_received \\\n0 True False finance t5_2qhfj 0 \n1 True False finance t5_2qhfj 0 \n2 True False finance t5_2qhfj 0 \n3 True False finance t5_2qhfj 0 \n4 True False finance t5_2qhfj 0 \n.. ... ... ... ... ... \n414 True False finance t5_2qhfj 0 \n415 True False finance t5_2qhfj 0 \n416 True False finance t5_2qhfj 0 \n417 True False finance t5_2qhfj 0 \n418 True False finance t5_2qhfj 0 \n\n steward_reports updated_utc \n0 NaN NaN \n1 NaN NaN \n2 NaN NaN \n3 NaN NaN \n4 NaN NaN \n.. ... ... \n414 NaN 1.562426e+09 \n415 NaN 1.562426e+09 \n416 NaN 1.562426e+09 \n417 NaN 1.562426e+09 \n418 NaN 1.562426e+09 \n\n[419 rows x 34 columns]>\n"
],
[
"cols = ['author', 'body', 'retrieved_on', 'score']\ndf = comments[cols]\ndf.head()",
"_____no_output_____"
],
[
"df.isnull().sum()",
"_____no_output_____"
],
[
"sia.polarity_scores(df.loc[0,'body'])",
"_____no_output_____"
],
[
"senti = []\nfor row in range(len(df)-1):\n senti.append(sia.polarity_scores(df.loc[row,'body']))\n \ndf_senti = pd.DataFrame(senti)\ndf_senti.head()\nresult = pd.concat([df, df_senti]), axis=1)\nresult.head()",
"_____no_output_____"
],
[
"df_senti",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c529f2eef0d05c45e842e27b7dd15f3c689ec277
| 798,886 |
ipynb
|
Jupyter Notebook
|
Project2_Forecasting_Barrios_Anandkrishnan_Task3_.ipynb
|
tiannymonti/network_analytics
|
fde22c4576fc0cfbe5132b4de52813111e960b78
|
[
"MIT"
] | null | null | null |
Project2_Forecasting_Barrios_Anandkrishnan_Task3_.ipynb
|
tiannymonti/network_analytics
|
fde22c4576fc0cfbe5132b4de52813111e960b78
|
[
"MIT"
] | null | null | null |
Project2_Forecasting_Barrios_Anandkrishnan_Task3_.ipynb
|
tiannymonti/network_analytics
|
fde22c4576fc0cfbe5132b4de52813111e960b78
|
[
"MIT"
] | 1 |
2021-12-16T15:20:37.000Z
|
2021-12-16T15:20:37.000Z
| 207.826743 | 85,568 | 0.885258 |
[
[
[
"# Project 2 - Forecasting Service Metrics\n\nAuthors: Tatiana Barrios, Anisha Anandkrishnan",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statsmodels.graphics.tsaplots import plot_acf\nimport os\nimport seaborn as sn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom pandas import concat\nimport seaborn as sns\nimport scipy.stats as st\nfrom scipy.fft import fft, ifft\nfrom statsmodels.tsa.ar_model import AutoReg\nfrom statsmodels.tsa.seasonal import DecomposeResult\n\n%matplotlib inline",
"/home/tatiana/anaconda3/lib/python3.7/site-packages/pandas/compat/_optional.py:138: UserWarning: Pandas requires version '2.7.0' or newer of 'numexpr' (version '2.6.9' currently installed).\n warnings.warn(msg, UserWarning)\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/ensemble/gradient_boosting.py:34: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n from ._gradient_boosting import predict_stages\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/ensemble/gradient_boosting.py:34: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n from ._gradient_boosting import predict_stages\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:35: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:597: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:836: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:862: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, positive=False):\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1097: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. 
Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1344: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1480: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_X=True, positive=False):\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/randomized_l1.py:152: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n precompute=False, eps=np.finfo(np.float).eps,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/randomized_l1.py:320: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, random_state=None,\n/home/tatiana/anaconda3/lib/python3.7/site-packages/sklearn/linear_model/randomized_l1.py:580: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=4 * np.finfo(np.float).eps, n_jobs=None,\n"
],
[
"#NMAE function\ndef nmae_get(y, y_hat):\n y_av = np.mean(y)\n y_sum = np.sum(np.abs(y - y_hat))\n return y_sum/(len(y)*y_av)",
"_____no_output_____"
],
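[
"# Hedged sanity check of nmae_get on toy data (the values below are made up for illustration).\n# NMAE = sum(|y - y_hat|) / (n * mean(y)), i.e. the MAE normalized by the mean of the targets.\ny_toy = np.array([10.0, 12.0, 8.0, 10.0])\ny_hat_toy = np.array([11.0, 11.0, 9.0, 10.0])\nprint(nmae_get(y_toy, y_hat_toy))  # 3 / (4 * 10) = 0.075",
"_____no_output_____"
],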
[
"#NMAE for h=0:::10\ndef nmaes_array(df_test, df_pre, h):\n nmaes = []\n for i in range(0, h+1):\n y_predict_i = df_pre.iloc[:, i]\n y_test_o = df_test.iloc[:, i].to_numpy()\n nmaes.append(nmae_get(y_test_o, y_predict_i))\n \n return nmaes",
"_____no_output_____"
],
[
"def future_columns(df, h):\n nv = df.shape[1]\n original_names = df.columns\n col, names = list(), list()\n for i in range(0, h+1):\n col.append(df.shift(-i))\n if i == 0:\n names += [('%s(t)' % (original_names[j])) for j in range(nv)]\n else:\n names += [('%s(t+%d)' % (original_names[j], i)) for j in range(nv)]\n concated_ = concat(col, axis=1)\n concated_.columns = names\n \n #This might bring errors, but i dont know if its better to drop them or to fill them at this point\n concated_.fillna(0, inplace=True)\n return concated_",
"_____no_output_____"
]
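,
[
"# Illustrative check of future_columns on a tiny toy frame (assumption: h=2 is enough to show the idea).\n# Each column y(t+i) is the series shifted back by i steps; trailing NaNs are filled with 0.\ntoy = pd.DataFrame({'y': [1, 2, 3, 4]})\nprint(future_columns(toy, 2))",
"_____no_output_____"
]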
],
[
[
"# Task III - Time series analysis",
"_____no_output_____"
],
[
"1. In this task, we apply traditional univariate time-series analysis methods. This means we only consider the target values y(t) of the trace and do not consider the input values x(t)\n\n2. Outliers elimination. Before applying any method, we remove outliers if there are any. Use one of the methods from project 1 (Advanced) to eliminate the outliers.",
"_____no_output_____"
]
],
[
[
"Y = pd.read_csv('Y.csv')\n#Y.index = pd.to_datetime(Y['TimeStamp'])\nY_dropped = Y.drop(labels=[\"Unnamed: 0\", \"WritesAvg\"], axis=1, inplace=False)\n\nY_dropped1 = Y.drop(labels=[\"Unnamed: 0\", \"TimeStamp\", \"WritesAvg\"], axis=1, inplace=False)\n\nY_preprocessed = pd.DataFrame()\nY_tmp = preprocessing.StandardScaler().fit_transform(Y_dropped1)\nfor i, n in enumerate(Y_dropped1):\n Y_preprocessed[n] = Y_tmp[:, i]",
"_____no_output_____"
],
[
"Y_preprocessed.head()",
"_____no_output_____"
],
[
"Y_dropped.head()",
"_____no_output_____"
],
[
"print(Y_dropped.shape)\nprint(Y_preprocessed.shape)",
"(14481, 2)\n(14481, 1)\n"
],
[
"remove = []\nfor i in Y_preprocessed:\n for j in range(len(Y_preprocessed[i])):\n if j not in remove and abs(Y_preprocessed[i][j]) > 3.5:\n remove.append(j)\nY_clean = Y_dropped.drop(index=remove, axis=0, inplace=False)\n\nprint(\"Number of dropped samples: \", (len(remove)))\n%store Y_clean",
"Number of dropped samples: 135\nStored 'Y_clean' (DataFrame)\n"
],
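[
"# Equivalent vectorized formulation of the z-score filter above (a sketch, not a replacement):\n# rows where any standardized feature exceeds 3.5 in absolute value are flagged as outliers.\noutlier_mask = (Y_preprocessed.abs() > 3.5).any(axis=1)\nprint(\"Flagged rows:\", int(outlier_mask.sum()))  # should match the count above",
"_____no_output_____"
],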
[
"Y_clean = Y_clean.reset_index()\nY_clean = Y_clean.drop(Y_clean.columns[0], axis=1)\nY_clean.head()",
"_____no_output_____"
],
[
"reads = Y_clean['ReadsAvg']",
"_____no_output_____"
],
[
"fig, ax1 = plt.subplots(figsize = (10,6), dpi = 100)\nmn_point = min(reads)\nmx_point = max(reads)\nbins = np.arange(mn_point, mx_point + 1, 1)\ndens_vals = ax1.hist(reads, density=True, bins=bins, label='Hist')\nmn_point, mx_point = plt.xlim()\nplt.xlim(mn_point, mx_point)",
"_____no_output_____"
],
[
"Y_clean.head()",
"_____no_output_____"
],
[
"features_file = 'FedCSIS_X.csv'\ntargets_file = 'FedCSIS_Y.csv'\ndirectory = 'FedCSIS'\n\nfn1 = os.path.join(directory, features_file)\nfn2 = os.path.join(directory, targets_file)\n\nXfed = pd.read_csv(fn1)\nYfed = pd.read_csv(fn2)",
"_____no_output_____"
],
[
"Xfed.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1917 entries, 0 to 1916\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 1917 non-null int64 \n 1 host1619_/dev/shm 1917 non-null float64\n 2 host1619_cpu_idle 1917 non-null float64\n 3 host1619_cpu_iowait 1917 non-null float64\n 4 host1619_cpu_sys 1917 non-null float64\n 5 host1619_cpu_user 1917 non-null float64\n 6 host1619_memory.0 1917 non-null float64\n 7 host1619_memory.1 1917 non-null float64\n 8 host1619_memory.2 1917 non-null float64\n 9 host1619_memory.3 1917 non-null float64\ndtypes: float64(9), int64(1)\nmemory usage: 149.9 KB\n"
],
[
"Yfed.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1917 entries, 0 to 1916\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 1917 non-null int64 \n 1 host1619_/ 1917 non-null float64\ndtypes: float64(1), int64(1)\nmemory usage: 30.1 KB\n"
],
[
"Yfed.head()",
"_____no_output_____"
],
[
"Xfed = Xfed.drop(Xfed.columns[0], axis=1)",
"_____no_output_____"
],
[
"Yfed = Yfed.drop(Yfed.columns[0], axis=1)",
"_____no_output_____"
],
[
"Yfed.head()",
"_____no_output_____"
],
[
"hostdata = Yfed['host1619_/']",
"_____no_output_____"
],
[
"#Time series of this plot\nfig_, linep_ = plt.subplots(figsize = (10,6), dpi = 100)\nlinep_ = sn.lineplot(data=hostdata, color='green')\nlinep_.set(xlabel='Time index', ylabel='host1619')\n\nlinep_.set_title(\"Time series of FedCSIS data\")",
"_____no_output_____"
],
[
"fig_.savefig('timesDT1.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"fig, ax1 = plt.subplots(figsize = (10,6), dpi = 100)\nmn_point = min(hostdata)\nmx_point = max(hostdata)\nbins = np.arange(mn_point, mx_point + 1, 0.1)\ndens_vals = ax1.hist(hostdata, density=True, bins=bins, label='Hist')\nmn_point, mx_point = plt.xlim()\nplt.xlim(mn_point, mx_point)\n\n#Kernel density estimation\nkde = st.gaussian_kde(hostdata)\nkde_x = np.linspace(mn_point, mx_point, 500)\nax1.plot(kde_x, kde.pdf(kde_x), color='orange', label='Density', linewidth=3.0)\nplt.legend(loc=\"upper right\")\nplt.xlabel('host1619')\nplt.ylabel('Density')\nplt.title('Density and histogram for target in FedCSIS data')",
"_____no_output_____"
],
[
"fig.savefig('densitytargetfcsis.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"Xfed_preprocessed = pd.DataFrame()\nXfed_tmp = preprocessing.StandardScaler().fit_transform(Xfed)\nfor i, n in enumerate(Xfed):\n Xfed_preprocessed[n] = Xfed_tmp[:, i]",
"_____no_output_____"
],
[
"Xfed.describe(percentiles=[.25, .95])",
"_____no_output_____"
],
[
"#converting Y_dropped to make it of the same form as X_preprocessed\nYfed_new = pd.DataFrame()\nYfed_tmp=Yfed.to_numpy()\n\nfor i, n in enumerate(Yfed):\n Yfed_new[n] = Yfed_tmp[:, i] \n# outlier rejection \nremove = []\nfor i in Xfed_preprocessed:\n for j in range(len(Xfed_preprocessed[i])):\n if j not in remove and abs(Xfed_preprocessed[i][j]) > 3.8:\n remove.append(j)\nXfed_clean = Xfed_preprocessed.drop(labels=remove, axis=0, inplace=False)\nYfed_clean = Yfed_new.drop(labels=remove, axis=0, inplace=False)\n\nprint(\"Number of dropped samples: \", (len(remove)))\n%store Xfed_clean\n%store Yfed_clean",
"Number of dropped samples: 19\nStored 'Xfed_clean' (DataFrame)\nStored 'Yfed_clean' (DataFrame)\n"
],
[
"Xfed_clean.describe(percentiles=[.25, .95])",
"_____no_output_____"
],
[
"hostdata_cln = Yfed_clean['host1619_/']",
"_____no_output_____"
],
[
"fedcsis_everything = Xfed_clean.join(hostdata_cln) \ngen_corr_mat = fedcsis_everything.corr()\nprint(gen_corr_mat)",
" host1619_/dev/shm host1619_cpu_idle \\\nhost1619_/dev/shm 1.000000 0.073272 \nhost1619_cpu_idle 0.073272 1.000000 \nhost1619_cpu_iowait -0.001780 -0.971436 \nhost1619_cpu_sys -0.050581 -0.966673 \nhost1619_cpu_user -0.214932 0.581027 \nhost1619_memory.0 0.202105 -0.484524 \nhost1619_memory.1 -0.115305 -0.883080 \nhost1619_memory.2 -0.198455 0.780803 \nhost1619_memory.3 0.072743 -0.293824 \nhost1619_/ 0.253803 -0.110171 \n\n host1619_cpu_iowait host1619_cpu_sys host1619_cpu_user \\\nhost1619_/dev/shm -0.001780 -0.050581 -0.214932 \nhost1619_cpu_idle -0.971436 -0.966673 0.581027 \nhost1619_cpu_iowait 1.000000 0.972122 -0.751905 \nhost1619_cpu_sys 0.972122 1.000000 -0.673770 \nhost1619_cpu_user -0.751905 -0.673770 1.000000 \nhost1619_memory.0 0.523623 0.482167 -0.477403 \nhost1619_memory.1 0.892088 0.872673 -0.613716 \nhost1619_memory.2 -0.844676 -0.776115 0.762858 \nhost1619_memory.3 0.358882 0.304793 -0.388457 \nhost1619_/ 0.230074 0.205943 -0.516463 \n\n host1619_memory.0 host1619_memory.1 host1619_memory.2 \\\nhost1619_/dev/shm 0.202105 -0.115305 -0.198455 \nhost1619_cpu_idle -0.484524 -0.883080 0.780803 \nhost1619_cpu_iowait 0.523623 0.892088 -0.844676 \nhost1619_cpu_sys 0.482167 0.872673 -0.776115 \nhost1619_cpu_user -0.477403 -0.613716 0.762858 \nhost1619_memory.0 1.000000 0.418829 -0.578285 \nhost1619_memory.1 0.418829 1.000000 -0.605354 \nhost1619_memory.2 -0.578285 -0.605354 1.000000 \nhost1619_memory.3 0.094953 0.224196 -0.334432 \nhost1619_/ 0.227259 0.065084 -0.261472 \n\n host1619_memory.3 host1619_/ \nhost1619_/dev/shm 0.072743 0.253803 \nhost1619_cpu_idle -0.293824 -0.110171 \nhost1619_cpu_iowait 0.358882 0.230074 \nhost1619_cpu_sys 0.304793 0.205943 \nhost1619_cpu_user -0.388457 -0.516463 \nhost1619_memory.0 0.094953 0.227259 \nhost1619_memory.1 0.224196 0.065084 \nhost1619_memory.2 -0.334432 -0.261472 \nhost1619_memory.3 1.000000 -0.078566 \nhost1619_/ -0.078566 1.000000 \n"
],
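[
"# Hedged helper: rank features by absolute Pearson correlation with the target.\n# This just reads off one column of the correlation matrix printed above.\nprint(gen_corr_mat['host1619_/'].drop('host1619_/').abs().sort_values(ascending=False))",
"_____no_output_____"
],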
[
"#Heatmap of correlation matrix\nparams = {'legend.fontsize': 'x-large',\n 'figure.figsize': (12, 12),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize':'x-large',\n 'xtick.labelsize':'x-large',\n 'ytick.labelsize':'x-large'}\nplt.rcParams.update(params)\nheatmaplt = sn.heatmap(gen_corr_mat, cmap=\"YlGnBu\")\nheatmaplt.set_title('Correlation matrix of FedCSIS data')\nplt.show()",
"_____no_output_____"
],
[
"fig = heatmaplt.get_figure()\nfig.savefig('corrmat_fedcsis.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"3. The auto-correlation function (ACF) computes the correlation of observations in a time series with respect to lag values. Compute the values of this function for the KTH trace and the FedCSIS trace. For each trace, plot the ACF values (correlogram) in two ways.The first plot shows the lag values in the interval l = 0; :::; 100, the second plot shows in the interval l = 0; :::; 4000. The x-axis of the plots shows the lag values and the y-axis shows the correlation coefficients (Pearson correlation) with values between -1 and 1 for negative and positive correlation, respectively.",
"_____no_output_____"
]
],
[
[
"Y_clean_ = Y_clean.drop(labels=[\"TimeStamp\"], axis=1, inplace=False)",
"_____no_output_____"
],
[
"# For the KTH trace (KV_periodic)\nfig, plotss = plt.subplots(figsize = (10,6), dpi = 100)\n\nplot_acf(x=Y_clean_, lags=100, ax=plotss)\nplotss.set(title=\"ACF for KV data trace until lag=100\")\nplotss.set(xlabel='lag', ylabel='Coefficients')",
"_____no_output_____"
],
[
"fig.savefig('kv_acf100.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"fig, plotss = plt.subplots(figsize = (10,6), dpi = 100)\n\nplot_acf(x=Y_clean_, lags=4000, ax=plotss)\nplotss.set(title=\"ACF for KV data trace until lag=4000\")\nplotss.set(xlabel='lag', ylabel='Coefficients')",
"_____no_output_____"
],
[
"import statsmodels.api as sm\n\nacf, ci = sm.tsa.acf(Y_clean_, nlags=3000, alpha=0.05)\n\nplt.plot(acf)",
"_____no_output_____"
],
[
"#period\nperiod_index = np.where(acf == max(acf[500:3000]))\nprint(\"Period is: \", period_index)",
"Period is: (array([2661]),)\n"
],
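[
"# Sketch: the ACF value at lag l is (essentially) the Pearson correlation between the series\n# and itself shifted by l. A minimal manual check for one lag (assumption: lag 1):\nseries = Y_clean_['ReadsAvg']\nprint(series.autocorr(lag=1))  # pandas' lag-1 autocorrelation, comparable to acf[1] above",
"_____no_output_____"
],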
[
"fig, plotss = plt.subplots(figsize = (10,6), dpi = 100)\n\nplot_acf(x=Y_clean_, lags=2570, ax=plotss)\nplotss.set(title=\"ACF for KV data trace until lag=4000\")\nplotss.set(xlabel='lag', ylabel='Coefficients')",
"_____no_output_____"
],
[
"fig.savefig('kv_acf4000.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"# For the FedCSIS trace\nfig, plotss = plt.subplots(figsize = (10,6), dpi = 100)\n\nplot_acf(x=Yfed_clean, lags=100, ax=plotss)\nplotss.set(title=\"ACF for FedCSIS data trace until lag=100\")\nplotss.set(xlabel='lag', ylabel='Coefficients')",
"_____no_output_____"
],
[
"fig.savefig('fed_acf100.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"Yfed_clean.shape",
"_____no_output_____"
],
[
"fig, plotss = plt.subplots(figsize = (10,6), dpi = 100)\n\nplot_acf(x=Yfed_clean, lags=1866, ax=plotss)\nplotss.set(title=\"ACF for FedCSIS data trace until lag=1866\")\nplotss.set(xlabel='lag', ylabel='Coefficients')",
"_____no_output_____"
],
[
"fig.savefig('fed_acf4000.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"# Task IV - Time series forecasting",
"_____no_output_____"
],
[
"1. Fit an Autoregression (AR) model to the KTH time series. Perform forecasting using the AR model, which formulates the next step in the sequence as a linear function of the observations at previous time steps. The method is suitable for time series without trend and seasonal components. Evaluate the method for the AR model parameter p = 1; :::; 10.",
"_____no_output_____"
]
],
[
[
"Y_clean.head()",
"_____no_output_____"
],
[
"# This data is seasonal, so we want to remove the seasonality before implementing AR or MA\n# Before removing seasonality, we need to find out the period, for that we use fft\n#Time series of this plot (for finding the period)\nfig_, linep_ = plt.subplots(figsize = (10,6), dpi = 100)\n\nlinep_ = sn.lineplot(data=Y_clean, x=Y_clean.index, y=\"ReadsAvg\", color='green')\nlinep_.set(xlabel='Time index', ylabel='ReadsAvg (ms)')\n\nlinep_.set_title(\"Time series of reads data (clean)\")",
"_____no_output_____"
],
[
"#Period is around index 2661 (according to the acf). Let's use Seasonal Adjustment with Modeling (because it allows us to have a trial and error method)\n# I tried with periods around 2400, it seems like the best fit is 2570 roughly",
"_____no_output_____"
],
[
"period = 2661\nX = [i%period for i in range(0, len(reads))]\ndegree = 5\ncoef = np.polyfit(X, reads, degree)\nprint('Coefficients: %s' % coef)",
"Coefficients: [-1.20281264e-15 7.05940380e-12 -1.58353289e-08 2.10203966e-05\n -1.72637180e-02 5.97693815e+01]\n"
],
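[
"# Side note (a sketch): the fitted seasonal polynomial can be evaluated with np.polyval\n# instead of the manual loop used to build 'curve' in a later cell; both give the same values.\nseasonal_fit = np.polyval(coef, X)\nprint(seasonal_fit[:5])",
"_____no_output_____"
],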
[
"#Splitting train and test\n\n#MAKE SURE YOU RUN THIS BEFORE THE MA THING\nY_train, Y_test = train_test_split(Y_clean_, test_size=0.3, shuffle = False)\nprint(Y_train.shape,\"(70% of the samples in training set)\")\nY_train = Y_train.sort_index(axis = 0)\nY_test = Y_test.sort_index(axis = 0)",
"(10042, 1) (70% of the samples in training set)\n"
],
[
"curve = list()\nfor i in range(len(X)):\n value = coef[-1]\n for d in range(degree):\n value += X[i]**(degree-d) * coef[d]\n curve.append(value)\n# plot curve over original data\nfig_, linep_ = plt.subplots(figsize = (10,6), dpi = 100)\n\nlinep_ = sn.lineplot(data=reads, color='blue')\nplt.plot(curve, color='red', linewidth=3)\nlinep_.set(xlabel='Time index', ylabel='ReadsAvg (ms)', title=\"Seasonality curve\")",
"_____no_output_____"
],
[
"fig_.savefig('seasonality.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
],
[
"#Removing seasonality\ndiff = list()\nfor i in range(len(reads)):\n read = reads[i] - curve[i]\n diff.append(read)\nplt.plot(diff)\nplt.show()",
"_____no_output_____"
],
[
"Y_clean_ws = pd.DataFrame(diff, columns =['ReadsAvg'], dtype = float) ",
"_____no_output_____"
],
[
"# Seasonal Adjustment with Differencing (it makes the first period unavailable for modeling)\ndiff_ = list()\nfor i in range(period, len(reads)):\n value = reads[i] - reads[i - period]\n diff_.append(value)\nfig_, linep_ = plt.subplots(figsize = (10,6), dpi = 100)\nplt.plot(diff_)\nplt.show()",
"_____no_output_____"
],
[
"plt.plot(Y_test)",
"_____no_output_____"
],
[
"Y_new_test = future_columns(Y_test, 10)\nh = 10",
"_____no_output_____"
],
[
"# train autoregression\nmodel = AutoReg(train, lags=29)\nmodel_fit = model.fit()\nprint('Coefficients: %s' % model_fit.params)\n# make predictions\npredictions = model_fit.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)\nfor i in range(len(predictions)):\n print('predicted=%f, expected=%f' % (predictions[i], test[i]))\nrmse = sqrt(mean_squared_error(test, predictions))\nprint('Test RMSE: %.3f' % rmse)\n# plot results\npyplot.plot(test)\npyplot.plot(predictions, color='red')\npyplot.show()",
"_____no_output_____"
],
[
"#p=1, h=10 using rolling forecast (AR model)\nhistory = Y_train.values\ntest = Y_test.values\npredictions = list()\nfor t in range(len(test)):\n model_fit = AutoReg(history, lags=1).fit()\n output = model_fit.forecast(steps=11)\n predictions.append(output)\n obs = test[t]\n history = np.append(history, obs)",
"_____no_output_____"
],
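[
"# What the rolling forecast above does, in brief: at each test step t we refit AR(p) on all data\n# seen so far, forecast h+1 steps ahead, then append the true observation to the history.\n# For AR(1), y_hat(t+1) = c + phi * y(t), and multi-step forecasts iterate this relation.\n# A minimal standalone sketch on synthetic data (purely illustrative):\nrng = np.random.default_rng(0)\nsynthetic = rng.normal(size=200).cumsum()\nar1 = AutoReg(synthetic, lags=1).fit()\nprint(ar1.params)        # [c, phi]\nprint(ar1.forecast(steps=3))",
"_____no_output_____"
],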
[
"yin = Y_new_test.index\nycol =Y_new_test.columns\nY_pred = pd.DataFrame(predictions, columns = ycol, index=yin)",
"_____no_output_____"
],
[
"Y_pred.head()",
"_____no_output_____"
],
[
"nmaes_l1 = nmaes_array(Y_new_test, Y_pred, h)",
"_____no_output_____"
],
[
"d = {'nmaes_l1': nmaes_l1}\nnmaes_df = pd.DataFrame(data=d)",
"_____no_output_____"
],
[
"nmaes_df",
"_____no_output_____"
],
[
"#p=2 onwards, h=10 using rolling forecast \nfor p in range(2,11):\n history = Y_train.values\n test = Y_test.values\n predictions = list()\n for t in range(len(test)):\n model_fit = AutoReg(history, lags=p).fit()\n output = model_fit.forecast(steps=h+1)\n predictions.append(output)\n obs = test[t]\n history = np.append(history, obs)\n Y_pred_ = pd.DataFrame(predictions, columns = ycol, index=yin)\n nme = nmaes_array(Y_new_test, Y_pred_, h)\n nmaes_df['nmaes_l'+str(p)] = nme\n print(p)\n",
"2\n3\n4\n5\n6\n7\n8\n9\n10\n"
],
[
"nmaes_df",
"_____no_output_____"
],
[
"nmaes_df.to_excel(\"ARnmae.xlsx\") ",
"_____no_output_____"
],
[
"fig, linep_ = plt.subplots(figsize = (10,6), dpi = 100)\n\nlinep_ = sns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l1\", label=\"q=1\", linewidth =1)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l2\", label=\"q=2\", linewidth =1.2)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l3\", label=\"q=3\", linewidth =1.4)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l4\", label=\"q=4\", linewidth =1.6)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l5\", label=\"q=5\", linewidth =1.8)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l6\", label=\"q=6\", linewidth =2)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l7\", label=\"q=7\", linewidth =2.2)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l8\", label=\"q=8\", linewidth =2.4)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l9\", label=\"q=9\", linewidth =2.6)\nsns.lineplot(data=nmaes_df, x=nmaes_df.index, y=\"nmaes_l10\", label=\"q=10\", linewidth =2.8)\nlinep_.set(title=\"NMAE vs horizon value for AR model\")\n\nlinep_.set(xlabel='h', ylabel='NMAE')",
"_____no_output_____"
],
[
"fig.savefig('ARnmae.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"2. Fit a Moving Average (MA) model to the KTH time series. Perform forecasting using the MA model, which formulates the next step in the sequence as a linear function of the residual errors from a meanprocess at previous time steps. Note that MA is different from calculating the moving average of a time series. The method is suitable for time series without trend and seasonal components. Evaluate the method for the model parameter q = 1; :::; 10.",
"_____no_output_____"
]
],
[
[
"# Equation form X = miu + Zt + beta1()\nfrom statsmodels.tsa.arima.model import ARIMA",
"_____no_output_____"
],
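[
"# Minimal sketch fitting ARIMA(0,0,1) to synthetic MA(1) data (purely illustrative;\n# theta=0.6 and the seed are made-up assumptions).\nrng = np.random.default_rng(1)\nnoise = rng.normal(size=300)\nma1_series = noise[1:] + 0.6 * noise[:-1]  # true MA(1) with theta = 0.6\nma1_fit = ARIMA(ma1_series, order=(0, 0, 1), trend='c').fit()\nprint(ma1_fit.params)  # approximately [const, theta, sigma2]",
"_____no_output_____"
],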
[
"#Splitting train and test\n\n#MAKE SURE YOU RUN THIS BEFORE THE MA THING\nY_train, Y_test = train_test_split(Y_clean_, test_size=0.007, shuffle = False)\nprint(Y_train.shape,\"(100 samples in testing set)\")\nY_train = Y_train.sort_index(axis = 0)\nY_test = Y_test.sort_index(axis = 0)",
"(14245, 1) (100 samples in testing set)\n"
],
[
"Y_test.shape",
"_____no_output_____"
],
[
"import multiprocessing as mp\nprint(\"Number of processors: \", mp.cpu_count())\n",
"Number of processors: 48\n"
],
[
"#q=1, h=10 using rolling forecast (MA model) #takes like an hour to run :c\nhistory = Y_train.values\ntest = Y_test.values\npredictions = list()\n\nfor t in range(len(test)):\n model_fit = ARIMA(history, order=(0,0,1), trend='c').fit()\n output = model_fit.forecast(steps=11)\n predictions.append(output)\n obs = test[t]\n history = np.append(history, obs)\n print(t)",
"0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n"
],
[
"Y_new_test = future_columns(Y_test, 10)\nh = 10\nyin = Y_new_test.index\nycol =Y_new_test.columns\n#Y_pred = pd.DataFrame(predictions, columns = ycol, index=yin)",
"_____no_output_____"
],
[
"nmaes_l1 = [0.022795, 0.036474, 0.046999, 0.057806, 0.068842, 0.079894, 0.091058, 0.102502, 0.114425, 0.126540, 0.139031]",
"_____no_output_____"
],
[
"#nmaes_l1 = nmaes_array(Y_new_test, Y_pred, h)\nd = {'nmaes_q1': nmaes_l1}\nnmaes_df_MA = pd.DataFrame(data=d)",
"_____no_output_____"
],
[
"nmaes_df_MA",
"_____no_output_____"
],
[
"#q=2 onwards, h=10 using rolling forecast \nfor q in range(2,11):\n history = Y_train.values\n test = Y_test.values\n predictions = list()\n for t in range(len(test)):\n model_fit = ARIMA(history, order=(0,0,q), trend='c').fit()\n output = model_fit.forecast(steps=11)\n predictions.append(output)\n obs = test[t]\n history = np.append(history, obs)\n Y_pred_ = pd.DataFrame(predictions, columns = ycol, index=yin)\n nme = nmaes_array(Y_new_test, Y_pred_, h)\n print(q)\n print(\"NMAE: \", nme)\n nmaes_df_MA['nmaes_l'+str(q)] = nme\n \n\n",
"2\nNMAE: [0.020268444935871537, 0.033572082427815106, 0.04699959586886911, 0.057806934838007765, 0.0688426870949042, 0.07989457443870815, 0.09105915759705513, 0.1025031661740809, 0.11442618313071577, 0.12654131069948, 0.1390319656419216]\n3\nNMAE: [0.020544891651949393, 0.03302454538882994, 0.04611042254405253, 0.05780620105027403, 0.06884192676516186, 0.07989382735335014, 0.0910584426918028, 0.10250245843902353, 0.11442541014906707, 0.12654050615755522, 0.13903118378909285]\n4\nNMAE: [0.020176480760983318, 0.03297682758395667, 0.04552158424229804, 0.05706580936368814, 0.06884207380951478, 0.07989401671432678, 0.09105864389960848, 0.10250262962338888, 0.11442558273296338, 0.12654069756122957, 0.13903139008432824]\n5\nNMAE: [0.019412161904150755, 0.03196134095585803, 0.04428455913803603, 0.055505321977343404, 0.06733029554653797, 0.07989431373828916, 0.09105892859631672, 0.102502911130387, 0.11442587971491663, 0.12654100271666635, 0.13903168303860788]\n6\nNMAE: [0.019104550494311787, 0.031270557798257326, 0.0430644119814718, 0.05411695051841191, 0.06588203597484617, 0.0786696262595417, 0.09105922406283712, 0.1025032149556471, 0.11442619340753933, 0.12654130224577564, 0.13903199844604658]\n7\nNMAE: [0.018886435604565776, 0.030900866874740206, 0.04244373996684563, 0.05320188530399197, 0.06484362871499935, 0.07775614499685511, 0.09041098982075617, 0.10250327216471006, 0.11442624669652877, 0.1265413663634638, 0.13903207125209904]\n8\nNMAE: [0.018851140371004875, 0.030581341923499154, 0.04206103446253349, 0.05265998973326961, 0.06411043945347698, 0.07710020842021753, 0.08988314730397964, 0.10202531649569754, 0.11442613486157201, 0.12654126502057078, 0.13903196196982312]\n9\nNMAE: [0.0185378549934669, 0.03028447857620228, 0.041477294081816385, 0.05198947686731554, 0.06351913276327777, 0.07651514007089123, 0.08936925257321356, 0.10149508208687823, 0.1139977540589933, 0.1265415329265209, 0.1390322319031397]\n"
],
[
"nmaes_MA.to_excel(\"nmaesMAAA.xlsx\")",
"_____no_output_____"
],
[
"fig, linep_ = plt.subplots(figsize = (10,6), dpi = 100)\n\nlinep_ = sns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q1\", label=\"q=1\", linewidth =2.4)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q2\", label=\"q=2\", linewidth =2.2)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q3\", label=\"q=3\", linewidth =2.0)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q4\", label=\"q=4\", linewidth =1.8)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q5\", label=\"q=5\", linewidth =1.6)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q6\", label=\"q=6\", linewidth =1.4)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q7\", label=\"q=7\", linewidth =1.2)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q8\", label=\"q=8\", linewidth =1)\nsns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q9\", label=\"q=9\", linewidth =4.2)\n#sns.lineplot(data=nmaes_MA, x=nmaes_MA.index, y=\"nmaes_q10\", label=\"q=10\", linewidth =4.6)\nlinep_.set(yscale=\"log\")\nlinep_.set(title=\"NMAE vs horizon value for MA model\") \n\nlinep_.set(xlabel='h', ylabel='NMAE')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c529fd269f5540fd82cd14bd6876c1da07ae9b78
| 914,073 |
ipynb
|
Jupyter Notebook
|
project01_Facial_Keypoint_Detection/1. Load and Visualize Data.ipynb
|
GabrielPila/udacity-computer-vision-nanodegree
|
16d0d60d76633e4c6137cd658fcf41d46b0a8806
|
[
"MIT"
] | 1 |
2021-05-29T21:23:43.000Z
|
2021-05-29T21:23:43.000Z
|
project01_Facial_Keypoint_Detection/1. Load and Visualize Data.ipynb
|
GabrielPila/udacity-computer-vision-nanodegree
|
16d0d60d76633e4c6137cd658fcf41d46b0a8806
|
[
"MIT"
] | null | null | null |
project01_Facial_Keypoint_Detection/1. Load and Visualize Data.ipynb
|
GabrielPila/udacity-computer-vision-nanodegree
|
16d0d60d76633e4c6137cd658fcf41d46b0a8806
|
[
"MIT"
] | null | null | null | 135.1979 | 106,576 | 0.83378 |
[
[
[
"# Facial Keypoint Detection\n \nThis project will be all about defining and training a convolutional neural network to perform facial keypoint detection, and using computer vision techniques to transform images of faces. The first step in any challenge like this will be to load and visualize the data you'll be working with. \n\nLet's take a look at some examples of images and corresponding facial keypoints.\n\n<img src='images/key_pts_example.png' width=50% height=50%/>\n\nFacial keypoints (also called facial landmarks) are the small magenta dots shown on each of the faces in the image above. In each training and test image, there is a single face and **68 keypoints, with coordinates (x, y), for that face**. These keypoints mark important areas of the face: the eyes, corners of the mouth, the nose, etc. These keypoints are relevant for a variety of tasks, such as face filters, emotion recognition, pose recognition, and so on. Here they are, numbered, and you can see that specific ranges of points match different portions of the face.\n\n<img src='images/landmarks_numbered.jpg' width=30% height=30%/>\n\n---",
"_____no_output_____"
],
[
"## Load and Visualize Data\n\nThe first step in working with any dataset is to become familiar with your data; you'll need to load in the images of faces and their keypoints and visualize them! This set of image data has been extracted from the [YouTube Faces Dataset](https://www.cs.tau.ac.il/~wolf/ytfaces/), which includes videos of people in YouTube videos. These videos have been fed through some processing steps and turned into sets of image frames containing one face and the associated keypoints.\n\n#### Training and Testing Data\n\nThis facial keypoints dataset consists of 5770 color images. All of these images are separated into either a training or a test set of data.\n\n* 3462 of these images are training images, for you to use as you create a model to predict keypoints.\n* 2308 are test images, which will be used to test the accuracy of your model.\n\nThe information about the images and keypoints in this dataset are summarized in CSV files, which we can read in using `pandas`. Let's read the training CSV and get the annotations in an (N, 2) array where N is the number of keypoints and 2 is the dimension of the keypoint coordinates (x, y).\n\n---",
"_____no_output_____"
],
[
"First, before we do anything, we have to load in our image data. This data is stored in a zip file and in the below cell, we access it by it's URL and unzip the data in a `/data/` directory that is separate from the workspace home directory.",
"_____no_output_____"
]
],
[
[
"# -- DO NOT CHANGE THIS CELL -- #\n!mkdir /data\n!wget -P /data/ https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip\n!unzip -n /data/train-test-data.zip -d /data",
"--2021-04-04 04:07:35-- https://s3.amazonaws.com/video.udacity-data.com/topher/2018/May/5aea1b91_train-test-data/train-test-data.zip\nResolving s3.amazonaws.com (s3.amazonaws.com)... 52.217.109.118\nConnecting to s3.amazonaws.com (s3.amazonaws.com)|52.217.109.118|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 338613624 (323M) [application/zip]\nSaving to: ‘/data/train-test-data.zip’\n\ntrain-test-data.zip 100%[===================>] 322.93M 71.1MB/s in 4.8s \n\n2021-04-04 04:07:40 (67.9 MB/s) - ‘/data/train-test-data.zip’ saved [338613624/338613624]\n\nArchive: /data/train-test-data.zip\n creating: /data/test/\n inflating: /data/test/Abdel_Aziz_Al-Hakim_00.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_01.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_10.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_11.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_40.jpg \n inflating: /data/test/Abdel_Aziz_Al-Hakim_41.jpg \n inflating: /data/test/Abdullah_Gul_10.jpg \n inflating: /data/test/Abdullah_Gul_11.jpg \n inflating: /data/test/Abdullah_Gul_30.jpg \n inflating: /data/test/Abdullah_Gul_31.jpg \n inflating: /data/test/Abdullah_Gul_50.jpg \n inflating: /data/test/Abdullah_Gul_51.jpg \n inflating: /data/test/Adam_Sandler_00.jpg \n inflating: /data/test/Adam_Sandler_01.jpg \n inflating: /data/test/Adam_Sandler_10.jpg \n inflating: /data/test/Adam_Sandler_11.jpg \n inflating: /data/test/Adam_Sandler_40.jpg \n inflating: /data/test/Adam_Sandler_41.jpg \n inflating: /data/test/Adrian_Nastase_10.jpg \n inflating: /data/test/Adrian_Nastase_11.jpg \n inflating: /data/test/Adrian_Nastase_40.jpg \n inflating: /data/test/Adrian_Nastase_41.jpg \n inflating: /data/test/Adrian_Nastase_50.jpg \n inflating: /data/test/Adrian_Nastase_51.jpg \n inflating: /data/test/Agbani_Darego_00.jpg \n inflating: /data/test/Agbani_Darego_01.jpg \n inflating: /data/test/Agbani_Darego_20.jpg \n inflating: /data/test/Agbani_Darego_21.jpg \n inflating: /data/test/Agbani_Darego_40.jpg \n inflating: /data/test/Agbani_Darego_41.jpg \n inflating: /data/test/Agbani_Darego_50.jpg \n inflating: /data/test/Agbani_Darego_51.jpg \n inflating: /data/test/Agnes_Bruckner_00.jpg \n inflating: /data/test/Agnes_Bruckner_01.jpg \n inflating: /data/test/Agnes_Bruckner_10.jpg \n inflating: /data/test/Agnes_Bruckner_11.jpg \n inflating: /data/test/Agnes_Bruckner_20.jpg \n inflating: /data/test/Agnes_Bruckner_21.jpg \n inflating: /data/test/Agnes_Bruckner_40.jpg \n inflating: /data/test/Agnes_Bruckner_41.jpg \n inflating: /data/test/Ahmad_Masood_00.jpg \n inflating: /data/test/Ahmad_Masood_01.jpg \n inflating: /data/test/Ahmad_Masood_30.jpg \n inflating: /data/test/Ahmad_Masood_31.jpg \n inflating: /data/test/Ahmad_Masood_40.jpg \n inflating: /data/test/Ahmad_Masood_41.jpg \n inflating: /data/test/Ahmed_Ahmed_00.jpg \n inflating: /data/test/Ahmed_Ahmed_01.jpg \n inflating: /data/test/Ahmed_Ahmed_10.jpg \n inflating: /data/test/Ahmed_Ahmed_11.jpg \n inflating: /data/test/Ahmed_Ahmed_40.jpg \n inflating: /data/test/Ahmed_Ahmed_41.jpg \n inflating: /data/test/Ahmed_Ahmed_50.jpg \n inflating: /data/test/Ahmed_Ahmed_51.jpg \n inflating: /data/test/Aidan_Quinn_00.jpg \n inflating: /data/test/Aidan_Quinn_01.jpg \n inflating: /data/test/Aidan_Quinn_10.jpg \n inflating: /data/test/Aidan_Quinn_11.jpg \n inflating: /data/test/Aidan_Quinn_20.jpg \n inflating: /data/test/Aidan_Quinn_21.jpg \n inflating: /data/test/Aidan_Quinn_30.jpg \n inflating: /data/test/Aidan_Quinn_31.jpg \n inflating: /data/test/Aishwarya_Rai_00.jpg \n 
inflating: /data/test/Aishwarya_Rai_01.jpg \n inflating: /data/test/Aishwarya_Rai_10.jpg \n inflating: /data/test/Aishwarya_Rai_11.jpg \n inflating: /data/test/Aishwarya_Rai_40.jpg \n inflating: /data/test/Aishwarya_Rai_41.jpg \n inflating: /data/test/Aishwarya_Rai_50.jpg \n inflating: /data/test/Aishwarya_Rai_51.jpg \n inflating: /data/test/Albert_Brooks_00.jpg \n inflating: /data/test/Albert_Brooks_01.jpg \n inflating: /data/test/Albert_Brooks_10.jpg \n inflating: /data/test/Albert_Brooks_11.jpg \n inflating: /data/test/Albert_Brooks_30.jpg \n inflating: /data/test/Albert_Brooks_31.jpg \n inflating: /data/test/Alejandro_Toledo_10.jpg \n inflating: /data/test/Alejandro_Toledo_11.jpg \n inflating: /data/test/Alejandro_Toledo_30.jpg \n inflating: /data/test/Alejandro_Toledo_31.jpg \n inflating: /data/test/Alejandro_Toledo_50.jpg \n inflating: /data/test/Alejandro_Toledo_51.jpg \n inflating: /data/test/Aleksander_Kwasniewski_00.jpg \n inflating: /data/test/Aleksander_Kwasniewski_01.jpg \n inflating: /data/test/Aleksander_Kwasniewski_10.jpg \n inflating: /data/test/Aleksander_Kwasniewski_11.jpg \n inflating: /data/test/Aleksander_Kwasniewski_20.jpg \n inflating: /data/test/Aleksander_Kwasniewski_21.jpg \n inflating: /data/test/Aleksander_Kwasniewski_30.jpg \n inflating: /data/test/Aleksander_Kwasniewski_31.jpg \n inflating: /data/test/Alex_Ferguson_00.jpg \n inflating: /data/test/Alex_Ferguson_01.jpg \n inflating: /data/test/Alex_Ferguson_10.jpg \n inflating: /data/test/Alex_Ferguson_11.jpg \n inflating: /data/test/Alex_Ferguson_50.jpg \n inflating: /data/test/Alex_Ferguson_51.jpg \n inflating: /data/test/Alexandra_Pelosi_00.jpg \n inflating: /data/test/Alexandra_Pelosi_01.jpg \n inflating: /data/test/Alexandra_Pelosi_10.jpg \n inflating: /data/test/Alexandra_Pelosi_11.jpg \n inflating: /data/test/Alexandra_Pelosi_30.jpg \n inflating: /data/test/Alexandra_Pelosi_31.jpg \n inflating: /data/test/Alfredo_di_Stefano_00.jpg \n inflating: /data/test/Alfredo_di_Stefano_01.jpg \n inflating: /data/test/Alfredo_di_Stefano_20.jpg \n inflating: /data/test/Alfredo_di_Stefano_21.jpg \n inflating: /data/test/Alfredo_di_Stefano_50.jpg \n inflating: /data/test/Alfredo_di_Stefano_51.jpg \n inflating: /data/test/Ali_Abbas_20.jpg \n inflating: /data/test/Ali_Abbas_21.jpg \n inflating: /data/test/Ali_Abbas_30.jpg \n inflating: /data/test/Ali_Abbas_31.jpg \n inflating: /data/test/Ali_Abbas_40.jpg \n inflating: /data/test/Ali_Abbas_41.jpg \n inflating: /data/test/Ali_Abbas_50.jpg \n inflating: /data/test/Ali_Abbas_51.jpg \n inflating: /data/test/Alicia_Silverstone_00.jpg \n inflating: /data/test/Alicia_Silverstone_01.jpg \n inflating: /data/test/Alicia_Silverstone_10.jpg \n inflating: /data/test/Alicia_Silverstone_11.jpg \n inflating: /data/test/Alicia_Silverstone_20.jpg \n inflating: /data/test/Alicia_Silverstone_21.jpg \n inflating: /data/test/Alicia_Silverstone_50.jpg \n inflating: /data/test/Alicia_Silverstone_51.jpg \n inflating: /data/test/Alma_Powell_00.jpg \n inflating: /data/test/Alma_Powell_01.jpg \n inflating: /data/test/Alma_Powell_10.jpg \n inflating: /data/test/Alma_Powell_11.jpg \n inflating: /data/test/Alma_Powell_40.jpg \n inflating: /data/test/Alma_Powell_41.jpg \n inflating: /data/test/Alma_Powell_50.jpg \n inflating: /data/test/Alma_Powell_51.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_00.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_01.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_10.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_11.jpg \n inflating: 
/data/test/Alvaro_Silva_Calderon_20.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_21.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_30.jpg \n inflating: /data/test/Alvaro_Silva_Calderon_31.jpg \n inflating: /data/test/Amelia_Vega_10.jpg \n inflating: /data/test/Amelia_Vega_11.jpg \n inflating: /data/test/Amelia_Vega_20.jpg \n inflating: /data/test/Amelia_Vega_21.jpg \n inflating: /data/test/Amelia_Vega_30.jpg \n inflating: /data/test/Amelia_Vega_31.jpg \n inflating: /data/test/Amelia_Vega_40.jpg \n inflating: /data/test/Amelia_Vega_41.jpg \n inflating: /data/test/Amy_Brenneman_10.jpg \n inflating: /data/test/Amy_Brenneman_11.jpg \n inflating: /data/test/Amy_Brenneman_30.jpg \n inflating: /data/test/Amy_Brenneman_31.jpg \n inflating: /data/test/Amy_Brenneman_50.jpg \n inflating: /data/test/Amy_Brenneman_51.jpg \n inflating: /data/test/Andrea_Bocelli_10.jpg \n inflating: /data/test/Andrea_Bocelli_11.jpg \n inflating: /data/test/Andrea_Bocelli_20.jpg \n inflating: /data/test/Andrea_Bocelli_21.jpg \n inflating: /data/test/Andrea_Bocelli_30.jpg \n inflating: /data/test/Andrea_Bocelli_31.jpg \n inflating: /data/test/Andy_Roddick_20.jpg \n inflating: /data/test/Andy_Roddick_21.jpg \n inflating: /data/test/Andy_Roddick_40.jpg \n inflating: /data/test/Andy_Roddick_41.jpg \n inflating: /data/test/Andy_Roddick_50.jpg \n inflating: /data/test/Andy_Roddick_51.jpg \n inflating: /data/test/Andy_Rooney_10.jpg \n inflating: /data/test/Andy_Rooney_11.jpg \n inflating: /data/test/Andy_Rooney_20.jpg \n inflating: /data/test/Andy_Rooney_21.jpg \n inflating: /data/test/Andy_Rooney_50.jpg \n inflating: /data/test/Andy_Rooney_51.jpg \n inflating: /data/test/Angel_Lockward_30.jpg \n inflating: /data/test/Angel_Lockward_31.jpg \n inflating: /data/test/Angel_Lockward_40.jpg \n inflating: /data/test/Angel_Lockward_41.jpg \n inflating: /data/test/Angel_Lockward_50.jpg \n inflating: /data/test/Angel_Lockward_51.jpg \n inflating: /data/test/Angela_Bassett_20.jpg \n inflating: /data/test/Angela_Bassett_21.jpg \n inflating: /data/test/Angela_Bassett_30.jpg \n"
],
[
"# import the required libraries\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2",
"_____no_output_____"
]
],
[
[
"Then, let's load in our training data and display some stats about that dat ato make sure it's been loaded in correctly!",
"_____no_output_____"
]
],
[
[
"key_pts_frame = pd.read_csv('/data/training_frames_keypoints.csv')\n\nn = 0\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nprint('Image name: ', image_name)\nprint('Landmarks shape: ', key_pts.shape)\nprint('First 4 key pts: {}'.format(key_pts[:4]))",
"Image name: Luis_Fonsi_21.jpg\nLandmarks shape: (68, 2)\nFirst 4 key pts: [[ 45. 98.]\n [ 47. 106.]\n [ 49. 110.]\n [ 53. 119.]]\n"
],
[
"# print out some stats about the data\nprint('Number of images: ', key_pts_frame.shape[0])",
"Number of images: 3462\n"
]
],
[
[
"## Look at some images\n\nBelow, is a function `show_keypoints` that takes in an image and keypoints and displays them. As you look at this data, **note that these images are not all of the same size**, and neither are the faces! To eventually train a neural network on these images, we'll need to standardize their shape.",
"_____no_output_____"
]
],
[
[
"def show_keypoints(image, key_pts):\n \"\"\"Show image with keypoints\"\"\"\n plt.imshow(image)\n plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='x', c='orange')\n",
"_____no_output_____"
],
[
"# Display a few different types of images by changing the index n\n\n# select an image by index in our data frame\nn = 84\nimage_name = key_pts_frame.iloc[n, 0]\nkey_pts = key_pts_frame.iloc[n, 1:].as_matrix()\nkey_pts = key_pts.astype('float').reshape(-1, 2)\n\nplt.figure(figsize=(5, 5))\nshow_keypoints(mpimg.imread(os.path.join('/data/training/', image_name)), key_pts)\nplt.show()",
"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:6: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n \n"
]
],
[
[
"## Dataset class and Transformations\n\nTo prepare our data for training, we'll be using PyTorch's Dataset class. Much of this this code is a modified version of what can be found in the [PyTorch data loading tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n#### Dataset class\n\n``torch.utils.data.Dataset`` is an abstract class representing a\ndataset. This class will allow us to load batches of image/keypoint data, and uniformly apply transformations to our data, such as rescaling and normalizing images for training a neural network.\n\n\nYour custom dataset should inherit ``Dataset`` and override the following\nmethods:\n\n- ``__len__`` so that ``len(dataset)`` returns the size of the dataset.\n- ``__getitem__`` to support the indexing such that ``dataset[i]`` can\n be used to get the i-th sample of image/keypoint data.\n\nLet's create a dataset class for our face keypoints dataset. We will\nread the CSV file in ``__init__`` but leave the reading of images to\n``__getitem__``. This is memory efficient because all the images are not\nstored in the memory at once but read as required.\n\nA sample of our dataset will be a dictionary\n``{'image': image, 'keypoints': key_pts}``. Our dataset will take an\noptional argument ``transform`` so that any required processing can be\napplied on the sample. We will see the usefulness of ``transform`` in the\nnext section.\n",
"_____no_output_____"
]
],
[
[
"from torch.utils.data import Dataset, DataLoader\n\nclass FacialKeypointsDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, csv_file, root_dir, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.key_pts_frame = pd.read_csv(csv_file)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.key_pts_frame)\n\n def __getitem__(self, idx):\n image_name = os.path.join(self.root_dir,\n self.key_pts_frame.iloc[idx, 0])\n \n image = mpimg.imread(image_name)\n \n # if image has an alpha color channel, get rid of it\n if(image.shape[2] == 4):\n image = image[:,:,0:3]\n \n key_pts = self.key_pts_frame.iloc[idx, 1:].as_matrix()\n key_pts = key_pts.astype('float').reshape(-1, 2)\n sample = {'image': image, 'keypoints': key_pts}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample",
"_____no_output_____"
]
],
[
[
"Now that we've defined this class, let's instantiate the dataset and display some images.",
"_____no_output_____"
]
],
[
[
"# Construct the dataset\nface_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv',\n root_dir='/data/training/')\n\n# print some stats about the dataset\nprint('Length of dataset: ', len(face_dataset))",
"Length of dataset: 3462\n"
],
[
"# Display a few of the images from the dataset\nnum_to_display = 5\n\nfor i in range(num_to_display):\n \n # define the size of images\n fig = plt.figure(figsize=(20,10))\n \n # randomly select a sample\n rand_i = np.random.randint(0, len(face_dataset))\n sample = face_dataset[rand_i]\n\n # print the shape of the image and keypoints\n print(i, sample['image'].shape, sample['keypoints'].shape)\n\n ax = plt.subplot(1, num_to_display, i + 1)\n ax.set_title('Sample #{}'.format(i))\n \n # Using the same display function, defined earlier\n show_keypoints(sample['image'], sample['keypoints'])\n",
"0 (330, 276, 3) (68, 2)\n1 (96, 80, 3) (68, 2)\n2 (357, 350, 3) (68, 2)\n"
]
],
[
[
"## Transforms\n\nNow, the images above are not of the same size, and neural networks often expect images that are standardized; a fixed size, with a normalized range for color ranges and coordinates, and (for PyTorch) converted from numpy lists and arrays to Tensors.\n\nTherefore, we will need to write some pre-processing code.\nLet's create four transforms:\n\n- ``Normalize``: to convert a color image to grayscale values with a range of [0,1] and normalize the keypoints to be in a range of about [-1, 1]\n- ``Rescale``: to rescale an image to a desired size.\n- ``RandomCrop``: to crop an image randomly.\n- ``ToTensor``: to convert numpy images to torch images.\n\n\nWe will write them as callable classes instead of simple functions so\nthat parameters of the transform need not be passed everytime it's\ncalled. For this, we just need to implement ``__call__`` method and \n(if we require parameters to be passed in), the ``__init__`` method. \nWe can then use a transform like this:\n\n tx = Transform(params)\n transformed_sample = tx(sample)\n\nObserve below how these transforms are generally applied to both the image and its keypoints.\n\n",
"_____no_output_____"
]
],
[
[
"import torch\nfrom torchvision import transforms, utils\n# tranforms\n\nclass Normalize(object):\n \"\"\"Convert a color image to grayscale and normalize the color range to [0,1].\"\"\" \n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n image_copy = np.copy(image)\n key_pts_copy = np.copy(key_pts)\n\n # convert image to grayscale\n image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n \n # scale color range from [0, 255] to [0, 1]\n image_copy= image_copy/255.0\n \n # scale keypoints to be centered around 0 with a range of [-1, 1]\n # mean = 100, sqrt = 50, so, pts should be (pts - 100)/50\n key_pts_copy = (key_pts_copy - 100)/50.0\n\n\n return {'image': image_copy, 'keypoints': key_pts_copy}\n\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = cv2.resize(image, (new_w, new_h))\n \n # scale the pts, too\n key_pts = key_pts * [new_w / w, new_h / h]\n\n return {'image': img, 'keypoints': key_pts}\n\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (tuple or int): Desired output size. If int, square crop\n is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n\n h, w = image.shape[:2]\n new_h, new_w = self.output_size\n\n top = np.random.randint(0, h - new_h)\n left = np.random.randint(0, w - new_w)\n\n image = image[top: top + new_h,\n left: left + new_w]\n\n key_pts = key_pts - [left, top]\n\n return {'image': image, 'keypoints': key_pts}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, key_pts = sample['image'], sample['keypoints']\n \n # if image has no grayscale color channel, add one\n if(len(image.shape) == 2):\n # add that third color dim\n image = image.reshape(image.shape[0], image.shape[1], 1)\n \n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = image.transpose((2, 0, 1))\n \n return {'image': torch.from_numpy(image),\n 'keypoints': torch.from_numpy(key_pts)}",
"_____no_output_____"
]
],
[
[
"## Test out the transforms\n\nLet's test these transforms out to make sure they behave as expected. As you look at each transform, note that, in this case, **order does matter**. For example, you cannot crop a image using a value smaller than the original image (and the orginal images vary in size!), but, if you first rescale the original image, you can then crop it to any size smaller than the rescaled size.",
"_____no_output_____"
]
],
[
[
"# test out some of these transforms\nrescale = Rescale(100)\ncrop = RandomCrop(50)\ncomposed = transforms.Compose([Rescale(250),\n RandomCrop(224)])\n\n# apply the transforms to a sample image\ntest_num = 500\nsample = face_dataset[test_num]\n\nfig = plt.figure()\nfor i, tx in enumerate([rescale, crop, composed]):\n transformed_sample = tx(sample)\n\n ax = plt.subplot(1, 3, i + 1)\n plt.tight_layout()\n ax.set_title(type(tx).__name__)\n show_keypoints(transformed_sample['image'], transformed_sample['keypoints'])\n\nplt.show()",
"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:31: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
]
],
[
[
"## Create the transformed dataset\n\nApply the transforms in order to get grayscale images of the same shape. Verify that your transform works by printing out the shape of the resulting data (printing out a few examples should show you a consistent tensor size).",
"_____no_output_____"
]
],
[
[
"# define the data tranform\n# order matters! i.e. rescaling should come before a smaller crop\ndata_transform = transforms.Compose([Rescale(250),\n RandomCrop(224),\n Normalize(),\n ToTensor()])\n\n# create the transformed dataset\ntransformed_dataset = FacialKeypointsDataset(csv_file='/data/training_frames_keypoints.csv',\n root_dir='/data/training/',\n transform=data_transform)\n",
"_____no_output_____"
],
[
"transformed_dataset[3]['image'].shape",
"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:31: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
],
[
"# print some stats about the transformed data\nprint('Number of images: ', len(transformed_dataset))\n\n# make sure the sample tensors are the expected size\nfor i in range(5):\n sample = transformed_dataset[i]\n print(i, sample['image'].size(), sample['keypoints'].size())\n",
"Number of images: 3462\n0 torch.Size([1, 224, 224]) torch.Size([68, 2])\n1 torch.Size([1, 224, 224]) torch.Size([68, 2])\n2 torch.Size([1, 224, 224]) torch.Size([68, 2])\n3 torch.Size([1, 224, 224]) torch.Size([68, 2])\n4 torch.Size([1, 224, 224]) torch.Size([68, 2])\n"
]
],
[
[
"## Data Iteration and Batching\n\nRight now, we are iterating over this data using a ``for`` loop, but we are missing out on a lot of PyTorch's dataset capabilities, specifically the abilities to:\n\n- Batch the data\n- Shuffle the data\n- Load the data in parallel using ``multiprocessing`` workers.\n\n``torch.utils.data.DataLoader`` is an iterator which provides all these\nfeatures, and we'll see this in use in the *next* notebook, Notebook 2, when we load data in batches to train a neural network!\n\n---\n\n",
"_____no_output_____"
],
[
"## Ready to Train!\n\nNow that you've seen how to load and transform our data, you're ready to build a neural network to train on this data.\n\nIn the next notebook, you'll be tasked with creating a CNN for facial keypoint detection.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
c52a10f52fb3c9f8967b0594dd12c1a11e3eadce
| 3,418 |
ipynb
|
Jupyter Notebook
|
scraping_genie.ipynb
|
00FFEF/test_webscraping
|
880198f3931b8e802137272673b54abd3553684f
|
[
"Apache-2.0"
] | null | null | null |
scraping_genie.ipynb
|
00FFEF/test_webscraping
|
880198f3931b8e802137272673b54abd3553684f
|
[
"Apache-2.0"
] | null | null | null |
scraping_genie.ipynb
|
00FFEF/test_webscraping
|
880198f3931b8e802137272673b54abd3553684f
|
[
"Apache-2.0"
] | null | null | null | 27.344 | 1,008 | 0.530135 |
[
[
[
"import requests",
"_____no_output_____"
],
[
"req = requests.get('https://www.genie.co.kr/chart/top200')",
"_____no_output_____"
],
[
"req.content",
"_____no_output_____"
],
[
"from bs4 import BeautifulSoup as bs",
"_____no_output_____"
],
[
"soup = bs(req.content, 'html.parser')",
"_____no_output_____"
],
[
"soup",
"_____no_output_____"
]
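,
[
"# Hedged sketch: pull song/artist text out of the parsed page.\n# The CSS selectors below ('tr.list', 'a.title', 'a.artist') are assumptions about Genie's\n# chart markup and may need adjusting against the live page.\nfor row in soup.select('tr.list')[:5]:\n    title = row.select_one('a.title')\n    artist = row.select_one('a.artist')\n    if title and artist:\n        print(title.get_text(strip=True), '-', artist.get_text(strip=True))",
"_____no_output_____"
]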
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52a28d68a5daf7a5a382b2076851ab363e39ece
| 5,527 |
ipynb
|
Jupyter Notebook
|
vmfiles/IPNB/Examples/f Spark/01 Spark Essentials.ipynb
|
paulovn/ml-vm-notebook
|
52f14b311d488f68c6454edac0ee7d93584e646f
|
[
"BSD-3-Clause"
] | 51 |
2016-02-22T17:43:40.000Z
|
2022-03-08T13:56:46.000Z
|
vmfiles/IPNB/Examples/f Spark/01 Spark Essentials.ipynb
|
paulovn/ml-vm-notebook
|
52f14b311d488f68c6454edac0ee7d93584e646f
|
[
"BSD-3-Clause"
] | 1 |
2017-06-05T16:26:35.000Z
|
2017-06-05T17:14:53.000Z
|
vmfiles/IPNB/Examples/f Spark/01 Spark Essentials.ipynb
|
paulovn/ml-vm-notebook
|
52f14b311d488f68c6454edac0ee7d93584e646f
|
[
"BSD-3-Clause"
] | 31 |
2016-08-13T05:45:16.000Z
|
2022-02-02T11:18:48.000Z
| 24.455752 | 354 | 0.580242 |
[
[
[
"# 04 Spark essentials",
"_____no_output_____"
]
],
[
[
"# Make it Python2 & Python3 compatible\nfrom __future__ import print_function\nimport sys\nif sys.version[0] == 3:\n xrange = range",
"_____no_output_____"
]
],
[
[
"## Spark context",
"_____no_output_____"
],
[
"The notebook deployment includes Spark automatically within each Python notebook kernel. This means that, upon kernel instantiation, there is an [SparkContext](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext) object called `sc` immediatelly available in the Notebook, as in a PySpark shell. Let's take a look at it:",
"_____no_output_____"
]
],
[
[
"?sc",
"_____no_output_____"
]
],
[
[
"We can inspect some of the SparkContext properties:",
"_____no_output_____"
]
],
[
[
"# Spark version we are using\nprint( sc.version )",
"_____no_output_____"
],
[
"# Name of the application we are running\nprint(sc.appName)",
"_____no_output_____"
],
[
"sc.appName",
"_____no_output_____"
],
[
"# Some configuration variables\nprint( sc.defaultParallelism )\nprint( sc.defaultMinPartitions )",
"_____no_output_____"
],
[
"# Username running all Spark processes\n# --> Note this is a method, not a property\nprint( sc.sparkUser() )",
"_____no_output_____"
]
],
[
[
"# Spark configuration",
"_____no_output_____"
]
],
[
[
"# Print out the SparkContext configuration\nprint( sc._conf.toDebugString() )",
"_____no_output_____"
],
[
"# Another way to get similar information\nfrom pyspark import SparkConf, SparkContext\nSparkConf().getAll()",
"_____no_output_____"
]
],
[
[
"## Spark execution modes",
"_____no_output_____"
],
[
"We can also take a look at the Spark configuration this kernel is running under, by using the above configuration data:",
"_____no_output_____"
]
],
[
[
"print( sc._conf.toDebugString() )",
"_____no_output_____"
]
],
[
[
"... this includes the execution mode for Spark. The default mode is *local*, i.e. all Spark processes run locally in the launched Virtual Machine. This is fine for developing and testing with small datasets.\n\nBut to run Spark applications on bigger datasets, they must be executed in a remote cluster. This deployment comes with configuration modes for that, which require:\n* network adjustments to make the VM \"visible\" from the cluster: the virtual machine must be started in _bridged_ mode (the default *Vagrantfile* already contains code for doingso, but it must be uncommented)\n* configuring the addresses for the cluster. This is done within the VM by using the `spark-notebook` script, such as\n sudo service spark-notebook set-addr <master-ip> <namenode-ip> <historyserver-ip>\n* activating the desired mode, by executing\n sudo service spark-notebook set-mode (local | standalone | yarn)\n\nThese operations can also be performed outside the VM by telling vagrant to relay them, e.g.\n\n vagrant ssh -c \"sudo service spark-notebook set-mode local\"",
"_____no_output_____"
],
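[
"As a quick check (a sketch added here, not part of the original text), the mode currently in effect can be read from the context's master URL:",
"_____no_output_____"
],
[
"# Sketch: inspect the master URL to see which execution mode is active\n# 'local[*]' -> local mode, 'spark://...' -> standalone, 'yarn' -> YARN\nprint( sc.master )",
"_____no_output_____"
],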
[
"## A trivial test\n\nLet's do a trivial operation that creates an RDD and executes an action on it. So that we can test that the kernel is capable of launching executors",
"_____no_output_____"
]
],
[
[
"from operator import add\n\nl = sc.range(10000)\nprint( l.reduce(add) )",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
c52a2eefca9e3d2ee48d8bb20c4b02adb09baf94
| 145,915 |
ipynb
|
Jupyter Notebook
|
DA.ipynb
|
ElisaNguyen/dl-defense-pgd
|
bd0b5eefa54fad46cbb2287f69ea2b2ca821872a
|
[
"MIT"
] | null | null | null |
DA.ipynb
|
ElisaNguyen/dl-defense-pgd
|
bd0b5eefa54fad46cbb2287f69ea2b2ca821872a
|
[
"MIT"
] | null | null | null |
DA.ipynb
|
ElisaNguyen/dl-defense-pgd
|
bd0b5eefa54fad46cbb2287f69ea2b2ca821872a
|
[
"MIT"
] | null | null | null | 145,915 | 145,915 | 0.755447 |
[
[
[
"# Setup",
"_____no_output_____"
],
[
"##Import libraries",
"_____no_output_____"
]
],
[
[
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport torchvision\r\nfrom torchvision import transforms\r\nfrom torchvision import datasets\r\nfrom torch.utils.data import DataLoader\r\nimport random\r\nrandom.seed(123)\r\n\r\nimport time\r\nimport os",
"_____no_output_____"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
]
],
[
[
"##Check CUDA version",
"_____no_output_____"
]
],
[
[
"\r\nuse_cuda = True\r\n\r\nif use_cuda and torch.cuda.is_available():\r\n device = torch.device('cuda')\r\nelse:\r\n device = torch.device('cpu')\r\n\r\ndevice",
"_____no_output_____"
]
],
[
[
"##Visualisation functions",
"_____no_output_____"
]
],
[
[
"\r\n%matplotlib inline\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Function to show an image tensor\r\ndef show(X):\r\n if X.dim() == 3 and X.size(2) == 3:\r\n plt.imshow(X.numpy())\r\n #plt.show()\r\n elif X.dim() == 2:\r\n plt.imshow( X.numpy() , cmap='gray' )\r\n #plt.show()\r\n else:\r\n print('WRONG TENSOR SIZE')\r\n\r\ndef show_saliency(X):\r\n if X.dim() == 3 and X.size(2) == 3:\r\n plt.imshow(X.numpy())\r\n plt.show()\r\n elif X.dim() == 2:\r\n plt.imshow( X.numpy() , cmap='viridis' )\r\n plt.show()\r\n else:\r\n print('WRONG TENSOR SIZE')",
"_____no_output_____"
]
],
[
[
"##Download dataset",
"_____no_output_____"
]
],
[
[
"transform = transforms.Compose([transforms.ToTensor(),\r\n transforms.Lambda(lambda x: x.squeeze()), # Squeeze the data to remove the redundant channel dimension\r\n ])\r\n\r\ntrainset = torchvision.datasets.FashionMNIST(root='./data_FashionMNIST',\r\n train=True,\r\n download=True,\r\n transform=transform\r\n )\r\n\r\n\r\ntestset = torchvision.datasets.FashionMNIST(root='./data_FashionMNIST',\r\n train=False,\r\n download=True,\r\n transform=transform\r\n )\r\n\r\nclasses = (\r\n 'T-shirt/top',\r\n 'Trouser',\r\n 'Pullover',\r\n 'Dress',\r\n 'Coat',\r\n 'Sandal',\r\n 'Shirt',\r\n 'Sneaker',\r\n 'Bag',\r\n 'Ankle boot',\r\n)",
"Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to ./data_FashionMNIST/FashionMNIST/raw/train-images-idx3-ubyte.gz\n"
]
],
[
[
"#Data preprocessing",
"_____no_output_____"
],
[
"##Augment the data",
"_____no_output_____"
]
],
[
[
"train_hflip = transforms.functional.hflip(trainset.data)\r\n\r\ntrain_brightness = [transforms.functional.adjust_brightness(x, brightness_factor=random.choice([0.5, 0.75, 1.25, 1.5])) for x in trainset.data]\r\ntrain_brightness = torch.stack(train_brightness)\r\n\r\ntrain_blur = transforms.functional.gaussian_blur(trainset.data, kernel_size=3)\r\n\r\ntrain_rotate = [transforms.functional.rotate(torch.unsqueeze(x, dim=0), angle=random.randrange(30,330,5)).squeeze() for x in trainset.data]\r\ntrain_rotate = torch.stack(train_rotate)",
"_____no_output_____"
]
],
[
[
"##Visualise the augmented data",
"_____no_output_____"
]
],
[
[
"show(trainset.data[0])",
"_____no_output_____"
],
[
"show(train_hflip[0])",
"_____no_output_____"
],
[
"show(train_blur[0])",
"_____no_output_____"
],
[
"show(train_brightness[0])",
"_____no_output_____"
],
[
"show(train_rotate[0])",
"_____no_output_____"
]
],
[
[
"##Split training data into train and validation data",
"_____no_output_____"
]
],
[
[
"trainset.data = torch.cat((trainset.data, train_hflip, train_brightness, train_blur, train_rotate),dim=0)\r\ntrainset.targets = torch.cat((trainset.targets, trainset.targets, trainset.targets, trainset.targets, trainset.targets))",
"_____no_output_____"
],
[
"trainset",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\r\ntargets = trainset.targets\r\ntrain_idx, val_idx= train_test_split(np.arange(len(targets)),test_size=0.2,shuffle=True, stratify=targets, random_state=123)\r\n\r\ntrain_sampler = torch.utils.data.SubsetRandomSampler(train_idx)\r\nval_sampler = torch.utils.data.SubsetRandomSampler(val_idx)\r\n\r\nbatch_size=128\r\n\r\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler)\r\nvalloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=val_sampler)\r\ntestloader = torch.utils.data.DataLoader(testset,\r\n batch_size=batch_size,\r\n shuffle=True,\r\n drop_last=True\r\n )",
"_____no_output_____"
]
],
[
[
"#Model architecture",
"_____no_output_____"
],
[
"##Create the model",
"_____no_output_____"
]
],
[
[
"class Net(nn.Module):\r\n\r\n def __init__(self, kernel_size, pool_function, nfilters_conv1, nfilters_conv2):\r\n\r\n super(Net, self).__init__()\r\n self.nfilters_conv2 = nfilters_conv2\r\n\r\n # CL1: 1 x 28 x 28 (grayscale) --> nfilters_conv1 x 28 x 28 \r\n self.conv1 = nn.Conv2d(1, nfilters_conv1, kernel_size=kernel_size, padding=kernel_size//2)\r\n\r\n # MP1: nfilters_conv1 x 28 x 28 --> nfilters_conv1 x 14 x 14\r\n self.pool1 = pool_function(2,2)\r\n \r\n # CL2: nfilters_conv1 x 14 x 14 --> nfilters_conv2 x 14 x 14\r\n self.conv2 = nn.Conv2d(nfilters_conv1, nfilters_conv2, kernel_size=kernel_size, padding=kernel_size//2)\r\n \r\n # MP2: nfilters_conv2 x 14 x 14 --> nfilters_conv2 x 7 x 7\r\n self.pool2 = pool_function(2,2)\r\n \r\n # LL1: nfilters_conv2 x 7 x 7 --> 100 \r\n self.linear1 = nn.Linear((nfilters_conv2*7*7), 100)\r\n \r\n # LL2: 100 --> 10 \r\n self.linear2 = nn.Linear(100,10)\r\n\r\n\r\n def forward(self, x):\r\n x = x.unsqueeze(1)\r\n\r\n # CL1: \r\n x = self.conv1(x)\r\n x = F.relu(x)\r\n \r\n # MP1: \r\n x = self.pool1(x)\r\n \r\n # CL2: \r\n x = self.conv2(x)\r\n x = F.relu(x)\r\n \r\n # MP2: \r\n x = self.pool2(x)\r\n\r\n # LL1: \r\n x = x.view(-1, self.nfilters_conv2*7*7)\r\n x = self.linear1(x)\r\n x = F.relu(x)\r\n \r\n # LL2: \r\n x = self.linear2(x)\r\n \r\n return x",
"_____no_output_____"
],
[
"# best results from hyperparameter tuning\nkernel_size= 5\npool_function = nn.AvgPool2d\nnfilters_conv1 = 128\nnfilters_conv2 = 128\n\nmodel_aug = Net(kernel_size=kernel_size,pool_function=pool_function,nfilters_conv1=nfilters_conv1,nfilters_conv2=nfilters_conv2).to(device)\n\ncriterion = nn.CrossEntropyLoss()\nmy_lr=0.01\n\noptimizer=torch.optim.Adam(model_aug.parameters(), lr=my_lr) # change here",
"_____no_output_____"
],
[
"print(model_aug)",
"LeNet(\n (conv1): Conv2d(1, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n (pool1): AvgPool2d(kernel_size=2, stride=2, padding=0)\n (conv2): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n (pool2): AvgPool2d(kernel_size=2, stride=2, padding=0)\n (linear1): Linear(in_features=6272, out_features=100, bias=True)\n (linear2): Linear(in_features=100, out_features=10, bias=True)\n)\n"
]
],
[
[
"# Attack!",
"_____no_output_____"
],
[
"##Import libraries",
"_____no_output_____"
]
],
[
[
"!pip install advertorch",
"Collecting advertorch\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/2f/b1/84602596294c32f49396bac9c36f1f72b00577bbcb26ebbe776e64791cac/advertorch-0.2.3.tar.gz (5.7MB)\n\u001b[K |████████████████████████████████| 5.7MB 4.4MB/s \n\u001b[?25hBuilding wheels for collected packages: advertorch\n Building wheel for advertorch (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for advertorch: filename=advertorch-0.2.3-cp36-none-any.whl size=5696220 sha256=a367814e37545145c59238cf918a8edf8f085333852f412328347b8c6506b6b9\n Stored in directory: /root/.cache/pip/wheels/9b/53/6e/6b2509701b0da68443fa3d4499733f5455d6d583afa8c46676\nSuccessfully built advertorch\nInstalling collected packages: advertorch\nSuccessfully installed advertorch-0.2.3\n"
],
[
"from advertorch.attacks import PGDAttack",
"_____no_output_____"
]
],
[
[
"##Create adversary",
"_____no_output_____"
]
],
[
[
"# prepare your pytorch model as \"model\"\r\n# prepare a batch of data and label as \"cln_data\" and \"true_label\"\r\n# prepare attack instance\r\n\r\nadversary = PGDAttack(\r\n model_aug, loss_fn=nn.CrossEntropyLoss(), eps=0.3,\r\n nb_iter=10, eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0,\r\n targeted=False)",
"_____no_output_____"
],
[
"plot_valloss = []",
"_____no_output_____"
]
],
[
[
"##Train the model",
"_____no_output_____"
]
],
[
[
"start=time.time()\n\nmin_loss = 20 #initial loss to be overwritten\n\nepochs_no_improve = 0\npatience = 20 # high patience to overcome local minima\n\nfor epoch in range(1,200):\n\n model_aug.train()\n for i, (x_batch, y_batch) in enumerate(trainloader):\n x_batch, y_batch = x_batch.to(device), y_batch.to(device) # Move the data to the device that is used\n\n optimizer.zero_grad() # Set all currenly stored gradients to zero \n\n y_pred = model_aug(x_batch)\n\n loss = criterion(y_pred, y_batch)\n\n loss.backward()\n\n optimizer.step()\n\n # Compute relevant metrics\n \n y_pred_max = torch.argmax(y_pred, dim=1) # Get the labels with highest output probability\n\n correct = torch.sum(torch.eq(y_pred_max, y_batch)).item() # Count how many are equal to the true labels\n\n elapsed = time.time() - start # Keep track of how much time has elapsed\n\n # Show progress every 50 batches \n if not i % 100:\n print(f'epoch: {epoch}, time: {elapsed:.3f}s, loss: {loss.item():.3f}, train accuracy: {correct / batch_size:.3f}')\n\n model_aug.eval()\n val_loss = 0\n counter = 0\n for i, (x_batch, y_batch) in enumerate(valloader):\n counter += 1\n x_batch, y_batch = x_batch.to(device), y_batch.to(device) # Move the data to the device that is used\n\n y_pred = model_aug(x_batch)\n val_loss += criterion(y_pred, y_batch).item()\n\n val_loss = val_loss/counter\n print(f'epoch: {epoch}, validation loss: {val_loss}')\n plot_valloss.append([val_loss, epoch])\n \n # save the model\n if val_loss < min_loss:\n torch.save(model_aug, \"/content/drive/MyDrive/Deep Learning/Project/model_aug.pckl\")\n epochs_no_improve = 0\n min_loss = val_loss\n else:\n epochs_no_improve += 1\n if epochs_no_improve == patience:\n print(\"Early Stopping!\")\n break\n",
"epoch: 1, time: 0.873s, loss: 2.294, train accuracy: 0.109\nepoch: 1, time: 5.740s, loss: 0.844, train accuracy: 0.648\nepoch: 1, time: 10.371s, loss: 0.639, train accuracy: 0.789\nepoch: 1, time: 15.068s, loss: 0.832, train accuracy: 0.664\nepoch: 1, time: 19.699s, loss: 0.569, train accuracy: 0.828\nepoch: 1, time: 24.408s, loss: 0.554, train accuracy: 0.797\nepoch: 1, time: 29.085s, loss: 0.359, train accuracy: 0.891\nepoch: 1, time: 33.751s, loss: 0.709, train accuracy: 0.766\nepoch: 1, time: 38.548s, loss: 0.441, train accuracy: 0.836\nepoch: 1, time: 43.239s, loss: 0.474, train accuracy: 0.844\nepoch: 1, time: 47.946s, loss: 0.398, train accuracy: 0.867\nepoch: 1, time: 52.616s, loss: 0.400, train accuracy: 0.852\nepoch: 1, time: 57.362s, loss: 0.589, train accuracy: 0.781\nepoch: 1, time: 62.024s, loss: 0.415, train accuracy: 0.812\nepoch: 1, time: 66.770s, loss: 0.428, train accuracy: 0.836\nepoch: 1, time: 71.518s, loss: 0.566, train accuracy: 0.781\nepoch: 1, time: 76.125s, loss: 0.359, train accuracy: 0.828\nepoch: 1, time: 80.803s, loss: 0.496, train accuracy: 0.836\nepoch: 1, time: 85.667s, loss: 0.426, train accuracy: 0.852\nepoch: 1, validation loss: 0.41329245926983066\nepoch: 2, time: 106.848s, loss: 0.493, train accuracy: 0.812\nepoch: 2, time: 111.647s, loss: 0.317, train accuracy: 0.891\nepoch: 2, time: 116.336s, loss: 0.412, train accuracy: 0.805\nepoch: 2, time: 121.080s, loss: 0.324, train accuracy: 0.867\nepoch: 2, time: 125.690s, loss: 0.364, train accuracy: 0.883\nepoch: 2, time: 130.408s, loss: 0.377, train accuracy: 0.875\nepoch: 2, time: 135.189s, loss: 0.360, train accuracy: 0.875\nepoch: 2, time: 139.908s, loss: 0.304, train accuracy: 0.898\nepoch: 2, time: 144.655s, loss: 0.445, train accuracy: 0.836\nepoch: 2, time: 149.261s, loss: 0.410, train accuracy: 0.859\nepoch: 2, time: 153.893s, loss: 0.321, train accuracy: 0.875\nepoch: 2, time: 158.528s, loss: 0.331, train accuracy: 0.867\nepoch: 2, time: 163.189s, loss: 0.397, train accuracy: 0.828\nepoch: 2, time: 167.825s, loss: 0.497, train accuracy: 0.828\nepoch: 2, time: 172.631s, loss: 0.343, train accuracy: 0.844\nepoch: 2, time: 177.257s, loss: 0.339, train accuracy: 0.844\nepoch: 2, time: 181.881s, loss: 0.390, train accuracy: 0.914\nepoch: 2, time: 186.619s, loss: 0.289, train accuracy: 0.898\nepoch: 2, time: 191.254s, loss: 0.342, train accuracy: 0.875\nepoch: 2, validation loss: 0.4029934276014503\nepoch: 3, time: 210.255s, loss: 0.263, train accuracy: 0.922\nepoch: 3, time: 215.065s, loss: 0.316, train accuracy: 0.906\nepoch: 3, time: 219.726s, loss: 0.345, train accuracy: 0.859\nepoch: 3, time: 224.511s, loss: 0.359, train accuracy: 0.875\nepoch: 3, time: 229.311s, loss: 0.421, train accuracy: 0.844\nepoch: 3, time: 234.214s, loss: 0.289, train accuracy: 0.891\nepoch: 3, time: 238.912s, loss: 0.379, train accuracy: 0.836\nepoch: 3, time: 243.620s, loss: 0.334, train accuracy: 0.875\nepoch: 3, time: 248.345s, loss: 0.439, train accuracy: 0.812\nepoch: 3, time: 253.032s, loss: 0.423, train accuracy: 0.836\nepoch: 3, time: 257.739s, loss: 0.266, train accuracy: 0.906\nepoch: 3, time: 262.445s, loss: 0.427, train accuracy: 0.852\nepoch: 3, time: 267.159s, loss: 0.447, train accuracy: 0.812\nepoch: 3, time: 271.912s, loss: 0.392, train accuracy: 0.836\nepoch: 3, time: 276.508s, loss: 0.300, train accuracy: 0.867\nepoch: 3, time: 281.286s, loss: 0.395, train accuracy: 0.844\nepoch: 3, time: 286.027s, loss: 0.366, train accuracy: 0.875\nepoch: 3, time: 290.660s, loss: 0.366, train accuracy: 0.836\nepoch: 
3, time: 295.447s, loss: 0.300, train accuracy: 0.898\nepoch: 3, validation loss: 0.3725654235653786\nepoch: 4, time: 314.570s, loss: 0.356, train accuracy: 0.859\nepoch: 4, time: 319.388s, loss: 0.290, train accuracy: 0.906\nepoch: 4, time: 324.131s, loss: 0.360, train accuracy: 0.852\nepoch: 4, time: 328.885s, loss: 0.316, train accuracy: 0.891\nepoch: 4, time: 333.499s, loss: 0.240, train accuracy: 0.898\nepoch: 4, time: 338.187s, loss: 0.460, train accuracy: 0.789\nepoch: 4, time: 342.845s, loss: 0.299, train accuracy: 0.883\nepoch: 4, time: 347.577s, loss: 0.313, train accuracy: 0.852\nepoch: 4, time: 352.290s, loss: 0.445, train accuracy: 0.859\nepoch: 4, time: 356.954s, loss: 0.371, train accuracy: 0.875\nepoch: 4, time: 361.684s, loss: 0.307, train accuracy: 0.875\nepoch: 4, time: 366.405s, loss: 0.314, train accuracy: 0.906\nepoch: 4, time: 371.268s, loss: 0.295, train accuracy: 0.914\nepoch: 4, time: 375.973s, loss: 0.281, train accuracy: 0.883\nepoch: 4, time: 380.615s, loss: 0.365, train accuracy: 0.859\nepoch: 4, time: 385.267s, loss: 0.249, train accuracy: 0.883\nepoch: 4, time: 389.864s, loss: 0.462, train accuracy: 0.844\nepoch: 4, time: 394.497s, loss: 0.249, train accuracy: 0.906\nepoch: 4, time: 399.186s, loss: 0.368, train accuracy: 0.836\nepoch: 4, validation loss: 0.34379442763735235\nepoch: 5, time: 418.341s, loss: 0.321, train accuracy: 0.875\nepoch: 5, time: 423.206s, loss: 0.312, train accuracy: 0.859\nepoch: 5, time: 427.850s, loss: 0.260, train accuracy: 0.914\nepoch: 5, time: 432.586s, loss: 0.347, train accuracy: 0.852\nepoch: 5, time: 437.394s, loss: 0.265, train accuracy: 0.875\nepoch: 5, time: 442.102s, loss: 0.319, train accuracy: 0.859\nepoch: 5, time: 446.706s, loss: 0.338, train accuracy: 0.875\nepoch: 5, time: 451.415s, loss: 0.342, train accuracy: 0.883\nepoch: 5, time: 456.041s, loss: 0.297, train accuracy: 0.883\nepoch: 5, time: 460.824s, loss: 0.385, train accuracy: 0.844\nepoch: 5, time: 465.517s, loss: 0.489, train accuracy: 0.852\nepoch: 5, time: 470.286s, loss: 0.285, train accuracy: 0.875\nepoch: 5, time: 474.982s, loss: 0.275, train accuracy: 0.859\nepoch: 5, time: 479.599s, loss: 0.437, train accuracy: 0.820\nepoch: 5, time: 484.302s, loss: 0.340, train accuracy: 0.883\nepoch: 5, time: 489.069s, loss: 0.371, train accuracy: 0.844\nepoch: 5, time: 493.671s, loss: 0.339, train accuracy: 0.875\nepoch: 5, time: 498.456s, loss: 0.293, train accuracy: 0.891\nepoch: 5, time: 503.175s, loss: 0.378, train accuracy: 0.844\nepoch: 5, validation loss: 0.36943064375853996\nepoch: 6, time: 521.877s, loss: 0.335, train accuracy: 0.883\nepoch: 6, time: 526.647s, loss: 0.316, train accuracy: 0.891\nepoch: 6, time: 531.304s, loss: 0.228, train accuracy: 0.891\nepoch: 6, time: 535.962s, loss: 0.381, train accuracy: 0.875\nepoch: 6, time: 540.660s, loss: 0.267, train accuracy: 0.867\nepoch: 6, time: 545.252s, loss: 0.228, train accuracy: 0.891\nepoch: 6, time: 549.910s, loss: 0.405, train accuracy: 0.836\nepoch: 6, time: 554.617s, loss: 0.316, train accuracy: 0.867\nepoch: 6, time: 559.353s, loss: 0.302, train accuracy: 0.914\nepoch: 6, time: 564.007s, loss: 0.302, train accuracy: 0.906\nepoch: 6, time: 568.611s, loss: 0.376, train accuracy: 0.875\nepoch: 6, time: 573.221s, loss: 0.296, train accuracy: 0.898\nepoch: 6, time: 577.914s, loss: 0.286, train accuracy: 0.898\nepoch: 6, time: 582.717s, loss: 0.288, train accuracy: 0.914\nepoch: 6, time: 587.327s, loss: 0.297, train accuracy: 0.906\nepoch: 6, time: 592.066s, loss: 0.318, train accuracy: 0.859\nepoch: 
6, time: 596.786s, loss: 0.225, train accuracy: 0.922\nepoch: 6, time: 601.616s, loss: 0.397, train accuracy: 0.883\nepoch: 6, time: 606.310s, loss: 0.355, train accuracy: 0.875\nepoch: 6, validation loss: 0.3386965770838357\nepoch: 7, time: 625.394s, loss: 0.265, train accuracy: 0.883\nepoch: 7, time: 630.164s, loss: 0.253, train accuracy: 0.914\nepoch: 7, time: 634.777s, loss: 0.346, train accuracy: 0.883\nepoch: 7, time: 639.388s, loss: 0.297, train accuracy: 0.906\nepoch: 7, time: 644.046s, loss: 0.251, train accuracy: 0.914\nepoch: 7, time: 648.872s, loss: 0.308, train accuracy: 0.891\nepoch: 7, time: 653.509s, loss: 0.259, train accuracy: 0.914\nepoch: 7, time: 658.221s, loss: 0.339, train accuracy: 0.891\nepoch: 7, time: 662.945s, loss: 0.463, train accuracy: 0.820\nepoch: 7, time: 667.662s, loss: 0.341, train accuracy: 0.844\nepoch: 7, time: 672.273s, loss: 0.317, train accuracy: 0.867\nepoch: 7, time: 676.999s, loss: 0.368, train accuracy: 0.836\nepoch: 7, time: 681.727s, loss: 0.235, train accuracy: 0.922\nepoch: 7, time: 686.340s, loss: 0.260, train accuracy: 0.898\nepoch: 7, time: 691.035s, loss: 0.385, train accuracy: 0.859\nepoch: 7, time: 695.624s, loss: 0.282, train accuracy: 0.883\nepoch: 7, time: 700.340s, loss: 0.279, train accuracy: 0.906\nepoch: 7, time: 705.065s, loss: 0.333, train accuracy: 0.906\nepoch: 7, time: 709.742s, loss: 0.277, train accuracy: 0.891\nepoch: 7, validation loss: 0.351558247450064\nepoch: 8, time: 728.711s, loss: 0.334, train accuracy: 0.906\nepoch: 8, time: 733.374s, loss: 0.296, train accuracy: 0.883\nepoch: 8, time: 738.036s, loss: 0.289, train accuracy: 0.867\nepoch: 8, time: 742.694s, loss: 0.297, train accuracy: 0.898\nepoch: 8, time: 747.385s, loss: 0.423, train accuracy: 0.867\nepoch: 8, time: 752.042s, loss: 0.287, train accuracy: 0.898\nepoch: 8, time: 756.685s, loss: 0.343, train accuracy: 0.867\nepoch: 8, time: 761.515s, loss: 0.393, train accuracy: 0.898\nepoch: 8, time: 766.151s, loss: 0.226, train accuracy: 0.922\nepoch: 8, time: 770.823s, loss: 0.395, train accuracy: 0.852\nepoch: 8, time: 775.523s, loss: 0.244, train accuracy: 0.930\nepoch: 8, time: 780.164s, loss: 0.286, train accuracy: 0.914\nepoch: 8, time: 784.775s, loss: 0.336, train accuracy: 0.875\nepoch: 8, time: 789.451s, loss: 0.263, train accuracy: 0.906\nepoch: 8, time: 794.065s, loss: 0.287, train accuracy: 0.914\nepoch: 8, time: 798.864s, loss: 0.284, train accuracy: 0.891\nepoch: 8, time: 803.498s, loss: 0.271, train accuracy: 0.891\nepoch: 8, time: 808.134s, loss: 0.323, train accuracy: 0.891\nepoch: 8, time: 812.771s, loss: 0.194, train accuracy: 0.945\nepoch: 8, validation loss: 0.33390339756253434\nepoch: 9, time: 831.367s, loss: 0.324, train accuracy: 0.883\nepoch: 9, time: 836.192s, loss: 0.227, train accuracy: 0.898\nepoch: 9, time: 840.814s, loss: 0.209, train accuracy: 0.898\nepoch: 9, time: 845.405s, loss: 0.430, train accuracy: 0.820\nepoch: 9, time: 850.037s, loss: 0.202, train accuracy: 0.906\nepoch: 9, time: 854.687s, loss: 0.263, train accuracy: 0.891\nepoch: 9, time: 859.262s, loss: 0.271, train accuracy: 0.891\nepoch: 9, time: 863.948s, loss: 0.237, train accuracy: 0.922\nepoch: 9, time: 868.565s, loss: 0.257, train accuracy: 0.891\nepoch: 9, time: 873.242s, loss: 0.292, train accuracy: 0.891\nepoch: 9, time: 877.806s, loss: 0.403, train accuracy: 0.852\nepoch: 9, time: 882.445s, loss: 0.194, train accuracy: 0.930\nepoch: 9, time: 887.071s, loss: 0.250, train accuracy: 0.898\nepoch: 9, time: 891.675s, loss: 0.327, train accuracy: 0.891\nepoch: 9, 
time: 896.378s, loss: 0.319, train accuracy: 0.867\nepoch: 9, time: 901.116s, loss: 0.365, train accuracy: 0.844\nepoch: 9, time: 905.736s, loss: 0.293, train accuracy: 0.906\nepoch: 9, time: 910.359s, loss: 0.259, train accuracy: 0.883\nepoch: 9, time: 915.131s, loss: 0.287, train accuracy: 0.898\nepoch: 9, validation loss: 0.3674401198940745\nepoch: 10, time: 933.734s, loss: 0.316, train accuracy: 0.875\nepoch: 10, time: 938.471s, loss: 0.229, train accuracy: 0.938\nepoch: 10, time: 943.189s, loss: 0.213, train accuracy: 0.953\nepoch: 10, time: 947.895s, loss: 0.280, train accuracy: 0.891\nepoch: 10, time: 952.570s, loss: 0.342, train accuracy: 0.859\nepoch: 10, time: 957.253s, loss: 0.219, train accuracy: 0.914\nepoch: 10, time: 961.930s, loss: 0.218, train accuracy: 0.906\nepoch: 10, time: 966.644s, loss: 0.212, train accuracy: 0.930\nepoch: 10, time: 971.306s, loss: 0.271, train accuracy: 0.875\nepoch: 10, time: 975.992s, loss: 0.273, train accuracy: 0.914\nepoch: 10, time: 980.603s, loss: 0.249, train accuracy: 0.891\nepoch: 10, time: 985.224s, loss: 0.350, train accuracy: 0.875\nepoch: 10, time: 989.991s, loss: 0.316, train accuracy: 0.906\nepoch: 10, time: 994.766s, loss: 0.177, train accuracy: 0.938\nepoch: 10, time: 999.431s, loss: 0.266, train accuracy: 0.906\nepoch: 10, time: 1004.176s, loss: 0.233, train accuracy: 0.898\nepoch: 10, time: 1008.839s, loss: 0.274, train accuracy: 0.891\nepoch: 10, time: 1013.480s, loss: 0.400, train accuracy: 0.828\nepoch: 10, time: 1018.085s, loss: 0.258, train accuracy: 0.914\nepoch: 10, validation loss: 0.3640514860021026\nepoch: 11, time: 1036.607s, loss: 0.240, train accuracy: 0.891\nepoch: 11, time: 1041.501s, loss: 0.348, train accuracy: 0.828\nepoch: 11, time: 1046.057s, loss: 0.247, train accuracy: 0.891\nepoch: 11, time: 1050.617s, loss: 0.352, train accuracy: 0.859\nepoch: 11, time: 1055.307s, loss: 0.215, train accuracy: 0.914\nepoch: 11, time: 1059.978s, loss: 0.245, train accuracy: 0.922\nepoch: 11, time: 1064.638s, loss: 0.290, train accuracy: 0.875\nepoch: 11, time: 1069.288s, loss: 0.274, train accuracy: 0.938\nepoch: 11, time: 1073.889s, loss: 0.291, train accuracy: 0.891\nepoch: 11, time: 1078.472s, loss: 0.283, train accuracy: 0.883\nepoch: 11, time: 1083.101s, loss: 0.172, train accuracy: 0.930\nepoch: 11, time: 1087.807s, loss: 0.296, train accuracy: 0.898\nepoch: 11, time: 1092.395s, loss: 0.342, train accuracy: 0.891\nepoch: 11, time: 1096.989s, loss: 0.213, train accuracy: 0.914\nepoch: 11, time: 1101.663s, loss: 0.256, train accuracy: 0.898\nepoch: 11, time: 1106.318s, loss: 0.256, train accuracy: 0.914\nepoch: 11, time: 1111.011s, loss: 0.397, train accuracy: 0.852\nepoch: 11, time: 1115.652s, loss: 0.413, train accuracy: 0.859\nepoch: 11, time: 1120.310s, loss: 0.319, train accuracy: 0.891\nepoch: 11, validation loss: 0.3498491298224626\nepoch: 12, time: 1138.920s, loss: 0.269, train accuracy: 0.883\nepoch: 12, time: 1143.723s, loss: 0.324, train accuracy: 0.883\nepoch: 12, time: 1148.419s, loss: 0.219, train accuracy: 0.906\nepoch: 12, time: 1153.032s, loss: 0.278, train accuracy: 0.867\nepoch: 12, time: 1157.618s, loss: 0.210, train accuracy: 0.914\nepoch: 12, time: 1162.282s, loss: 0.357, train accuracy: 0.867\nepoch: 12, time: 1166.867s, loss: 0.278, train accuracy: 0.906\nepoch: 12, time: 1171.520s, loss: 0.307, train accuracy: 0.883\nepoch: 12, time: 1176.115s, loss: 0.230, train accuracy: 0.906\nepoch: 12, time: 1180.753s, loss: 0.254, train accuracy: 0.898\nepoch: 12, time: 1185.501s, loss: 0.213, train 
accuracy: 0.922\nepoch: 12, time: 1190.137s, loss: 0.255, train accuracy: 0.914\nepoch: 12, time: 1194.754s, loss: 0.160, train accuracy: 0.953\nepoch: 12, time: 1199.546s, loss: 0.275, train accuracy: 0.906\nepoch: 12, time: 1204.224s, loss: 0.257, train accuracy: 0.859\nepoch: 12, time: 1208.867s, loss: 0.362, train accuracy: 0.883\nepoch: 12, time: 1213.496s, loss: 0.274, train accuracy: 0.914\nepoch: 12, time: 1218.129s, loss: 0.151, train accuracy: 0.945\nepoch: 12, time: 1222.795s, loss: 0.262, train accuracy: 0.898\nepoch: 12, validation loss: 0.35557099975057754\nepoch: 13, time: 1241.660s, loss: 0.292, train accuracy: 0.875\nepoch: 13, time: 1246.540s, loss: 0.290, train accuracy: 0.883\nepoch: 13, time: 1251.291s, loss: 0.218, train accuracy: 0.945\nepoch: 13, time: 1255.923s, loss: 0.309, train accuracy: 0.914\nepoch: 13, time: 1260.826s, loss: 0.145, train accuracy: 0.922\nepoch: 13, time: 1265.483s, loss: 0.350, train accuracy: 0.883\nepoch: 13, time: 1270.138s, loss: 0.263, train accuracy: 0.914\nepoch: 13, time: 1274.790s, loss: 0.189, train accuracy: 0.930\nepoch: 13, time: 1279.329s, loss: 0.267, train accuracy: 0.883\nepoch: 13, time: 1283.924s, loss: 0.383, train accuracy: 0.852\nepoch: 13, time: 1288.534s, loss: 0.184, train accuracy: 0.914\nepoch: 13, time: 1293.142s, loss: 0.339, train accuracy: 0.867\nepoch: 13, time: 1297.732s, loss: 0.189, train accuracy: 0.922\nepoch: 13, time: 1302.438s, loss: 0.277, train accuracy: 0.914\nepoch: 13, time: 1307.056s, loss: 0.136, train accuracy: 0.953\nepoch: 13, time: 1311.845s, loss: 0.191, train accuracy: 0.930\nepoch: 13, time: 1316.507s, loss: 0.286, train accuracy: 0.898\nepoch: 13, time: 1321.228s, loss: 0.240, train accuracy: 0.914\nepoch: 13, time: 1325.912s, loss: 0.219, train accuracy: 0.914\nepoch: 13, validation loss: 0.35088980137539316\nepoch: 14, time: 1344.664s, loss: 0.266, train accuracy: 0.898\nepoch: 14, time: 1349.504s, loss: 0.297, train accuracy: 0.883\nepoch: 14, time: 1354.128s, loss: 0.362, train accuracy: 0.844\nepoch: 14, time: 1358.890s, loss: 0.247, train accuracy: 0.914\nepoch: 14, time: 1363.581s, loss: 0.186, train accuracy: 0.914\nepoch: 14, time: 1368.237s, loss: 0.339, train accuracy: 0.883\nepoch: 14, time: 1372.959s, loss: 0.266, train accuracy: 0.906\nepoch: 14, time: 1377.563s, loss: 0.199, train accuracy: 0.914\nepoch: 14, time: 1382.254s, loss: 0.201, train accuracy: 0.914\nepoch: 14, time: 1386.944s, loss: 0.137, train accuracy: 0.938\nepoch: 14, time: 1391.594s, loss: 0.245, train accuracy: 0.898\nepoch: 14, time: 1396.221s, loss: 0.320, train accuracy: 0.875\nepoch: 14, time: 1400.962s, loss: 0.220, train accuracy: 0.914\nepoch: 14, time: 1405.614s, loss: 0.270, train accuracy: 0.914\nepoch: 14, time: 1410.360s, loss: 0.218, train accuracy: 0.930\nepoch: 14, time: 1414.977s, loss: 0.311, train accuracy: 0.930\nepoch: 14, time: 1419.759s, loss: 0.305, train accuracy: 0.859\nepoch: 14, time: 1424.462s, loss: 0.355, train accuracy: 0.867\nepoch: 14, time: 1429.169s, loss: 0.285, train accuracy: 0.891\nepoch: 14, validation loss: 0.35084098485360016\nepoch: 15, time: 1448.316s, loss: 0.178, train accuracy: 0.930\nepoch: 15, time: 1453.065s, loss: 0.324, train accuracy: 0.898\nepoch: 15, time: 1457.820s, loss: 0.317, train accuracy: 0.914\nepoch: 15, time: 1462.566s, loss: 0.302, train accuracy: 0.883\nepoch: 15, time: 1467.141s, loss: 0.305, train accuracy: 0.891\nepoch: 15, time: 1471.822s, loss: 0.182, train accuracy: 0.922\nepoch: 15, time: 1476.543s, loss: 0.239, train accuracy: 
0.906\nepoch: 15, time: 1481.234s, loss: 0.185, train accuracy: 0.938\nepoch: 15, time: 1485.888s, loss: 0.311, train accuracy: 0.859\nepoch: 15, time: 1490.605s, loss: 0.332, train accuracy: 0.898\nepoch: 15, time: 1495.237s, loss: 0.236, train accuracy: 0.922\nepoch: 15, time: 1499.889s, loss: 0.241, train accuracy: 0.914\nepoch: 15, time: 1504.548s, loss: 0.335, train accuracy: 0.914\nepoch: 15, time: 1509.176s, loss: 0.304, train accuracy: 0.922\nepoch: 15, time: 1513.761s, loss: 0.238, train accuracy: 0.906\nepoch: 15, time: 1518.550s, loss: 0.251, train accuracy: 0.906\nepoch: 15, time: 1523.178s, loss: 0.280, train accuracy: 0.906\nepoch: 15, time: 1527.840s, loss: 0.221, train accuracy: 0.906\nepoch: 15, time: 1532.524s, loss: 0.277, train accuracy: 0.883\nepoch: 15, validation loss: 0.3633408567417405\nepoch: 16, time: 1551.234s, loss: 0.191, train accuracy: 0.922\nepoch: 16, time: 1556.047s, loss: 0.202, train accuracy: 0.938\nepoch: 16, time: 1560.724s, loss: 0.177, train accuracy: 0.953\nepoch: 16, time: 1565.372s, loss: 0.152, train accuracy: 0.938\nepoch: 16, time: 1570.000s, loss: 0.235, train accuracy: 0.922\nepoch: 16, time: 1574.690s, loss: 0.261, train accuracy: 0.875\nepoch: 16, time: 1579.329s, loss: 0.298, train accuracy: 0.898\nepoch: 16, time: 1584.051s, loss: 0.282, train accuracy: 0.898\nepoch: 16, time: 1588.604s, loss: 0.186, train accuracy: 0.906\nepoch: 16, time: 1593.248s, loss: 0.250, train accuracy: 0.914\nepoch: 16, time: 1597.902s, loss: 0.305, train accuracy: 0.898\nepoch: 16, time: 1602.550s, loss: 0.216, train accuracy: 0.914\nepoch: 16, time: 1607.156s, loss: 0.235, train accuracy: 0.891\nepoch: 16, time: 1611.777s, loss: 0.347, train accuracy: 0.883\nepoch: 16, time: 1616.503s, loss: 0.256, train accuracy: 0.938\nepoch: 16, time: 1621.141s, loss: 0.283, train accuracy: 0.898\nepoch: 16, time: 1625.803s, loss: 0.285, train accuracy: 0.891\nepoch: 16, time: 1630.434s, loss: 0.294, train accuracy: 0.898\nepoch: 16, time: 1635.081s, loss: 0.239, train accuracy: 0.906\nepoch: 16, validation loss: 0.35872740595579655\nepoch: 17, time: 1654.056s, loss: 0.257, train accuracy: 0.883\nepoch: 17, time: 1658.861s, loss: 0.309, train accuracy: 0.906\nepoch: 17, time: 1663.572s, loss: 0.193, train accuracy: 0.914\nepoch: 17, time: 1668.237s, loss: 0.197, train accuracy: 0.922\nepoch: 17, time: 1672.921s, loss: 0.265, train accuracy: 0.891\nepoch: 17, time: 1677.540s, loss: 0.213, train accuracy: 0.922\nepoch: 17, time: 1682.200s, loss: 0.180, train accuracy: 0.930\nepoch: 17, time: 1686.803s, loss: 0.265, train accuracy: 0.875\nepoch: 17, time: 1691.433s, loss: 0.231, train accuracy: 0.922\nepoch: 17, time: 1696.158s, loss: 0.256, train accuracy: 0.883\nepoch: 17, time: 1700.817s, loss: 0.180, train accuracy: 0.922\nepoch: 17, time: 1705.500s, loss: 0.151, train accuracy: 0.938\nepoch: 17, time: 1710.270s, loss: 0.400, train accuracy: 0.875\nepoch: 17, time: 1714.942s, loss: 0.207, train accuracy: 0.914\nepoch: 17, time: 1719.674s, loss: 0.180, train accuracy: 0.930\nepoch: 17, time: 1724.399s, loss: 0.239, train accuracy: 0.906\nepoch: 17, time: 1729.048s, loss: 0.358, train accuracy: 0.906\nepoch: 17, time: 1733.696s, loss: 0.282, train accuracy: 0.875\nepoch: 17, time: 1738.490s, loss: 0.198, train accuracy: 0.914\nepoch: 17, validation loss: 0.366751571009154\nepoch: 18, time: 1757.617s, loss: 0.212, train accuracy: 0.898\nepoch: 18, time: 1762.332s, loss: 0.238, train accuracy: 0.875\nepoch: 18, time: 1767.006s, loss: 0.163, train accuracy: 0.961\nepoch: 18, 
time: 1771.673s, loss: 0.287, train accuracy: 0.898\nepoch: 18, time: 1776.241s, loss: 0.194, train accuracy: 0.930\nepoch: 18, time: 1780.913s, loss: 0.171, train accuracy: 0.922\nepoch: 18, time: 1785.595s, loss: 0.198, train accuracy: 0.922\nepoch: 18, time: 1790.203s, loss: 0.256, train accuracy: 0.914\nepoch: 18, time: 1795.018s, loss: 0.273, train accuracy: 0.898\nepoch: 18, time: 1799.634s, loss: 0.305, train accuracy: 0.891\nepoch: 18, time: 1804.288s, loss: 0.273, train accuracy: 0.922\nepoch: 18, time: 1808.955s, loss: 0.166, train accuracy: 0.922\nepoch: 18, time: 1813.596s, loss: 0.285, train accuracy: 0.945\nepoch: 18, time: 1818.211s, loss: 0.227, train accuracy: 0.914\nepoch: 18, time: 1822.918s, loss: 0.208, train accuracy: 0.938\nepoch: 18, time: 1827.643s, loss: 0.180, train accuracy: 0.922\nepoch: 18, time: 1832.348s, loss: 0.206, train accuracy: 0.914\nepoch: 18, time: 1837.032s, loss: 0.281, train accuracy: 0.922\nepoch: 18, time: 1841.613s, loss: 0.237, train accuracy: 0.891\nepoch: 18, validation loss: 0.37612124575353634\nepoch: 19, time: 1860.522s, loss: 0.174, train accuracy: 0.922\nepoch: 19, time: 1865.168s, loss: 0.141, train accuracy: 0.938\nepoch: 19, time: 1869.865s, loss: 0.224, train accuracy: 0.891\nepoch: 19, time: 1874.622s, loss: 0.314, train accuracy: 0.930\nepoch: 19, time: 1879.270s, loss: 0.214, train accuracy: 0.938\nepoch: 19, time: 1883.900s, loss: 0.187, train accuracy: 0.922\nepoch: 19, time: 1888.539s, loss: 0.151, train accuracy: 0.945\nepoch: 19, time: 1893.138s, loss: 0.145, train accuracy: 0.945\nepoch: 19, time: 1897.724s, loss: 0.273, train accuracy: 0.883\nepoch: 19, time: 1902.471s, loss: 0.344, train accuracy: 0.891\nepoch: 19, time: 1907.060s, loss: 0.184, train accuracy: 0.891\nepoch: 19, time: 1911.720s, loss: 0.147, train accuracy: 0.938\nepoch: 19, time: 1916.332s, loss: 0.199, train accuracy: 0.938\nepoch: 19, time: 1920.958s, loss: 0.163, train accuracy: 0.945\nepoch: 19, time: 1925.597s, loss: 0.236, train accuracy: 0.883\nepoch: 19, time: 1930.314s, loss: 0.222, train accuracy: 0.898\nepoch: 19, time: 1935.019s, loss: 0.337, train accuracy: 0.859\nepoch: 19, time: 1939.611s, loss: 0.217, train accuracy: 0.906\nepoch: 19, time: 1944.205s, loss: 0.177, train accuracy: 0.922\nepoch: 19, validation loss: 0.3897295405806255\nepoch: 20, time: 1962.930s, loss: 0.305, train accuracy: 0.875\nepoch: 20, time: 1967.684s, loss: 0.197, train accuracy: 0.930\nepoch: 20, time: 1972.338s, loss: 0.201, train accuracy: 0.922\nepoch: 20, time: 1976.867s, loss: 0.194, train accuracy: 0.938\nepoch: 20, time: 1981.507s, loss: 0.292, train accuracy: 0.914\nepoch: 20, time: 1986.295s, loss: 0.247, train accuracy: 0.922\nepoch: 20, time: 1990.981s, loss: 0.257, train accuracy: 0.945\nepoch: 20, time: 1995.580s, loss: 0.328, train accuracy: 0.906\nepoch: 20, time: 2000.240s, loss: 0.237, train accuracy: 0.914\nepoch: 20, time: 2005.067s, loss: 0.162, train accuracy: 0.953\nepoch: 20, time: 2009.813s, loss: 0.473, train accuracy: 0.844\nepoch: 20, time: 2014.534s, loss: 0.216, train accuracy: 0.914\nepoch: 20, time: 2019.251s, loss: 0.261, train accuracy: 0.906\nepoch: 20, time: 2023.885s, loss: 0.273, train accuracy: 0.906\nepoch: 20, time: 2028.498s, loss: 0.254, train accuracy: 0.906\nepoch: 20, time: 2033.273s, loss: 0.320, train accuracy: 0.906\nepoch: 20, time: 2037.838s, loss: 0.326, train accuracy: 0.906\nepoch: 20, time: 2042.501s, loss: 0.155, train accuracy: 0.938\nepoch: 20, time: 2047.099s, loss: 0.212, train accuracy: 0.938\nepoch: 20, 
validation loss: 0.36104964082047886\nepoch: 21, time: 2065.792s, loss: 0.155, train accuracy: 0.945\nepoch: 21, time: 2070.545s, loss: 0.203, train accuracy: 0.914\nepoch: 21, time: 2075.342s, loss: 0.367, train accuracy: 0.867\nepoch: 21, time: 2080.006s, loss: 0.238, train accuracy: 0.914\nepoch: 21, time: 2084.746s, loss: 0.370, train accuracy: 0.883\nepoch: 21, time: 2089.461s, loss: 0.196, train accuracy: 0.930\nepoch: 21, time: 2094.160s, loss: 0.152, train accuracy: 0.953\nepoch: 21, time: 2098.921s, loss: 0.226, train accuracy: 0.898\nepoch: 21, time: 2103.549s, loss: 0.234, train accuracy: 0.922\nepoch: 21, time: 2108.237s, loss: 0.221, train accuracy: 0.914\nepoch: 21, time: 2112.804s, loss: 0.422, train accuracy: 0.867\nepoch: 21, time: 2117.448s, loss: 0.194, train accuracy: 0.945\nepoch: 21, time: 2122.111s, loss: 0.197, train accuracy: 0.930\nepoch: 21, time: 2126.740s, loss: 0.234, train accuracy: 0.891\nepoch: 21, time: 2131.366s, loss: 0.208, train accuracy: 0.906\nepoch: 21, time: 2135.986s, loss: 0.239, train accuracy: 0.922\nepoch: 21, time: 2140.532s, loss: 0.283, train accuracy: 0.914\nepoch: 21, time: 2145.262s, loss: 0.339, train accuracy: 0.906\nepoch: 21, time: 2149.997s, loss: 0.212, train accuracy: 0.922\nepoch: 21, validation loss: 0.37477968998555183\nepoch: 22, time: 2168.807s, loss: 0.192, train accuracy: 0.914\nepoch: 22, time: 2173.513s, loss: 0.295, train accuracy: 0.883\nepoch: 22, time: 2178.165s, loss: 0.201, train accuracy: 0.945\nepoch: 22, time: 2182.802s, loss: 0.189, train accuracy: 0.945\nepoch: 22, time: 2187.523s, loss: 0.236, train accuracy: 0.906\nepoch: 22, time: 2192.140s, loss: 0.298, train accuracy: 0.906\nepoch: 22, time: 2196.731s, loss: 0.229, train accuracy: 0.898\nepoch: 22, time: 2201.433s, loss: 0.283, train accuracy: 0.898\nepoch: 22, time: 2206.046s, loss: 0.362, train accuracy: 0.859\nepoch: 22, time: 2210.672s, loss: 0.184, train accuracy: 0.945\nepoch: 22, time: 2215.422s, loss: 0.248, train accuracy: 0.922\nepoch: 22, time: 2219.992s, loss: 0.171, train accuracy: 0.938\nepoch: 22, time: 2224.722s, loss: 0.149, train accuracy: 0.922\nepoch: 22, time: 2229.298s, loss: 0.134, train accuracy: 0.938\nepoch: 22, time: 2234.062s, loss: 0.208, train accuracy: 0.914\nepoch: 22, time: 2238.855s, loss: 0.367, train accuracy: 0.891\nepoch: 22, time: 2243.512s, loss: 0.225, train accuracy: 0.922\nepoch: 22, time: 2248.315s, loss: 0.212, train accuracy: 0.930\nepoch: 22, time: 2253.007s, loss: 0.170, train accuracy: 0.945\nepoch: 22, validation loss: 0.3869190960804791\nepoch: 23, time: 2271.483s, loss: 0.198, train accuracy: 0.930\nepoch: 23, time: 2276.200s, loss: 0.193, train accuracy: 0.938\nepoch: 23, time: 2280.866s, loss: 0.285, train accuracy: 0.898\nepoch: 23, time: 2285.510s, loss: 0.230, train accuracy: 0.906\nepoch: 23, time: 2290.117s, loss: 0.243, train accuracy: 0.914\nepoch: 23, time: 2294.714s, loss: 0.135, train accuracy: 0.930\nepoch: 23, time: 2299.368s, loss: 0.245, train accuracy: 0.898\nepoch: 23, time: 2304.073s, loss: 0.247, train accuracy: 0.891\nepoch: 23, time: 2308.646s, loss: 0.170, train accuracy: 0.938\nepoch: 23, time: 2313.264s, loss: 0.231, train accuracy: 0.922\nepoch: 23, time: 2317.961s, loss: 0.301, train accuracy: 0.891\nepoch: 23, time: 2322.605s, loss: 0.219, train accuracy: 0.914\nepoch: 23, time: 2327.288s, loss: 0.445, train accuracy: 0.867\nepoch: 23, time: 2331.953s, loss: 0.157, train accuracy: 0.914\nepoch: 23, time: 2336.669s, loss: 0.272, train accuracy: 0.898\nepoch: 23, time: 2341.340s, 
loss: 0.187, train accuracy: 0.930\nepoch: 23, time: 2345.885s, loss: 0.321, train accuracy: 0.859\nepoch: 23, time: 2350.564s, loss: 0.190, train accuracy: 0.922\nepoch: 23, time: 2355.208s, loss: 0.260, train accuracy: 0.914\nepoch: 23, validation loss: 0.36951833326361583\nepoch: 24, time: 2374.055s, loss: 0.170, train accuracy: 0.922\nepoch: 24, time: 2378.935s, loss: 0.125, train accuracy: 0.961\nepoch: 24, time: 2383.538s, loss: 0.413, train accuracy: 0.852\nepoch: 24, time: 2388.133s, loss: 0.266, train accuracy: 0.922\nepoch: 24, time: 2392.754s, loss: 0.114, train accuracy: 0.969\nepoch: 24, time: 2397.450s, loss: 0.246, train accuracy: 0.898\nepoch: 24, time: 2402.181s, loss: 0.105, train accuracy: 0.969\nepoch: 24, time: 2406.827s, loss: 0.309, train accuracy: 0.875\nepoch: 24, time: 2411.549s, loss: 0.138, train accuracy: 0.930\nepoch: 24, time: 2416.261s, loss: 0.286, train accuracy: 0.914\nepoch: 24, time: 2420.884s, loss: 0.195, train accuracy: 0.953\nepoch: 24, time: 2425.480s, loss: 0.319, train accuracy: 0.891\nepoch: 24, time: 2430.179s, loss: 0.224, train accuracy: 0.906\nepoch: 24, time: 2434.830s, loss: 0.218, train accuracy: 0.891\nepoch: 24, time: 2439.546s, loss: 0.309, train accuracy: 0.867\nepoch: 24, time: 2444.152s, loss: 0.203, train accuracy: 0.906\nepoch: 24, time: 2448.778s, loss: 0.195, train accuracy: 0.930\nepoch: 24, time: 2453.397s, loss: 0.257, train accuracy: 0.914\nepoch: 24, time: 2458.077s, loss: 0.117, train accuracy: 0.953\nepoch: 24, validation loss: 0.3921877811077053\nepoch: 25, time: 2477.139s, loss: 0.196, train accuracy: 0.914\nepoch: 25, time: 2481.886s, loss: 0.153, train accuracy: 0.945\nepoch: 25, time: 2486.634s, loss: 0.174, train accuracy: 0.945\nepoch: 25, time: 2491.327s, loss: 0.139, train accuracy: 0.938\nepoch: 25, time: 2495.921s, loss: 0.185, train accuracy: 0.914\nepoch: 25, time: 2500.571s, loss: 0.174, train accuracy: 0.938\nepoch: 25, time: 2505.262s, loss: 0.187, train accuracy: 0.922\nepoch: 25, time: 2509.925s, loss: 0.262, train accuracy: 0.898\nepoch: 25, time: 2514.745s, loss: 0.236, train accuracy: 0.922\nepoch: 25, time: 2519.389s, loss: 0.225, train accuracy: 0.922\nepoch: 25, time: 2524.055s, loss: 0.210, train accuracy: 0.906\nepoch: 25, time: 2528.734s, loss: 0.269, train accuracy: 0.914\nepoch: 25, time: 2533.451s, loss: 0.176, train accuracy: 0.922\nepoch: 25, time: 2538.184s, loss: 0.168, train accuracy: 0.945\nepoch: 25, time: 2542.870s, loss: 0.175, train accuracy: 0.938\nepoch: 25, time: 2547.531s, loss: 0.288, train accuracy: 0.867\nepoch: 25, time: 2552.159s, loss: 0.196, train accuracy: 0.945\nepoch: 25, time: 2556.859s, loss: 0.150, train accuracy: 0.922\nepoch: 25, time: 2561.491s, loss: 0.210, train accuracy: 0.922\nepoch: 25, validation loss: 0.3790416830320602\nepoch: 26, time: 2580.352s, loss: 0.204, train accuracy: 0.930\nepoch: 26, time: 2585.223s, loss: 0.281, train accuracy: 0.906\nepoch: 26, time: 2589.950s, loss: 0.205, train accuracy: 0.938\nepoch: 26, time: 2594.597s, loss: 0.210, train accuracy: 0.930\nepoch: 26, time: 2599.251s, loss: 0.208, train accuracy: 0.938\nepoch: 26, time: 2603.975s, loss: 0.314, train accuracy: 0.891\nepoch: 26, time: 2608.725s, loss: 0.116, train accuracy: 0.930\nepoch: 26, time: 2613.435s, loss: 0.231, train accuracy: 0.922\nepoch: 26, time: 2618.105s, loss: 0.122, train accuracy: 0.961\nepoch: 26, time: 2622.838s, loss: 0.222, train accuracy: 0.922\nepoch: 26, time: 2627.461s, loss: 0.237, train accuracy: 0.914\nepoch: 26, time: 2632.131s, loss: 0.163, 
train accuracy: 0.914\nepoch: 26, time: 2636.830s, loss: 0.211, train accuracy: 0.914\nepoch: 26, time: 2641.578s, loss: 0.310, train accuracy: 0.883\nepoch: 26, time: 2646.267s, loss: 0.220, train accuracy: 0.930\nepoch: 26, time: 2650.954s, loss: 0.327, train accuracy: 0.875\nepoch: 26, time: 2655.667s, loss: 0.239, train accuracy: 0.930\nepoch: 26, time: 2660.294s, loss: 0.273, train accuracy: 0.898\nepoch: 26, time: 2664.977s, loss: 0.232, train accuracy: 0.922\nepoch: 26, validation loss: 0.41837670674710387\nepoch: 27, time: 2684.062s, loss: 0.181, train accuracy: 0.930\nepoch: 27, time: 2688.878s, loss: 0.196, train accuracy: 0.930\nepoch: 27, time: 2693.520s, loss: 0.164, train accuracy: 0.930\nepoch: 27, time: 2698.231s, loss: 0.226, train accuracy: 0.930\nepoch: 27, time: 2702.886s, loss: 0.187, train accuracy: 0.953\nepoch: 27, time: 2707.540s, loss: 0.339, train accuracy: 0.883\nepoch: 27, time: 2712.200s, loss: 0.271, train accuracy: 0.930\nepoch: 27, time: 2716.848s, loss: 0.314, train accuracy: 0.898\nepoch: 27, time: 2721.535s, loss: 0.122, train accuracy: 0.945\nepoch: 27, time: 2726.104s, loss: 0.231, train accuracy: 0.906\nepoch: 27, time: 2730.661s, loss: 0.211, train accuracy: 0.938\nepoch: 27, time: 2735.323s, loss: 0.247, train accuracy: 0.891\nepoch: 27, time: 2740.073s, loss: 0.236, train accuracy: 0.914\nepoch: 27, time: 2744.808s, loss: 0.178, train accuracy: 0.914\nepoch: 27, time: 2749.443s, loss: 0.175, train accuracy: 0.930\nepoch: 27, time: 2754.051s, loss: 0.232, train accuracy: 0.930\nepoch: 27, time: 2758.739s, loss: 0.187, train accuracy: 0.930\nepoch: 27, time: 2763.377s, loss: 0.338, train accuracy: 0.852\nepoch: 27, time: 2768.072s, loss: 0.160, train accuracy: 0.938\nepoch: 27, validation loss: 0.4109298016216709\nepoch: 28, time: 2787.178s, loss: 0.266, train accuracy: 0.898\nepoch: 28, time: 2792.070s, loss: 0.132, train accuracy: 0.938\nepoch: 28, time: 2796.641s, loss: 0.156, train accuracy: 0.930\nepoch: 28, time: 2801.401s, loss: 0.113, train accuracy: 0.953\nepoch: 28, time: 2806.160s, loss: 0.198, train accuracy: 0.930\nepoch: 28, time: 2810.814s, loss: 0.230, train accuracy: 0.906\nepoch: 28, time: 2815.425s, loss: 0.152, train accuracy: 0.922\nepoch: 28, time: 2820.030s, loss: 0.195, train accuracy: 0.930\nepoch: 28, time: 2824.754s, loss: 0.238, train accuracy: 0.922\nepoch: 28, time: 2829.394s, loss: 0.212, train accuracy: 0.891\nepoch: 28, time: 2834.162s, loss: 0.242, train accuracy: 0.945\nepoch: 28, time: 2838.839s, loss: 0.249, train accuracy: 0.930\nepoch: 28, time: 2843.514s, loss: 0.242, train accuracy: 0.898\nepoch: 28, time: 2848.185s, loss: 0.164, train accuracy: 0.930\nepoch: 28, time: 2852.857s, loss: 0.321, train accuracy: 0.906\nepoch: 28, time: 2857.632s, loss: 0.256, train accuracy: 0.891\nepoch: 28, time: 2862.291s, loss: 0.199, train accuracy: 0.945\nepoch: 28, time: 2866.958s, loss: 0.261, train accuracy: 0.914\nepoch: 28, time: 2871.632s, loss: 0.286, train accuracy: 0.906\nepoch: 28, validation loss: 0.4235882515401474\nEarly Stopping!\n"
]
],
[
[
"##Show validation loss",
"_____no_output_____"
]
],
[
[
"import pandas as pd\r\n\r\ndf_plot = pd.DataFrame(data = plot_valloss, columns=['Validation Loss', 'Epoch'])\r\ndf_plot.head()",
"_____no_output_____"
],
[
"import plotly.express as px\r\n\r\nfig = px.line(df_plot, x=\"Epoch\", y=\"Validation Loss\", title='Validation Loss DA')\r\nfig.show()",
"_____no_output_____"
],
[
"df_plot.to_csv(\"/content/drive/MyDrive/Deep Learning/Project/DA_valloss.csv\", index=False)",
"_____no_output_____"
]
],
[
[
"# Testing",
"_____no_output_____"
],
[
"##Test model on clean data",
"_____no_output_____"
]
],
[
[
"model_aug_inf = torch.load(\"/content/drive/MyDrive/Deep Learning/Project/model_aug.pckl\")",
"_____no_output_____"
],
[
"correct_total = 0\r\n\r\nfor i, (x_batch, y_batch) in enumerate(testloader):\r\n x_batch, y_batch = x_batch.to(device), y_batch.to(device) # Move the data to the device that is used\r\n\r\n y_pred = model_aug_inf(x_batch)\r\n y_pred_max = torch.argmax(y_pred, dim=1)\r\n\r\n correct_total += torch.sum(torch.eq(y_pred_max, y_batch)).item()\r\n\r\nprint(f'Accuracy on the test set: {correct_total / len(testset):.3f}')",
"Accuracy on the test set: 0.900\n"
],
[
"accuracy = correct_total / len(testset)\r\nz = 1.96 #for 95% CI\r\nn = len(testset)\r\n\r\ninterval = z * np.sqrt( (accuracy * (1 - accuracy)) / n)\r\ninterval",
"_____no_output_____"
]
],
[
[
"## Test model on perturbed data",
"_____no_output_____"
]
],
[
[
"import pandas as pd\r\nimport seaborn as sn\r\nfrom advertorch.utils import predict_from_logits",
"_____no_output_____"
],
[
"correct_total = 0\r\nall_preds = []\r\ny_true = []\r\n\r\nfor i, (x_batch, y_batch) in enumerate(testloader):\r\n x_batch, y_batch = x_batch.to(device), y_batch.to(device) # Move the data to the device that is used\r\n y_true.extend(y_batch)\r\n\r\n adv = adversary.perturb(x_batch, y_batch)\r\n\r\n y_adv_pred = predict_from_logits(model_aug_inf(adv))\r\n\r\n all_preds.extend(y_adv_pred)\r\n correct_total += torch.sum(torch.eq(y_adv_pred, y_batch)).item()\r\n\r\nprint(f'Accuracy on the test set: {correct_total / len(testset):.3f}')",
"Accuracy on the test set: 0.069\n"
],
[
"accuracy = correct_total / len(testset)\r\nz = 1.96 #for 95% CI\r\nn = len(all_preds)\r\n\r\ninterval = z * np.sqrt( (accuracy * (1 - accuracy)) / n)\r\ninterval",
"_____no_output_____"
]
],
[
[
"## Visualise results",
"_____no_output_____"
]
],
[
[
"y_true_int = [int(x.cpu()) for x in y_true]\r\ny_pred_int = [int(x.cpu()) for x in all_preds]",
"_____no_output_____"
],
[
"data = {'y_Actual': y_true_int,\r\n 'y_Predicted': y_pred_int\r\n }\r\ncm_df = pd.DataFrame(data, columns=['y_Actual', 'y_Predicted'])\r\n\r\ncm_df.head()",
"_____no_output_____"
],
[
"confusion_matrix = pd.crosstab(cm_df['y_Actual'], cm_df['y_Predicted'], rownames=['Actual'], colnames=['Predicted'])\r\nprint(confusion_matrix)",
"Predicted 0 1 2 3 4 5 6 7 8 9\nActual \n0 37 16 314 73 51 16 369 2 105 15\n1 216 115 134 58 15 27 341 1 82 8\n2 55 43 92 18 60 3 369 0 334 21\n3 117 14 174 35 35 10 419 12 161 23\n4 106 23 88 14 13 6 155 2 552 41\n5 120 52 237 3 16 3 177 3 365 21\n6 143 15 213 44 63 4 55 0 423 39\n7 124 26 178 4 11 6 361 0 269 20\n8 170 10 87 28 56 25 295 1 291 36\n9 43 26 141 1 17 15 109 3 595 50\n"
],
[
"sn.heatmap(confusion_matrix, annot=False)\r\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c52a31b629e1b314d5468b0f0e87878888486e01
| 19,527 |
ipynb
|
Jupyter Notebook
|
demo/MMSegmentation_Tutorial.ipynb
|
pprp/mmsegmentation
|
5d615401358dea2d6527a033bef505a9c7e0f034
|
[
"Apache-2.0"
] | null | null | null |
demo/MMSegmentation_Tutorial.ipynb
|
pprp/mmsegmentation
|
5d615401358dea2d6527a033bef505a9c7e0f034
|
[
"Apache-2.0"
] | null | null | null |
demo/MMSegmentation_Tutorial.ipynb
|
pprp/mmsegmentation
|
5d615401358dea2d6527a033bef505a9c7e0f034
|
[
"Apache-2.0"
] | null | null | null | 29.995392 | 533 | 0.585958 |
[
[
[
"<a href=\"https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/master/demo/MMSegmentation_Tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# MMSegmentation Tutorial\nWelcome to MMSegmentation! \n\nIn this tutorial, we demo\n* How to do inference with MMSeg trained weight\n* How to train on your own dataset and visualize the results. ",
"_____no_output_____"
],
[
"## Install MMSegmentation\nThis step may take several minutes. \n\nWe use PyTorch 1.6 and CUDA 10.1 for this tutorial. You may install other versions by change the version number in pip install command. ",
"_____no_output_____"
]
],
[
[
"# Check nvcc version\n!nvcc -V\n# Check GCC version\n!gcc --version",
"_____no_output_____"
],
[
"# Install PyTorch\n!conda install pytorch=1.6.0 torchvision cudatoolkit=10.1 -c pytorch\n# Install MMCV\n!pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6/index.html",
"_____no_output_____"
],
[
"!rm -rf mmsegmentation\n!git clone https://github.com/open-mmlab/mmsegmentation.git \n%cd mmsegmentation\n!pip install -e .",
"_____no_output_____"
],
[
"# Check Pytorch installation\nimport torch, torchvision\nprint(torch.__version__, torch.cuda.is_available())\n\n# Check MMSegmentation installation\nimport mmseg\nprint(mmseg.__version__)",
"_____no_output_____"
]
],
[
[
"## Run Inference with MMSeg trained weight",
"_____no_output_____"
]
],
[
[
"!mkdir checkpoints\n!wget https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth -P checkpoints",
"_____no_output_____"
],
[
"from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot\nfrom mmseg.core.evaluation import get_palette",
"_____no_output_____"
],
[
"config_file = '../configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'\ncheckpoint_file = '../checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'",
"_____no_output_____"
],
[
"# build the model from a config file and a checkpoint file\nmodel = init_segmentor(config_file, checkpoint_file, device='cuda:0')",
"_____no_output_____"
],
[
"# test a single image\nimg = './demo.png'\nresult = inference_segmentor(model, img)",
"_____no_output_____"
],
[
"# show the results\nshow_result_pyplot(model, img, result, get_palette('cityscapes'))",
"_____no_output_____"
]
],
[
[
"## Train a semantic segmentation model on a new dataset\n\nTo train on a customized dataset, the following steps are necessary. \n1. Add a new dataset class. \n2. Create a config file accordingly. \n3. Perform training and evaluation. ",
"_____no_output_____"
],
[
"### Add a new dataset\n\nDatasets in MMSegmentation require image and semantic segmentation maps to be placed in folders with the same prefix. To support a new dataset, we may need to modify the original file structure. \n\nIn this tutorial, we give an example of converting the dataset. You may refer to [docs](https://github.com/open-mmlab/mmsegmentation/docs/en/tutorials/new_dataset.md) for details about dataset reorganization. \n\nWe use [Stanford Background Dataset](http://dags.stanford.edu/projects/scenedataset.html) as an example. The dataset contains 715 images chosen from existing public datasets [LabelMe](http://labelme.csail.mit.edu), [MSRC](http://research.microsoft.com/en-us/projects/objectclassrecognition), [PASCAL VOC](http://pascallin.ecs.soton.ac.uk/challenges/VOC) and [Geometric Context](http://www.cs.illinois.edu/homes/dhoiem/). Images from these datasets are mainly outdoor scenes, each containing approximately 320-by-240 pixels. \nIn this tutorial, we use the region annotations as labels. There are 8 classes in total, i.e. sky, tree, road, grass, water, building, mountain, and foreground object. ",
"_____no_output_____"
]
],
[
[
"# download and unzip\n!wget http://dags.stanford.edu/data/iccv09Data.tar.gz -O stanford_background.tar.gz\n!tar xf stanford_background.tar.gz",
"_____no_output_____"
],
[
"# Let's take a look at the dataset\nimport mmcv\nimport matplotlib.pyplot as plt\n\nimg = mmcv.imread('iccv09Data/images/6000124.jpg')\nplt.figure(figsize=(8, 6))\nplt.imshow(mmcv.bgr2rgb(img))\nplt.show()",
"_____no_output_____"
]
],
[
[
"We need to convert the annotation into semantic map format as an image.",
"_____no_output_____"
]
],
[
[
"import os.path as osp\nimport numpy as np\nfrom PIL import Image\n# convert dataset annotation to semantic segmentation map\ndata_root = 'iccv09Data'\nimg_dir = 'images'\nann_dir = 'labels'\n# define class and plaette for better visualization\nclasses = ('sky', 'tree', 'road', 'grass', 'water', 'bldg', 'mntn', 'fg obj')\npalette = [[128, 128, 128], [129, 127, 38], [120, 69, 125], [53, 125, 34], \n [0, 11, 123], [118, 20, 12], [122, 81, 25], [241, 134, 51]]\nfor file in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.regions.txt'):\n seg_map = np.loadtxt(osp.join(data_root, ann_dir, file)).astype(np.uint8)\n seg_img = Image.fromarray(seg_map).convert('P')\n seg_img.putpalette(np.array(palette, dtype=np.uint8))\n seg_img.save(osp.join(data_root, ann_dir, file.replace('.regions.txt', \n '.png')))",
"_____no_output_____"
],
[
"# Let's take a look at the segmentation map we got\nimport matplotlib.patches as mpatches\nimg = Image.open('iccv09Data/labels/6000124.png')\nplt.figure(figsize=(8, 6))\nim = plt.imshow(np.array(img.convert('RGB')))\n\n# create a patch (proxy artist) for every color \npatches = [mpatches.Patch(color=np.array(palette[i])/255., \n label=classes[i]) for i in range(8)]\n# put those patched as legend-handles into the legend\nplt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., \n fontsize='large')\n\nplt.show()",
"_____no_output_____"
],
[
"# split train/val set randomly\nsplit_dir = 'splits'\nmmcv.mkdir_or_exist(osp.join(data_root, split_dir))\nfilename_list = [osp.splitext(filename)[0] for filename in mmcv.scandir(\n osp.join(data_root, ann_dir), suffix='.png')]\nwith open(osp.join(data_root, split_dir, 'train.txt'), 'w') as f:\n # select first 4/5 as train set\n train_length = int(len(filename_list)*4/5)\n f.writelines(line + '\\n' for line in filename_list[:train_length])\nwith open(osp.join(data_root, split_dir, 'val.txt'), 'w') as f:\n # select last 1/5 as train set\n f.writelines(line + '\\n' for line in filename_list[train_length:])",
"_____no_output_____"
]
],
[
[
"After downloading the data, we need to implement `load_annotations` function in the new dataset class `StanfordBackgroundDataset`.",
"_____no_output_____"
]
],
[
[
"from mmseg.datasets.builder import DATASETS\nfrom mmseg.datasets.custom import CustomDataset\n\[email protected]_module()\nclass StanfordBackgroundDataset(CustomDataset):\n CLASSES = classes\n PALETTE = palette\n def __init__(self, split, **kwargs):\n super().__init__(img_suffix='.jpg', seg_map_suffix='.png', \n split=split, **kwargs)\n assert osp.exists(self.img_dir) and self.split is not None\n\n ",
"_____no_output_____"
]
],
[
[
"### Create a config file\nIn the next step, we need to modify the config for the training. To accelerate the process, we finetune the model from trained weights.",
"_____no_output_____"
]
],
[
[
"from mmcv import Config\ncfg = Config.fromfile('../configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py')",
"_____no_output_____"
]
],
[
[
"Since the given config is used to train PSPNet on the cityscapes dataset, we need to modify it accordingly for our new dataset. ",
"_____no_output_____"
]
],
[
[
"from mmseg.apis import set_random_seed\n\n# Since we use only one GPU, BN is used instead of SyncBN\ncfg.norm_cfg = dict(type='BN', requires_grad=True)\ncfg.model.backbone.norm_cfg = cfg.norm_cfg\ncfg.model.decode_head.norm_cfg = cfg.norm_cfg\ncfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg\n# modify num classes of the model in decode/auxiliary head\ncfg.model.decode_head.num_classes = 8\ncfg.model.auxiliary_head.num_classes = 8\n\n# Modify dataset type and path\ncfg.dataset_type = 'StanfordBackgroundDataset'\ncfg.data_root = data_root\n\ncfg.data.samples_per_gpu = 8\ncfg.data.workers_per_gpu=8\n\ncfg.img_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncfg.crop_size = (256, 256)\ncfg.train_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(320, 240), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(type='Normalize', **cfg.img_norm_cfg),\n dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_semantic_seg']),\n]\n\ncfg.test_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(320, 240),\n # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **cfg.img_norm_cfg),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\n\n\ncfg.data.train.type = cfg.dataset_type\ncfg.data.train.data_root = cfg.data_root\ncfg.data.train.img_dir = img_dir\ncfg.data.train.ann_dir = ann_dir\ncfg.data.train.pipeline = cfg.train_pipeline\ncfg.data.train.split = 'splits/train.txt'\n\ncfg.data.val.type = cfg.dataset_type\ncfg.data.val.data_root = cfg.data_root\ncfg.data.val.img_dir = img_dir\ncfg.data.val.ann_dir = ann_dir\ncfg.data.val.pipeline = cfg.test_pipeline\ncfg.data.val.split = 'splits/val.txt'\n\ncfg.data.test.type = cfg.dataset_type\ncfg.data.test.data_root = cfg.data_root\ncfg.data.test.img_dir = img_dir\ncfg.data.test.ann_dir = ann_dir\ncfg.data.test.pipeline = cfg.test_pipeline\ncfg.data.test.split = 'splits/val.txt'\n\n# We can still use the pre-trained Mask RCNN model though we do not need to\n# use the mask branch\ncfg.load_from = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'\n\n# Set up working dir to save files and logs.\ncfg.work_dir = './work_dirs/tutorial'\n\ncfg.runner.max_iters = 200\ncfg.log_config.interval = 10\ncfg.evaluation.interval = 200\ncfg.checkpoint_config.interval = 200\n\n# Set seed to facitate reproducing the result\ncfg.seed = 0\nset_random_seed(0, deterministic=False)\ncfg.gpu_ids = range(1)\n\n# Let's have a look at the final config used for training\nprint(f'Config:\\n{cfg.pretty_text}')",
"_____no_output_____"
]
],
[
[
"### Train and Evaluation",
"_____no_output_____"
]
],
[
[
"from mmseg.datasets import build_dataset\nfrom mmseg.models import build_segmentor\nfrom mmseg.apis import train_segmentor\n\n\n# Build the dataset\ndatasets = [build_dataset(cfg.data.train)]\n\n# Build the detector\nmodel = build_segmentor(\n cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))\n# Add an attribute for visualization convenience\nmodel.CLASSES = datasets[0].CLASSES\n\n# Create work_dir\nmmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))\ntrain_segmentor(model, datasets, cfg, distributed=False, validate=True, \n meta=dict())",
"_____no_output_____"
]
],
[
[
"Inference with trained model",
"_____no_output_____"
]
],
[
[
"img = mmcv.imread('iccv09Data/images/6000124.jpg')\n\nmodel.cfg = cfg\nresult = inference_segmentor(model, img)\nplt.figure(figsize=(8, 6))\nshow_result_pyplot(model, img, result, palette)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52a347e2ddcfdd5e7df8eac8645b522eb93c5f6
| 296,005 |
ipynb
|
Jupyter Notebook
|
tensorflow/day2/answer/A_01_08_tensor_cross_entropy.ipynb
|
daludaluking/LG_AI_all_in_one-
|
e0855af811deb1e5cf1695430bd52a8eb3d48827
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/day2/answer/A_01_08_tensor_cross_entropy.ipynb
|
daludaluking/LG_AI_all_in_one-
|
e0855af811deb1e5cf1695430bd52a8eb3d48827
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/day2/answer/A_01_08_tensor_cross_entropy.ipynb
|
daludaluking/LG_AI_all_in_one-
|
e0855af811deb1e5cf1695430bd52a8eb3d48827
|
[
"Apache-2.0"
] | null | null | null | 296,005 | 296,005 | 0.480269 |
[
[
[
"\nimport tensorflow as tf\n\n## data 선언\nx_data = [[5.], [30.], [95.], [100.], [265.], [270.], [290.], [300.],[365.]]\ny_data = [[0.], [0.], [0.], [0.], [1.], [1.], [1.], [1.], [1.]]\ntest_data= [[7.]]\ntest_data2= [[80.]]\ntest_data3= [[110.]]\ntest_data4= [[180.]]\ntest_data5= [[320.]]\n\n",
"_____no_output_____"
],
[
"## tf.keras를 활용한 perceptron 모델 구현.\nmodel = tf.keras.Sequential() ## 모델 선언\nmodel.add(tf.keras.layers.Dense(1, input_dim=1,activation='sigmoid')) # 선언된 모델에 add를통해 쌓아감.은닉층\nmodel.summary()\n\n",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 1) 2 \n=================================================================\nTotal params: 2\nTrainable params: 2\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"# 모델 loss, 학습 방법 결정하기\noptimizer=tf.keras.optimizers.SGD(learning_rate=0.007) ### 경사 하강법으로 global min 에 찾아가는 최적화 방법 선언.\nloss=tf.keras.losses.binary_crossentropy\nmetrics=tf.keras.metrics.binary_accuracy\n\n# 모델 컴파일하기 - 모델 및 loss 등 구조화한 모델을 컴퓨터가 동작 할수 있도록 변환\nmodel.compile(loss=loss, optimizer=optimizer, metrics=[metrics])\n\n# 모델 동작하기\nmodel.fit(x_data, y_data, epochs=5000, batch_size=9)\n\n",
"\u001b[1;30;43m스트리밍 출력 내용이 길어서 마지막 5000줄이 삭제되었습니다.\u001b[0m\nEpoch 2501/5000\n1/1 [==============================] - 0s 16ms/step - loss: 9.2028 - binary_accuracy: 0.6667\nEpoch 2502/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.8193 - binary_accuracy: 0.6667\nEpoch 2503/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.7258 - binary_accuracy: 0.7778\nEpoch 2504/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.8534 - binary_accuracy: 0.4444\nEpoch 2505/5000\n1/1 [==============================] - 0s 13ms/step - loss: 25.8692 - binary_accuracy: 0.5556\nEpoch 2506/5000\n1/1 [==============================] - 0s 12ms/step - loss: 21.3653 - binary_accuracy: 0.6667\nEpoch 2507/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.9034 - binary_accuracy: 0.6667\nEpoch 2508/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.4794 - binary_accuracy: 0.6667\nEpoch 2509/5000\n1/1 [==============================] - 0s 13ms/step - loss: 8.0803 - binary_accuracy: 0.6667\nEpoch 2510/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7137 - binary_accuracy: 0.6667\nEpoch 2511/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.0797 - binary_accuracy: 1.0000\nEpoch 2512/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.1360 - binary_accuracy: 0.4444\nEpoch 2513/5000\n1/1 [==============================] - 0s 10ms/step - loss: 27.3746 - binary_accuracy: 0.5556\nEpoch 2514/5000\n1/1 [==============================] - 0s 12ms/step - loss: 22.8587 - binary_accuracy: 0.5556\nEpoch 2515/5000\n1/1 [==============================] - 0s 5ms/step - loss: 18.3832 - binary_accuracy: 0.6667\nEpoch 2516/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.9484 - binary_accuracy: 0.6667\nEpoch 2517/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.5427 - binary_accuracy: 0.6667\nEpoch 2518/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.1570 - binary_accuracy: 0.6667\nEpoch 2519/5000\n1/1 [==============================] - 0s 6ms/step - loss: 1.0123 - binary_accuracy: 0.7778\nEpoch 2520/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.5150 - binary_accuracy: 0.4444\nEpoch 2521/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.0660 - binary_accuracy: 0.5556\nEpoch 2522/5000\n1/1 [==============================] - 0s 16ms/step - loss: 21.5614 - binary_accuracy: 0.6667\nEpoch 2523/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.0988 - binary_accuracy: 0.6667\nEpoch 2524/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.6743 - binary_accuracy: 0.6667\nEpoch 2525/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.2748 - binary_accuracy: 0.6667\nEpoch 2526/5000\n1/1 [==============================] - 0s 5ms/step - loss: 3.9044 - binary_accuracy: 0.6667\nEpoch 2527/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.1356 - binary_accuracy: 1.0000\nEpoch 2528/5000\n1/1 [==============================] - 0s 14ms/step - loss: 6.3201 - binary_accuracy: 0.4444\nEpoch 2529/5000\n1/1 [==============================] - 0s 6ms/step - loss: 27.0061 - binary_accuracy: 0.5556\nEpoch 2530/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.4942 - binary_accuracy: 0.5556\nEpoch 2531/5000\n1/1 [==============================] - 0s 6ms/step - loss: 18.0233 - binary_accuracy: 0.6667\nEpoch 2532/5000\n1/1 [==============================] 
- 0s 6ms/step - loss: 13.5922 - binary_accuracy: 0.6667\nEpoch 2533/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.1889 - binary_accuracy: 0.6667\nEpoch 2534/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.8063 - binary_accuracy: 0.6667\nEpoch 2535/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.7188 - binary_accuracy: 0.7778\nEpoch 2536/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.8017 - binary_accuracy: 0.4444\nEpoch 2537/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.8607 - binary_accuracy: 0.5556\nEpoch 2538/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.3590 - binary_accuracy: 0.6667\nEpoch 2539/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8994 - binary_accuracy: 0.6667\nEpoch 2540/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.4771 - binary_accuracy: 0.6667\nEpoch 2541/5000\n1/1 [==============================] - 0s 5ms/step - loss: 8.0789 - binary_accuracy: 0.6667\nEpoch 2542/5000\n1/1 [==============================] - 0s 5ms/step - loss: 3.7133 - binary_accuracy: 0.6667\nEpoch 2543/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.0808 - binary_accuracy: 1.0000\nEpoch 2544/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.2096 - binary_accuracy: 0.4444\nEpoch 2545/5000\n1/1 [==============================] - 0s 8ms/step - loss: 27.3618 - binary_accuracy: 0.5556\nEpoch 2546/5000\n1/1 [==============================] - 0s 8ms/step - loss: 22.8479 - binary_accuracy: 0.5556\nEpoch 2547/5000\n1/1 [==============================] - 0s 7ms/step - loss: 18.3748 - binary_accuracy: 0.6667\nEpoch 2548/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.9419 - binary_accuracy: 0.6667\nEpoch 2549/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.5374 - binary_accuracy: 0.6667\nEpoch 2550/5000\n1/1 [==============================] - 0s 5ms/step - loss: 5.1524 - binary_accuracy: 0.6667\nEpoch 2551/5000\n1/1 [==============================] - 0s 6ms/step - loss: 1.0118 - binary_accuracy: 0.7778\nEpoch 2552/5000\n1/1 [==============================] - 0s 13ms/step - loss: 12.4404 - binary_accuracy: 0.4444\nEpoch 2553/5000\n1/1 [==============================] - 0s 16ms/step - loss: 26.0611 - binary_accuracy: 0.5556\nEpoch 2554/5000\n1/1 [==============================] - 0s 13ms/step - loss: 21.5586 - binary_accuracy: 0.6667\nEpoch 2555/5000\n1/1 [==============================] - 0s 11ms/step - loss: 17.0982 - binary_accuracy: 0.6667\nEpoch 2556/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.6754 - binary_accuracy: 0.6667\nEpoch 2557/5000\n1/1 [==============================] - 0s 16ms/step - loss: 8.2768 - binary_accuracy: 0.6667\nEpoch 2558/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.9073 - binary_accuracy: 0.6667\nEpoch 2559/5000\n1/1 [==============================] - 0s 17ms/step - loss: 0.1387 - binary_accuracy: 1.0000\nEpoch 2560/5000\n1/1 [==============================] - 0s 6ms/step - loss: 6.4165 - binary_accuracy: 0.4444\nEpoch 2561/5000\n1/1 [==============================] - 0s 12ms/step - loss: 26.9754 - binary_accuracy: 0.5556\nEpoch 2562/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.4656 - binary_accuracy: 0.5556\nEpoch 2563/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.9972 - binary_accuracy: 0.6667\nEpoch 2564/5000\n1/1 [==============================] - 0s 
11ms/step - loss: 13.5682 - binary_accuracy: 0.6667\nEpoch 2565/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.1660 - binary_accuracy: 0.6667\nEpoch 2566/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.7843 - binary_accuracy: 0.6667\nEpoch 2567/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.7044 - binary_accuracy: 0.7778\nEpoch 2568/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.7682 - binary_accuracy: 0.4444\nEpoch 2569/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.8496 - binary_accuracy: 0.5556\nEpoch 2570/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.3501 - binary_accuracy: 0.6667\nEpoch 2571/5000\n1/1 [==============================] - 0s 12ms/step - loss: 16.8928 - binary_accuracy: 0.6667\nEpoch 2572/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.4721 - binary_accuracy: 0.6667\nEpoch 2573/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.0748 - binary_accuracy: 0.6667\nEpoch 2574/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.7103 - binary_accuracy: 0.6667\nEpoch 2575/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.0813 - binary_accuracy: 1.0000\nEpoch 2576/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.2351 - binary_accuracy: 0.4444\nEpoch 2577/5000\n1/1 [==============================] - 0s 6ms/step - loss: 27.3469 - binary_accuracy: 0.5556\nEpoch 2578/5000\n1/1 [==============================] - 0s 21ms/step - loss: 22.8350 - binary_accuracy: 0.5556\nEpoch 2579/5000\n1/1 [==============================] - 0s 10ms/step - loss: 18.3642 - binary_accuracy: 0.6667\nEpoch 2580/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.9332 - binary_accuracy: 0.6667\nEpoch 2581/5000\n1/1 [==============================] - 0s 11ms/step - loss: 9.5299 - binary_accuracy: 0.6667\nEpoch 2582/5000\n1/1 [==============================] - 0s 10ms/step - loss: 5.1456 - binary_accuracy: 0.6667\nEpoch 2583/5000\n1/1 [==============================] - 0s 9ms/step - loss: 1.0095 - binary_accuracy: 0.7778\nEpoch 2584/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.3772 - binary_accuracy: 0.4444\nEpoch 2585/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.0545 - binary_accuracy: 0.5556\nEpoch 2586/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.5542 - binary_accuracy: 0.6667\nEpoch 2587/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.0961 - binary_accuracy: 0.6667\nEpoch 2588/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6748 - binary_accuracy: 0.6667\nEpoch 2589/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.2772 - binary_accuracy: 0.6667\nEpoch 2590/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.9086 - binary_accuracy: 0.6667\nEpoch 2591/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.1412 - binary_accuracy: 1.0000\nEpoch 2592/5000\n1/1 [==============================] - 0s 11ms/step - loss: 6.4843 - binary_accuracy: 0.4444\nEpoch 2593/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.9490 - binary_accuracy: 0.5556\nEpoch 2594/5000\n1/1 [==============================] - 0s 7ms/step - loss: 22.4414 - binary_accuracy: 0.5556\nEpoch 2595/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.9755 - binary_accuracy: 0.6667\nEpoch 2596/5000\n1/1 [==============================] - 0s 
10ms/step - loss: 13.5483 - binary_accuracy: 0.6667\nEpoch 2597/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.1473 - binary_accuracy: 0.6667\nEpoch 2598/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.7665 - binary_accuracy: 0.6667\nEpoch 2599/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.6936 - binary_accuracy: 0.7778\nEpoch 2600/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.7242 - binary_accuracy: 0.4444\nEpoch 2601/5000\n1/1 [==============================] - 0s 11ms/step - loss: 25.8403 - binary_accuracy: 0.5556\nEpoch 2602/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.3429 - binary_accuracy: 0.6667\nEpoch 2603/5000\n1/1 [==============================] - 0s 10ms/step - loss: 16.8878 - binary_accuracy: 0.6667\nEpoch 2604/5000\n1/1 [==============================] - 0s 11ms/step - loss: 12.4687 - binary_accuracy: 0.6667\nEpoch 2605/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0723 - binary_accuracy: 0.6667\nEpoch 2606/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7089 - binary_accuracy: 0.6667\nEpoch 2607/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.0822 - binary_accuracy: 1.0000\nEpoch 2608/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.2800 - binary_accuracy: 0.4444\nEpoch 2609/5000\n1/1 [==============================] - 0s 10ms/step - loss: 27.3319 - binary_accuracy: 0.5556\nEpoch 2610/5000\n1/1 [==============================] - 0s 12ms/step - loss: 22.8221 - binary_accuracy: 0.5556\nEpoch 2611/5000\n1/1 [==============================] - 0s 9ms/step - loss: 18.3536 - binary_accuracy: 0.6667\nEpoch 2612/5000\n1/1 [==============================] - 0s 9ms/step - loss: 13.9245 - binary_accuracy: 0.6667\nEpoch 2613/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.5223 - binary_accuracy: 0.6667\nEpoch 2614/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.1387 - binary_accuracy: 0.6667\nEpoch 2615/5000\n1/1 [==============================] - 0s 6ms/step - loss: 1.0070 - binary_accuracy: 0.7778\nEpoch 2616/5000\n1/1 [==============================] - 0s 14ms/step - loss: 12.3152 - binary_accuracy: 0.4444\nEpoch 2617/5000\n1/1 [==============================] - 0s 15ms/step - loss: 26.0479 - binary_accuracy: 0.5556\nEpoch 2618/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.5497 - binary_accuracy: 0.6667\nEpoch 2619/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.0938 - binary_accuracy: 0.6667\nEpoch 2620/5000\n1/1 [==============================] - 0s 13ms/step - loss: 12.6741 - binary_accuracy: 0.6667\nEpoch 2621/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.2774 - binary_accuracy: 0.6667\nEpoch 2622/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.9097 - binary_accuracy: 0.6667\nEpoch 2623/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.1438 - binary_accuracy: 1.0000\nEpoch 2624/5000\n1/1 [==============================] - 0s 6ms/step - loss: 6.5488 - binary_accuracy: 0.4444\nEpoch 2625/5000\n1/1 [==============================] - 0s 8ms/step - loss: 26.9232 - binary_accuracy: 0.5556\nEpoch 2626/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.4178 - binary_accuracy: 0.5556\nEpoch 2627/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.9543 - binary_accuracy: 0.6667\nEpoch 2628/5000\n1/1 [==============================] - 0s 
6ms/step - loss: 13.5290 - binary_accuracy: 0.6667\nEpoch 2629/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.1291 - binary_accuracy: 0.6667\nEpoch 2630/5000\n1/1 [==============================] - 0s 7ms/step - loss: 4.7491 - binary_accuracy: 0.6667\nEpoch 2631/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6831 - binary_accuracy: 0.7778\nEpoch 2632/5000\n1/1 [==============================] - 0s 12ms/step - loss: 13.6778 - binary_accuracy: 0.4444\nEpoch 2633/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.8313 - binary_accuracy: 0.5556\nEpoch 2634/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.3361 - binary_accuracy: 0.6667\nEpoch 2635/5000\n1/1 [==============================] - 0s 5ms/step - loss: 16.8832 - binary_accuracy: 0.6667\nEpoch 2636/5000\n1/1 [==============================] - 0s 11ms/step - loss: 12.4657 - binary_accuracy: 0.6667\nEpoch 2637/5000\n1/1 [==============================] - 0s 11ms/step - loss: 8.0702 - binary_accuracy: 0.6667\nEpoch 2638/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7078 - binary_accuracy: 0.6667\nEpoch 2639/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.0832 - binary_accuracy: 1.0000\nEpoch 2640/5000\n1/1 [==============================] - 0s 15ms/step - loss: 3.3258 - binary_accuracy: 0.4444\nEpoch 2641/5000\n1/1 [==============================] - 0s 19ms/step - loss: 27.3164 - binary_accuracy: 0.5556\nEpoch 2642/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.8086 - binary_accuracy: 0.5556\nEpoch 2643/5000\n1/1 [==============================] - 0s 11ms/step - loss: 18.3425 - binary_accuracy: 0.6667\nEpoch 2644/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.9152 - binary_accuracy: 0.6667\nEpoch 2645/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.5141 - binary_accuracy: 0.6667\nEpoch 2646/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.1312 - binary_accuracy: 0.6667\nEpoch 2647/5000\n1/1 [==============================] - 0s 6ms/step - loss: 1.0041 - binary_accuracy: 0.7778\nEpoch 2648/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.2568 - binary_accuracy: 0.4444\nEpoch 2649/5000\n1/1 [==============================] - 0s 7ms/step - loss: 26.0409 - binary_accuracy: 0.5556\nEpoch 2650/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.5448 - binary_accuracy: 0.6667\nEpoch 2651/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.0910 - binary_accuracy: 0.6667\nEpoch 2652/5000\n1/1 [==============================] - 0s 15ms/step - loss: 12.6729 - binary_accuracy: 0.6667\nEpoch 2653/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.2771 - binary_accuracy: 0.6667\nEpoch 2654/5000\n1/1 [==============================] - 0s 13ms/step - loss: 3.9102 - binary_accuracy: 0.6667\nEpoch 2655/5000\n1/1 [==============================] - 0s 14ms/step - loss: 0.1461 - binary_accuracy: 1.0000\nEpoch 2656/5000\n1/1 [==============================] - 0s 8ms/step - loss: 6.6042 - binary_accuracy: 0.4444\nEpoch 2657/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.8988 - binary_accuracy: 0.5556\nEpoch 2658/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.3956 - binary_accuracy: 0.6667\nEpoch 2659/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.9345 - binary_accuracy: 0.6667\nEpoch 2660/5000\n1/1 [==============================] - 0s 
6ms/step - loss: 13.5110 - binary_accuracy: 0.6667\nEpoch 2661/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.1121 - binary_accuracy: 0.6667\nEpoch 2662/5000\n1/1 [==============================] - 0s 13ms/step - loss: 4.7330 - binary_accuracy: 0.6667\nEpoch 2663/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6738 - binary_accuracy: 0.7778\nEpoch 2664/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.6279 - binary_accuracy: 0.4444\nEpoch 2665/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.8230 - binary_accuracy: 0.5556\nEpoch 2666/5000\n1/1 [==============================] - 0s 7ms/step - loss: 21.3300 - binary_accuracy: 0.6667\nEpoch 2667/5000\n1/1 [==============================] - 0s 11ms/step - loss: 16.8793 - binary_accuracy: 0.6667\nEpoch 2668/5000\n1/1 [==============================] - 0s 12ms/step - loss: 12.4633 - binary_accuracy: 0.6667\nEpoch 2669/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0686 - binary_accuracy: 0.6667\nEpoch 2670/5000\n1/1 [==============================] - 0s 17ms/step - loss: 3.7072 - binary_accuracy: 0.6667\nEpoch 2671/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.0844 - binary_accuracy: 1.0000\nEpoch 2672/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.3755 - binary_accuracy: 0.4444\nEpoch 2673/5000\n1/1 [==============================] - 0s 15ms/step - loss: 27.3003 - binary_accuracy: 0.5556\nEpoch 2674/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.7947 - binary_accuracy: 0.5556\nEpoch 2675/5000\n1/1 [==============================] - 0s 6ms/step - loss: 18.3309 - binary_accuracy: 0.6667\nEpoch 2676/5000\n1/1 [==============================] - 0s 9ms/step - loss: 13.9054 - binary_accuracy: 0.6667\nEpoch 2677/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.5053 - binary_accuracy: 0.6667\nEpoch 2678/5000\n1/1 [==============================] - 0s 7ms/step - loss: 5.1231 - binary_accuracy: 0.6667\nEpoch 2679/5000\n1/1 [==============================] - 0s 7ms/step - loss: 1.0007 - binary_accuracy: 0.7778\nEpoch 2680/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.2021 - binary_accuracy: 0.4444\nEpoch 2681/5000\n1/1 [==============================] - 0s 5ms/step - loss: 26.0333 - binary_accuracy: 0.5556\nEpoch 2682/5000\n1/1 [==============================] - 0s 11ms/step - loss: 21.5394 - binary_accuracy: 0.6667\nEpoch 2683/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.0878 - binary_accuracy: 0.6667\nEpoch 2684/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6712 - binary_accuracy: 0.6667\nEpoch 2685/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.2762 - binary_accuracy: 0.6667\nEpoch 2686/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.9103 - binary_accuracy: 0.6667\nEpoch 2687/5000\n1/1 [==============================] - 0s 14ms/step - loss: 0.1484 - binary_accuracy: 0.8889\nEpoch 2688/5000\n1/1 [==============================] - 0s 9ms/step - loss: 6.6504 - binary_accuracy: 0.4444\nEpoch 2689/5000\n1/1 [==============================] - 0s 21ms/step - loss: 26.8759 - binary_accuracy: 0.5556\nEpoch 2690/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.3750 - binary_accuracy: 0.6667\nEpoch 2691/5000\n1/1 [==============================] - 0s 17ms/step - loss: 17.9162 - binary_accuracy: 0.6667\nEpoch 2692/5000\n1/1 [==============================] - 0s 
9ms/step - loss: 13.4945 - binary_accuracy: 0.6667\nEpoch 2693/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.0966 - binary_accuracy: 0.6667\nEpoch 2694/5000\n1/1 [==============================] - 0s 7ms/step - loss: 4.7182 - binary_accuracy: 0.6667\nEpoch 2695/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6656 - binary_accuracy: 0.7778\nEpoch 2696/5000\n1/1 [==============================] - 0s 13ms/step - loss: 13.5750 - binary_accuracy: 0.4444\nEpoch 2697/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.8153 - binary_accuracy: 0.5556\nEpoch 2698/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.3245 - binary_accuracy: 0.6667\nEpoch 2699/5000\n1/1 [==============================] - 0s 10ms/step - loss: 16.8759 - binary_accuracy: 0.6667\nEpoch 2700/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.4613 - binary_accuracy: 0.6667\nEpoch 2701/5000\n1/1 [==============================] - 0s 11ms/step - loss: 8.0674 - binary_accuracy: 0.6667\nEpoch 2702/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.7071 - binary_accuracy: 0.6667\nEpoch 2703/5000\n1/1 [==============================] - 0s 16ms/step - loss: 0.0858 - binary_accuracy: 1.0000\nEpoch 2704/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.4287 - binary_accuracy: 0.4444\nEpoch 2705/5000\n1/1 [==============================] - 0s 9ms/step - loss: 27.2836 - binary_accuracy: 0.5556\nEpoch 2706/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.7801 - binary_accuracy: 0.6667\nEpoch 2707/5000\n1/1 [==============================] - 0s 12ms/step - loss: 18.3186 - binary_accuracy: 0.6667\nEpoch 2708/5000\n1/1 [==============================] - 0s 26ms/step - loss: 13.8949 - binary_accuracy: 0.6667\nEpoch 2709/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.4958 - binary_accuracy: 0.6667\nEpoch 2710/5000\n1/1 [==============================] - 0s 8ms/step - loss: 5.1142 - binary_accuracy: 0.6667\nEpoch 2711/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.9966 - binary_accuracy: 0.7778\nEpoch 2712/5000\n1/1 [==============================] - 0s 13ms/step - loss: 12.1515 - binary_accuracy: 0.4444\nEpoch 2713/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.0252 - binary_accuracy: 0.5556\nEpoch 2714/5000\n1/1 [==============================] - 0s 15ms/step - loss: 21.5335 - binary_accuracy: 0.6667\nEpoch 2715/5000\n1/1 [==============================] - 0s 14ms/step - loss: 17.0841 - binary_accuracy: 0.6667\nEpoch 2716/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6690 - binary_accuracy: 0.6667\nEpoch 2717/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.2747 - binary_accuracy: 0.6667\nEpoch 2718/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.9097 - binary_accuracy: 0.6667\nEpoch 2719/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.1504 - binary_accuracy: 0.8889\nEpoch 2720/5000\n1/1 [==============================] - 0s 6ms/step - loss: 6.6868 - binary_accuracy: 0.4444\nEpoch 2721/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.8546 - binary_accuracy: 0.5556\nEpoch 2722/5000\n1/1 [==============================] - 0s 7ms/step - loss: 22.3559 - binary_accuracy: 0.6667\nEpoch 2723/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.8994 - binary_accuracy: 0.6667\nEpoch 2724/5000\n1/1 [==============================] - 0s 
15ms/step - loss: 13.4794 - binary_accuracy: 0.6667\nEpoch 2725/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.0824 - binary_accuracy: 0.6667\nEpoch 2726/5000\n1/1 [==============================] - 0s 14ms/step - loss: 4.7048 - binary_accuracy: 0.6667\nEpoch 2727/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.6586 - binary_accuracy: 0.7778\nEpoch 2728/5000\n1/1 [==============================] - 0s 15ms/step - loss: 13.5195 - binary_accuracy: 0.4444\nEpoch 2729/5000\n1/1 [==============================] - 0s 4ms/step - loss: 25.8081 - binary_accuracy: 0.5556\nEpoch 2730/5000\n1/1 [==============================] - 0s 5ms/step - loss: 21.3195 - binary_accuracy: 0.6667\nEpoch 2731/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8730 - binary_accuracy: 0.6667\nEpoch 2732/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4598 - binary_accuracy: 0.6667\nEpoch 2733/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0667 - binary_accuracy: 0.6667\nEpoch 2734/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.7073 - binary_accuracy: 0.6667\nEpoch 2735/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.0873 - binary_accuracy: 1.0000\nEpoch 2736/5000\n1/1 [==============================] - 0s 11ms/step - loss: 3.4846 - binary_accuracy: 0.4444\nEpoch 2737/5000\n1/1 [==============================] - 0s 6ms/step - loss: 27.2662 - binary_accuracy: 0.5556\nEpoch 2738/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.7649 - binary_accuracy: 0.6667\nEpoch 2739/5000\n1/1 [==============================] - 0s 6ms/step - loss: 18.3057 - binary_accuracy: 0.6667\nEpoch 2740/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.8837 - binary_accuracy: 0.6667\nEpoch 2741/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.4856 - binary_accuracy: 0.6667\nEpoch 2742/5000\n1/1 [==============================] - 0s 9ms/step - loss: 5.1046 - binary_accuracy: 0.6667\nEpoch 2743/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.9920 - binary_accuracy: 0.7778\nEpoch 2744/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.1052 - binary_accuracy: 0.4444\nEpoch 2745/5000\n1/1 [==============================] - 0s 8ms/step - loss: 26.0166 - binary_accuracy: 0.5556\nEpoch 2746/5000\n1/1 [==============================] - 0s 13ms/step - loss: 21.5271 - binary_accuracy: 0.6667\nEpoch 2747/5000\n1/1 [==============================] - 0s 8ms/step - loss: 17.0798 - binary_accuracy: 0.6667\nEpoch 2748/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.6661 - binary_accuracy: 0.6667\nEpoch 2749/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.2726 - binary_accuracy: 0.6667\nEpoch 2750/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.9085 - binary_accuracy: 0.6667\nEpoch 2751/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1522 - binary_accuracy: 0.8889\nEpoch 2752/5000\n1/1 [==============================] - 0s 6ms/step - loss: 6.7131 - binary_accuracy: 0.4444\nEpoch 2753/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.8349 - binary_accuracy: 0.5556\nEpoch 2754/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.3384 - binary_accuracy: 0.6667\nEpoch 2755/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.8842 - binary_accuracy: 0.6667\nEpoch 2756/5000\n1/1 [==============================] - 0s 
7ms/step - loss: 13.4659 - binary_accuracy: 0.6667\nEpoch 2757/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.0698 - binary_accuracy: 0.6667\nEpoch 2758/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.6929 - binary_accuracy: 0.6667\nEpoch 2759/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.6528 - binary_accuracy: 0.7778\nEpoch 2760/5000\n1/1 [==============================] - 0s 8ms/step - loss: 13.4619 - binary_accuracy: 0.4444\nEpoch 2761/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.8013 - binary_accuracy: 0.5556\nEpoch 2762/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.3149 - binary_accuracy: 0.6667\nEpoch 2763/5000\n1/1 [==============================] - 0s 8ms/step - loss: 16.8704 - binary_accuracy: 0.6667\nEpoch 2764/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4587 - binary_accuracy: 0.6667\nEpoch 2765/5000\n1/1 [==============================] - 0s 13ms/step - loss: 8.0662 - binary_accuracy: 0.6667\nEpoch 2766/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.7078 - binary_accuracy: 0.6667\nEpoch 2767/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.0889 - binary_accuracy: 1.0000\nEpoch 2768/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.5432 - binary_accuracy: 0.4444\nEpoch 2769/5000\n1/1 [==============================] - 0s 7ms/step - loss: 27.2481 - binary_accuracy: 0.5556\nEpoch 2770/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.7490 - binary_accuracy: 0.6667\nEpoch 2771/5000\n1/1 [==============================] - 0s 7ms/step - loss: 18.2921 - binary_accuracy: 0.6667\nEpoch 2772/5000\n1/1 [==============================] - 0s 17ms/step - loss: 13.8717 - binary_accuracy: 0.6667\nEpoch 2773/5000\n1/1 [==============================] - 0s 13ms/step - loss: 9.4746 - binary_accuracy: 0.6667\nEpoch 2774/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.0943 - binary_accuracy: 0.6667\nEpoch 2775/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.9867 - binary_accuracy: 0.7778\nEpoch 2776/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.0630 - binary_accuracy: 0.4444\nEpoch 2777/5000\n1/1 [==============================] - 0s 13ms/step - loss: 26.0074 - binary_accuracy: 0.5556\nEpoch 2778/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.5202 - binary_accuracy: 0.6667\nEpoch 2779/5000\n1/1 [==============================] - 0s 12ms/step - loss: 17.0749 - binary_accuracy: 0.6667\nEpoch 2780/5000\n1/1 [==============================] - 0s 13ms/step - loss: 12.6626 - binary_accuracy: 0.6667\nEpoch 2781/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.2699 - binary_accuracy: 0.6667\nEpoch 2782/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.9067 - binary_accuracy: 0.6667\nEpoch 2783/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.1538 - binary_accuracy: 0.8889\nEpoch 2784/5000\n1/1 [==============================] - 0s 6ms/step - loss: 6.7292 - binary_accuracy: 0.4444\nEpoch 2785/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.8168 - binary_accuracy: 0.5556\nEpoch 2786/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.3225 - binary_accuracy: 0.6667\nEpoch 2787/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.8706 - binary_accuracy: 0.6667\nEpoch 2788/5000\n1/1 [==============================] - 0s 
6ms/step - loss: 13.4538 - binary_accuracy: 0.6667\nEpoch 2789/5000\n1/1 [==============================] - 0s 5ms/step - loss: 9.0586 - binary_accuracy: 0.6667\nEpoch 2790/5000\n1/1 [==============================] - 0s 5ms/step - loss: 4.6825 - binary_accuracy: 0.6667\nEpoch 2791/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.6483 - binary_accuracy: 0.7778\nEpoch 2792/5000\n1/1 [==============================] - 0s 10ms/step - loss: 13.4025 - binary_accuracy: 0.4444\nEpoch 2793/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.7948 - binary_accuracy: 0.5556\nEpoch 2794/5000\n1/1 [==============================] - 0s 24ms/step - loss: 21.3107 - binary_accuracy: 0.6667\nEpoch 2795/5000\n1/1 [==============================] - 0s 16ms/step - loss: 16.8682 - binary_accuracy: 0.6667\nEpoch 2796/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4578 - binary_accuracy: 0.6667\nEpoch 2797/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.0661 - binary_accuracy: 0.6667\nEpoch 2798/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7086 - binary_accuracy: 0.6667\nEpoch 2799/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.0906 - binary_accuracy: 1.0000\nEpoch 2800/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.6037 - binary_accuracy: 0.4444\nEpoch 2801/5000\n1/1 [==============================] - 0s 11ms/step - loss: 27.2294 - binary_accuracy: 0.5556\nEpoch 2802/5000\n1/1 [==============================] - 0s 11ms/step - loss: 22.7324 - binary_accuracy: 0.6667\nEpoch 2803/5000\n1/1 [==============================] - 0s 11ms/step - loss: 18.2778 - binary_accuracy: 0.6667\nEpoch 2804/5000\n1/1 [==============================] - 0s 20ms/step - loss: 13.8591 - binary_accuracy: 0.6667\nEpoch 2805/5000\n1/1 [==============================] - 0s 12ms/step - loss: 9.4628 - binary_accuracy: 0.6667\nEpoch 2806/5000\n1/1 [==============================] - 0s 12ms/step - loss: 5.0832 - binary_accuracy: 0.6667\nEpoch 2807/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.9809 - binary_accuracy: 0.7778\nEpoch 2808/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.0249 - binary_accuracy: 0.4444\nEpoch 2809/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.9977 - binary_accuracy: 0.5556\nEpoch 2810/5000\n1/1 [==============================] - 0s 5ms/step - loss: 21.5127 - binary_accuracy: 0.6667\nEpoch 2811/5000\n1/1 [==============================] - 0s 19ms/step - loss: 17.0695 - binary_accuracy: 0.6667\nEpoch 2812/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.6586 - binary_accuracy: 0.6667\nEpoch 2813/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.2666 - binary_accuracy: 0.6667\nEpoch 2814/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.9043 - binary_accuracy: 0.6667\nEpoch 2815/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.1552 - binary_accuracy: 0.8889\nEpoch 2816/5000\n1/1 [==============================] - 0s 9ms/step - loss: 6.7359 - binary_accuracy: 0.4444\nEpoch 2817/5000\n1/1 [==============================] - 0s 11ms/step - loss: 26.8002 - binary_accuracy: 0.5556\nEpoch 2818/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.3082 - binary_accuracy: 0.6667\nEpoch 2819/5000\n1/1 [==============================] - 0s 10ms/step - loss: 17.8585 - binary_accuracy: 0.6667\nEpoch 2820/5000\n1/1 [==============================] - 0s 
13ms/step - loss: 13.4432 - binary_accuracy: 0.6667\nEpoch 2821/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.0488 - binary_accuracy: 0.6667\nEpoch 2822/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.6734 - binary_accuracy: 0.6667\nEpoch 2823/5000\n1/1 [==============================] - 0s 16ms/step - loss: 0.6448 - binary_accuracy: 0.7778\nEpoch 2824/5000\n1/1 [==============================] - 0s 11ms/step - loss: 13.3417 - binary_accuracy: 0.4444\nEpoch 2825/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.7887 - binary_accuracy: 0.5556\nEpoch 2826/5000\n1/1 [==============================] - 0s 8ms/step - loss: 21.3067 - binary_accuracy: 0.6667\nEpoch 2827/5000\n1/1 [==============================] - 0s 8ms/step - loss: 16.8663 - binary_accuracy: 0.6667\nEpoch 2828/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4571 - binary_accuracy: 0.6667\nEpoch 2829/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0661 - binary_accuracy: 0.6667\nEpoch 2830/5000\n1/1 [==============================] - 0s 11ms/step - loss: 3.7096 - binary_accuracy: 0.6667\nEpoch 2831/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.0925 - binary_accuracy: 1.0000\nEpoch 2832/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.6659 - binary_accuracy: 0.4444\nEpoch 2833/5000\n1/1 [==============================] - 0s 6ms/step - loss: 27.2100 - binary_accuracy: 0.5556\nEpoch 2834/5000\n1/1 [==============================] - 0s 5ms/step - loss: 22.7153 - binary_accuracy: 0.6667\nEpoch 2835/5000\n1/1 [==============================] - 0s 6ms/step - loss: 18.2629 - binary_accuracy: 0.6667\nEpoch 2836/5000\n1/1 [==============================] - 0s 8ms/step - loss: 13.8458 - binary_accuracy: 0.6667\nEpoch 2837/5000\n1/1 [==============================] - 0s 5ms/step - loss: 9.4504 - binary_accuracy: 0.6667\nEpoch 2838/5000\n1/1 [==============================] - 0s 8ms/step - loss: 5.0714 - binary_accuracy: 0.6667\nEpoch 2839/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.9744 - binary_accuracy: 0.7778\nEpoch 2840/5000\n1/1 [==============================] - 0s 7ms/step - loss: 11.9906 - binary_accuracy: 0.4444\nEpoch 2841/5000\n1/1 [==============================] - 0s 4ms/step - loss: 25.9875 - binary_accuracy: 0.5556\nEpoch 2842/5000\n1/1 [==============================] - 0s 13ms/step - loss: 21.5048 - binary_accuracy: 0.6667\nEpoch 2843/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.0636 - binary_accuracy: 0.6667\nEpoch 2844/5000\n1/1 [==============================] - 0s 12ms/step - loss: 12.6540 - binary_accuracy: 0.6667\nEpoch 2845/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.2627 - binary_accuracy: 0.6667\nEpoch 2846/5000\n1/1 [==============================] - 0s 17ms/step - loss: 3.9014 - binary_accuracy: 0.6667\nEpoch 2847/5000\n1/1 [==============================] - 0s 23ms/step - loss: 0.1564 - binary_accuracy: 0.8889\nEpoch 2848/5000\n1/1 [==============================] - 0s 16ms/step - loss: 6.7337 - binary_accuracy: 0.4444\nEpoch 2849/5000\n1/1 [==============================] - 0s 7ms/step - loss: 26.7851 - binary_accuracy: 0.5556\nEpoch 2850/5000\n1/1 [==============================] - 0s 10ms/step - loss: 22.2953 - binary_accuracy: 0.6667\nEpoch 2851/5000\n1/1 [==============================] - 0s 12ms/step - loss: 17.8478 - binary_accuracy: 0.6667\nEpoch 2852/5000\n1/1 [==============================] - 0s 
6ms/step - loss: 13.4340 - binary_accuracy: 0.6667\nEpoch 2853/5000\n1/1 [==============================] - 0s 8ms/step - loss: 9.0404 - binary_accuracy: 0.6667\nEpoch 2854/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.6656 - binary_accuracy: 0.6667\nEpoch 2855/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.6424 - binary_accuracy: 0.7778\nEpoch 2856/5000\n1/1 [==============================] - 0s 17ms/step - loss: 13.2800 - binary_accuracy: 0.4444\nEpoch 2857/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.7828 - binary_accuracy: 0.5556\nEpoch 2858/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.3031 - binary_accuracy: 0.6667\nEpoch 2859/5000\n1/1 [==============================] - 0s 26ms/step - loss: 16.8646 - binary_accuracy: 0.6667\nEpoch 2860/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4567 - binary_accuracy: 0.6667\nEpoch 2861/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0662 - binary_accuracy: 0.6667\nEpoch 2862/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7107 - binary_accuracy: 0.6667\nEpoch 2863/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.0944 - binary_accuracy: 1.0000\nEpoch 2864/5000\n1/1 [==============================] - 0s 4ms/step - loss: 3.7294 - binary_accuracy: 0.4444\nEpoch 2865/5000\n1/1 [==============================] - 0s 12ms/step - loss: 27.1900 - binary_accuracy: 0.5556\nEpoch 2866/5000\n1/1 [==============================] - 0s 11ms/step - loss: 22.6975 - binary_accuracy: 0.6667\nEpoch 2867/5000\n1/1 [==============================] - 0s 6ms/step - loss: 18.2474 - binary_accuracy: 0.6667\nEpoch 2868/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.8318 - binary_accuracy: 0.6667\nEpoch 2869/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.4373 - binary_accuracy: 0.6667\nEpoch 2870/5000\n1/1 [==============================] - 0s 5ms/step - loss: 5.0589 - binary_accuracy: 0.6667\nEpoch 2871/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.9675 - binary_accuracy: 0.7778\nEpoch 2872/5000\n1/1 [==============================] - 0s 13ms/step - loss: 11.9599 - binary_accuracy: 0.4444\nEpoch 2873/5000\n1/1 [==============================] - 0s 17ms/step - loss: 25.9769 - binary_accuracy: 0.5556\nEpoch 2874/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.4964 - binary_accuracy: 0.6667\nEpoch 2875/5000\n1/1 [==============================] - 0s 20ms/step - loss: 17.0573 - binary_accuracy: 0.6667\nEpoch 2876/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6489 - binary_accuracy: 0.6667\nEpoch 2877/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.2583 - binary_accuracy: 0.6667\nEpoch 2878/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.8979 - binary_accuracy: 0.6667\nEpoch 2879/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.1575 - binary_accuracy: 0.8889\nEpoch 2880/5000\n1/1 [==============================] - 0s 10ms/step - loss: 6.7233 - binary_accuracy: 0.4444\nEpoch 2881/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.7712 - binary_accuracy: 0.5556\nEpoch 2882/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.2838 - binary_accuracy: 0.6667\nEpoch 2883/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.8383 - binary_accuracy: 0.6667\nEpoch 2884/5000\n1/1 [==============================] - 0s 7ms/step 
- loss: 13.4259 - binary_accuracy: 0.6667\nEpoch 2885/5000\n1/1 [==============================] - 0s 16ms/step - loss: 9.0331 - binary_accuracy: 0.6667\nEpoch 2886/5000\n1/1 [==============================] - 0s 13ms/step - loss: 4.6590 - binary_accuracy: 0.6667\nEpoch 2887/5000\n1/1 [==============================] - 0s 26ms/step - loss: 0.6409 - binary_accuracy: 0.7778\nEpoch 2888/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.2174 - binary_accuracy: 0.4444\nEpoch 2889/5000\n1/1 [==============================] - 0s 14ms/step - loss: 25.7771 - binary_accuracy: 0.5556\nEpoch 2890/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2996 - binary_accuracy: 0.6667\nEpoch 2891/5000\n1/1 [==============================] - 0s 7ms/step - loss: 16.8630 - binary_accuracy: 0.6667\nEpoch 2892/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4563 - binary_accuracy: 0.6667\nEpoch 2893/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0665 - binary_accuracy: 0.6667\nEpoch 2894/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7120 - binary_accuracy: 0.6667\nEpoch 2895/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.0964 - binary_accuracy: 1.0000\nEpoch 2896/5000\n1/1 [==============================] - 0s 11ms/step - loss: 3.7942 - binary_accuracy: 0.4444\nEpoch 2897/5000\n1/1 [==============================] - 0s 9ms/step - loss: 27.1694 - binary_accuracy: 0.5556\nEpoch 2898/5000\n1/1 [==============================] - 0s 8ms/step - loss: 22.6793 - binary_accuracy: 0.6667\nEpoch 2899/5000\n1/1 [==============================] - 0s 9ms/step - loss: 18.2314 - binary_accuracy: 0.6667\nEpoch 2900/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.8173 - binary_accuracy: 0.6667\nEpoch 2901/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.4235 - binary_accuracy: 0.6667\nEpoch 2902/5000\n1/1 [==============================] - 0s 14ms/step - loss: 5.0458 - binary_accuracy: 0.6667\nEpoch 2903/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.9600 - binary_accuracy: 0.7778\nEpoch 2904/5000\n1/1 [==============================] - 0s 5ms/step - loss: 11.9325 - binary_accuracy: 0.4444\nEpoch 2905/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.9659 - binary_accuracy: 0.5556\nEpoch 2906/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.4876 - binary_accuracy: 0.6667\nEpoch 2907/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.0505 - binary_accuracy: 0.6667\nEpoch 2908/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6434 - binary_accuracy: 0.6667\nEpoch 2909/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.2533 - binary_accuracy: 0.6667\nEpoch 2910/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.8940 - binary_accuracy: 0.6667\nEpoch 2911/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1583 - binary_accuracy: 0.8889\nEpoch 2912/5000\n1/1 [==============================] - 0s 12ms/step - loss: 6.7057 - binary_accuracy: 0.4444\nEpoch 2913/5000\n1/1 [==============================] - 0s 8ms/step - loss: 26.7586 - binary_accuracy: 0.5556\nEpoch 2914/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.2734 - binary_accuracy: 0.6667\nEpoch 2915/5000\n1/1 [==============================] - 0s 13ms/step - loss: 17.8300 - binary_accuracy: 0.6667\nEpoch 2916/5000\n1/1 [==============================] - 0s 14ms/step - 
loss: 13.4190 - binary_accuracy: 0.6667
[... Keras per-epoch training log elided (epochs 2917-3523 of 5000): every epoch prints one "1/1 [==============================] - 0s Nms/step - loss: ... - binary_accuracy: ..." line, and the run repeats the same cycle throughout, with loss oscillating between roughly 0.1 and 27 and binary_accuracy fluctuating between 0.4444 and 1.0000 (multiples of 1/9, i.e. 9 training samples), so the loss is cycling rather than converging ...]
Epoch 3524/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.3915 - 
binary_accuracy: 0.6667\nEpoch 3525/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.0084 - binary_accuracy: 0.6667\nEpoch 3526/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.6458 - binary_accuracy: 0.6667\nEpoch 3527/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.7098 - binary_accuracy: 0.7778\nEpoch 3528/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.8493 - binary_accuracy: 0.4444\nEpoch 3529/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.7002 - binary_accuracy: 0.6667\nEpoch 3530/5000\n1/1 [==============================] - 0s 12ms/step - loss: 21.2639 - binary_accuracy: 0.6667\nEpoch 3531/5000\n1/1 [==============================] - 0s 7ms/step - loss: 16.8574 - binary_accuracy: 0.6667\nEpoch 3532/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.4675 - binary_accuracy: 0.6667\nEpoch 3533/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0859 - binary_accuracy: 0.6667\nEpoch 3534/5000\n1/1 [==============================] - 0s 5ms/step - loss: 3.7513 - binary_accuracy: 0.6667\nEpoch 3535/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.1527 - binary_accuracy: 0.8889\nEpoch 3536/5000\n1/1 [==============================] - 0s 13ms/step - loss: 5.4429 - binary_accuracy: 0.4444\nEpoch 3537/5000\n1/1 [==============================] - 0s 19ms/step - loss: 26.6678 - binary_accuracy: 0.6667\nEpoch 3538/5000\n1/1 [==============================] - 0s 7ms/step - loss: 22.2238 - binary_accuracy: 0.6667\nEpoch 3539/5000\n1/1 [==============================] - 0s 12ms/step - loss: 17.8123 - binary_accuracy: 0.6667\nEpoch 3540/5000\n1/1 [==============================] - 0s 14ms/step - loss: 13.4199 - binary_accuracy: 0.6667\nEpoch 3541/5000\n1/1 [==============================] - 0s 11ms/step - loss: 9.0369 - binary_accuracy: 0.6667\nEpoch 3542/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.6740 - binary_accuracy: 0.6667\nEpoch 3543/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.7340 - binary_accuracy: 0.7778\nEpoch 3544/5000\n1/1 [==============================] - 0s 10ms/step - loss: 11.7425 - binary_accuracy: 0.4444\nEpoch 3545/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.7098 - binary_accuracy: 0.6667\nEpoch 3546/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2743 - binary_accuracy: 0.6667\nEpoch 3547/5000\n1/1 [==============================] - 0s 12ms/step - loss: 16.8683 - binary_accuracy: 0.6667\nEpoch 3548/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4786 - binary_accuracy: 0.6667\nEpoch 3549/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0971 - binary_accuracy: 0.6667\nEpoch 3550/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7626 - binary_accuracy: 0.6667\nEpoch 3551/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.1583 - binary_accuracy: 0.8889\nEpoch 3552/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.6497 - binary_accuracy: 0.4444\nEpoch 3553/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.6303 - binary_accuracy: 0.6667\nEpoch 3554/5000\n1/1 [==============================] - 0s 7ms/step - loss: 22.1876 - binary_accuracy: 0.6667\nEpoch 3555/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.7769 - binary_accuracy: 0.6667\nEpoch 3556/5000\n1/1 [==============================] - 0s 11ms/step - loss: 13.3850 - 
binary_accuracy: 0.6667\nEpoch 3557/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.0022 - binary_accuracy: 0.6667\nEpoch 3558/5000\n1/1 [==============================] - 0s 25ms/step - loss: 4.6402 - binary_accuracy: 0.6667\nEpoch 3559/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.7096 - binary_accuracy: 0.7778\nEpoch 3560/5000\n1/1 [==============================] - 0s 9ms/step - loss: 11.7915 - binary_accuracy: 0.4444\nEpoch 3561/5000\n1/1 [==============================] - 0s 11ms/step - loss: 25.6956 - binary_accuracy: 0.6667\nEpoch 3562/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.2612 - binary_accuracy: 0.6667\nEpoch 3563/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8557 - binary_accuracy: 0.6667\nEpoch 3564/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4664 - binary_accuracy: 0.6667\nEpoch 3565/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.0850 - binary_accuracy: 0.6667\nEpoch 3566/5000\n1/1 [==============================] - 0s 13ms/step - loss: 3.7517 - binary_accuracy: 0.6667\nEpoch 3567/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.1555 - binary_accuracy: 0.8889\nEpoch 3568/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.5058 - binary_accuracy: 0.4444\nEpoch 3569/5000\n1/1 [==============================] - 0s 19ms/step - loss: 26.6450 - binary_accuracy: 0.6667\nEpoch 3570/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.2030 - binary_accuracy: 0.6667\nEpoch 3571/5000\n1/1 [==============================] - 0s 8ms/step - loss: 17.7929 - binary_accuracy: 0.6667\nEpoch 3572/5000\n1/1 [==============================] - 0s 11ms/step - loss: 13.4012 - binary_accuracy: 0.6667\nEpoch 3573/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.0186 - binary_accuracy: 0.6667\nEpoch 3574/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.6566 - binary_accuracy: 0.6667\nEpoch 3575/5000\n1/1 [==============================] - 0s 5ms/step - loss: 0.7245 - binary_accuracy: 0.7778\nEpoch 3576/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.7159 - binary_accuracy: 0.4444\nEpoch 3577/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.7004 - binary_accuracy: 0.6667\nEpoch 3578/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2668 - binary_accuracy: 0.6667\nEpoch 3579/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8619 - binary_accuracy: 0.6667\nEpoch 3580/5000\n1/1 [==============================] - 0s 12ms/step - loss: 12.4728 - binary_accuracy: 0.6667\nEpoch 3581/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0916 - binary_accuracy: 0.6667\nEpoch 3582/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7585 - binary_accuracy: 0.6667\nEpoch 3583/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.1594 - binary_accuracy: 0.8889\nEpoch 3584/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.6418 - binary_accuracy: 0.4444\nEpoch 3585/5000\n1/1 [==============================] - 0s 12ms/step - loss: 26.6180 - binary_accuracy: 0.6667\nEpoch 3586/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.1773 - binary_accuracy: 0.6667\nEpoch 3587/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.7679 - binary_accuracy: 0.6667\nEpoch 3588/5000\n1/1 [==============================] - 0s 8ms/step - loss: 13.3766 - 
binary_accuracy: 0.6667\nEpoch 3589/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.9941 - binary_accuracy: 0.6667\nEpoch 3590/5000\n1/1 [==============================] - 0s 5ms/step - loss: 4.6329 - binary_accuracy: 0.6667\nEpoch 3591/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.7081 - binary_accuracy: 0.7778\nEpoch 3592/5000\n1/1 [==============================] - 0s 8ms/step - loss: 11.7380 - binary_accuracy: 0.4444\nEpoch 3593/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6904 - binary_accuracy: 0.6667\nEpoch 3594/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2577 - binary_accuracy: 0.6667\nEpoch 3595/5000\n1/1 [==============================] - 0s 11ms/step - loss: 16.8534 - binary_accuracy: 0.6667\nEpoch 3596/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4646 - binary_accuracy: 0.6667\nEpoch 3597/5000\n1/1 [==============================] - 0s 5ms/step - loss: 8.0835 - binary_accuracy: 0.6667\nEpoch 3598/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.7513 - binary_accuracy: 0.6667\nEpoch 3599/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.1580 - binary_accuracy: 0.8889\nEpoch 3600/5000\n1/1 [==============================] - 0s 16ms/step - loss: 5.5589 - binary_accuracy: 0.4444\nEpoch 3601/5000\n1/1 [==============================] - 0s 14ms/step - loss: 26.6237 - binary_accuracy: 0.6667\nEpoch 3602/5000\n1/1 [==============================] - 0s 10ms/step - loss: 22.1838 - binary_accuracy: 0.6667\nEpoch 3603/5000\n1/1 [==============================] - 0s 15ms/step - loss: 17.7749 - binary_accuracy: 0.6667\nEpoch 3604/5000\n1/1 [==============================] - 0s 9ms/step - loss: 13.3839 - binary_accuracy: 0.6667\nEpoch 3605/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.0016 - binary_accuracy: 0.6667\nEpoch 3606/5000\n1/1 [==============================] - 0s 22ms/step - loss: 4.6405 - binary_accuracy: 0.6667\nEpoch 3607/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.7162 - binary_accuracy: 0.7778\nEpoch 3608/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.6846 - binary_accuracy: 0.4444\nEpoch 3609/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6919 - binary_accuracy: 0.6667\nEpoch 3610/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.2601 - binary_accuracy: 0.6667\nEpoch 3611/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.8562 - binary_accuracy: 0.6667\nEpoch 3612/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.4677 - binary_accuracy: 0.6667\nEpoch 3613/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0867 - binary_accuracy: 0.6667\nEpoch 3614/5000\n1/1 [==============================] - 0s 15ms/step - loss: 3.7550 - binary_accuracy: 0.6667\nEpoch 3615/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.1608 - binary_accuracy: 0.8889\nEpoch 3616/5000\n1/1 [==============================] - 0s 9ms/step - loss: 5.6449 - binary_accuracy: 0.4444\nEpoch 3617/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.6042 - binary_accuracy: 0.6667\nEpoch 3618/5000\n1/1 [==============================] - 0s 12ms/step - loss: 22.1654 - binary_accuracy: 0.6667\nEpoch 3619/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.7572 - binary_accuracy: 0.6667\nEpoch 3620/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.3665 - 
binary_accuracy: 0.6667\nEpoch 3621/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.9844 - binary_accuracy: 0.6667\nEpoch 3622/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.6239 - binary_accuracy: 0.6667\nEpoch 3623/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.7054 - binary_accuracy: 0.7778\nEpoch 3624/5000\n1/1 [==============================] - 0s 9ms/step - loss: 11.6882 - binary_accuracy: 0.4444\nEpoch 3625/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.6848 - binary_accuracy: 0.6667\nEpoch 3626/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.2538 - binary_accuracy: 0.6667\nEpoch 3627/5000\n1/1 [==============================] - 0s 10ms/step - loss: 16.8505 - binary_accuracy: 0.6667\nEpoch 3628/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.4622 - binary_accuracy: 0.6667\nEpoch 3629/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.0814 - binary_accuracy: 0.6667\nEpoch 3630/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7505 - binary_accuracy: 0.6667\nEpoch 3631/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.1605 - binary_accuracy: 0.8889\nEpoch 3632/5000\n1/1 [==============================] - 0s 9ms/step - loss: 5.6034 - binary_accuracy: 0.4444\nEpoch 3633/5000\n1/1 [==============================] - 0s 11ms/step - loss: 26.6037 - binary_accuracy: 0.6667\nEpoch 3634/5000\n1/1 [==============================] - 0s 8ms/step - loss: 22.1658 - binary_accuracy: 0.6667\nEpoch 3635/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.7582 - binary_accuracy: 0.6667\nEpoch 3636/5000\n1/1 [==============================] - 0s 10ms/step - loss: 13.3678 - binary_accuracy: 0.6667\nEpoch 3637/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.9858 - binary_accuracy: 0.6667\nEpoch 3638/5000\n1/1 [==============================] - 0s 7ms/step - loss: 4.6256 - binary_accuracy: 0.6667\nEpoch 3639/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.7088 - binary_accuracy: 0.7778\nEpoch 3640/5000\n1/1 [==============================] - 0s 5ms/step - loss: 11.6493 - binary_accuracy: 0.4444\nEpoch 3641/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.6841 - binary_accuracy: 0.6667\nEpoch 3642/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2540 - binary_accuracy: 0.6667\nEpoch 3643/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8512 - binary_accuracy: 0.6667\nEpoch 3644/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4632 - binary_accuracy: 0.6667\nEpoch 3645/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0824 - binary_accuracy: 0.6667\nEpoch 3646/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.7520 - binary_accuracy: 0.6667\nEpoch 3647/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1625 - binary_accuracy: 0.8889\nEpoch 3648/5000\n1/1 [==============================] - 0s 14ms/step - loss: 5.6571 - binary_accuracy: 0.4444\nEpoch 3649/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.5891 - binary_accuracy: 0.6667\nEpoch 3650/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.1522 - binary_accuracy: 0.6667\nEpoch 3651/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.7452 - binary_accuracy: 0.6667\nEpoch 3652/5000\n1/1 [==============================] - 0s 5ms/step - loss: 13.3551 - 
binary_accuracy: 0.6667\nEpoch 3653/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.9733 - binary_accuracy: 0.6667\nEpoch 3654/5000\n1/1 [==============================] - 0s 16ms/step - loss: 4.6136 - binary_accuracy: 0.6667\nEpoch 3655/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.7017 - binary_accuracy: 0.7778\nEpoch 3656/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.6414 - binary_accuracy: 0.4444\nEpoch 3657/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6787 - binary_accuracy: 0.6667\nEpoch 3658/5000\n1/1 [==============================] - 0s 17ms/step - loss: 21.2495 - binary_accuracy: 0.6667\nEpoch 3659/5000\n1/1 [==============================] - 0s 7ms/step - loss: 16.8472 - binary_accuracy: 0.6667\nEpoch 3660/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.4594 - binary_accuracy: 0.6667\nEpoch 3661/5000\n1/1 [==============================] - 0s 13ms/step - loss: 8.0788 - binary_accuracy: 0.6667\nEpoch 3662/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7492 - binary_accuracy: 0.6667\nEpoch 3663/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.1627 - binary_accuracy: 0.8889\nEpoch 3664/5000\n1/1 [==============================] - 0s 11ms/step - loss: 5.6415 - binary_accuracy: 0.4444\nEpoch 3665/5000\n1/1 [==============================] - 0s 7ms/step - loss: 26.5848 - binary_accuracy: 0.6667\nEpoch 3666/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.1488 - binary_accuracy: 0.6667\nEpoch 3667/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.7424 - binary_accuracy: 0.6667\nEpoch 3668/5000\n1/1 [==============================] - 0s 10ms/step - loss: 13.3526 - binary_accuracy: 0.6667\nEpoch 3669/5000\n1/1 [==============================] - 0s 5ms/step - loss: 8.9709 - binary_accuracy: 0.6667\nEpoch 3670/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.6116 - binary_accuracy: 0.6667\nEpoch 3671/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.7023 - binary_accuracy: 0.7778\nEpoch 3672/5000\n1/1 [==============================] - 0s 8ms/step - loss: 11.6111 - binary_accuracy: 0.4444\nEpoch 3673/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6767 - binary_accuracy: 0.6667\nEpoch 3674/5000\n1/1 [==============================] - 0s 17ms/step - loss: 21.2483 - binary_accuracy: 0.6667\nEpoch 3675/5000\n1/1 [==============================] - 0s 11ms/step - loss: 16.8466 - binary_accuracy: 0.6667\nEpoch 3676/5000\n1/1 [==============================] - 0s 19ms/step - loss: 12.4590 - binary_accuracy: 0.6667\nEpoch 3677/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0785 - binary_accuracy: 0.6667\nEpoch 3678/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7495 - binary_accuracy: 0.6667\nEpoch 3679/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1643 - binary_accuracy: 0.8889\nEpoch 3680/5000\n1/1 [==============================] - 0s 16ms/step - loss: 5.6757 - binary_accuracy: 0.4444\nEpoch 3681/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.5730 - binary_accuracy: 0.6667\nEpoch 3682/5000\n1/1 [==============================] - 0s 12ms/step - loss: 22.1380 - binary_accuracy: 0.6667\nEpoch 3683/5000\n1/1 [==============================] - 0s 10ms/step - loss: 17.7323 - binary_accuracy: 0.6667\nEpoch 3684/5000\n1/1 [==============================] - 0s 9ms/step - loss: 13.3428 - 
binary_accuracy: 0.6667\nEpoch 3685/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.9612 - binary_accuracy: 0.6667\nEpoch 3686/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.6024 - binary_accuracy: 0.6667\nEpoch 3687/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6973 - binary_accuracy: 0.7778\nEpoch 3688/5000\n1/1 [==============================] - 0s 11ms/step - loss: 11.5964 - binary_accuracy: 0.4444\nEpoch 3689/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.6724 - binary_accuracy: 0.6667\nEpoch 3690/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2449 - binary_accuracy: 0.6667\nEpoch 3691/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8436 - binary_accuracy: 0.6667\nEpoch 3692/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4563 - binary_accuracy: 0.6667\nEpoch 3693/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.0759 - binary_accuracy: 0.6667\nEpoch 3694/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7476 - binary_accuracy: 0.6667\nEpoch 3695/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.1649 - binary_accuracy: 0.8889\nEpoch 3696/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.6753 - binary_accuracy: 0.4444\nEpoch 3697/5000\n1/1 [==============================] - 0s 3ms/step - loss: 26.5665 - binary_accuracy: 0.6667\nEpoch 3698/5000\n1/1 [==============================] - 0s 10ms/step - loss: 22.1324 - binary_accuracy: 0.6667\nEpoch 3699/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.7272 - binary_accuracy: 0.6667\nEpoch 3700/5000\n1/1 [==============================] - 0s 8ms/step - loss: 13.3380 - binary_accuracy: 0.6667\nEpoch 3701/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.9565 - binary_accuracy: 0.6667\nEpoch 3702/5000\n1/1 [==============================] - 0s 20ms/step - loss: 4.5982 - binary_accuracy: 0.6667\nEpoch 3703/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.6962 - binary_accuracy: 0.7778\nEpoch 3704/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.5709 - binary_accuracy: 0.4444\nEpoch 3705/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6698 - binary_accuracy: 0.6667\nEpoch 3706/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2430 - binary_accuracy: 0.6667\nEpoch 3707/5000\n1/1 [==============================] - 0s 10ms/step - loss: 16.8422 - binary_accuracy: 0.6667\nEpoch 3708/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.4552 - binary_accuracy: 0.6667\nEpoch 3709/5000\n1/1 [==============================] - 0s 12ms/step - loss: 8.0749 - binary_accuracy: 0.6667\nEpoch 3710/5000\n1/1 [==============================] - 0s 12ms/step - loss: 3.7473 - binary_accuracy: 0.6667\nEpoch 3711/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.1662 - binary_accuracy: 0.8889\nEpoch 3712/5000\n1/1 [==============================] - 0s 12ms/step - loss: 5.6989 - binary_accuracy: 0.4444\nEpoch 3713/5000\n1/1 [==============================] - 0s 8ms/step - loss: 26.5564 - binary_accuracy: 0.6667\nEpoch 3714/5000\n1/1 [==============================] - 0s 8ms/step - loss: 22.1233 - binary_accuracy: 0.6667\nEpoch 3715/5000\n1/1 [==============================] - 0s 8ms/step - loss: 17.7186 - binary_accuracy: 0.6667\nEpoch 3716/5000\n1/1 [==============================] - 0s 12ms/step - loss: 13.3297 - 
binary_accuracy: 0.6667\nEpoch 3717/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.9484 - binary_accuracy: 0.6667\nEpoch 3718/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.5905 - binary_accuracy: 0.6667\nEpoch 3719/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.6925 - binary_accuracy: 0.7778\nEpoch 3720/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.5526 - binary_accuracy: 0.4444\nEpoch 3721/5000\n1/1 [==============================] - 0s 18ms/step - loss: 25.6661 - binary_accuracy: 0.6667\nEpoch 3722/5000\n1/1 [==============================] - 0s 28ms/step - loss: 21.2401 - binary_accuracy: 0.6667\nEpoch 3723/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8398 - binary_accuracy: 0.6667\nEpoch 3724/5000\n1/1 [==============================] - 0s 11ms/step - loss: 12.4530 - binary_accuracy: 0.6667\nEpoch 3725/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0728 - binary_accuracy: 0.6667\nEpoch 3726/5000\n1/1 [==============================] - 0s 13ms/step - loss: 3.7459 - binary_accuracy: 0.6667\nEpoch 3727/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.1671 - binary_accuracy: 0.8889\nEpoch 3728/5000\n1/1 [==============================] - 0s 8ms/step - loss: 5.7065 - binary_accuracy: 0.4444\nEpoch 3729/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.5487 - binary_accuracy: 0.6667\nEpoch 3730/5000\n1/1 [==============================] - 0s 12ms/step - loss: 22.1165 - binary_accuracy: 0.6667\nEpoch 3731/5000\n1/1 [==============================] - 0s 10ms/step - loss: 17.7124 - binary_accuracy: 0.6667\nEpoch 3732/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.3238 - binary_accuracy: 0.6667\nEpoch 3733/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.9426 - binary_accuracy: 0.6667\nEpoch 3734/5000\n1/1 [==============================] - 0s 20ms/step - loss: 4.5851 - binary_accuracy: 0.6667\nEpoch 3735/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.6905 - binary_accuracy: 0.7778\nEpoch 3736/5000\n1/1 [==============================] - 0s 7ms/step - loss: 11.5294 - binary_accuracy: 0.4444\nEpoch 3737/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.6631 - binary_accuracy: 0.6667\nEpoch 3738/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.2380 - binary_accuracy: 0.6667\nEpoch 3739/5000\n1/1 [==============================] - 0s 7ms/step - loss: 16.8381 - binary_accuracy: 0.6667\nEpoch 3740/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4515 - binary_accuracy: 0.6667\nEpoch 3741/5000\n1/1 [==============================] - 0s 11ms/step - loss: 8.0714 - binary_accuracy: 0.6667\nEpoch 3742/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7452 - binary_accuracy: 0.6667\nEpoch 3743/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1682 - binary_accuracy: 0.8889\nEpoch 3744/5000\n1/1 [==============================] - 0s 8ms/step - loss: 5.7250 - binary_accuracy: 0.4444\nEpoch 3745/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.5394 - binary_accuracy: 0.6667\nEpoch 3746/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.1081 - binary_accuracy: 0.6667\nEpoch 3747/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.7046 - binary_accuracy: 0.6667\nEpoch 3748/5000\n1/1 [==============================] - 0s 11ms/step - loss: 13.3162 - 
binary_accuracy: 0.6667\nEpoch 3749/5000\n1/1 [==============================] - 0s 12ms/step - loss: 8.9351 - binary_accuracy: 0.6667\nEpoch 3750/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.5782 - binary_accuracy: 0.6667\nEpoch 3751/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.6874 - binary_accuracy: 0.7778\nEpoch 3752/5000\n1/1 [==============================] - 0s 9ms/step - loss: 11.5092 - binary_accuracy: 0.4444\nEpoch 3753/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.6597 - binary_accuracy: 0.6667\nEpoch 3754/5000\n1/1 [==============================] - 0s 7ms/step - loss: 21.2354 - binary_accuracy: 0.6667\nEpoch 3755/5000\n1/1 [==============================] - 0s 12ms/step - loss: 16.8360 - binary_accuracy: 0.6667\nEpoch 3756/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.4496 - binary_accuracy: 0.6667\nEpoch 3757/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.0696 - binary_accuracy: 0.6667\nEpoch 3758/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7441 - binary_accuracy: 0.6667\nEpoch 3759/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.1692 - binary_accuracy: 0.8889\nEpoch 3760/5000\n1/1 [==============================] - 0s 9ms/step - loss: 5.7366 - binary_accuracy: 0.4444\nEpoch 3761/5000\n1/1 [==============================] - 0s 12ms/step - loss: 26.5312 - binary_accuracy: 0.6667\nEpoch 3762/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.1007 - binary_accuracy: 0.6667\nEpoch 3763/5000\n1/1 [==============================] - 0s 11ms/step - loss: 17.6978 - binary_accuracy: 0.6667\nEpoch 3764/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.3097 - binary_accuracy: 0.6667\nEpoch 3765/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.9287 - binary_accuracy: 0.6667\nEpoch 3766/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.5722 - binary_accuracy: 0.6667\nEpoch 3767/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6850 - binary_accuracy: 0.7778\nEpoch 3768/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.4870 - binary_accuracy: 0.4444\nEpoch 3769/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6566 - binary_accuracy: 0.6667\nEpoch 3770/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2331 - binary_accuracy: 0.6667\nEpoch 3771/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8341 - binary_accuracy: 0.6667\nEpoch 3772/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4480 - binary_accuracy: 0.6667\nEpoch 3773/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.0681 - binary_accuracy: 0.6667\nEpoch 3774/5000\n1/1 [==============================] - 0s 15ms/step - loss: 3.7433 - binary_accuracy: 0.6667\nEpoch 3775/5000\n1/1 [==============================] - 0s 23ms/step - loss: 0.1703 - binary_accuracy: 0.8889\nEpoch 3776/5000\n1/1 [==============================] - 0s 10ms/step - loss: 5.7528 - binary_accuracy: 0.4444\nEpoch 3777/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.5222 - binary_accuracy: 0.6667\nEpoch 3778/5000\n1/1 [==============================] - 0s 10ms/step - loss: 22.0927 - binary_accuracy: 0.6667\nEpoch 3779/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.6903 - binary_accuracy: 0.6667\nEpoch 3780/5000\n1/1 [==============================] - 0s 9ms/step - loss: 13.3024 - 
binary_accuracy: 0.6667\nEpoch 3781/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.9216 - binary_accuracy: 0.6667\nEpoch 3782/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.5656 - binary_accuracy: 0.6667\nEpoch 3783/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.6821 - binary_accuracy: 0.7778\nEpoch 3784/5000\n1/1 [==============================] - 0s 9ms/step - loss: 11.4660 - binary_accuracy: 0.4444\nEpoch 3785/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.6534 - binary_accuracy: 0.6667\nEpoch 3786/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2306 - binary_accuracy: 0.6667\nEpoch 3787/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8321 - binary_accuracy: 0.6667\nEpoch 3788/5000\n1/1 [==============================] - 0s 15ms/step - loss: 12.4462 - binary_accuracy: 0.6667\nEpoch 3789/5000\n1/1 [==============================] - 0s 16ms/step - loss: 8.0664 - binary_accuracy: 0.6667\nEpoch 3790/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7423 - binary_accuracy: 0.6667\nEpoch 3791/5000\n1/1 [==============================] - 0s 16ms/step - loss: 0.1713 - binary_accuracy: 0.8889\nEpoch 3792/5000\n1/1 [==============================] - 0s 21ms/step - loss: 5.7664 - binary_accuracy: 0.4444\nEpoch 3793/5000\n1/1 [==============================] - 0s 15ms/step - loss: 26.5137 - binary_accuracy: 0.6667\nEpoch 3794/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.0851 - binary_accuracy: 0.6667\nEpoch 3795/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.6831 - binary_accuracy: 0.6667\nEpoch 3796/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.2956 - binary_accuracy: 0.6667\nEpoch 3797/5000\n1/1 [==============================] - 0s 12ms/step - loss: 8.9148 - binary_accuracy: 0.6667\nEpoch 3798/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.5593 - binary_accuracy: 0.6667\nEpoch 3799/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6796 - binary_accuracy: 0.7778\nEpoch 3800/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.4441 - binary_accuracy: 0.4444\nEpoch 3801/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.6503 - binary_accuracy: 0.6667\nEpoch 3802/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2283 - binary_accuracy: 0.6667\nEpoch 3803/5000\n1/1 [==============================] - 0s 12ms/step - loss: 16.8302 - binary_accuracy: 0.6667\nEpoch 3804/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.4445 - binary_accuracy: 0.6667\nEpoch 3805/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0648 - binary_accuracy: 0.6667\nEpoch 3806/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7414 - binary_accuracy: 0.6667\nEpoch 3807/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1724 - binary_accuracy: 0.8889\nEpoch 3808/5000\n1/1 [==============================] - 0s 13ms/step - loss: 5.7819 - binary_accuracy: 0.4444\nEpoch 3809/5000\n1/1 [==============================] - 0s 7ms/step - loss: 26.5049 - binary_accuracy: 0.6667\nEpoch 3810/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.0772 - binary_accuracy: 0.6667\nEpoch 3811/5000\n1/1 [==============================] - 0s 15ms/step - loss: 17.6758 - binary_accuracy: 0.6667\nEpoch 3812/5000\n1/1 [==============================] - 0s 15ms/step - loss: 13.2884 - 
binary_accuracy: 0.6667\nEpoch 3813/5000\n1/1 [==============================] - 0s 17ms/step - loss: 8.9078 - binary_accuracy: 0.6667\nEpoch 3814/5000\n1/1 [==============================] - 0s 15ms/step - loss: 4.5528 - binary_accuracy: 0.6667\nEpoch 3815/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.6768 - binary_accuracy: 0.7778\nEpoch 3816/5000\n1/1 [==============================] - 0s 10ms/step - loss: 11.4225 - binary_accuracy: 0.4444\nEpoch 3817/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.6471 - binary_accuracy: 0.6667\nEpoch 3818/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2259 - binary_accuracy: 0.6667\nEpoch 3819/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.8283 - binary_accuracy: 0.6667\nEpoch 3820/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4427 - binary_accuracy: 0.6667\nEpoch 3821/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0631 - binary_accuracy: 0.6667\nEpoch 3822/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7405 - binary_accuracy: 0.6667\nEpoch 3823/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.1735 - binary_accuracy: 0.8889\nEpoch 3824/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.7965 - binary_accuracy: 0.4444\nEpoch 3825/5000\n1/1 [==============================] - 0s 13ms/step - loss: 26.4963 - binary_accuracy: 0.6667\nEpoch 3826/5000\n1/1 [==============================] - 0s 9ms/step - loss: 22.0694 - binary_accuracy: 0.6667\nEpoch 3827/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.6685 - binary_accuracy: 0.6667\nEpoch 3828/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.2814 - binary_accuracy: 0.6667\nEpoch 3829/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.9009 - binary_accuracy: 0.6667\nEpoch 3830/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.5464 - binary_accuracy: 0.6667\nEpoch 3831/5000\n1/1 [==============================] - 0s 17ms/step - loss: 0.6742 - binary_accuracy: 0.7778\nEpoch 3832/5000\n1/1 [==============================] - 0s 12ms/step - loss: 11.4006 - binary_accuracy: 0.4444\nEpoch 3833/5000\n1/1 [==============================] - 0s 15ms/step - loss: 25.6441 - binary_accuracy: 0.6667\nEpoch 3834/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2236 - binary_accuracy: 0.6667\nEpoch 3835/5000\n1/1 [==============================] - 0s 11ms/step - loss: 16.8263 - binary_accuracy: 0.6667\nEpoch 3836/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.4410 - binary_accuracy: 0.6667\nEpoch 3837/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0615 - binary_accuracy: 0.6667\nEpoch 3838/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7396 - binary_accuracy: 0.6667\nEpoch 3839/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1746 - binary_accuracy: 0.8889\nEpoch 3840/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.8118 - binary_accuracy: 0.4444\nEpoch 3841/5000\n1/1 [==============================] - 0s 12ms/step - loss: 26.4876 - binary_accuracy: 0.6667\nEpoch 3842/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.0615 - binary_accuracy: 0.6667\nEpoch 3843/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.6611 - binary_accuracy: 0.6667\nEpoch 3844/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.2743 - 
binary_accuracy: 0.6667\nEpoch 3845/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.8938 - binary_accuracy: 0.6667\nEpoch 3846/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.5399 - binary_accuracy: 0.6667\nEpoch 3847/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.6715 - binary_accuracy: 0.7778\nEpoch 3848/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.3788 - binary_accuracy: 0.4444\nEpoch 3849/5000\n1/1 [==============================] - 0s 16ms/step - loss: 25.6410 - binary_accuracy: 0.6667\nEpoch 3850/5000\n1/1 [==============================] - 0s 11ms/step - loss: 21.2213 - binary_accuracy: 0.6667\nEpoch 3851/5000\n1/1 [==============================] - 0s 21ms/step - loss: 16.8244 - binary_accuracy: 0.6667\nEpoch 3852/5000\n1/1 [==============================] - 0s 20ms/step - loss: 12.4393 - binary_accuracy: 0.6667\nEpoch 3853/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0599 - binary_accuracy: 0.6667\nEpoch 3854/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7387 - binary_accuracy: 0.6667\nEpoch 3855/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1757 - binary_accuracy: 0.8889\nEpoch 3856/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.8269 - binary_accuracy: 0.4444\nEpoch 3857/5000\n1/1 [==============================] - 0s 20ms/step - loss: 26.4789 - binary_accuracy: 0.6667\nEpoch 3858/5000\n1/1 [==============================] - 0s 6ms/step - loss: 22.0537 - binary_accuracy: 0.6667\nEpoch 3859/5000\n1/1 [==============================] - 0s 8ms/step - loss: 17.6537 - binary_accuracy: 0.6667\nEpoch 3860/5000\n1/1 [==============================] - 0s 4ms/step - loss: 13.2671 - binary_accuracy: 0.6667\nEpoch 3861/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.8868 - binary_accuracy: 0.6667\nEpoch 3862/5000\n1/1 [==============================] - 0s 4ms/step - loss: 4.5334 - binary_accuracy: 0.6667\nEpoch 3863/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.6688 - binary_accuracy: 0.7778\nEpoch 3864/5000\n1/1 [==============================] - 0s 10ms/step - loss: 11.3567 - binary_accuracy: 0.4444\nEpoch 3865/5000\n1/1 [==============================] - 0s 13ms/step - loss: 25.6380 - binary_accuracy: 0.6667\nEpoch 3866/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2190 - binary_accuracy: 0.6667\nEpoch 3867/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8226 - binary_accuracy: 0.6667\nEpoch 3868/5000\n1/1 [==============================] - 0s 11ms/step - loss: 12.4376 - binary_accuracy: 0.6667\nEpoch 3869/5000\n1/1 [==============================] - 0s 5ms/step - loss: 8.0583 - binary_accuracy: 0.6667\nEpoch 3870/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7379 - binary_accuracy: 0.6667\nEpoch 3871/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1769 - binary_accuracy: 0.7778\nEpoch 3872/5000\n1/1 [==============================] - 0s 7ms/step - loss: 5.8424 - binary_accuracy: 0.4444\nEpoch 3873/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.4702 - binary_accuracy: 0.6667\nEpoch 3874/5000\n1/1 [==============================] - 0s 7ms/step - loss: 22.0458 - binary_accuracy: 0.6667\nEpoch 3875/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.6463 - binary_accuracy: 0.6667\nEpoch 3876/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.2599 - 
binary_accuracy: 0.6667\nEpoch 3877/5000\n1/1 [==============================] - 0s 12ms/step - loss: 8.8798 - binary_accuracy: 0.6667\nEpoch 3878/5000\n1/1 [==============================] - 0s 14ms/step - loss: 4.5268 - binary_accuracy: 0.6667\nEpoch 3879/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.6661 - binary_accuracy: 0.7778\nEpoch 3880/5000\n1/1 [==============================] - 0s 9ms/step - loss: 11.3347 - binary_accuracy: 0.4444\nEpoch 3881/5000\n1/1 [==============================] - 0s 15ms/step - loss: 25.6350 - binary_accuracy: 0.6667\nEpoch 3882/5000\n1/1 [==============================] - 0s 12ms/step - loss: 21.2167 - binary_accuracy: 0.6667\nEpoch 3883/5000\n1/1 [==============================] - 0s 10ms/step - loss: 16.8207 - binary_accuracy: 0.6667\nEpoch 3884/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.4359 - binary_accuracy: 0.6667\nEpoch 3885/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0567 - binary_accuracy: 0.6667\nEpoch 3886/5000\n1/1 [==============================] - 0s 11ms/step - loss: 3.7370 - binary_accuracy: 0.6667\nEpoch 3887/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.1780 - binary_accuracy: 0.7778\nEpoch 3888/5000\n1/1 [==============================] - 0s 9ms/step - loss: 5.8579 - binary_accuracy: 0.4444\nEpoch 3889/5000\n1/1 [==============================] - 0s 9ms/step - loss: 26.4614 - binary_accuracy: 0.6667\nEpoch 3890/5000\n1/1 [==============================] - 0s 7ms/step - loss: 22.0379 - binary_accuracy: 0.6667\nEpoch 3891/5000\n1/1 [==============================] - 0s 12ms/step - loss: 17.6389 - binary_accuracy: 0.6667\nEpoch 3892/5000\n1/1 [==============================] - 0s 13ms/step - loss: 13.2527 - binary_accuracy: 0.6667\nEpoch 3893/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.8727 - binary_accuracy: 0.6667\nEpoch 3894/5000\n1/1 [==============================] - 0s 23ms/step - loss: 4.5203 - binary_accuracy: 0.6667\nEpoch 3895/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.6634 - binary_accuracy: 0.7778\nEpoch 3896/5000\n1/1 [==============================] - 0s 6ms/step - loss: 11.3125 - binary_accuracy: 0.4444\nEpoch 3897/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.6320 - binary_accuracy: 0.6667\nEpoch 3898/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2145 - binary_accuracy: 0.6667\nEpoch 3899/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.8188 - binary_accuracy: 0.6667\nEpoch 3900/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.4343 - binary_accuracy: 0.6667\nEpoch 3901/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.0551 - binary_accuracy: 0.6667\nEpoch 3902/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.7362 - binary_accuracy: 0.6667\nEpoch 3903/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.1791 - binary_accuracy: 0.7778\nEpoch 3904/5000\n1/1 [==============================] - 0s 9ms/step - loss: 5.8736 - binary_accuracy: 0.4444\nEpoch 3905/5000\n1/1 [==============================] - 0s 7ms/step - loss: 26.4527 - binary_accuracy: 0.6667\nEpoch 3906/5000\n1/1 [==============================] - 0s 10ms/step - loss: 22.0300 - binary_accuracy: 0.6667\nEpoch 3907/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.6315 - binary_accuracy: 0.6667\nEpoch 3908/5000\n1/1 [==============================] - 0s 16ms/step - loss: 13.2455 - 
binary_accuracy: 0.6667\nEpoch 3909/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.8655 - binary_accuracy: 0.6667\nEpoch 3910/5000\n1/1 [==============================] - 0s 11ms/step - loss: 4.5137 - binary_accuracy: 0.6667\nEpoch 3911/5000\n1/1 [==============================] - 0s 20ms/step - loss: 0.6607 - binary_accuracy: 0.7778\nEpoch 3912/5000\n1/1 [==============================] - 0s 7ms/step - loss: 11.2901 - binary_accuracy: 0.4444\nEpoch 3913/5000\n1/1 [==============================] - 0s 11ms/step - loss: 25.6291 - binary_accuracy: 0.6667\nEpoch 3914/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.2123 - binary_accuracy: 0.6667\nEpoch 3915/5000\n1/1 [==============================] - 0s 7ms/step - loss: 16.8170 - binary_accuracy: 0.6667\nEpoch 3916/5000\n1/1 [==============================] - 0s 13ms/step - loss: 12.4326 - binary_accuracy: 0.6667\nEpoch 3917/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0535 - binary_accuracy: 0.6667\nEpoch 3918/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7353 - binary_accuracy: 0.6667\nEpoch 3919/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1803 - binary_accuracy: 0.7778\nEpoch 3920/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.8896 - binary_accuracy: 0.4444\nEpoch 3921/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.4440 - binary_accuracy: 0.6667\nEpoch 3922/5000\n1/1 [==============================] - 0s 13ms/step - loss: 22.0220 - binary_accuracy: 0.6667\nEpoch 3923/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.6240 - binary_accuracy: 0.6667\nEpoch 3924/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.2382 - binary_accuracy: 0.6667\nEpoch 3925/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.8584 - binary_accuracy: 0.6667\nEpoch 3926/5000\n1/1 [==============================] - 0s 13ms/step - loss: 4.5071 - binary_accuracy: 0.6667\nEpoch 3927/5000\n1/1 [==============================] - 0s 21ms/step - loss: 0.6580 - binary_accuracy: 0.7778\nEpoch 3928/5000\n1/1 [==============================] - 0s 13ms/step - loss: 11.2677 - binary_accuracy: 0.4444\nEpoch 3929/5000\n1/1 [==============================] - 0s 11ms/step - loss: 25.6262 - binary_accuracy: 0.6667\nEpoch 3930/5000\n1/1 [==============================] - 0s 8ms/step - loss: 21.2101 - binary_accuracy: 0.6667\nEpoch 3931/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.8152 - binary_accuracy: 0.6667\nEpoch 3932/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.4310 - binary_accuracy: 0.6667\nEpoch 3933/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0520 - binary_accuracy: 0.6667\nEpoch 3934/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7345 - binary_accuracy: 0.6667\nEpoch 3935/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1814 - binary_accuracy: 0.7778\nEpoch 3936/5000\n1/1 [==============================] - 0s 16ms/step - loss: 5.9056 - binary_accuracy: 0.4444\nEpoch 3937/5000\n1/1 [==============================] - 0s 10ms/step - loss: 26.4352 - binary_accuracy: 0.6667\nEpoch 3938/5000\n1/1 [==============================] - 0s 10ms/step - loss: 22.0141 - binary_accuracy: 0.6667\nEpoch 3939/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.6165 - binary_accuracy: 0.6667\nEpoch 3940/5000\n1/1 [==============================] - 0s 6ms/step - loss: 13.2310 - 
binary_accuracy: 0.6667\nEpoch 3941/5000\n1/1 [==============================] - 0s 14ms/step - loss: 8.8512 - binary_accuracy: 0.6667\nEpoch 3942/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.5005 - binary_accuracy: 0.6667\nEpoch 3943/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.6553 - binary_accuracy: 0.7778\nEpoch 3944/5000\n1/1 [==============================] - 0s 9ms/step - loss: 11.2452 - binary_accuracy: 0.4444\nEpoch 3945/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.6233 - binary_accuracy: 0.6667\nEpoch 3946/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.2079 - binary_accuracy: 0.6667\nEpoch 3947/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.8134 - binary_accuracy: 0.6667\nEpoch 3948/5000\n1/1 [==============================] - 0s 14ms/step - loss: 12.4293 - binary_accuracy: 0.6667\nEpoch 3949/5000\n1/1 [==============================] - 0s 11ms/step - loss: 8.0504 - binary_accuracy: 0.6667\nEpoch 3950/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7337 - binary_accuracy: 0.6667\nEpoch 3951/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.1826 - binary_accuracy: 0.7778\nEpoch 3952/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.9219 - binary_accuracy: 0.4444\nEpoch 3953/5000\n1/1 [==============================] - 0s 10ms/step - loss: 26.4264 - binary_accuracy: 0.6667\nEpoch 3954/5000\n1/1 [==============================] - 0s 15ms/step - loss: 22.0061 - binary_accuracy: 0.6667\nEpoch 3955/5000\n1/1 [==============================] - 0s 21ms/step - loss: 17.6089 - binary_accuracy: 0.6667\nEpoch 3956/5000\n1/1 [==============================] - 0s 13ms/step - loss: 13.2236 - binary_accuracy: 0.6667\nEpoch 3957/5000\n1/1 [==============================] - 0s 19ms/step - loss: 8.8439 - binary_accuracy: 0.6667\nEpoch 3958/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.4938 - binary_accuracy: 0.6667\nEpoch 3959/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.6526 - binary_accuracy: 0.7778\nEpoch 3960/5000\n1/1 [==============================] - 0s 12ms/step - loss: 11.2225 - binary_accuracy: 0.4444\nEpoch 3961/5000\n1/1 [==============================] - 0s 12ms/step - loss: 25.6205 - binary_accuracy: 0.6667\nEpoch 3962/5000\n1/1 [==============================] - 0s 22ms/step - loss: 21.2057 - binary_accuracy: 0.6667\nEpoch 3963/5000\n1/1 [==============================] - 0s 5ms/step - loss: 16.8116 - binary_accuracy: 0.6667\nEpoch 3964/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.4277 - binary_accuracy: 0.6667\nEpoch 3965/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0489 - binary_accuracy: 0.6667\nEpoch 3966/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7330 - binary_accuracy: 0.6667\nEpoch 3967/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.1838 - binary_accuracy: 0.7778\nEpoch 3968/5000\n1/1 [==============================] - 0s 6ms/step - loss: 5.9382 - binary_accuracy: 0.4444\nEpoch 3969/5000\n1/1 [==============================] - 0s 8ms/step - loss: 26.4177 - binary_accuracy: 0.6667\nEpoch 3970/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.9981 - binary_accuracy: 0.6667\nEpoch 3971/5000\n1/1 [==============================] - 0s 10ms/step - loss: 17.6014 - binary_accuracy: 0.6667\nEpoch 3972/5000\n1/1 [==============================] - 0s 7ms/step - loss: 13.2163 - 
[... per-epoch Keras training log truncated (epochs 3973-4644 of 5000): the loss cycles repeatedly through the same pattern, from roughly 26.4 down through 21, 17, 13, 8.8, 4.5 and 0.65 to a minimum near 0.2, then jumps back up, while binary_accuracy oscillates among 0.4444, 0.6667 and 0.7778 — the optimizer is oscillating rather than converging ...]
12.8790 - binary_accuracy: 0.6667\nEpoch 4645/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.5021 - binary_accuracy: 0.6667\nEpoch 4646/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.1884 - binary_accuracy: 0.6667\nEpoch 4647/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.5360 - binary_accuracy: 0.7778\nEpoch 4648/5000\n1/1 [==============================] - 0s 11ms/step - loss: 10.0832 - binary_accuracy: 0.4444\nEpoch 4649/5000\n1/1 [==============================] - 0s 11ms/step - loss: 25.5347 - binary_accuracy: 0.6667\nEpoch 4650/5000\n1/1 [==============================] - 0s 7ms/step - loss: 21.1411 - binary_accuracy: 0.6667\nEpoch 4651/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.7575 - binary_accuracy: 0.6667\nEpoch 4652/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.3784 - binary_accuracy: 0.6667\nEpoch 4653/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.0020 - binary_accuracy: 0.6667\nEpoch 4654/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7232 - binary_accuracy: 0.7778\nEpoch 4655/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.2486 - binary_accuracy: 0.7778\nEpoch 4656/5000\n1/1 [==============================] - 0s 17ms/step - loss: 6.7943 - binary_accuracy: 0.4444\nEpoch 4657/5000\n1/1 [==============================] - 0s 6ms/step - loss: 26.0292 - binary_accuracy: 0.6667\nEpoch 4658/5000\n1/1 [==============================] - 0s 7ms/step - loss: 21.6341 - binary_accuracy: 0.6667\nEpoch 4659/5000\n1/1 [==============================] - 0s 10ms/step - loss: 17.2498 - binary_accuracy: 0.6667\nEpoch 4660/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.8704 - binary_accuracy: 0.6667\nEpoch 4661/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.4936 - binary_accuracy: 0.6667\nEpoch 4662/5000\n1/1 [==============================] - 0s 12ms/step - loss: 4.1810 - binary_accuracy: 0.6667\nEpoch 4663/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.5333 - binary_accuracy: 0.7778\nEpoch 4664/5000\n1/1 [==============================] - 0s 10ms/step - loss: 10.0516 - binary_accuracy: 0.4444\nEpoch 4665/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.5337 - binary_accuracy: 0.6667\nEpoch 4666/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.1404 - binary_accuracy: 0.6667\nEpoch 4667/5000\n1/1 [==============================] - 0s 14ms/step - loss: 16.7570 - binary_accuracy: 0.6667\nEpoch 4668/5000\n1/1 [==============================] - 0s 25ms/step - loss: 12.3779 - binary_accuracy: 0.6667\nEpoch 4669/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.0016 - binary_accuracy: 0.6667\nEpoch 4670/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7237 - binary_accuracy: 0.7778\nEpoch 4671/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.2505 - binary_accuracy: 0.7778\nEpoch 4672/5000\n1/1 [==============================] - 0s 11ms/step - loss: 6.8173 - binary_accuracy: 0.4444\nEpoch 4673/5000\n1/1 [==============================] - 0s 7ms/step - loss: 26.0199 - binary_accuracy: 0.6667\nEpoch 4674/5000\n1/1 [==============================] - 0s 12ms/step - loss: 21.6252 - binary_accuracy: 0.6667\nEpoch 4675/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.2411 - binary_accuracy: 0.6667\nEpoch 4676/5000\n1/1 [==============================] - 0s 7ms/step - loss: 
12.8618 - binary_accuracy: 0.6667\nEpoch 4677/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.4850 - binary_accuracy: 0.6667\nEpoch 4678/5000\n1/1 [==============================] - 0s 7ms/step - loss: 4.1736 - binary_accuracy: 0.6667\nEpoch 4679/5000\n1/1 [==============================] - 0s 7ms/step - loss: 0.5306 - binary_accuracy: 0.7778\nEpoch 4680/5000\n1/1 [==============================] - 0s 8ms/step - loss: 10.0198 - binary_accuracy: 0.4444\nEpoch 4681/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.5327 - binary_accuracy: 0.6667\nEpoch 4682/5000\n1/1 [==============================] - 0s 13ms/step - loss: 21.1398 - binary_accuracy: 0.6667\nEpoch 4683/5000\n1/1 [==============================] - 0s 12ms/step - loss: 16.7565 - binary_accuracy: 0.6667\nEpoch 4684/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.3775 - binary_accuracy: 0.6667\nEpoch 4685/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0012 - binary_accuracy: 0.6667\nEpoch 4686/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7243 - binary_accuracy: 0.7778\nEpoch 4687/5000\n1/1 [==============================] - 0s 23ms/step - loss: 0.2524 - binary_accuracy: 0.7778\nEpoch 4688/5000\n1/1 [==============================] - 0s 5ms/step - loss: 6.8403 - binary_accuracy: 0.4444\nEpoch 4689/5000\n1/1 [==============================] - 0s 17ms/step - loss: 26.0107 - binary_accuracy: 0.6667\nEpoch 4690/5000\n1/1 [==============================] - 0s 12ms/step - loss: 21.6163 - binary_accuracy: 0.6667\nEpoch 4691/5000\n1/1 [==============================] - 0s 12ms/step - loss: 17.2324 - binary_accuracy: 0.6667\nEpoch 4692/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.8531 - binary_accuracy: 0.6667\nEpoch 4693/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.4764 - binary_accuracy: 0.6667\nEpoch 4694/5000\n1/1 [==============================] - 0s 19ms/step - loss: 4.1662 - binary_accuracy: 0.6667\nEpoch 4695/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.5279 - binary_accuracy: 0.7778\nEpoch 4696/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.9877 - binary_accuracy: 0.4444\nEpoch 4697/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.5318 - binary_accuracy: 0.6667\nEpoch 4698/5000\n1/1 [==============================] - 0s 8ms/step - loss: 21.1392 - binary_accuracy: 0.6667\nEpoch 4699/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.7560 - binary_accuracy: 0.6667\nEpoch 4700/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.3771 - binary_accuracy: 0.6667\nEpoch 4701/5000\n1/1 [==============================] - 0s 10ms/step - loss: 8.0008 - binary_accuracy: 0.6667\nEpoch 4702/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7248 - binary_accuracy: 0.7778\nEpoch 4703/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.2544 - binary_accuracy: 0.7778\nEpoch 4704/5000\n1/1 [==============================] - 0s 18ms/step - loss: 6.8635 - binary_accuracy: 0.4444\nEpoch 4705/5000\n1/1 [==============================] - 0s 14ms/step - loss: 26.0014 - binary_accuracy: 0.6667\nEpoch 4706/5000\n1/1 [==============================] - 0s 20ms/step - loss: 21.6074 - binary_accuracy: 0.6667\nEpoch 4707/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.2236 - binary_accuracy: 0.6667\nEpoch 4708/5000\n1/1 [==============================] - 0s 6ms/step - loss: 
12.8445 - binary_accuracy: 0.6667\nEpoch 4709/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.4677 - binary_accuracy: 0.6667\nEpoch 4710/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.1587 - binary_accuracy: 0.6667\nEpoch 4711/5000\n1/1 [==============================] - 0s 20ms/step - loss: 0.5251 - binary_accuracy: 0.7778\nEpoch 4712/5000\n1/1 [==============================] - 0s 8ms/step - loss: 9.9552 - binary_accuracy: 0.4444\nEpoch 4713/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.5309 - binary_accuracy: 0.6667\nEpoch 4714/5000\n1/1 [==============================] - 0s 8ms/step - loss: 21.1386 - binary_accuracy: 0.6667\nEpoch 4715/5000\n1/1 [==============================] - 0s 12ms/step - loss: 16.7556 - binary_accuracy: 0.6667\nEpoch 4716/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.3768 - binary_accuracy: 0.6667\nEpoch 4717/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0005 - binary_accuracy: 0.6667\nEpoch 4718/5000\n1/1 [==============================] - 0s 8ms/step - loss: 3.7255 - binary_accuracy: 0.7778\nEpoch 4719/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.2564 - binary_accuracy: 0.7778\nEpoch 4720/5000\n1/1 [==============================] - 0s 12ms/step - loss: 6.8868 - binary_accuracy: 0.4444\nEpoch 4721/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.9921 - binary_accuracy: 0.6667\nEpoch 4722/5000\n1/1 [==============================] - 0s 15ms/step - loss: 21.5985 - binary_accuracy: 0.6667\nEpoch 4723/5000\n1/1 [==============================] - 0s 10ms/step - loss: 17.2149 - binary_accuracy: 0.6667\nEpoch 4724/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.8358 - binary_accuracy: 0.6667\nEpoch 4725/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.4591 - binary_accuracy: 0.6667\nEpoch 4726/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.1513 - binary_accuracy: 0.6667\nEpoch 4727/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.5224 - binary_accuracy: 0.7778\nEpoch 4728/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.9225 - binary_accuracy: 0.4444\nEpoch 4729/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.5301 - binary_accuracy: 0.6667\nEpoch 4730/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.1381 - binary_accuracy: 0.6667\nEpoch 4731/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.7553 - binary_accuracy: 0.6667\nEpoch 4732/5000\n1/1 [==============================] - 0s 26ms/step - loss: 12.3765 - binary_accuracy: 0.6667\nEpoch 4733/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.0003 - binary_accuracy: 0.6667\nEpoch 4734/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7262 - binary_accuracy: 0.7778\nEpoch 4735/5000\n1/1 [==============================] - 0s 15ms/step - loss: 0.2584 - binary_accuracy: 0.7778\nEpoch 4736/5000\n1/1 [==============================] - 0s 11ms/step - loss: 6.9101 - binary_accuracy: 0.4444\nEpoch 4737/5000\n1/1 [==============================] - 0s 26ms/step - loss: 25.9828 - binary_accuracy: 0.6667\nEpoch 4738/5000\n1/1 [==============================] - 0s 14ms/step - loss: 21.5895 - binary_accuracy: 0.6667\nEpoch 4739/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.2061 - binary_accuracy: 0.6667\nEpoch 4740/5000\n1/1 [==============================] - 0s 11ms/step - loss: 
12.8271 - binary_accuracy: 0.6667\nEpoch 4741/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.4504 - binary_accuracy: 0.6667\nEpoch 4742/5000\n1/1 [==============================] - 0s 11ms/step - loss: 4.1439 - binary_accuracy: 0.6667\nEpoch 4743/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.5197 - binary_accuracy: 0.7778\nEpoch 4744/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.8894 - binary_accuracy: 0.4444\nEpoch 4745/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.5294 - binary_accuracy: 0.6667\nEpoch 4746/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.1377 - binary_accuracy: 0.6667\nEpoch 4747/5000\n1/1 [==============================] - 0s 15ms/step - loss: 16.7549 - binary_accuracy: 0.6667\nEpoch 4748/5000\n1/1 [==============================] - 0s 12ms/step - loss: 12.3762 - binary_accuracy: 0.6667\nEpoch 4749/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.0001 - binary_accuracy: 0.6667\nEpoch 4750/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.7269 - binary_accuracy: 0.7778\nEpoch 4751/5000\n1/1 [==============================] - 0s 4ms/step - loss: 0.2605 - binary_accuracy: 0.7778\nEpoch 4752/5000\n1/1 [==============================] - 0s 13ms/step - loss: 6.9336 - binary_accuracy: 0.4444\nEpoch 4753/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.9734 - binary_accuracy: 0.6667\nEpoch 4754/5000\n1/1 [==============================] - 0s 5ms/step - loss: 21.5806 - binary_accuracy: 0.6667\nEpoch 4755/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.1973 - binary_accuracy: 0.6667\nEpoch 4756/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.8183 - binary_accuracy: 0.6667\nEpoch 4757/5000\n1/1 [==============================] - 0s 8ms/step - loss: 8.4417 - binary_accuracy: 0.6667\nEpoch 4758/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.1365 - binary_accuracy: 0.6667\nEpoch 4759/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.5169 - binary_accuracy: 0.7778\nEpoch 4760/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.8561 - binary_accuracy: 0.4444\nEpoch 4761/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.5286 - binary_accuracy: 0.6667\nEpoch 4762/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.1373 - binary_accuracy: 0.6667\nEpoch 4763/5000\n1/1 [==============================] - 0s 21ms/step - loss: 16.7547 - binary_accuracy: 0.6667\nEpoch 4764/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.3760 - binary_accuracy: 0.6667\nEpoch 4765/5000\n1/1 [==============================] - 0s 17ms/step - loss: 7.9999 - binary_accuracy: 0.6667\nEpoch 4766/5000\n1/1 [==============================] - 0s 14ms/step - loss: 3.7277 - binary_accuracy: 0.7778\nEpoch 4767/5000\n1/1 [==============================] - 0s 11ms/step - loss: 0.2626 - binary_accuracy: 0.7778\nEpoch 4768/5000\n1/1 [==============================] - 0s 7ms/step - loss: 6.9571 - binary_accuracy: 0.4444\nEpoch 4769/5000\n1/1 [==============================] - 0s 14ms/step - loss: 25.9641 - binary_accuracy: 0.6667\nEpoch 4770/5000\n1/1 [==============================] - 0s 11ms/step - loss: 21.5716 - binary_accuracy: 0.6667\nEpoch 4771/5000\n1/1 [==============================] - 0s 8ms/step - loss: 17.1885 - binary_accuracy: 0.6667\nEpoch 4772/5000\n1/1 [==============================] - 0s 6ms/step - loss: 
12.8096 - binary_accuracy: 0.6667\nEpoch 4773/5000\n1/1 [==============================] - 0s 25ms/step - loss: 8.4330 - binary_accuracy: 0.6667\nEpoch 4774/5000\n1/1 [==============================] - 0s 5ms/step - loss: 4.1291 - binary_accuracy: 0.6667\nEpoch 4775/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.5142 - binary_accuracy: 0.7778\nEpoch 4776/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.8224 - binary_accuracy: 0.4444\nEpoch 4777/5000\n1/1 [==============================] - 0s 15ms/step - loss: 25.5280 - binary_accuracy: 0.6667\nEpoch 4778/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.1369 - binary_accuracy: 0.6667\nEpoch 4779/5000\n1/1 [==============================] - 0s 15ms/step - loss: 16.7545 - binary_accuracy: 0.6667\nEpoch 4780/5000\n1/1 [==============================] - 0s 8ms/step - loss: 12.3759 - binary_accuracy: 0.6667\nEpoch 4781/5000\n1/1 [==============================] - 0s 10ms/step - loss: 7.9998 - binary_accuracy: 0.6667\nEpoch 4782/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7285 - binary_accuracy: 0.7778\nEpoch 4783/5000\n1/1 [==============================] - 0s 19ms/step - loss: 0.2647 - binary_accuracy: 0.7778\nEpoch 4784/5000\n1/1 [==============================] - 0s 9ms/step - loss: 6.9808 - binary_accuracy: 0.4444\nEpoch 4785/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.9548 - binary_accuracy: 0.6667\nEpoch 4786/5000\n1/1 [==============================] - 0s 16ms/step - loss: 21.5626 - binary_accuracy: 0.6667\nEpoch 4787/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.1797 - binary_accuracy: 0.6667\nEpoch 4788/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.8008 - binary_accuracy: 0.6667\nEpoch 4789/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.4243 - binary_accuracy: 0.6667\nEpoch 4790/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.1217 - binary_accuracy: 0.6667\nEpoch 4791/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.5114 - binary_accuracy: 0.7778\nEpoch 4792/5000\n1/1 [==============================] - 0s 16ms/step - loss: 9.7883 - binary_accuracy: 0.4444\nEpoch 4793/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.5274 - binary_accuracy: 0.6667\nEpoch 4794/5000\n1/1 [==============================] - 0s 12ms/step - loss: 21.1366 - binary_accuracy: 0.6667\nEpoch 4795/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.7543 - binary_accuracy: 0.6667\nEpoch 4796/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.3757 - binary_accuracy: 0.6667\nEpoch 4797/5000\n1/1 [==============================] - 0s 10ms/step - loss: 7.9997 - binary_accuracy: 0.6667\nEpoch 4798/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.7294 - binary_accuracy: 0.7778\nEpoch 4799/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.2668 - binary_accuracy: 0.7778\nEpoch 4800/5000\n1/1 [==============================] - 0s 9ms/step - loss: 7.0046 - binary_accuracy: 0.4444\nEpoch 4801/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.9455 - binary_accuracy: 0.6667\nEpoch 4802/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.5536 - binary_accuracy: 0.6667\nEpoch 4803/5000\n1/1 [==============================] - 0s 13ms/step - loss: 17.1708 - binary_accuracy: 0.6667\nEpoch 4804/5000\n1/1 [==============================] - 0s 9ms/step - loss: 
12.7920 - binary_accuracy: 0.6667\nEpoch 4805/5000\n1/1 [==============================] - 0s 14ms/step - loss: 8.4156 - binary_accuracy: 0.6667\nEpoch 4806/5000\n1/1 [==============================] - 0s 12ms/step - loss: 4.1142 - binary_accuracy: 0.6667\nEpoch 4807/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.5087 - binary_accuracy: 0.7778\nEpoch 4808/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.7539 - binary_accuracy: 0.4444\nEpoch 4809/5000\n1/1 [==============================] - 0s 29ms/step - loss: 25.5269 - binary_accuracy: 0.6667\nEpoch 4810/5000\n1/1 [==============================] - 0s 5ms/step - loss: 21.1364 - binary_accuracy: 0.6667\nEpoch 4811/5000\n1/1 [==============================] - 0s 14ms/step - loss: 16.7542 - binary_accuracy: 0.6667\nEpoch 4812/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.3757 - binary_accuracy: 0.6667\nEpoch 4813/5000\n1/1 [==============================] - 0s 7ms/step - loss: 7.9997 - binary_accuracy: 0.6667\nEpoch 4814/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7303 - binary_accuracy: 0.7778\nEpoch 4815/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.2690 - binary_accuracy: 0.7778\nEpoch 4816/5000\n1/1 [==============================] - 0s 6ms/step - loss: 7.0284 - binary_accuracy: 0.4444\nEpoch 4817/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.9361 - binary_accuracy: 0.6667\nEpoch 4818/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.5446 - binary_accuracy: 0.6667\nEpoch 4819/5000\n1/1 [==============================] - 0s 11ms/step - loss: 17.1619 - binary_accuracy: 0.6667\nEpoch 4820/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.7833 - binary_accuracy: 0.6667\nEpoch 4821/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.4068 - binary_accuracy: 0.6667\nEpoch 4822/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.1068 - binary_accuracy: 0.6667\nEpoch 4823/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.5059 - binary_accuracy: 0.7778\nEpoch 4824/5000\n1/1 [==============================] - 0s 11ms/step - loss: 9.7192 - binary_accuracy: 0.4444\nEpoch 4825/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.5264 - binary_accuracy: 0.6667\nEpoch 4826/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.1362 - binary_accuracy: 0.6667\nEpoch 4827/5000\n1/1 [==============================] - 0s 8ms/step - loss: 16.7541 - binary_accuracy: 0.6667\nEpoch 4828/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.3757 - binary_accuracy: 0.6667\nEpoch 4829/5000\n1/1 [==============================] - 0s 8ms/step - loss: 7.9997 - binary_accuracy: 0.6667\nEpoch 4830/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7312 - binary_accuracy: 0.7778\nEpoch 4831/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.2712 - binary_accuracy: 0.7778\nEpoch 4832/5000\n1/1 [==============================] - 0s 5ms/step - loss: 7.0524 - binary_accuracy: 0.4444\nEpoch 4833/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.9268 - binary_accuracy: 0.6667\nEpoch 4834/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.5356 - binary_accuracy: 0.6667\nEpoch 4835/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.1531 - binary_accuracy: 0.6667\nEpoch 4836/5000\n1/1 [==============================] - 0s 25ms/step - loss: 
12.7744 - binary_accuracy: 0.6667\nEpoch 4837/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.3980 - binary_accuracy: 0.6667\nEpoch 4838/5000\n1/1 [==============================] - 0s 11ms/step - loss: 4.0994 - binary_accuracy: 0.6667\nEpoch 4839/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.5032 - binary_accuracy: 0.7778\nEpoch 4840/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.6841 - binary_accuracy: 0.4444\nEpoch 4841/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.5260 - binary_accuracy: 0.6667\nEpoch 4842/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.1360 - binary_accuracy: 0.6667\nEpoch 4843/5000\n1/1 [==============================] - 0s 9ms/step - loss: 16.7541 - binary_accuracy: 0.6667\nEpoch 4844/5000\n1/1 [==============================] - 0s 15ms/step - loss: 12.3757 - binary_accuracy: 0.6667\nEpoch 4845/5000\n1/1 [==============================] - 0s 13ms/step - loss: 7.9998 - binary_accuracy: 0.6667\nEpoch 4846/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7323 - binary_accuracy: 0.7778\nEpoch 4847/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.2734 - binary_accuracy: 0.7778\nEpoch 4848/5000\n1/1 [==============================] - 0s 6ms/step - loss: 7.0764 - binary_accuracy: 0.4444\nEpoch 4849/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.9174 - binary_accuracy: 0.6667\nEpoch 4850/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.5265 - binary_accuracy: 0.6667\nEpoch 4851/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.1442 - binary_accuracy: 0.6667\nEpoch 4852/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.7656 - binary_accuracy: 0.6667\nEpoch 4853/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.3892 - binary_accuracy: 0.6667\nEpoch 4854/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.0920 - binary_accuracy: 0.6667\nEpoch 4855/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.5004 - binary_accuracy: 0.7778\nEpoch 4856/5000\n1/1 [==============================] - 0s 10ms/step - loss: 9.6486 - binary_accuracy: 0.4444\nEpoch 4857/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.5257 - binary_accuracy: 0.6667\nEpoch 4858/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.1360 - binary_accuracy: 0.6667\nEpoch 4859/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.7541 - binary_accuracy: 0.6667\nEpoch 4860/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.3758 - binary_accuracy: 0.6667\nEpoch 4861/5000\n1/1 [==============================] - 0s 9ms/step - loss: 7.9999 - binary_accuracy: 0.6667\nEpoch 4862/5000\n1/1 [==============================] - 0s 7ms/step - loss: 3.7333 - binary_accuracy: 0.7778\nEpoch 4863/5000\n1/1 [==============================] - 0s 14ms/step - loss: 0.2756 - binary_accuracy: 0.7778\nEpoch 4864/5000\n1/1 [==============================] - 0s 8ms/step - loss: 7.1004 - binary_accuracy: 0.4444\nEpoch 4865/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.9081 - binary_accuracy: 0.6667\nEpoch 4866/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.5175 - binary_accuracy: 0.6667\nEpoch 4867/5000\n1/1 [==============================] - 0s 15ms/step - loss: 17.1353 - binary_accuracy: 0.6667\nEpoch 4868/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.7568 - 
binary_accuracy: 0.6667\nEpoch 4869/5000\n1/1 [==============================] - 0s 12ms/step - loss: 8.3804 - binary_accuracy: 0.6667\nEpoch 4870/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.0846 - binary_accuracy: 0.7778\nEpoch 4871/5000\n1/1 [==============================] - 0s 15ms/step - loss: 0.4976 - binary_accuracy: 0.7778\nEpoch 4872/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.6127 - binary_accuracy: 0.4444\nEpoch 4873/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.5254 - binary_accuracy: 0.6667\nEpoch 4874/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.1359 - binary_accuracy: 0.6667\nEpoch 4875/5000\n1/1 [==============================] - 0s 10ms/step - loss: 16.7542 - binary_accuracy: 0.6667\nEpoch 4876/5000\n1/1 [==============================] - 0s 10ms/step - loss: 12.3760 - binary_accuracy: 0.6667\nEpoch 4877/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0001 - binary_accuracy: 0.6667\nEpoch 4878/5000\n1/1 [==============================] - 0s 9ms/step - loss: 3.7345 - binary_accuracy: 0.7778\nEpoch 4879/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.2779 - binary_accuracy: 0.7778\nEpoch 4880/5000\n1/1 [==============================] - 0s 6ms/step - loss: 7.1246 - binary_accuracy: 0.4444\nEpoch 4881/5000\n1/1 [==============================] - 0s 6ms/step - loss: 25.8987 - binary_accuracy: 0.6667\nEpoch 4882/5000\n1/1 [==============================] - 0s 7ms/step - loss: 21.5084 - binary_accuracy: 0.6667\nEpoch 4883/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.1263 - binary_accuracy: 0.6667\nEpoch 4884/5000\n1/1 [==============================] - 0s 12ms/step - loss: 12.7479 - binary_accuracy: 0.6667\nEpoch 4885/5000\n1/1 [==============================] - 0s 14ms/step - loss: 8.3716 - binary_accuracy: 0.6667\nEpoch 4886/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.0772 - binary_accuracy: 0.7778\nEpoch 4887/5000\n1/1 [==============================] - 0s 15ms/step - loss: 0.4948 - binary_accuracy: 0.7778\nEpoch 4888/5000\n1/1 [==============================] - 0s 6ms/step - loss: 9.5765 - binary_accuracy: 0.4444\nEpoch 4889/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.5252 - binary_accuracy: 0.6667\nEpoch 4890/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.1360 - binary_accuracy: 0.6667\nEpoch 4891/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.7544 - binary_accuracy: 0.6667\nEpoch 4892/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.3762 - binary_accuracy: 0.6667\nEpoch 4893/5000\n1/1 [==============================] - 0s 16ms/step - loss: 8.0003 - binary_accuracy: 0.6667\nEpoch 4894/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.7357 - binary_accuracy: 0.7778\nEpoch 4895/5000\n1/1 [==============================] - 0s 13ms/step - loss: 0.2803 - binary_accuracy: 0.7778\nEpoch 4896/5000\n1/1 [==============================] - 0s 6ms/step - loss: 7.1489 - binary_accuracy: 0.4444\nEpoch 4897/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.8893 - binary_accuracy: 0.6667\nEpoch 4898/5000\n1/1 [==============================] - 0s 8ms/step - loss: 21.4993 - binary_accuracy: 0.6667\nEpoch 4899/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.1174 - binary_accuracy: 0.6667\nEpoch 4900/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.7390 - 
binary_accuracy: 0.6667\nEpoch 4901/5000\n1/1 [==============================] - 0s 15ms/step - loss: 8.3628 - binary_accuracy: 0.6667\nEpoch 4902/5000\n1/1 [==============================] - 0s 10ms/step - loss: 4.0697 - binary_accuracy: 0.7778\nEpoch 4903/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.4920 - binary_accuracy: 0.7778\nEpoch 4904/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.5399 - binary_accuracy: 0.4444\nEpoch 4905/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.5250 - binary_accuracy: 0.6667\nEpoch 4906/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.1361 - binary_accuracy: 0.6667\nEpoch 4907/5000\n1/1 [==============================] - 0s 16ms/step - loss: 16.7546 - binary_accuracy: 0.6667\nEpoch 4908/5000\n1/1 [==============================] - 0s 14ms/step - loss: 12.3764 - binary_accuracy: 0.6667\nEpoch 4909/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0006 - binary_accuracy: 0.6667\nEpoch 4910/5000\n1/1 [==============================] - 0s 10ms/step - loss: 3.7369 - binary_accuracy: 0.7778\nEpoch 4911/5000\n1/1 [==============================] - 0s 8ms/step - loss: 0.2826 - binary_accuracy: 0.7778\nEpoch 4912/5000\n1/1 [==============================] - 0s 14ms/step - loss: 7.1732 - binary_accuracy: 0.4444\nEpoch 4913/5000\n1/1 [==============================] - 0s 8ms/step - loss: 25.8799 - binary_accuracy: 0.6667\nEpoch 4914/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.4903 - binary_accuracy: 0.6667\nEpoch 4915/5000\n1/1 [==============================] - 0s 7ms/step - loss: 17.1084 - binary_accuracy: 0.6667\nEpoch 4916/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.7301 - binary_accuracy: 0.6667\nEpoch 4917/5000\n1/1 [==============================] - 0s 7ms/step - loss: 8.3539 - binary_accuracy: 0.6667\nEpoch 4918/5000\n1/1 [==============================] - 0s 18ms/step - loss: 4.0623 - binary_accuracy: 0.7778\nEpoch 4919/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.4893 - binary_accuracy: 0.7778\nEpoch 4920/5000\n1/1 [==============================] - 0s 8ms/step - loss: 9.5028 - binary_accuracy: 0.4444\nEpoch 4921/5000\n1/1 [==============================] - 0s 12ms/step - loss: 25.5249 - binary_accuracy: 0.6667\nEpoch 4922/5000\n1/1 [==============================] - 0s 16ms/step - loss: 21.1362 - binary_accuracy: 0.6667\nEpoch 4923/5000\n1/1 [==============================] - 0s 6ms/step - loss: 16.7549 - binary_accuracy: 0.6667\nEpoch 4924/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.3767 - binary_accuracy: 0.6667\nEpoch 4925/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.0010 - binary_accuracy: 0.6667\nEpoch 4926/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7382 - binary_accuracy: 0.7778\nEpoch 4927/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.2850 - binary_accuracy: 0.7778\nEpoch 4928/5000\n1/1 [==============================] - 0s 7ms/step - loss: 7.1976 - binary_accuracy: 0.4444\nEpoch 4929/5000\n1/1 [==============================] - 0s 7ms/step - loss: 25.8706 - binary_accuracy: 0.6667\nEpoch 4930/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.4812 - binary_accuracy: 0.6667\nEpoch 4931/5000\n1/1 [==============================] - 0s 9ms/step - loss: 17.0995 - binary_accuracy: 0.6667\nEpoch 4932/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.7212 - 
binary_accuracy: 0.6667\nEpoch 4933/5000\n1/1 [==============================] - 0s 21ms/step - loss: 8.3450 - binary_accuracy: 0.6667\nEpoch 4934/5000\n1/1 [==============================] - 0s 8ms/step - loss: 4.0549 - binary_accuracy: 0.7778\nEpoch 4935/5000\n1/1 [==============================] - 0s 16ms/step - loss: 0.4865 - binary_accuracy: 0.7778\nEpoch 4936/5000\n1/1 [==============================] - 0s 8ms/step - loss: 9.4654 - binary_accuracy: 0.4444\nEpoch 4937/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.5249 - binary_accuracy: 0.6667\nEpoch 4938/5000\n1/1 [==============================] - 0s 14ms/step - loss: 21.1365 - binary_accuracy: 0.6667\nEpoch 4939/5000\n1/1 [==============================] - 0s 16ms/step - loss: 16.7552 - binary_accuracy: 0.6667\nEpoch 4940/5000\n1/1 [==============================] - 0s 7ms/step - loss: 12.3771 - binary_accuracy: 0.6667\nEpoch 4941/5000\n1/1 [==============================] - 0s 11ms/step - loss: 8.0014 - binary_accuracy: 0.6667\nEpoch 4942/5000\n1/1 [==============================] - 0s 6ms/step - loss: 3.7396 - binary_accuracy: 0.7778\nEpoch 4943/5000\n1/1 [==============================] - 0s 17ms/step - loss: 0.2875 - binary_accuracy: 0.7778\nEpoch 4944/5000\n1/1 [==============================] - 0s 9ms/step - loss: 7.2220 - binary_accuracy: 0.4444\nEpoch 4945/5000\n1/1 [==============================] - 0s 13ms/step - loss: 25.8612 - binary_accuracy: 0.6667\nEpoch 4946/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.4721 - binary_accuracy: 0.6667\nEpoch 4947/5000\n1/1 [==============================] - 0s 22ms/step - loss: 17.0905 - binary_accuracy: 0.6667\nEpoch 4948/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.7123 - binary_accuracy: 0.6667\nEpoch 4949/5000\n1/1 [==============================] - 0s 11ms/step - loss: 8.3362 - binary_accuracy: 0.6667\nEpoch 4950/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.0475 - binary_accuracy: 0.7778\nEpoch 4951/5000\n1/1 [==============================] - 0s 9ms/step - loss: 0.4836 - binary_accuracy: 0.7778\nEpoch 4952/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.4275 - binary_accuracy: 0.4444\nEpoch 4953/5000\n1/1 [==============================] - 0s 11ms/step - loss: 25.5249 - binary_accuracy: 0.6667\nEpoch 4954/5000\n1/1 [==============================] - 0s 9ms/step - loss: 21.1367 - binary_accuracy: 0.6667\nEpoch 4955/5000\n1/1 [==============================] - 0s 11ms/step - loss: 16.7556 - binary_accuracy: 0.6667\nEpoch 4956/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.3776 - binary_accuracy: 0.6667\nEpoch 4957/5000\n1/1 [==============================] - 0s 26ms/step - loss: 8.0019 - binary_accuracy: 0.6667\nEpoch 4958/5000\n1/1 [==============================] - 0s 5ms/step - loss: 3.7410 - binary_accuracy: 0.7778\nEpoch 4959/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.2899 - binary_accuracy: 0.7778\nEpoch 4960/5000\n1/1 [==============================] - 0s 18ms/step - loss: 7.2466 - binary_accuracy: 0.4444\nEpoch 4961/5000\n1/1 [==============================] - 0s 9ms/step - loss: 25.8518 - binary_accuracy: 0.6667\nEpoch 4962/5000\n1/1 [==============================] - 0s 14ms/step - loss: 21.4630 - binary_accuracy: 0.6667\nEpoch 4963/5000\n1/1 [==============================] - 0s 6ms/step - loss: 17.0815 - binary_accuracy: 0.6667\nEpoch 4964/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.7034 - 
binary_accuracy: 0.6667\nEpoch 4965/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.3273 - binary_accuracy: 0.6667\nEpoch 4966/5000\n1/1 [==============================] - 0s 6ms/step - loss: 4.0401 - binary_accuracy: 0.7778\nEpoch 4967/5000\n1/1 [==============================] - 0s 6ms/step - loss: 0.4808 - binary_accuracy: 0.7778\nEpoch 4968/5000\n1/1 [==============================] - 0s 7ms/step - loss: 9.3893 - binary_accuracy: 0.4444\nEpoch 4969/5000\n1/1 [==============================] - 0s 22ms/step - loss: 25.5251 - binary_accuracy: 0.6667\nEpoch 4970/5000\n1/1 [==============================] - 0s 10ms/step - loss: 21.1371 - binary_accuracy: 0.6667\nEpoch 4971/5000\n1/1 [==============================] - 0s 8ms/step - loss: 16.7561 - binary_accuracy: 0.6667\nEpoch 4972/5000\n1/1 [==============================] - 0s 9ms/step - loss: 12.3781 - binary_accuracy: 0.6667\nEpoch 4973/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0024 - binary_accuracy: 0.6667\nEpoch 4974/5000\n1/1 [==============================] - 0s 12ms/step - loss: 3.7424 - binary_accuracy: 0.7778\nEpoch 4975/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.2924 - binary_accuracy: 0.7778\nEpoch 4976/5000\n1/1 [==============================] - 0s 9ms/step - loss: 7.2711 - binary_accuracy: 0.4444\nEpoch 4977/5000\n1/1 [==============================] - 0s 26ms/step - loss: 25.8424 - binary_accuracy: 0.6667\nEpoch 4978/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.4538 - binary_accuracy: 0.6667\nEpoch 4979/5000\n1/1 [==============================] - 0s 8ms/step - loss: 17.0725 - binary_accuracy: 0.6667\nEpoch 4980/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6944 - binary_accuracy: 0.6667\nEpoch 4981/5000\n1/1 [==============================] - 0s 13ms/step - loss: 8.3184 - binary_accuracy: 0.6667\nEpoch 4982/5000\n1/1 [==============================] - 0s 9ms/step - loss: 4.0327 - binary_accuracy: 0.7778\nEpoch 4983/5000\n1/1 [==============================] - 0s 12ms/step - loss: 0.4780 - binary_accuracy: 0.7778\nEpoch 4984/5000\n1/1 [==============================] - 0s 9ms/step - loss: 9.3505 - binary_accuracy: 0.4444\nEpoch 4985/5000\n1/1 [==============================] - 0s 10ms/step - loss: 25.5253 - binary_accuracy: 0.6667\nEpoch 4986/5000\n1/1 [==============================] - 0s 8ms/step - loss: 21.1375 - binary_accuracy: 0.6667\nEpoch 4987/5000\n1/1 [==============================] - 0s 11ms/step - loss: 16.7566 - binary_accuracy: 0.6667\nEpoch 4988/5000\n1/1 [==============================] - 0s 11ms/step - loss: 12.3786 - binary_accuracy: 0.6667\nEpoch 4989/5000\n1/1 [==============================] - 0s 9ms/step - loss: 8.0030 - binary_accuracy: 0.6667\nEpoch 4990/5000\n1/1 [==============================] - 0s 15ms/step - loss: 3.7440 - binary_accuracy: 0.7778\nEpoch 4991/5000\n1/1 [==============================] - 0s 14ms/step - loss: 0.2950 - binary_accuracy: 0.7778\nEpoch 4992/5000\n1/1 [==============================] - 0s 14ms/step - loss: 7.2958 - binary_accuracy: 0.4444\nEpoch 4993/5000\n1/1 [==============================] - 0s 15ms/step - loss: 25.8330 - binary_accuracy: 0.6667\nEpoch 4994/5000\n1/1 [==============================] - 0s 6ms/step - loss: 21.4447 - binary_accuracy: 0.6667\nEpoch 4995/5000\n1/1 [==============================] - 0s 22ms/step - loss: 17.0635 - binary_accuracy: 0.6667\nEpoch 4996/5000\n1/1 [==============================] - 0s 6ms/step - loss: 12.6854 - 
binary_accuracy: 0.6667\nEpoch 4997/5000\n1/1 [==============================] - 0s 6ms/step - loss: 8.3094 - binary_accuracy: 0.6667\nEpoch 4998/5000\n1/1 [==============================] - 0s 7ms/step - loss: 4.0253 - binary_accuracy: 0.7778\nEpoch 4999/5000\n1/1 [==============================] - 0s 10ms/step - loss: 0.4752 - binary_accuracy: 0.7778\nEpoch 5000/5000\n1/1 [==============================] - 0s 21ms/step - loss: 9.3113 - binary_accuracy: 0.4444\n"
],
[
"# 결과를 출력합니다.\nprint(\" test data [7.] 예측 값 : \", model.predict(test_data))\nprint(\" test data [80.] 예측 값 : \", model.predict(test_data2))\nprint(\" test data [110.] 예측 값 : \", model.predict(test_data3))\nprint(\" test data [180.] 예측 값 : \", model.predict(test_data4))\nprint(\" test data [320.] 예측 값 : \", model.predict(test_data5))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
c52a399dd8f6df965eb5be96b134dc02e5c8e8de
| 11,603 |
ipynb
|
Jupyter Notebook
|
content/AnimeGAN.ipynb
|
ThickFive/Shell
|
6091bca08dcda0c00b3e0c176057e6e36e0356a5
|
[
"MIT"
] | null | null | null |
content/AnimeGAN.ipynb
|
ThickFive/Shell
|
6091bca08dcda0c00b3e0c176057e6e36e0356a5
|
[
"MIT"
] | null | null | null |
content/AnimeGAN.ipynb
|
ThickFive/Shell
|
6091bca08dcda0c00b3e0c176057e6e36e0356a5
|
[
"MIT"
] | null | null | null | 11,603 | 11,603 | 0.708955 |
[
[
[
"- 参考 [天秀!GitHub 硬核项目:动漫生成器让照片秒变手绘日漫风!!!](https://mp.weixin.qq.com/s?__biz=MzAxOTcxNTIwNQ==&mid=310435176&idx=1&sn=9d3f5916ae5126c4e3233b26595e02cb&chksm=0cb6b8823bc13194bb38ce5eabe344e59a6881f1ae6a4ac0aa183874ec71c2586b73ce3c0f96#rd) \n以下下步骤中 [ ] 表示非必须",
"_____no_output_____"
],
[
"- **[查看分配到的GPU型号]**",
"_____no_output_____"
]
],
[
[
"!nvidia-smi",
"_____no_output_____"
]
],
[
[
"- **1. 安装运行环境**",
"_____no_output_____"
]
],
[
[
"!pip3 install tensorflow-gpu==1.13.1\n!pip3 install opencv-python\n!pip3 install tqdm\n!pip3 install numpy\n!pip3 install argparse",
"_____no_output_____"
]
],
[
[
"- **[检查是否安装成功]**",
"_____no_output_____"
]
],
[
[
"!pip3 show tensorflow-gpu\n!pip3 show opencv-python\n!pip3 show tqdm\n!pip3 show numpy\n!pip3 show argparse\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())",
"_____no_output_____"
]
],
[
[
"- **2. 下载AnimeGAN源码**",
"_____no_output_____"
]
],
[
[
"%cd /content\n!git clone https://github.com/TachibanaYoshino/AnimeGAN",
"_____no_output_____"
]
],
[
[
"- **3. 添加(or上传)并执行预训练模型下载脚本 download_staffs.sh**",
"_____no_output_____"
]
],
[
[
"%cd /content/AnimeGAN\n!echo -e 'URL=https://github.com/TachibanaYoshino/AnimeGAN/releases/download/Haoyao-style_V1.0/Haoyao-style.zip\\nZIP_FILE=./checkpoint/Haoyao-style.zip\\nTARGET_DIR=./checkpoint/saved_model\\n\\nmkdir -p ./checkpoint\\nwget -N $URL -O $ZIP_FILE\\nmkdir -p $TARGET_DIR\\nunzip $ZIP_FILE -d $TARGET_DIR\\nrm $ZIP_FILE\\n\\nDatesetURL=https://github.com/TachibanaYoshino/AnimeGAN/releases/download/dataset-1/dataset.zip\\nZIP_FILE=./dataset.zip\\nTARGET_DIR=./dataset\\n\\nrm -rf dataset\\nwget -N $DatesetURL -O $ZIP_FILE\\nunzip $ZIP_FILE -d $TARGET_DIR\\nrm $ZIP_FILE\\n\\nVGG_FILE=./vgg19_weight/vgg19.npy\\nwget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate \\0047https://docs.google.com/uc?export=download&id=1U5HCRpZWAbDVLipNoF8t0ZHpwCRX7kdF\\0047 -O- | sed -rn \\0047s/.*confirm=([0-9A-Za-z_]+).*/\\01341\\0134n/p\\0047)&id=1U5HCRpZWAbDVLipNoF8t0ZHpwCRX7kdF\" -O $VGG_FILE && rm -rf /tmp/cookies.txt' > download_staffs.sh\n!bash download_staffs.sh",
"_____no_output_____"
]
],
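[
[
"For reference, a decoded, human-readable rendering of the download_staffs.sh that the escaped one-liner above writes (reconstructed from its octal escapes; URLs, paths, and commands are verbatim from the original, only the comment line is added):\n\n```sh\nURL=https://github.com/TachibanaYoshino/AnimeGAN/releases/download/Haoyao-style_V1.0/Haoyao-style.zip\nZIP_FILE=./checkpoint/Haoyao-style.zip\nTARGET_DIR=./checkpoint/saved_model\n\nmkdir -p ./checkpoint\nwget -N $URL -O $ZIP_FILE\nmkdir -p $TARGET_DIR\nunzip $ZIP_FILE -d $TARGET_DIR\nrm $ZIP_FILE\n\nDatesetURL=https://github.com/TachibanaYoshino/AnimeGAN/releases/download/dataset-1/dataset.zip\nZIP_FILE=./dataset.zip\nTARGET_DIR=./dataset\n\nrm -rf dataset\nwget -N $DatesetURL -O $ZIP_FILE\nunzip $ZIP_FILE -d $TARGET_DIR\nrm $ZIP_FILE\n\n# fetch vgg19.npy from Google Drive, passing along the download-confirmation token\nVGG_FILE=./vgg19_weight/vgg19.npy\nwget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1U5HCRpZWAbDVLipNoF8t0ZHpwCRX7kdF' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1U5HCRpZWAbDVLipNoF8t0ZHpwCRX7kdF\" -O $VGG_FILE && rm -rf /tmp/cookies.txt\n```",
"_____no_output_____"
]
],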
[
[
"- **4. [训练模型(非常耗时, 可以跳过这一步)]**",
"_____no_output_____"
]
],
[
[
"!python main.py --phase train --dataset Hayao --epoch 101 --init_epoch 1",
"_____no_output_____"
]
],
[
[
"- **5. 添加(or上传)视频处理脚本 video.py**",
"_____no_output_____"
]
],
[
[
"!echo -e 'import\\0040argparse\\nimport\\0040cv2\\nimport\\0040os\\nimport\\0040re\\nimport\\0040shutil\\n\\nfg_video_path\\0040=\\0040\\0047/content/AnimeGAN/video/input_video.mp4\\0047\\nEXTRACT_FREQUENCY\\0040=\\00401\\n\\ndef\\0040extract(video_path,\\0040image_path,\\0040index=EXTRACT_FREQUENCY):\\n\\0040\\0040\\0040\\0040try:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040shutil.rmtree(image_path)\\n\\0040\\0040\\0040\\0040except\\0040OSError:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040pass\\n\\0040\\0040\\0040\\0040os.mkdir(image_path)\\n\\0040\\0040\\0040\\0040video\\0040=\\0040cv2.VideoCapture()\\n\\0040\\0040\\0040\\0040if\\0040not\\0040video.open(video_path):\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040print(\"can\\0040not\\0040open\\0040the\\0040video\")\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040exit(1)\\n\\0040\\0040\\0040\\0040count\\0040=\\00401\\n\\0040\\0040\\0040\\0040while\\0040True:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040_,\\0040frame\\0040=\\0040video.read()\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040if\\0040frame\\0040is\\0040None:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040break\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040if\\0040count\\0040%\\0040EXTRACT_FREQUENCY\\0040==\\00400:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040save_path\\0040=\\0040\"{}/{:>04d}.jpg\".format(image_path,\\0040index)\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040cv2.imwrite(save_path,\\0040frame)\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040index\\0040+=\\00401\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040count\\0040+=\\00401\\n\\0040\\0040\\0040\\0040video.release()\\n\\0040\\0040\\0040\\0040print(\"Totally\\0040save\\0040{:d}\\0040pics\".format(index\\0040-\\00401))\\n\\ndef\\0040is_frame(path):\\n\\0040\\0040\\0040\\0040res\\0040=\\0040re.match(r\\0047\\0134d{4}\\0134.jpg$\\0047,\\0040path)\\n\\0040\\0040\\0040\\0040return\\0040True\\0040if\\0040res\\0040!=\\0040None\\0040else\\0040False\\n\\ndef\\0040filter_frame(array):\\n\\0040\\0040\\0040\\0040res\\0040=\\0040[]\\n\\0040\\0040\\0040\\0040for\\0040item\\0040in\\0040filter(is_frame,\\0040array):\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040res.append(item)\\n\\0040\\0040\\0040\\0040res.sort(reverse=False)\\n\\0040\\0040\\0040\\0040return\\0040res\\n\\ndef\\0040combine(image_path,\\0040output_path):\\n\\0040\\0040\\0040\\0040cap\\0040=\\0040cv2.VideoCapture(fg_video_path)\\n\\0040\\0040\\0040\\0040fgs\\0040=\\0040int(cap.get(cv2.CAP_PROP_FPS))\\0040\\n\\0040\\0040\\0040\\0040fgs\\0040=\\0040fgs\\0040if\\0040fgs\\0040>\\00400\\0040else\\004025\\n\\0040\\0040\\0040\\0040pictrue_in_filelist\\0040=\\0040filter_frame(os.listdir(image_path))\\n\\0040\\0040\\0040\\0040print(pictrue_in_filelist)\\n\\0040\\0040\\0040\\0040name\\0040=\\0040image_path\\0040+\\0040\"/\"\\0040+\\0040pictrue_in_filelist[0]\\n\\0040\\0040\\0040\\0040img\\0040=\\0040cv2.imread(name)\\n\\0040\\0040\\0040\\0040h,\\0040w,\\0040c\\0040=\\0040img.shape\\n\\0040\\0040\\0040\\0040size\\0040=\\0040(w,\\0040h)\\n\\0040\\0040\\0040\\0040print(f\\0047size:\\0040{size},\\0040fgs:\\0040{fgs}\\0047)\\n\\n\\0040\\0040\\0040\\0040fourcc\\0040=\\0040cv2.VideoWriter_fourcc(*\\0047XVID\\0047)\\n\\0040\\0040\\0040\\0040out_video\\0040=\\0040output_path\\0040+\\0040\\0047.mp4\\0047\\n\\0040\\0040\\0040\\0040video_writer\\0040=\\0040cv2.VideoWriter(out_video,\\0040fourcc,\\0040fgs,\\0040size)\\n\\n\\0040\\0040\\0040\
\0040for\\0040i\\0040in\\0040range(len(pictrue_in_filelist)):\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040pictrue_in_filename\\0040=\\0040image_path\\0040+\\0040\"/\"\\0040+\\0040pictrue_in_filelist[i]\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040img12\\0040=\\0040cv2.imread(pictrue_in_filename)\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040video_writer.write(img12)\\n\\0040\\0040\\0040\\0040video_writer.release()\\n\\0040\\0040\\0040\\0040#print(\"删除合成的图片数据集\")\\n\\0040\\0040\\0040\\0040#shutil.rmtree(fg_in_bg)\\n\\0040\\0040\\0040\\0040return\\0040out_video\\n\\ndef\\0040parse_args():\\n\\0040\\0040\\0040\\0040desc\\0040=\\0040\"video\\0040util\"\\n\\0040\\0040\\0040\\0040parser\\0040=\\0040argparse.ArgumentParser(description=desc)\\n\\0040\\0040\\0040\\0040parser.add_argument(\\0047--type\\0047,\\0040type=str,\\0040default=\\0047extract\\0047,\\0040help=\\0047specify\\0040which\\0040action\\0040to\\0040take\\0047)\\n\\0040\\0040\\0040\\0040parser.add_argument(\\0047--video_path\\0047,\\0040type=str,\\0040default=\\0047/content/AnimeGAN/video/input_video.mp4\\0047,\\0040help=\\0047input\\0040video\\0040path\\0047)\\n\\0040\\0040\\0040\\0040parser.add_argument(\\0047--image_path\\0047,\\0040type=str,\\0040default=\\0047/content/AnimeGAN/video/input_video\\0047,\\0040help=\\0047output\\0040images\\0040path\\0047)\\n\\0040\\0040\\0040\\0040parser.add_argument(\\0047--output_path\\0047,\\0040type=str,\\0040default=\\0047/content/AnimeGAN/video/output_video.mp4\\0047,\\0040help=\\0047output\\0040video\\0040path\\0047)\\n\\0040\\0040\\0040\\0040\"\"\"checking\\0040arguments\"\"\"\\n\\0040\\0040\\0040\\0040return\\0040parser.parse_args()\\n\\nif\\0040__name__\\0040==\\0040\"__main__\":\\n\\0040\\0040\\0040\\0040args\\0040=\\0040parse_args()\\n\\0040\\0040\\0040\\0040if\\0040args.type\\0040==\\0040\\0047extract\\0047:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040extract(args.video_path,\\0040args.image_path)\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040print(f\\0047extract\\0040video\\0040{args.video_path}\\0040into\\0040images\\0040{args.image_path}\\0047)\\n\\0040\\0040\\0040\\0040elif\\0040args.type\\0040==\\0040\\0047combine\\0047:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040combine(args.image_path,\\0040args.output_path)\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040print(f\\0047combine\\0040images\\0040from\\0040{args.image_path}\\0040into\\0040{args.output_path}\\0047)\\n\\0040\\0040\\0040\\0040else:\\n\\0040\\0040\\0040\\0040\\0040\\0040\\0040\\0040print(f\\0047Error:\\0040you\\0040must\\0040specify\\0040the\\0040argument\\0040of\\0040type,\\0040extract\\0040or\\0040combine\\0047)' > video.py",
"_____no_output_____"
]
],
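[
[
"A condensed, readable sketch of the video.py written by the cell above (decoded from its escaped string; paths, frame-name regex, codec, and fps fallback mirror the original, while the argparse dispatch between --type extract and --type combine is omitted for brevity):\n\n```python\nimport os\nimport re\nimport shutil\n\nimport cv2\n\nfg_video_path = '/content/AnimeGAN/video/input_video.mp4'\nEXTRACT_FREQUENCY = 1  # keep every frame\n\ndef extract(video_path, image_path, index=EXTRACT_FREQUENCY):\n    # Recreate the output directory, then save every EXTRACT_FREQUENCY-th frame as NNNN.jpg\n    shutil.rmtree(image_path, ignore_errors=True)\n    os.mkdir(image_path)\n    video = cv2.VideoCapture()\n    if not video.open(video_path):\n        raise SystemExit('can not open the video')\n    count = 1\n    while True:\n        _, frame = video.read()\n        if frame is None:\n            break\n        if count % EXTRACT_FREQUENCY == 0:\n            cv2.imwrite('{}/{:>04d}.jpg'.format(image_path, index), frame)\n            index += 1\n        count += 1\n    video.release()\n\ndef combine(image_path, output_path):\n    # Rebuild an XVID-encoded .mp4 from the NNNN.jpg frames, at the source video's fps (fallback 25)\n    fps = int(cv2.VideoCapture(fg_video_path).get(cv2.CAP_PROP_FPS))\n    fps = fps if fps > 0 else 25\n    frames = sorted(p for p in os.listdir(image_path) if re.match(r'\\d{4}\\.jpg$', p))\n    h, w, _ = cv2.imread(os.path.join(image_path, frames[0])).shape\n    writer = cv2.VideoWriter(output_path + '.mp4', cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))\n    for name in frames:\n        writer.write(cv2.imread(os.path.join(image_path, name)))\n    writer.release()\n```",
"_____no_output_____"
]
],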
[
[
"- **6. 上传视频**",
"_____no_output_____"
]
],
[
[
"%mkdir video\nprint('Upload video in this dir: /content/AnimeGAN/video')",
"_____no_output_____"
]
],
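[
[
"Optionally (this notebook runs on Colab, so google.colab is assumed to be available), the upload can be done programmatically instead of through the file browser; a minimal sketch:\n\n```python\nimport shutil\n\nfrom google.colab import files\n\n# Opens a browser upload dialog; chosen files are saved to the current directory\nuploaded = files.upload()\nfor name in uploaded:\n    # Move the file to the path the later cells expect\n    shutil.move(name, '/content/AnimeGAN/video/input_video.mp4')\n```",
"_____no_output_____"
]
],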
[
[
"- **7. 视频分解为序列帧图片**",
"_____no_output_____"
]
],
[
[
"!python video.py --type extract --video_path /content/AnimeGAN/video/input_video.mp4 --image_path /content/AnimeGAN/video/input_video",
"_____no_output_____"
]
],
[
[
"- **8. 序列帧图片动漫化**",
"_____no_output_____"
]
],
[
[
"!python test.py --checkpoint_dir checkpoint/saved_model --test_dir video/input_video --style_name H",
"_____no_output_____"
]
],
[
[
"- **9. 序列帧合成视频**",
"_____no_output_____"
]
],
[
[
"!python video.py --type combine --image_path /content/AnimeGAN/results/H --output_path /content/AnimeGAN/video/output_video.mp4",
"_____no_output_____"
]
]
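,
  [
    [
      "# Optional: retrieve the stylized video. A minimal sketch, assuming this notebook runs in\n# Google Colab; the path matches the --output_path passed in the previous step.\nfrom google.colab import files\n\nfiles.download('/content/AnimeGAN/video/output_video.mp4')",
      "_____no_output_____"
    ]
  ]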
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52a4a9d20b4d13c7034ff11de8e0630e5dddb2a
| 30,408 |
ipynb
|
Jupyter Notebook
|
notebooks/cmssw.ipynb
|
jpata/particleflow
|
a3b9be59b42c6eb0d5f6941ab4995147c7baeb3b
|
[
"Apache-2.0"
] | 12 |
2019-09-29T21:24:18.000Z
|
2022-02-22T13:20:38.000Z
|
notebooks/cmssw.ipynb
|
jpata/particleflow
|
a3b9be59b42c6eb0d5f6941ab4995147c7baeb3b
|
[
"Apache-2.0"
] | 39 |
2019-10-03T18:21:01.000Z
|
2021-12-07T11:58:57.000Z
|
notebooks/cmssw.ipynb
|
jpata/particleflow
|
a3b9be59b42c6eb0d5f6941ab4995147c7baeb3b
|
[
"Apache-2.0"
] | 19 |
2019-09-29T21:24:27.000Z
|
2022-03-31T12:17:04.000Z
| 39.542263 | 141 | 0.585011 |
[
[
[
"import pickle\nimport numpy as np\nimport mplhep\nimport awkward\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nimport uproot\nimport boost_histogram as bh\n",
"_____no_output_____"
],
[
"physics_process = \"qcd\"\n\ndata_baseline = awkward.Array(pickle.load(open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11843.0/out.pkl\", \"rb\")))\ndata_mlpf = awkward.Array(pickle.load(open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11843.13/out.pkl\", \"rb\")))\n\nfi1 = uproot.open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11843.0/DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root\")\nfi2 = uproot.open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11843.13/DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root\")",
"_____no_output_____"
],
[
"physics_process = \"ttbar\"\ndata_mlpf = awkward.Array(pickle.load(open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11834.13/out.pkl\", \"rb\")))\ndata_baseline = awkward.Array(pickle.load(open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11834.0/out.pkl\", \"rb\")))\n\nfi1 = uproot.open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11834.0/DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root\")\nfi2 = uproot.open(\"/home/joosep/reco/mlpf/CMSSW_12_1_0_pre3/11834.13/DQM_V0001_R000000001__Global__CMSSW_X_Y_Z__RECO.root\")",
"_____no_output_____"
],
[
"# physics_process = \"singlepi\"\n# data_mlpf = awkward.Array(pickle.load(open(\"/home/joosep/reco/mlpf/CMSSW_11_3_0_pre2/11688.0_mlpf/out.pkl\", \"rb\")))\n# data_baseline = awkward.Array(pickle.load(open(\"/home/joosep/reco/mlpf/CMSSW_11_3_0_pre2/11688.0_baseline/out.pkl\", \"rb\")))",
"_____no_output_____"
],
[
"def cms_label(x0=0.12, x1=0.23, x2=0.67, y=0.90):\n plt.figtext(x0, y,'CMS',fontweight='bold', wrap=True, horizontalalignment='left', fontsize=12)\n plt.figtext(x1, y,'Simulation Preliminary', style='italic', wrap=True, horizontalalignment='left', fontsize=10)\n plt.figtext(x2, y,'Run 3 (14 TeV)', wrap=True, horizontalalignment='left', fontsize=10)\n \nphysics_process_str = {\n \"ttbar\": \"$\\mathrm{t}\\overline{\\mathrm{t}}$ events\",\n \"singlepi\": \"single $\\pi^{\\pm}$ events\",\n \"qcd\": \"QCD\",\n}\n\ndef sample_label(ax, x=0.03, y=0.98, additional_text=\"\", physics_process=physics_process):\n plt.text(x, y,\n physics_process_str[physics_process]+additional_text,\n va=\"top\", ha=\"left\", size=10, transform=ax.transAxes)\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(5, 5))\nax = plt.axes()\n\nbins = np.linspace(0, 500, 61)\nplt.hist(awkward.flatten(data_baseline[\"ak4PFJetsCHS\"][\"pt\"]), bins=bins, histtype=\"step\", lw=2, label=\"PF\");\nplt.hist(awkward.flatten(data_mlpf[\"ak4PFJetsCHS\"][\"pt\"]), bins=bins, histtype=\"step\", lw=2, label=\"MLPF\");\nplt.yscale(\"log\")\nplt.ylim(top=1e5)\ncms_label()\nsample_label(ax, x=0.02)\nplt.xlabel(\"ak4PFJetsCHS $p_T$ [GeV]\")\nplt.ylabel(\"Number of jets\")\nplt.legend(loc=\"best\")\n\nplt.savefig(\"ak4jet_pt_{}.pdf\".format(physics_process), bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(5, 5))\n\nbins = np.linspace(0, 2500, 61)\nplt.hist(awkward.flatten(data_baseline[\"ak4PFJetsCHS\"][\"energy\"]), bins=bins, histtype=\"step\", lw=2, label=\"PF\");\nplt.hist(awkward.flatten(data_mlpf[\"ak4PFJetsCHS\"][\"energy\"]), bins=bins, histtype=\"step\", lw=2, label=\"MLPF\");\nplt.yscale(\"log\")\nplt.ylim(top=1e5)\ncms_label()\nsample_label(ax, x=0.02)\n\nplt.xlabel(\"ak4PFJetsCHS $E$ [GeV]\")\nplt.ylabel(\"Number of jets\")\nplt.legend(loc=\"best\")\n\nplt.savefig(\"ak4jet_energy_{}.pdf\".format(physics_process), bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(5, 5))\n\nbins = np.linspace(-6, 6, 101)\nplt.hist(awkward.flatten(data_baseline[\"ak4PFJetsCHS\"][\"eta\"]), bins=bins, histtype=\"step\", lw=2, label=\"PF\");\nplt.hist(awkward.flatten(data_mlpf[\"ak4PFJetsCHS\"][\"eta\"]), bins=bins, histtype=\"step\", lw=2, label=\"MLPF\");\n#plt.yscale(\"log\")\ncms_label()\nsample_label(ax)\nplt.ylim(top=2000)\nplt.xlabel(\"ak4PFJetsCHS $\\eta$\")\nplt.ylabel(\"Number of jets\")\nplt.legend(loc=\"best\")\n\nplt.savefig(\"ak4jet_eta_{}.pdf\".format(physics_process), bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"color_map = {\n 1: \"red\",\n 2: \"blue\",\n 11: \"orange\",\n 22: \"cyan\",\n 13: \"purple\",\n 130: \"green\",\n 211: \"black\"\n}\n\nparticle_labels = {\n 1: \"HFEM\",\n 2: \"HFHAD\",\n 11: \"$e^\\pm$\",\n 22: \"$\\gamma$\",\n 13: \"$\\mu$\",\n 130: \"neutral hadron\",\n 211: \"charged hadron\"\n \n}",
"_____no_output_____"
],
[
"def draw_event(iev):\n pt_0 = data_mlpf[\"particleFlow\"][\"pt\"][iev]\n energy_0 = data_mlpf[\"particleFlow\"][\"energy\"][iev]\n eta_0 = data_mlpf[\"particleFlow\"][\"eta\"][iev]\n phi_0 = data_mlpf[\"particleFlow\"][\"phi\"][iev]\n pdgid_0 = np.abs(data_mlpf[\"particleFlow\"][\"pdgId\"][iev])\n \n pt_1 = data_baseline[\"particleFlow\"][\"pt\"][iev]\n energy_1 = data_baseline[\"particleFlow\"][\"energy\"][iev]\n eta_1 = data_baseline[\"particleFlow\"][\"eta\"][iev]\n phi_1 = data_baseline[\"particleFlow\"][\"phi\"][iev]\n pdgid_1 = np.abs(data_baseline[\"particleFlow\"][\"pdgId\"][iev])\n \n plt.figure(figsize=(5, 5))\n ax = plt.axes()\n plt.scatter(eta_0, phi_0, marker=\".\", s=energy_0, c=[color_map[p] for p in pdgid_0], alpha=0.6)\n\n pids = [211,130,1,2,22,11,13]\n for p in pids:\n plt.plot([], [], color=color_map[p], lw=0, marker=\"o\", label=particle_labels[p])\n plt.legend(loc=8, frameon=False, ncol=3, fontsize=8)\n\n cms_label()\n sample_label(ax)\n plt.xlim(-6,6)\n plt.ylim(-5,4)\n plt.xlabel(\"PFCandidate $\\eta$\")\n plt.ylabel(\"PFCandidate $\\phi$\")\n plt.title(\"MLPF (trained on PF), CMSSW-ONNX inference\", y=1.05)\n plt.savefig(\"event_mlpf_{}_iev{}.pdf\".format(physics_process, iev), bbox_inches=\"tight\")\n plt.savefig(\"event_mlpf_{}_iev{}.png\".format(physics_process, iev), bbox_inches=\"tight\", dpi=300)\n \n plt.figure(figsize=(5, 5))\n ax = plt.axes()\n plt.scatter(eta_1, phi_1, marker=\".\", s=energy_1, c=[color_map[p] for p in pdgid_1], alpha=0.6)\n# plt.scatter(\n# data_baseline[\"ak4PFJetsCHS\"][\"eta\"][iev],\n# data_baseline[\"ak4PFJetsCHS\"][\"phi\"][iev],\n# s=data_baseline[\"ak4PFJetsCHS\"][\"energy\"][iev], color=\"gray\", alpha=0.3\n# )\n cms_label()\n sample_label(ax)\n plt.xlim(-6,6)\n plt.ylim(-5,4)\n plt.xlabel(\"PFCandidate $\\eta$\")\n plt.ylabel(\"PFCandidate $\\phi$\")\n plt.title(\"Standard PF, CMSSW\", y=1.05)\n \n pids = [211,130,1,2,22,11,13]\n for p in pids:\n plt.plot([], [], color=color_map[p], lw=0, marker=\"o\", label=particle_labels[p])\n plt.legend(loc=8, frameon=False, ncol=3, fontsize=8)\n \n plt.savefig(\"event_pf_{}_iev{}.pdf\".format(physics_process, iev), bbox_inches=\"tight\")\n plt.savefig(\"event_pf_{}_iev{}.png\".format(physics_process, iev), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"draw_event(0)",
"_____no_output_____"
],
[
"draw_event(1)",
"_____no_output_____"
],
[
"draw_event(2)",
"_____no_output_____"
],
[
"def plot_dqm(key, title, rebin=None):\n h1 = fi1.get(key).to_boost()\n h2 = fi2.get(key).to_boost()\n\n fig, (ax1, ax2) = plt.subplots(2, 1)\n plt.sca(ax1)\n if rebin:\n h1 = h1[bh.rebin(rebin)]\n h2 = h2[bh.rebin(rebin)]\n \n mplhep.histplot(h1, yerr=0, label=\"PF\");\n mplhep.histplot(h2, yerr=0, label=\"MLPF\");\n plt.legend(frameon=False)\n plt.ylabel(\"Number of particles / bin\")\n sample_label(ax=ax1, additional_text=\", \"+title, physics_process=physics_process)\n\n plt.sca(ax2)\n ratio_hist = h2/h1\n vals_y = ratio_hist.values()\n vals_y[np.isnan(vals_y)] = 0\n plt.plot(ratio_hist.axes[0].centers, vals_y, color=\"gray\", lw=0, marker=\".\")\n plt.ylim(0,2)\n plt.axhline(1.0, color=\"black\", ls=\"--\")\n plt.ylabel(\"MLPF / PF\")\n \n return ax1, ax2\n \n#plt.xscale(\"log\")\n#plt.yscale(\"log\")\n\nlog10_pt = \"$\\log_{10}[p_T/\\mathrm{GeV}]$\"\neta = \"$\\eta$\"\n\ndqm_plots_ptcl = [\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/chargedHadron/chargedHadronLog10Pt\",\n \"ch.had.\", log10_pt, \"ch_had_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/chargedHadron/chargedHadronEta\",\n \"ch.had.\", eta, \"ch_had_eta\"),\n \n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/neutralHadron/neutralHadronLog10Pt\",\n \"n.had.\", log10_pt, \"n_had_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/neutralHadron/neutralHadronPtLow\",\n \"n.had.\", \"$p_T$ [GeV]\", \"n_had_ptlow\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/neutralHadron/neutralHadronPtMid\",\n \"n.had.\", \"$p_T$ [GeV]\", \"n_had_ptmid\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/neutralHadron/neutralHadronEta\",\n \"n.had.\", eta, \"n_had_eta\"),\n \n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/HF_hadron/HF_hadronLog10Pt\",\n \"HFHAD\", log10_pt, \"hfhad_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/HF_hadron/HF_hadronEta\",\n \"HFHAD\", eta, \"hfhad_eta\"),\n \n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/HF_EM_particle/HF_EM_particleLog10Pt\",\n \"HFEM\", log10_pt, \"hfem_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/HF_EM_particle/HF_EM_particleEta\",\n \"HFEM\", eta, \"hfem_eta\"),\n \n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/photon/photonLog10Pt\",\n \"photon\", log10_pt, \"photon_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/photon/photonEta\",\n \"photon\", eta, \"photon_eta\"),\n \n \n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/electron/electronLog10Pt\",\n \"electron\", log10_pt, \"electron_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/electron/electronEta\",\n \"electron\", eta, \"electron_eta\"), \n \n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/muon/muonLog10Pt\",\n \"muon\", log10_pt, \"muon_logpt\"),\n (\"DQMData/Run 1/ParticleFlow/Run summary/PackedCandidates/muon/muonEta\",\n \"muon\", eta, \"muon_eta\"),\n]\n\ndqm_plots_jetres = [\n (\"DQMData/Run 1/ParticleFlow/Run summary/PFJetValidation/CompWithGenJet/mean_delta_et_Over_et_VS_et_\",\n \"jets\", \"gen-jet $E_t$\", \"$\\Delta E_t / E_t$\"),\n]",
"_____no_output_____"
],
[
"for key, title, xlabel, plot_label in dqm_plots_ptcl:\n rh = plot_dqm(key, title)\n plt.xlabel(xlabel)\n cms_label()\n plt.savefig(\"dqm_{}_{}.pdf\".format(plot_label, physics_process), bbox_inches=\"tight\")\n plt.savefig(\"dqm_{}_{}.png\".format(plot_label, physics_process), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"ax1, ax2 = plot_dqm(\"DQMData/Run 1/JetMET/Run summary/Jet/Cleanedak4PFJetsCHS/Pt\", \"ak4PFCHS jets\")\nax2.set_xlabel(\"jet $p_t$ [GeV]\")\nax1.set_ylabel(\"number of jets / bin\")\n#plt.xscale(\"log\")\n#plt.ylim(bottom=1, top=1e4)\nax1.set_yscale(\"log\")\nax1.set_ylim(bottom=1, top=1e5)\n#ax2.set_ylim(0,5)\n\ncms_label()\nplt.savefig(\"dqm_jet_pt_{}.pdf\".format(physics_process), bbox_inches=\"tight\")\nplt.savefig(\"dqm_jet_pt_{}.png\".format(physics_process), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"ax1, ax2 = plot_dqm(\"DQMData/Run 1/JetMET/Run summary/Jet/CleanedslimmedJetsPuppi/Pt\", \"ak4PFPuppi jets\")\nax2.set_xlabel(\"jet $p_t$ [GeV]\")\nax1.set_ylabel(\"number of jets / bin\")\n#plt.xscale(\"log\")\n#plt.ylim(bottom=1, top=1e4)\nax1.set_yscale(\"log\")\nax1.set_ylim(bottom=1, top=1e5)\n#ax2.set_ylim(0,5)\n\ncms_label()\nplt.savefig(\"dqm_jet_pt_puppi_{}.pdf\".format(physics_process), bbox_inches=\"tight\")\nplt.savefig(\"dqm_jet_pt_puppi_{}.png\".format(physics_process), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"ax1, ax2 = plot_dqm(\"DQMData/Run 1/JetMET/Run summary/Jet/Cleanedak4PFJetsCHS/Eta\", \"ak4PFCHS jets\")\nax2.set_xlabel(\"jet $\\eta$\")\nax1.set_ylabel(\"number of jets / bin\")\n#plt.xscale(\"log\")\n#plt.ylim(bottom=1, top=1e4)\n#ax1.set_yscale(\"log\")\nax1.set_ylim(bottom=0, top=1e3)\n#ax2.set_ylim(0,5)\n\ncms_label()\nplt.savefig(\"dqm_jet_eta_{}.pdf\".format(physics_process), bbox_inches=\"tight\")\nplt.savefig(\"dqm_jet_eta_{}.png\".format(physics_process), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"ax1, ax2 = plot_dqm(\"DQMData/Run 1/JetMET/Run summary/Jet/CleanedslimmedJetsPuppi/Eta\", \"ak4PFPuppi jets\")\nax2.set_xlabel(\"jet $\\eta$\")\nax1.set_ylabel(\"number of jets / bin\")\n#plt.xscale(\"log\")\n#plt.ylim(bottom=1, top=1e4)\n#ax1.set_yscale(\"log\")\n#ax1.set_ylim(bottom=0, top=20)\n#ax2.set_ylim(0,5)\n\ncms_label()\nplt.savefig(\"dqm_jet_eta_puppi_{}.pdf\".format(physics_process), bbox_inches=\"tight\")\nplt.savefig(\"dqm_jet_eta_puppi_{}.png\".format(physics_process), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"# plot_dqm(\"DQMData/Run 1/ParticleFlow/Run summary/PFJetValidation/CompWithGenJet/mean_delta_et_Over_et_VS_et_\", \"AK4 PF jets\")\n# plt.xlabel(\"gen-jet $E_t$ [GeV]\")\n# plt.ylabel(\"profiled $\\mu(\\Delta E_t / E_t$)\")\n# plt.xscale(\"log\")\n# plt.ylim(0,3)\n# cms_label()\n# plt.savefig(\"dqm_jet_mean_delta_et_Over_et_VS_et.pdf\", bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"# plot_dqm(\"DQMData/Run 1/ParticleFlow/Run summary/PFJetValidation/CompWithGenJet/sigma_delta_et_Over_et_VS_et_\", \"AK4 PF jets\")\n# plt.xlabel(\"gen-jet $E_t$ [GeV]\")\n# plt.ylabel(\"profiled $\\sigma(\\Delta E_t / E_t)$\")\n# plt.xscale(\"log\")\n# plt.ylim(0,10)\n# cms_label()\n# plt.savefig(\"dqm_jet_sigma_delta_et_Over_et_VS_et.pdf\", bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"ax1, ax2 = plot_dqm(\"DQMData/Run 1/JetMET/Run summary/METValidation/pfMet/MET\", \"PFMET\", rebin=1)\nax2.set_xlabel(\"$\\sum E_t$ [GeV]\")\nax1.set_ylabel(\"number of events / bin\")\n#ax1.set_xscale(\"log\")\nax1.set_ylim(bottom=1, top=1000)\nax1.set_yscale(\"log\")\nplt.savefig(\"dqm_met_sumet_{}.pdf\".format(physics_process), bbox_inches=\"tight\")\nplt.savefig(\"dqm_met_sumet_{}.png\".format(physics_process), bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"# plot_dqm(\"DQMData/Run 1/ParticleFlow/Run summary/PFMETValidation/CompWithGenMET/profileRMS_delta_et_Over_et_VS_et_\", \"PFMET\")\n# plt.xlabel(\"gen-MET $E_t$ [GeV]\")\n# plt.ylabel(\"profiled RMS $\\Delta E_t / E_t$\")\n# plt.xscale(\"log\")\n# plt.ylim(0,3)\n# cms_label()\n# plt.savefig(\"dqm_met_profileRMS_delta_et_Over_et_VS_et.pdf\", bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"# plot_dqm(\"DQMData/Run 1/ParticleFlow/Run summary/PFMETValidation/CompWithGenMET/profile_delta_et_VS_et_\", \"PFMET\")\n# plt.xlabel(\"gen-MET $E_t$ [GeV]\")\n# plt.ylabel(\"profiled $\\Delta E_t$ [GeV]\")\n# plt.xscale(\"log\")\n# plt.ylim(0, 80)\n# cms_label()\n# plt.savefig(\"dqm_met_delta_et_VS_et.pdf\", bbox_inches=\"tight\")",
"_____no_output_____"
],
[
"timing_output = \"\"\"\nNelem=1600 mean_time=5.92 ms stddev_time=5.03 ms mem_used=1018 MB\nNelem=1920 mean_time=6.57 ms stddev_time=1.01 ms mem_used=1110 MB\nNelem=2240 mean_time=6.92 ms stddev_time=0.81 ms mem_used=1127 MB\nNelem=2560 mean_time=7.37 ms stddev_time=0.66 ms mem_used=1136 MB\nNelem=2880 mean_time=8.17 ms stddev_time=0.56 ms mem_used=1123 MB\nNelem=3200 mean_time=8.88 ms stddev_time=1.09 ms mem_used=1121 MB\nNelem=3520 mean_time=9.51 ms stddev_time=0.65 ms mem_used=1121 MB\nNelem=3840 mean_time=10.48 ms stddev_time=0.93 ms mem_used=1255 MB\nNelem=4160 mean_time=11.05 ms stddev_time=0.87 ms mem_used=1255 MB\nNelem=4480 mean_time=12.07 ms stddev_time=0.81 ms mem_used=1230 MB\nNelem=4800 mean_time=12.92 ms stddev_time=0.89 ms mem_used=1230 MB\nNelem=5120 mean_time=13.44 ms stddev_time=0.75 ms mem_used=1230 MB\nNelem=5440 mean_time=14.07 ms stddev_time=0.78 ms mem_used=1230 MB\nNelem=5760 mean_time=15.00 ms stddev_time=0.84 ms mem_used=1230 MB\nNelem=6080 mean_time=15.74 ms stddev_time=1.05 ms mem_used=1230 MB\nNelem=6400 mean_time=16.32 ms stddev_time=1.30 ms mem_used=1230 MB\nNelem=6720 mean_time=17.24 ms stddev_time=0.99 ms mem_used=1230 MB\nNelem=7040 mean_time=17.74 ms stddev_time=0.85 ms mem_used=1230 MB\nNelem=7360 mean_time=18.59 ms stddev_time=1.04 ms mem_used=1230 MB\nNelem=7680 mean_time=19.33 ms stddev_time=0.93 ms mem_used=1499 MB\nNelem=8000 mean_time=20.00 ms stddev_time=1.06 ms mem_used=1499 MB\nNelem=8320 mean_time=20.55 ms stddev_time=1.13 ms mem_used=1499 MB\nNelem=8640 mean_time=21.10 ms stddev_time=0.90 ms mem_used=1499 MB\nNelem=8960 mean_time=22.88 ms stddev_time=1.24 ms mem_used=1499 MB\nNelem=9280 mean_time=23.44 ms stddev_time=1.14 ms mem_used=1499 MB\nNelem=9600 mean_time=23.93 ms stddev_time=1.04 ms mem_used=1499 MB\nNelem=9920 mean_time=24.75 ms stddev_time=0.91 ms mem_used=1499 MB\nNelem=10240 mean_time=25.47 ms stddev_time=1.33 ms mem_used=1499 MB\nNelem=10560 mean_time=26.29 ms stddev_time=1.33 ms mem_used=1499 MB\nNelem=10880 mean_time=26.72 ms stddev_time=1.18 ms mem_used=1490 MB\nNelem=11200 mean_time=29.50 ms stddev_time=2.60 ms mem_used=1502 MB\nNelem=11520 mean_time=28.50 ms stddev_time=0.91 ms mem_used=1491 MB\nNelem=11840 mean_time=29.11 ms stddev_time=1.14 ms mem_used=1491 MB\nNelem=12160 mean_time=30.01 ms stddev_time=1.15 ms mem_used=1499 MB\nNelem=12480 mean_time=30.55 ms stddev_time=0.94 ms mem_used=1499 MB\nNelem=12800 mean_time=31.31 ms stddev_time=1.08 ms mem_used=1499 MB\nNelem=13120 mean_time=32.61 ms stddev_time=1.19 ms mem_used=1499 MB\nNelem=13440 mean_time=33.37 ms stddev_time=1.01 ms mem_used=1499 MB\nNelem=13760 mean_time=34.13 ms stddev_time=1.18 ms mem_used=1499 MB\nNelem=14080 mean_time=34.73 ms stddev_time=1.40 ms mem_used=1499 MB\nNelem=14400 mean_time=35.79 ms stddev_time=1.70 ms mem_used=2036 MB\nNelem=14720 mean_time=36.68 ms stddev_time=1.37 ms mem_used=2036 MB\nNelem=15040 mean_time=37.17 ms stddev_time=0.97 ms mem_used=2036 MB\nNelem=15360 mean_time=38.73 ms stddev_time=1.19 ms mem_used=2036 MB\nNelem=15680 mean_time=39.80 ms stddev_time=1.04 ms mem_used=2036 MB\nNelem=16000 mean_time=40.87 ms stddev_time=1.46 ms mem_used=1996 MB\nNelem=16320 mean_time=41.89 ms stddev_time=1.01 ms mem_used=1996 MB\nNelem=16640 mean_time=43.36 ms stddev_time=1.08 ms mem_used=1996 MB\nNelem=16960 mean_time=44.87 ms stddev_time=1.35 ms mem_used=1996 MB\nNelem=17280 mean_time=46.04 ms stddev_time=0.96 ms mem_used=1996 MB\nNelem=17600 mean_time=47.96 ms stddev_time=1.47 ms mem_used=1996 MB\nNelem=17920 mean_time=49.01 ms 
stddev_time=1.35 ms mem_used=1996 MB\nNelem=18240 mean_time=50.04 ms stddev_time=1.34 ms mem_used=1956 MB\nNelem=18560 mean_time=51.34 ms stddev_time=1.49 ms mem_used=1956 MB\nNelem=18880 mean_time=52.16 ms stddev_time=1.20 ms mem_used=1956 MB\nNelem=19200 mean_time=53.19 ms stddev_time=1.20 ms mem_used=1956 MB\nNelem=19520 mean_time=54.03 ms stddev_time=0.96 ms mem_used=1956 MB\nNelem=19840 mean_time=55.68 ms stddev_time=1.05 ms mem_used=1956 MB\nNelem=20160 mean_time=56.88 ms stddev_time=1.12 ms mem_used=1956 MB\nNelem=20480 mean_time=57.49 ms stddev_time=1.50 ms mem_used=1956 MB\nNelem=20800 mean_time=60.40 ms stddev_time=3.51 ms mem_used=1959 MB\nNelem=21120 mean_time=61.30 ms stddev_time=3.90 ms mem_used=1959 MB\nNelem=21440 mean_time=60.74 ms stddev_time=1.05 ms mem_used=1948 MB\nNelem=21760 mean_time=61.66 ms stddev_time=1.29 ms mem_used=1948 MB\nNelem=22080 mean_time=63.35 ms stddev_time=1.11 ms mem_used=1948 MB\nNelem=22400 mean_time=64.70 ms stddev_time=1.16 ms mem_used=1948 MB\nNelem=22720 mean_time=65.63 ms stddev_time=0.95 ms mem_used=1948 MB\nNelem=23040 mean_time=67.09 ms stddev_time=1.02 ms mem_used=1948 MB\nNelem=23360 mean_time=68.40 ms stddev_time=1.15 ms mem_used=1948 MB\nNelem=23680 mean_time=69.76 ms stddev_time=0.88 ms mem_used=1948 MB\nNelem=24000 mean_time=71.55 ms stddev_time=0.94 ms mem_used=1948 MB\nNelem=24320 mean_time=73.04 ms stddev_time=1.46 ms mem_used=1948 MB\nNelem=24640 mean_time=74.53 ms stddev_time=1.28 ms mem_used=1948 MB\nNelem=24960 mean_time=76.03 ms stddev_time=1.07 ms mem_used=1948 MB\nNelem=25280 mean_time=77.59 ms stddev_time=0.88 ms mem_used=1948 MB\n\"\"\"",
"_____no_output_____"
],
[
"time_x = []\ntime_y = []\ntime_y_err = []\ngpu_mem_use = []\nfor line in timing_output.split(\"\\n\"):\n if len(line)>0:\n spl = line.split()\n time_x.append(int(spl[0].split(\"=\")[1]))\n time_y.append(float(spl[1].split(\"=\")[1]))\n time_y_err.append(float(spl[3].split(\"=\")[1]))\n gpu_mem_use.append(float(spl[5].split(\"=\")[1]))",
"_____no_output_____"
],
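[
"# Quick scaling check (an added sketch, not part of the original notebook): fit a line to the\n# timing points above; the slope is the average inference cost per PFElement.\nslope, intercept = np.polyfit(time_x, time_y, 1)\nprint(f\"~{1000 * slope:.1f} us per PFElement, {intercept:.1f} ms fixed overhead\")",
"_____no_output_____"
],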
[
"import glob\nnelem = []\nfor fi in glob.glob(\"../data/TTbar_14TeV_TuneCUETP8M1_cfi/raw/*.pkl\"):\n d = pickle.load(open(fi, \"rb\"))\n for elem in d:\n X = elem[\"Xelem\"][(elem[\"Xelem\"][\"typ\"]!=2)&(elem[\"Xelem\"][\"typ\"]!=3)]\n nelem.append(X.shape[0])",
"_____no_output_____"
],
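[
"# Summarize the PFElement multiplicity (an added sketch): this mean +/- std band is what the\n# dashed lines mark in the scaling plots below.\nprint(f\"nelem: mean = {np.mean(nelem):.0f}, std = {np.std(nelem):.0f}\")",
"_____no_output_____"
],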
[
"plt.figure(figsize=(5,5))\nax = plt.axes()\nplt.hist(nelem, bins=np.linspace(2000,6000,100));\nplt.ylabel(\"Number of events / bin\")\nplt.xlabel(\"PFElements per event\")\ncms_label()\nsample_label(ax, physics_process=\"ttbar\")",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 3))\nplt.errorbar(time_x, time_y, yerr=time_y_err, marker=\".\", label=\"MLPF\")\nplt.axvline(np.mean(nelem)-np.std(nelem), color=\"black\", ls=\"--\", lw=1.0, label=r\"$t\\bar{t}$+PU Run 3\")\nplt.axvline(np.mean(nelem)+np.std(nelem), color=\"black\", ls=\"--\", lw=1.0)\n#plt.xticks(time_x, time_x);\nplt.xlim(0,30000)\nplt.ylim(0,100)\nplt.ylabel(\"Average runtime per event [ms]\")\nplt.xlabel(\"PFElements per event\")\nplt.legend(frameon=False)\ncms_label(x1=0.17, x2=0.8)\nplt.savefig(\"runtime_scaling.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"runtime_scaling.png\", bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 3))\nplt.plot(time_x, gpu_mem_use, marker=\".\", label=\"MLPF\")\nplt.axvline(np.mean(nelem)-np.std(nelem), color=\"black\", ls=\"--\", lw=1.0, label=r\"$t\\bar{t}$+PU Run 3\")\nplt.axvline(np.mean(nelem)+np.std(nelem), color=\"black\", ls=\"--\", lw=1.0)\n#plt.xticks(time_x, time_x);\nplt.xlim(0,30000)\nplt.ylim(0,3000)\nplt.ylabel(\"Maximum GPU memory used [MB]\")\nplt.xlabel(\"PFElements per event\")\nplt.legend(frameon=False, loc=4)\ncms_label(x1=0.17, x2=0.8)\nplt.savefig(\"memory_scaling.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"memory_scaling.png\", bbox_inches=\"tight\", dpi=300)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52a5b9f972b6cb4302d95282aaa920cde9c2248
| 296,549 |
ipynb
|
Jupyter Notebook
|
05-models/02-experiment-design/07.1-run_gbt_model_strsamp-AG-ds3-cs3.ipynb
|
pierrepita/super-srag-ai-covid19-google-cidacs
|
6431b410bc2490405a512f2d9906ba8addc01ccb
|
[
"MIT"
] | 1 |
2021-06-04T13:02:04.000Z
|
2021-06-04T13:02:04.000Z
|
05-models/02-experiment-design/07.1-run_gbt_model_strsamp-AG-ds3-cs3.ipynb
|
pierrepita/super-srag-ai-covid19-google-cidacs
|
6431b410bc2490405a512f2d9906ba8addc01ccb
|
[
"MIT"
] | null | null | null |
05-models/02-experiment-design/07.1-run_gbt_model_strsamp-AG-ds3-cs3.ipynb
|
pierrepita/super-srag-ai-covid19-google-cidacs
|
6431b410bc2490405a512f2d9906ba8addc01ccb
|
[
"MIT"
] | 1 |
2021-07-12T19:21:27.000Z
|
2021-07-12T19:21:27.000Z
| 296,549 | 296,549 | 0.686895 |
[
[
[
"import pandas as pd\nimport pyspark.sql.functions as F\nfrom datetime import datetime\nfrom pyspark.sql.types import *\nfrom pyspark import StorageLevel\n\nimport numpy as np\npd.set_option(\"display.max_rows\", 1000)\npd.set_option(\"display.max_columns\", 1000)\npd.set_option(\"mode.chained_assignment\", None)",
"_____no_output_____"
],
[
"from pyspark.ml import Pipeline\nfrom pyspark.ml.classification import GBTClassifier\nfrom pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer\n# from pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.feature import OneHotEncoderEstimator, StringIndexer, VectorAssembler\n\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\n\nfrom pyspark.sql import Row\nfrom pyspark.ml.linalg import Vectors",
"_____no_output_____"
],
[
"# !pip install scikit-plot",
"_____no_output_____"
],
[
"import sklearn\nimport scikitplot as skplt\nfrom sklearn.metrics import classification_report, confusion_matrix, precision_score",
"_____no_output_____"
]
],
[
[
"<hr />\n<hr />\n<hr />",
"_____no_output_____"
]
],
[
[
"result_schema = StructType([\n StructField('experiment_filter', StringType(), True),\n StructField('undersampling_method', StringType(), True),\n StructField('undersampling_column', StringType(), True),\n StructField('filename', StringType(), True),\n StructField('experiment_id', StringType(), True),\n StructField('n_covid', IntegerType(), True),\n StructField('n_not_covid', IntegerType(), True),\n StructField('model_name', StringType(), True),\n StructField('model_seed', StringType(), True),\n StructField('model_maxIter', IntegerType(), True),\n StructField('model_maxDepth', IntegerType(), True),\n StructField('model_maxBins', IntegerType(), True),\n StructField('model_minInstancesPerNode', IntegerType(), True),\n StructField('model_minInfoGain', FloatType(), True),\n StructField('model_featureSubsetStrategy', StringType(), True),\n StructField('model_n_estimators', IntegerType(), True),\n StructField('model_learning_rate', FloatType(), True),\n StructField('model_impurity', StringType(), True),\n StructField('model_AUC_ROC', StringType(), True),\n StructField('model_AUC_PR', StringType(), True),\n StructField('model_covid_precision', StringType(), True),\n StructField('model_covid_recall', StringType(), True),\n StructField('model_covid_f1', StringType(), True),\n StructField('model_not_covid_precision', StringType(), True),\n StructField('model_not_covid_recall', StringType(), True),\n StructField('model_not_covid_f1', StringType(), True),\n StructField('model_avg_precision', StringType(), True),\n StructField('model_avg_recall', StringType(), True),\n StructField('model_avg_f1', StringType(), True),\n StructField('model_avg_acc', StringType(), True),\n StructField('model_TP', StringType(), True),\n StructField('model_TN', StringType(), True),\n StructField('model_FN', StringType(), True),\n StructField('model_FP', StringType(), True),\n StructField('model_time_exec', StringType(), True),\n StructField('model_col_set', StringType(), True)\n ])",
"_____no_output_____"
]
],
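[
[
"# Editorial sketch (an assumption; result persistence happens outside this excerpt):\n# run_gbt below returns a plain dict whose 'params' sub-dict has to be flattened\n# (maxIter -> model_maxIter, ...) before the results can be written with result_schema,\n# and fields the dict does not fill (e.g. undersampling_column, model_seed) need defaults:\n# rows = [{**{k: v for k, v in r.items() if k != 'params'},\n#          **{'model_' + k: v for k, v in r['params'].items()}} for r in experiments]\n# results_df = spark.createDataFrame(rows, schema=result_schema)",
"_____no_output_____"
]
],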
[
[
"<hr />\n<hr />\n<hr />",
"_____no_output_____"
]
],
[
[
"# undersamp_col = ['03-STRSAMP-AG', '04-STRSAMP-EW'] \n# dfs = ['ds-1', 'ds-2', 'ds-3']\n# cols_sets = ['cols_set_1', 'cols_set_2', 'cols_set_3']\n\nundersamp_col = ['03-STRSAMP-AG']\ndfs = ['ds-3']\ncols_sets = ['cols_set_3']",
"_____no_output_____"
],
[
"# lists of params\nmodel_maxIter = [20, 50, 100] \nmodel_maxDepth = [3, 5, 7] \nmodel_maxBins = [32, 64] \n# model_learningRate = [0.01, 0.1, 0.5]\n# model_loss = ['logLoss', 'leastSquaresError', 'leastAbsoluteError']\n\n\n\nlist_of_param_dicts = []\n\nfor maxIter in model_maxIter:\n for maxDepth in model_maxDepth:\n for maxBins in model_maxBins: \n params_dict = {}\n params_dict['maxIter'] = maxIter\n params_dict['maxDepth'] = maxDepth\n params_dict['maxBins'] = maxBins\n list_of_param_dicts.append(params_dict)\n\nprint(\"There is {} set of params.\".format(len(list_of_param_dicts)))\n# list_of_param_dicts",
"There is 18 set of params.\n"
],
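[
"# Alternative sketch (not used below): pyspark.ml's ParamGridBuilder, already imported above,\n# expresses the same grid; `gbt_est` is a throwaway estimator that only owns the params. The\n# plain-dict grid is kept so each (dataset, params) run can be logged individually rather\n# than handled inside a CrossValidator.\ngbt_est = GBTClassifier()\nparamGrid = (ParamGridBuilder()\n             .addGrid(gbt_est.maxIter, [20, 50, 100])\n             .addGrid(gbt_est.maxDepth, [3, 5, 7])\n             .addGrid(gbt_est.maxBins, [32, 64])\n             .build())\nprint(len(paramGrid))  # 18, matching list_of_param_dicts",
"_____no_output_____"
],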
[
"prefix = 'gs://ai-covid19-datalake/trusted/experiment_map/'",
"_____no_output_____"
]
],
[
[
"<hr />\n<hr />\n<hr />",
"_____no_output_____"
]
],
[
[
"# filename = 'gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-1/cols_set_1/experiment0.parquet'\n# df = spark.read.parquet(filename)\n# df.limit(2).toPandas()",
"_____no_output_____"
],
[
"# params_dict = {'maxIter': 100,\n# 'maxDepth': 3,\n# 'maxBins': 32,\n# 'learningRate': 0.5,\n# 'loss': 'leastAbsoluteError'}\n# cols = 'cols_set_1'\n# experiment_filter = 'ds-1'\n# undersampling_method = '03-STRSAMP-AG', \n# experiment_id = 0",
"_____no_output_____"
],
[
"# run_gbt(df, params_dict, cols, filename, experiment_filter, undersampling_method, experiment_id)",
"_____no_output_____"
]
],
[
[
"<hr />\n<hr />\n<hr />",
"_____no_output_____"
]
],
[
[
"def run_gbt(exp_df, params_dict, cols, filename, experiment_filter, \n undersampling_method, experiment_id):\n import time\n start_time = time.time()\n \n n_covid = exp_df.filter(F.col('CLASSI_FIN') == 1.0).count()\n n_not_covid = exp_df.filter(F.col('CLASSI_FIN') == 0.0).count()\n \n \n id_cols = ['NU_NOTIFIC', 'CLASSI_FIN']\n\n labelIndexer = StringIndexer(inputCol=\"CLASSI_FIN\", outputCol=\"indexedLabel\").fit(exp_df) \n \n input_cols = [x for x in exp_df.columns if x not in id_cols]\n assembler = VectorAssembler(inputCols = input_cols, outputCol= 'features')\n exp_df = assembler.transform(exp_df)\n \n # Automatically identify categorical features, and index them.\n # Set maxCategories so features with > 4 distinct values are treated as continuous.\n featureIndexer = VectorIndexer(inputCol=\"features\", outputCol=\"indexedFeatures\", maxCategories=30).fit(exp_df)\n \n # Split the data into training and test sets (30% held out for testing)\n (trainingData, testData) = exp_df.randomSplit([0.7, 0.3])\n trainingData = trainingData.persist(StorageLevel.MEMORY_ONLY)\n testData = testData.persist(StorageLevel.MEMORY_ONLY)\n \n # Train a RandomForest model.\n gbt = GBTClassifier(labelCol = \"indexedLabel\", featuresCol = \"indexedFeatures\",\n maxIter = params_dict['maxIter'],\n maxDepth = params_dict['maxDepth'],\n maxBins = params_dict['maxBins'])\n \n # Convert indexed labels back to original labels.\n labelConverter = IndexToString(inputCol=\"prediction\", outputCol=\"predictedLabel\",\n labels=labelIndexer.labels)\n\n # Chain indexers and forest in a Pipeline\n pipeline = Pipeline(stages=[labelIndexer, featureIndexer, gbt, labelConverter])\n\n # Train model. This also runs the indexers.\n model = pipeline.fit(trainingData)\n \n # Make predictions.\n predictions = model.transform(testData) \n \n \n pred = predictions.select(['CLASSI_FIN', 'predictedLabel'])\\\n .withColumn('predictedLabel', F.col('predictedLabel').cast('double'))\\\n .withColumn('predictedLabel', F.when(F.col('predictedLabel') == 1.0, 'covid').otherwise('n-covid'))\\\n .withColumn('CLASSI_FIN', F.when(F.col('CLASSI_FIN') == 1.0, 'covid').otherwise('n-covid'))\\\n .toPandas()\n\n y_true = pred['CLASSI_FIN'].tolist()\n y_pred = pred['predictedLabel'].tolist()\n \n report = classification_report(y_true, y_pred, output_dict=True)\n \n \n evaluator_ROC = BinaryClassificationEvaluator(labelCol=\"indexedLabel\", rawPredictionCol=\"prediction\", metricName=\"areaUnderROC\")\n accuracy_ROC = evaluator_ROC.evaluate(predictions)\n\n\n \n evaluator_PR = BinaryClassificationEvaluator(labelCol=\"indexedLabel\", rawPredictionCol=\"prediction\", metricName=\"areaUnderPR\")\n accuracy_PR = evaluator_PR.evaluate(predictions)\n \n conf_matrix = confusion_matrix(y_true, y_pred)\n\n result_dict = {}\n \n result_dict['experiment_filter'] = experiment_filter\n result_dict['undersampling_method'] = undersampling_method\n result_dict['filename'] = filename\n result_dict['experiment_id'] = experiment_id\n result_dict['n_covid'] = n_covid\n result_dict['n_not_covid'] = n_not_covid\n result_dict['model_name'] = 'GBT'\n result_dict['params'] = params_dict\n result_dict['model_AUC_ROC'] = accuracy_ROC\n result_dict['model_AUC_PR'] = accuracy_PR\n result_dict['model_covid_precision'] = report['covid']['precision']\n result_dict['model_covid_recall'] = report['covid']['recall']\n result_dict['model_covid_f1'] = report['covid']['f1-score']\n result_dict['model_not_covid_precision'] = report['n-covid']['precision']\n result_dict['model_not_covid_recall'] 
= report['n-covid']['recall']\n result_dict['model_not_covid_f1'] = report['n-covid']['f1-score']\n result_dict['model_avg_precision'] = report['macro avg']['precision']\n result_dict['model_avg_recall'] = report['macro avg']['recall']\n result_dict['model_avg_f1'] = report['macro avg']['f1-score']\n result_dict['model_avg_acc'] = report['accuracy']\n result_dict['model_TP'] = conf_matrix[0][0]\n result_dict['model_TN'] = conf_matrix[1][1]\n result_dict['model_FN'] = conf_matrix[0][1]\n result_dict['model_FP'] = conf_matrix[1][0]\n result_dict['model_time_exec'] = time.time() - start_time\n result_dict['model_col_set'] = cols\n \n return result_dict",
"_____no_output_____"
]
],
[
[
"<hr />\n<hr />\n<hr />",
"_____no_output_____"
],
[
"# Running GBT on 10 samples for each experiment\n### 3x col sets -> ['cols_set_1', 'cols_set_2', 'cols_set_3']\n### 3x model_maxIter -> [100, 200, 300]\n### 3x model_maxDepth -> [5, 10, 15]\n### 3x model_maxBins -> [16, 32, 64]\nTotal: 10 * 3 * 3 * 3 * 3 = 810",
"_____no_output_____"
]
],
[
[
"experiments = []",
"_____no_output_____"
]
],
[
[
"### Datasets: strat_samp_lab_agegrp",
"_____no_output_____"
]
],
[
[
"for uc in undersamp_col: \n for ds in dfs:\n for col_set in cols_sets:\n for params_dict in list_of_param_dicts: \n for id_exp in range(50):\n filename = prefix + uc + '/' + ds + '/' + col_set + '/' + 'experiment' + str(id_exp) + '.parquet'\n exp_dataframe = spark.read.parquet(filename)\n# if 'SG_UF_NOT' in exp_dataframe.columns:\n# exp_dataframe = exp_dataframe.withColumn('SG_UF_NOT', F.col('SG_UF_NOT').cast('float'))\n print('read {}'.format(filename))\n \n undersampling_method = uc\n experiment_filter = ds\n experiment_id = id_exp\n\n try: \n model = run_gbt(exp_dataframe, params_dict, col_set, filename, experiment_filter, undersampling_method, experiment_id)\n experiments.append(model)\n\n print(\"Parameters ==> {}\\n Results: \\n AUC_PR: {} \\n Precision: {} \\n Time: {}\".format(str(params_dict), str(model['model_AUC_PR']), str(model['model_avg_precision']), str(model['model_time_exec'])))\n print('=========================== \\n')\n except:\n print('=========== W A R N I N G =========== \\n')\n print('Something wrong with the exp: {}, {}, {}'.format(filename, params_dict, col_set))",
"read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.902442743520048 \n Precision: 0.9307826911496015 \n Time: 27.197532415390015\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8981630098121935 \n Precision: 0.9296620650444807 \n Time: 19.599364042282104\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9010682316934415 \n Precision: 0.9311387080518003 \n Time: 19.544635772705078\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9007662934392351 \n Precision: 0.9316127911018421 \n Time: 20.516762018203735\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9056199549941559 \n Precision: 0.9325512746841362 \n Time: 18.563634872436523\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.903309365465197 \n Precision: 0.9320758084838303 \n Time: 18.39739418029785\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.906922869766107 \n Precision: 0.9336778736240112 \n Time: 18.88933277130127\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8996913679122375 \n Precision: 0.9305834736970453 \n Time: 17.993168830871582\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8947527143057171 \n Precision: 0.9273170545119891 \n Time: 17.730310678482056\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8995229828227803 \n Precision: 0.9295996867839271 \n Time: 17.63637614250183\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.903200534012848 \n Precision: 0.9323069036012904 \n Time: 17.997276067733765\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8971882959738281 \n Precision: 0.9288925977361033 \n Time: 
17.63829016685486\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8961129794537617 \n Precision: 0.9265216714835962 \n Time: 18.06064224243164\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.895926062157061 \n Precision: 0.9283992182001009 \n Time: 18.4149808883667\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9004735132977751 \n Precision: 0.930006885858901 \n Time: 18.096524715423584\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9006079752688219 \n Precision: 0.9298373280047474 \n Time: 19.13778829574585\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8988034979213085 \n Precision: 0.9291218538648331 \n Time: 18.854344129562378\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.896517888166792 \n Precision: 0.9280381574193153 \n Time: 18.726903200149536\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8981317759693237 \n Precision: 0.9291015987964408 \n Time: 17.8125057220459\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8981243585518983 \n Precision: 0.9291428262106793 \n Time: 19.196642875671387\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9033394640807118 \n Precision: 0.9314012845019921 \n Time: 18.833765506744385\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9015976755042925 \n Precision: 0.9312419044835817 \n Time: 18.17951250076294\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9084297410208522 \n Precision: 0.9344251663207392 \n Time: 17.838440418243408\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 
0.8987748650323186 \n Precision: 0.9289566623697536 \n Time: 17.96646475791931\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9003117776568031 \n Precision: 0.9302690536755405 \n Time: 17.77174735069275\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9012032692598759 \n Precision: 0.9323158593231586 \n Time: 17.726816415786743\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9000790247569235 \n Precision: 0.9303619964596537 \n Time: 18.08605933189392\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9030852629319666 \n Precision: 0.9312891560205652 \n Time: 17.864402294158936\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8991406216268986 \n Precision: 0.9298434659416157 \n Time: 17.743768453598022\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9006635668609376 \n Precision: 0.9308718165919292 \n Time: 17.728760242462158\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9012277854695911 \n Precision: 0.9310157635823075 \n Time: 17.736851692199707\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9019500169091368 \n Precision: 0.9321082388790645 \n Time: 17.699691772460938\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9014570888859393 \n Precision: 0.9318769280913267 \n Time: 19.945068359375\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9001719092917283 \n Precision: 0.9299694320617118 \n Time: 17.89328646659851\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8979430555636967 \n Precision: 0.9300850869344479 \n Time: 20.428072929382324\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 20, 
'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8990864587558812 \n Precision: 0.9296418703452132 \n Time: 18.230851888656616\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8973948183359302 \n Precision: 0.9290093879830181 \n Time: 18.32573413848877\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9002289679326229 \n Precision: 0.9292559200188897 \n Time: 17.983446836471558\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9002901890251731 \n Precision: 0.9296505750253912 \n Time: 17.590157747268677\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8992757808867264 \n Precision: 0.9303357096412226 \n Time: 17.932886123657227\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8959616886744601 \n Precision: 0.9279788101619066 \n Time: 17.94200849533081\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9049817771016628 \n Precision: 0.9324056512746605 \n Time: 18.779808282852173\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9016460829061375 \n Precision: 0.9317923209126797 \n Time: 17.20854902267456\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9023988562000475 \n Precision: 0.9314342730579768 \n Time: 17.457520008087158\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9020742860707756 \n Precision: 0.9306418874691509 \n Time: 17.967145442962646\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.899537710143535 \n Precision: 0.9301112871370062 \n Time: 17.61592435836792\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9022195454228539 \n Precision: 0.9318230171260076 \n Time: 17.2732572555542\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8962384222428865 \n Precision: 0.9278437644846944 \n Time: 17.831281661987305\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9037356639612417 \n Precision: 0.9318526909943261 \n Time: 17.077885627746582\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9067590100774011 \n Precision: 0.9342024594727322 \n Time: 17.40255880355835\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8973649298185208 \n Precision: 0.9294523112828184 \n Time: 18.320077896118164\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9022022632412188 \n Precision: 0.9311561845991361 \n Time: 17.604114770889282\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9003782176468059 \n Precision: 0.9313252104550225 \n Time: 17.509179830551147\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9011242480307061 \n Precision: 0.93176083086001 \n Time: 17.204569578170776\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9060947171133308 \n Precision: 0.9338259274374361 \n Time: 17.541428327560425\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9027124255343646 \n Precision: 0.9315448838933873 \n Time: 17.26843810081482\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9052531977628299 \n Precision: 0.9324765860729569 \n Time: 17.50411868095398\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9052855373664984 \n Precision: 0.9335409778017598 \n Time: 17.541986227035522\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8983731906634413 \n Precision: 0.9300664358049164 \n Time: 
17.25502371788025
=========================== 

[Output condensed for readability. The raw stream repeats one block per experiment file, always in this format:

read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment<N>.parquet
Parameters ==> {'maxIter': ..., 'maxDepth': ..., 'maxBins': ...}
 Results: 
 AUC_PR: ... 
 Precision: ... 
 Time: ...
=========================== 

Observed ranges for the blocks condensed here:

maxIter  maxDepth  maxBins  experiments        AUC_PR           Precision        Time (s)
20       3         64       9-49               0.8934 - 0.9037  0.9268 - 0.9329  ~17-22
20       5         32       0-49               0.8962 - 0.9088  0.9303 - 0.9366  ~20-25
20       5         64       0-49 (no exp. 44)  0.8952 - 0.9076  0.9297 - 0.9368  ~20-26, outliers ~40/205/630

Within each configuration, the 50 experiment files (apparently stratified samples — see the STRSAMP path segment) move both metrics only in the third decimal place. Two anomalies in the {'maxIter': 20, 'maxDepth': 5, 'maxBins': 64} run: experiment44.parquet never appears in the stream, and three runtimes (~40 s, ~205 s, ~630 s) stand far above the ~20 s baseline, which looks more like transient cluster contention than a parameter effect.]
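The driver code for this sweep is not included in the captured output. Below is a minimal sketch of a loop that would print blocks in exactly this format, assuming PySpark with GCS access; the label/feature column names, the 80/20 split, and the choice of evaluators are assumptions, not taken from the notebook.

# Minimal sketch (assumptions flagged in comments) of a sweep that prints
# blocks in the format seen in this log. Not the original driver code.
import time
from itertools import product
from pyspark.sql import SparkSession
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.evaluation import (BinaryClassificationEvaluator,
                                   MulticlassClassificationEvaluator)

spark = SparkSession.builder.getOrCreate()
BASE = "gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3"

# Grid order matches the log: maxIter outermost, then maxDepth, then maxBins.
for max_iter, max_depth, max_bins in product([20, 50], [3, 5, 7], [32, 64]):
    params = {"maxIter": max_iter, "maxDepth": max_depth, "maxBins": max_bins}
    for n in range(50):                     # one parquet file per resample
        path = f"{BASE}/experiment{n}.parquet"
        print(f"read {path}")
        print(f"Parameters ==> {params}")
        df = spark.read.parquet(path)
        train, test = df.randomSplit([0.8, 0.2], seed=n)  # assumed split
        start = time.time()
        model = GBTClassifier(labelCol="label", featuresCol="features",
                              **params).fit(train)
        pred = model.transform(test)
        auc_pr = BinaryClassificationEvaluator(
            labelCol="label", metricName="areaUnderPR").evaluate(pred)
        prec = MulticlassClassificationEvaluator(
            labelCol="label", metricName="weightedPrecision").evaluate(pred)
        print(f" Results: \n AUC_PR: {auc_pr} \n Precision: {prec} "
              f"\n Time: {time.time() - start}")
        print("=========================== \n")

Reading each parquet inside the timed region would fold I/O into the reported Time, so the sketch starts the clock only at fit time; whether the original notebook did the same cannot be confirmed from the log alone.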
[Condensed, continued:

maxIter  maxDepth  maxBins  experiments  AUC_PR           Precision        Time (s)
20       7         32       0-49         0.9007 - 0.9115  0.9324 - 0.9396  ~25-36
20       7         64       0-49         0.8987 - 0.9123  0.9322 - 0.9387  ~25-34

The maxDepth 7 configurations give the best AUC_PR and Precision of the sweep so far, at roughly 25-35 s per fit; widening maxBins from 32 to 64 changes the metrics very little at any depth.]
26.60063672065735\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.902888902812495 \n Precision: 0.9339675175599458 \n Time: 26.819406270980835\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9039501481208058 \n Precision: 0.9343725026140288 \n Time: 26.5902156829834\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9088691476514265 \n Precision: 0.9361693678738914 \n Time: 26.231446981430054\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9045032745236069 \n Precision: 0.9347651166020003 \n Time: 26.144397020339966\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 20, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9063645102289627 \n Precision: 0.9352999025727392 \n Time: 26.026703357696533\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9030158143815592 \n Precision: 0.933189642431824 \n Time: 28.329811096191406\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8986317055330737 \n Precision: 0.9316269074098872 \n Time: 29.304169178009033\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9048952970846766 \n Precision: 0.9347633205422514 \n Time: 29.307700157165527\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8991015721744472 \n Precision: 0.9313201584306261 \n Time: 28.72466802597046\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9066630415730487 \n Precision: 0.9341414813956208 \n Time: 28.934213161468506\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.902604357996642 \n Precision: 0.9334442236212279 \n Time: 30.102729558944702\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 
0.9040944220833179 \n Precision: 0.9337442605731997 \n Time: 50.47849130630493\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9059798351910442 \n Precision: 0.935560699632831 \n Time: 28.4879207611084\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8971741361639893 \n Precision: 0.9301663390253176 \n Time: 29.080363035202026\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9017701726274254 \n Precision: 0.9318156234039596 \n Time: 29.350586891174316\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8974593918017629 \n Precision: 0.9312005258393958 \n Time: 29.479079008102417\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8987278055112997 \n Precision: 0.9320869877905025 \n Time: 28.9401798248291\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9033383606209129 \n Precision: 0.9321164010204692 \n Time: 30.042563915252686\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9029365475406318 \n Precision: 0.9332039743344835 \n Time: 29.50516700744629\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9051694155958748 \n Precision: 0.9355773000056988 \n Time: 29.096553564071655\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8998380173987404 \n Precision: 0.9313479777876063 \n Time: 29.825077295303345\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9010946174728676 \n Precision: 0.9320172138846685 \n Time: 29.85160803794861\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9005839845415469 \n Precision: 0.9311138236474663 \n Time: 29.840870141983032\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 50, 
'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9028400089716069 \n Precision: 0.9330938398114053 \n Time: 29.823240518569946\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8987097731536309 \n Precision: 0.931214909601209 \n Time: 29.266785860061646\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9031762187442287 \n Precision: 0.9342483771544472 \n Time: 29.539528608322144\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8995818611841622 \n Precision: 0.9317684705628753 \n Time: 29.35168719291687\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9045634972222225 \n Precision: 0.9332074645395033 \n Time: 28.876226663589478\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8990891340572293 \n Precision: 0.9324470537139288 \n Time: 29.044886589050293\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9014619944676512 \n Precision: 0.933360606411718 \n Time: 29.36557126045227\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8988201966270636 \n Precision: 0.9312908455887376 \n Time: 29.27629828453064\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9042891940579122 \n Precision: 0.9340931412392874 \n Time: 29.257377862930298\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9025662269491691 \n Precision: 0.9326298988573252 \n Time: 30.12704038619995\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.905124638951215 \n Precision: 0.9344423248469514 \n Time: 29.31664729118347\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9018958713244559 \n Precision: 0.9327109543248167 \n Time: 29.250138521194458\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9010712233248145 \n Precision: 0.9320343073108581 \n Time: 28.626305103302002\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9032911791527783 \n Precision: 0.9340769741051387 \n Time: 28.475315809249878\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.900529831128307 \n Precision: 0.9316345864277495 \n Time: 29.180243253707886\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9041238441005071 \n Precision: 0.9347656929345699 \n Time: 29.3738694190979\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9005074279556393 \n Precision: 0.9322514223523835 \n Time: 28.488032817840576\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9025806072504652 \n Precision: 0.9333446724873677 \n Time: 28.420722723007202\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9012930258079281 \n Precision: 0.9330962628781267 \n Time: 29.180593490600586\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9018749467073452 \n Precision: 0.9322225673801234 \n Time: 32.54806065559387\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9000579884031578 \n Precision: 0.9319212609603571 \n Time: 29.883920192718506\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.901292501758621 \n Precision: 0.9333729358876971 \n Time: 29.19738221168518\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9017818699084313 \n Precision: 0.9319486296590376 \n Time: 28.882147073745728\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9035923932323373 \n Precision: 0.9329550208760855 \n Time: 
28.60904359817505\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9010324103154127 \n Precision: 0.9331963145746429 \n Time: 28.683225393295288\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9000204091530988 \n Precision: 0.9318361541530582 \n Time: 29.58707904815674\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8959558716532878 \n Precision: 0.9287185206107051 \n Time: 30.628731727600098\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9017381364305387 \n Precision: 0.9322303429387533 \n Time: 28.517988443374634\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9007687260054934 \n Precision: 0.9324546205869249 \n Time: 27.950455904006958\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9005734387852748 \n Precision: 0.9310680664613249 \n Time: 28.283472061157227\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9046820323514717 \n Precision: 0.9335793820343254 \n Time: 31.014434814453125\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9017374229332509 \n Precision: 0.9316456907722279 \n Time: 28.0007643699646\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9019696981076862 \n Precision: 0.9336694429804454 \n Time: 28.974740266799927\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8977212928996083 \n Precision: 0.93102752564003 \n Time: 28.424745559692383\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8969231149064412 \n Precision: 0.931349877773628 \n Time: 29.092456817626953\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 
0.9018671739644937 \n Precision: 0.9330076725968701 \n Time: 28.227748155593872\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9028737655124611 \n Precision: 0.9326108663111061 \n Time: 28.084880352020264\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9041934037927993 \n Precision: 0.9334960456079812 \n Time: 27.869682788848877\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9000084375117015 \n Precision: 0.9321869689656583 \n Time: 29.20649552345276\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8963568549956892 \n Precision: 0.9309634999610396 \n Time: 29.917954921722412\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8982732889250413 \n Precision: 0.9310542345122766 \n Time: 28.933663845062256\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9005613566454761 \n Precision: 0.9323770524735129 \n Time: 28.95972776412964\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.90235042652817 \n Precision: 0.9343646978471407 \n Time: 29.568293571472168\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9007159064025985 \n Precision: 0.9329172173564011 \n Time: 29.127377033233643\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.903802113166882 \n Precision: 0.9337114336510319 \n Time: 29.29978346824646\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.899381161325923 \n Precision: 0.9311198185124797 \n Time: 27.932926177978516\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9026500500227614 \n Precision: 0.9337569085302498 \n Time: 28.379727363586426\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 50, 
'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8971337391879096 \n Precision: 0.9298566118172038 \n Time: 28.887584924697876\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9003809424203981 \n Precision: 0.931871353164351 \n Time: 28.45981526374817\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.902976510541663 \n Precision: 0.9328884870523839 \n Time: 28.60584282875061\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9011022797528689 \n Precision: 0.9320605700553684 \n Time: 29.05706548690796\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9042791649794357 \n Precision: 0.9339938962956114 \n Time: 29.386688470840454\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9046438532946512 \n Precision: 0.9350580993329951 \n Time: 29.07602095603943\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9006010853827658 \n Precision: 0.9320826679029992 \n Time: 29.49001717567444\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9020637263603906 \n Precision: 0.9327265727889946 \n Time: 28.791438817977905\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8979341075769163 \n Precision: 0.9315320764783144 \n Time: 28.958894729614258\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8988050858001518 \n Precision: 0.9314564376431832 \n Time: 29.4800865650177\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.901958752864911 \n Precision: 0.9326272801516728 \n Time: 29.401638507843018\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9035746254655127 \n Precision: 0.9331487297557206 \n Time: 28.90970230102539\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9007348663639612 \n Precision: 0.9323213587827492 \n Time: 28.99397373199463\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8983266018466245 \n Precision: 0.9308754899478926 \n Time: 29.39650297164917\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8999315358728067 \n Precision: 0.9326718859402405 \n Time: 31.440719842910767\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9006330777177066 \n Precision: 0.9319806722845967 \n Time: 29.379993438720703\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.899807952807626 \n Precision: 0.9322291708719441 \n Time: 30.00290274620056\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.901790063344893 \n Precision: 0.9317167468110473 \n Time: 28.560813665390015\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9015771385673506 \n Precision: 0.9322853017175863 \n Time: 29.46921706199646\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8998766203279834 \n Precision: 0.9314218552474438 \n Time: 28.508324146270752\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8984660225203696 \n Precision: 0.9316141778290152 \n Time: 32.04293489456177\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9007444174547632 \n Precision: 0.9320488843653387 \n Time: 29.847065448760986\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9051811664236923 \n Precision: 0.9348021788887186 \n Time: 28.943710565567017\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9032658756351651 \n Precision: 0.9327836309375973 \n Time: 
28.48950219154358\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9054076965219291 \n Precision: 0.9350349276749081 \n Time: 28.93208885192871\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9033292842059338 \n Precision: 0.9332317379851474 \n Time: 28.855992078781128\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9031794685570265 \n Precision: 0.9336769443748778 \n Time: 28.446962594985962\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9037526889027492 \n Precision: 0.9347662661370392 \n Time: 28.459949731826782\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9021497315330208 \n Precision: 0.9336862872707103 \n Time: 28.469974279403687\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9015756955755516 \n Precision: 0.9326743057542783 \n Time: 28.695394277572632\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.904826248194202 \n Precision: 0.9334517656503521 \n Time: 30.413798809051514\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9010680548236663 \n Precision: 0.9321504641811889 \n Time: 28.63152766227722\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9061097942581327 \n Precision: 0.9338843659518637 \n Time: 28.590466260910034\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9047316900567608 \n Precision: 0.9334351174991651 \n Time: 28.31094217300415\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9084267243288074 \n Precision: 0.9362306362845267 \n Time: 28.27474021911621\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 
0.905715366473052 \n Precision: 0.9356772055339271 \n Time: 41.24546813964844\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9038579777609589 \n Precision: 0.9356475122065082 \n Time: 38.625943660736084\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9080770829789206 \n Precision: 0.9363316249394693 \n Time: 41.76826310157776\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9039954255484519 \n Precision: 0.9347314412772764 \n Time: 38.70603060722351\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9115854026973454 \n Precision: 0.9388416758942988 \n Time: 40.48750329017639\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9031811211820592 \n Precision: 0.9331055369554768 \n Time: 39.64604902267456\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9064855050573493 \n Precision: 0.9360176226379348 \n Time: 39.41585898399353\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9056418448920673 \n Precision: 0.9353540855029867 \n Time: 38.57249140739441\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9069719317119854 \n Precision: 0.9352666551143511 \n Time: 37.857436656951904\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9021686564771891 \n Precision: 0.9332127517667603 \n Time: 45.56922483444214\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9068260928781066 \n Precision: 0.9367727086938696 \n Time: 41.05853486061096\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9023585528368634 \n Precision: 0.9341075097309564 \n Time: 38.97345447540283\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 
'maxBins': 32}\n Results: \n AUC_PR: 0.9071137197689277 \n Precision: 0.935089448632237 \n Time: 39.594165325164795\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9064116239890974 \n Precision: 0.9361123021127875 \n Time: 38.98656702041626\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9084181554586573 \n Precision: 0.937227845788818 \n Time: 38.74824857711792\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9074573815568954 \n Precision: 0.9361106843004874 \n Time: 40.554415702819824\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9068787904600673 \n Precision: 0.9367154377285631 \n Time: 38.09704089164734\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9011861065836154 \n Precision: 0.9336333081154702 \n Time: 39.25570774078369\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.8998194963764644 \n Precision: 0.9321389560753255 \n Time: 38.98353552818298\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9062281908101826 \n Precision: 0.9345771926738812 \n Time: 38.550841331481934\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9041863757449209 \n Precision: 0.9348520727176408 \n Time: 38.70467185974121\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9038023308791242 \n Precision: 0.9340832289318814 \n Time: 38.109893560409546\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9020543614761627 \n Precision: 0.9342604834723153 \n Time: 45.66667318344116\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9098139919113807 \n Precision: 0.9376887754461066 \n Time: 38.97044277191162\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9074482910018706 \n Precision: 0.9361396220711156 \n Time: 38.10969161987305\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9083029044736486 \n Precision: 0.9373217599272698 \n Time: 38.31292676925659\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9081021293229596 \n Precision: 0.935873641505256 \n Time: 38.45249819755554\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.905741162024801 \n Precision: 0.9349878496445916 \n Time: 39.62121105194092\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9043306739442157 \n Precision: 0.9344126695375141 \n Time: 39.85754752159119\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9074625931905879 \n Precision: 0.9366347194746131 \n Time: 39.24317479133606\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9060559556197579 \n Precision: 0.9353972507617081 \n Time: 39.17841076850891\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9040085297916761 \n Precision: 0.9345735487812055 \n Time: 39.33888125419617\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9087764093840116 \n Precision: 0.9372759290803874 \n Time: 39.02291989326477\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9043316079085658 \n Precision: 0.9351503582476194 \n Time: 42.514620780944824\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9036606024673358 \n Precision: 0.9346896662499163 \n Time: 38.94949555397034\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9061042205642924 \n Precision: 0.935765025466893 \n Time: 
38.407867670059204\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9058426576258674 \n Precision: 0.9358645368611543 \n Time: 39.297619581222534\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9079292630368752 \n Precision: 0.9361445614685935 \n Time: 38.016072511672974\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9070829731134906 \n Precision: 0.9363127700092957 \n Time: 37.83885169029236\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9065143634417725 \n Precision: 0.9366009812987954 \n Time: 37.802587032318115\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9121175989538544 \n Precision: 0.9389329106855615 \n Time: 37.967461585998535\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9044441660574587 \n Precision: 0.9339535757803424 \n Time: 37.51012301445007\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9077610418070344 \n Precision: 0.9363874554037682 \n Time: 38.300585985183716\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9067204114391207 \n Precision: 0.9364373823674353 \n Time: 39.93561911582947\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.8998173339052327 \n Precision: 0.9324202886742459 \n Time: 38.419384717941284\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9028760677423588 \n Precision: 0.9338365455895089 \n Time: 38.131749391555786\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9034828823549979 \n Precision: 0.933841814083784 \n Time: 38.12323045730591\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 
0.906870287830157 \n Precision: 0.9349379046428952 \n Time: 37.73733878135681\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.9061015450631351 \n Precision: 0.9355043837877044 \n Time: 43.97499203681946\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 32}\n Results: \n AUC_PR: 0.906821000988306 \n Precision: 0.9352088657946739 \n Time: 37.68941855430603\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9068829958063753 \n Precision: 0.9363233437107528 \n Time: 41.11090970039368\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9042993551956828 \n Precision: 0.9356858975370852 \n Time: 38.745750188827515\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9064645647110755 \n Precision: 0.9363966150802391 \n Time: 37.995487689971924\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.903358097150163 \n Precision: 0.9335144783495983 \n Time: 38.03850507736206\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9073447743171558 \n Precision: 0.9358842677285886 \n Time: 37.81126284599304\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9071503391415436 \n Precision: 0.9364563685542744 \n Time: 37.85555934906006\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.908113333476845 \n Precision: 0.936513607094219 \n Time: 37.5196807384491\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9106172525169292 \n Precision: 0.9381414087290775 \n Time: 38.35452055931091\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9079599100680223 \n Precision: 0.9367582617968531 \n Time: 37.494704484939575\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 
'maxBins': 64}\n Results: \n AUC_PR: 0.9062700195893543 \n Precision: 0.9366302294211677 \n Time: 37.64779615402222\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9018287892139929 \n Precision: 0.934248955997157 \n Time: 36.284101247787476\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9062499915548398 \n Precision: 0.9359306171850488 \n Time: 37.55312752723694\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9084960570738422 \n Precision: 0.9361271800615043 \n Time: 37.80216717720032\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9037355668137251 \n Precision: 0.9349823480368677 \n Time: 38.368847131729126\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9080865204875481 \n Precision: 0.9366429710323099 \n Time: 37.70923209190369\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9055708718468266 \n Precision: 0.9341039543821904 \n Time: 37.26318430900574\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9066324292302926 \n Precision: 0.9358079062075648 \n Time: 37.029743909835815\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9061276634775992 \n Precision: 0.9350380250435592 \n Time: 41.73462629318237\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9050194748055587 \n Precision: 0.9356991472641318 \n Time: 38.19928765296936\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9112702334387265 \n Precision: 0.9383264337732613 \n Time: 37.4248685836792\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9050408388133274 \n Precision: 0.9345790591634627 \n Time: 37.143099784851074\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9072928391933704 \n Precision: 0.9362752128616003 \n Time: 37.573713302612305\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9072603449633372 \n Precision: 0.9351531017858524 \n Time: 37.99237871170044\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9070219411437997 \n Precision: 0.9352910835029422 \n Time: 38.315441846847534\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9043768021965126 \n Precision: 0.9338979457132909 \n Time: 39.39202547073364\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9048633438744251 \n Precision: 0.9357651054127394 \n Time: 37.93214559555054\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9046711923158685 \n Precision: 0.9343551744202332 \n Time: 37.629849910736084\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9018115646534131 \n Precision: 0.9331008504050557 \n Time: 37.26673984527588\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9026141659603736 \n Precision: 0.9328643048414682 \n Time: 38.774054527282715\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9033378878213838 \n Precision: 0.9342261461904776 \n Time: 38.38624882698059\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9059707981169194 \n Precision: 0.9350335367365438 \n Time: 37.706605434417725\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9046370207001281 \n Precision: 0.9359814213868483 \n Time: 37.82722544670105\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9059372291841851 \n Precision: 0.9356535366241754 \n Time: 
37.61863160133362\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9053935393757636 \n Precision: 0.936152143569259 \n Time: 37.84267258644104\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.907860101111375 \n Precision: 0.936650400366932 \n Time: 37.87155246734619\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9097433517561402 \n Precision: 0.9370071202239105 \n Time: 38.388893604278564\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9043466947758457 \n Precision: 0.9337089971041719 \n Time: 38.99710726737976\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9051621374176771 \n Precision: 0.9347998605724828 \n Time: 40.54106044769287\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9065781385506696 \n Precision: 0.9355357620730296 \n Time: 37.73307394981384\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9058572187472109 \n Precision: 0.9351650482627853 \n Time: 37.844422340393066\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9050334978496355 \n Precision: 0.9346232611506768 \n Time: 39.23844504356384\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9070712131037387 \n Precision: 0.9354645936168811 \n Time: 37.94496417045593\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9105503042309037 \n Precision: 0.9392530801210601 \n Time: 41.73290967941284\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9076411168764218 \n Precision: 0.9361788613478061 \n Time: 38.23209238052368\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 
0.9082190910708587 \n Precision: 0.9366503099919625 \n Time: 37.82820987701416\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9050654564584595 \n Precision: 0.9350224915024379 \n Time: 38.223743200302124\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9053055619313205 \n Precision: 0.9350816541088252 \n Time: 38.733819007873535\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9066235817029636 \n Precision: 0.9351794044461409 \n Time: 37.75397062301636\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9091801459762392 \n Precision: 0.9378393491168626 \n Time: 37.61826300621033\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 5, 'maxBins': 64}\n Results: \n AUC_PR: 0.9080260130901188 \n Precision: 0.9358188315476661 \n Time: 37.04887080192566\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9084921529487081 \n Precision: 0.9373283125743204 \n Time: 56.61766171455383\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.904981870366434 \n Precision: 0.9354152027025777 \n Time: 57.02572154998779\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9082671938502467 \n Precision: 0.9363230473734243 \n Time: 55.86561727523804\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.912702602633787 \n Precision: 0.9397152254444734 \n Time: 55.696563959121704\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9103747675894105 \n Precision: 0.9382709415838161 \n Time: 55.56484627723694\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9110967640324175 \n Precision: 0.9383930259577228 \n Time: 55.146562337875366\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 
7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9118938949733475 \n Precision: 0.939873820530853 \n Time: 55.06653070449829\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9098094644981909 \n Precision: 0.9379878952321803 \n Time: 55.257073402404785\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.907940266143654 \n Precision: 0.9370363172659752 \n Time: 55.05683135986328\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9044016421142373 \n Precision: 0.9352132968339315 \n Time: 56.85144114494324\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9082048747883206 \n Precision: 0.9374124864378275 \n Time: 57.279741048812866\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.90346126313616 \n Precision: 0.9341096694710225 \n Time: 56.37692093849182\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9075701707545256 \n Precision: 0.9363964606821849 \n Time: 55.901496171951294\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9026184340853288 \n Precision: 0.9334620809434979 \n Time: 56.352234840393066\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9077660810389481 \n Precision: 0.9371322836436905 \n Time: 56.51518750190735\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9052305435511327 \n Precision: 0.9357211490708532 \n Time: 67.37586832046509\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.900155252007948 \n Precision: 0.9328502070792875 \n Time: 56.709516525268555\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9095470337369653 \n Precision: 0.9365689669697156 \n Time: 55.52518033981323\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.907741004988134 \n Precision: 0.9371786947502625 \n Time: 56.469783544540405\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9069371866076107 \n Precision: 0.9362233501742372 \n Time: 55.6863009929657\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9055040133115775 \n Precision: 0.936176122044035 \n Time: 56.490012407302856\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9094501639737463 \n Precision: 0.9371484786153359 \n Time: 56.715306520462036\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9069732258880531 \n Precision: 0.9356640690867717 \n Time: 57.30391550064087\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9124042714689158 \n Precision: 0.9394364447032506 \n Time: 56.47416687011719\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9075934998883037 \n Precision: 0.936526163750462 \n Time: 56.93890142440796\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9064647833107518 \n Precision: 0.9364928466760942 \n Time: 56.39614725112915\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9096544781519909 \n Precision: 0.9370204594994964 \n Time: 56.87396168708801\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9086031862684719 \n Precision: 0.9361188934958025 \n Time: 56.227723121643066\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9050952108983247 \n Precision: 0.934797458275725 \n Time: 55.738595485687256\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9094109980084778 \n Precision: 0.938213213694151 \n Time: 
56.167521953582764\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9094972593829058 \n Precision: 0.9381611074721921 \n Time: 56.16462159156799\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9073253347400926 \n Precision: 0.9371839071974768 \n Time: 56.278138875961304\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9057141827109598 \n Precision: 0.9358004688019391 \n Time: 55.8704469203949\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9022889757386463 \n Precision: 0.9344193867492913 \n Time: 56.263046741485596\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9066861779617039 \n Precision: 0.9362711067706339 \n Time: 55.98446464538574\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9085783036855273 \n Precision: 0.9368249797368534 \n Time: 55.78716039657593\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9051010252077203 \n Precision: 0.9343650244408711 \n Time: 57.02686405181885\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9071456339733714 \n Precision: 0.9368012625568607 \n Time: 55.72800874710083\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9091587910091632 \n Precision: 0.9381535164757859 \n Time: 55.86014246940613\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9097410051361314 \n Precision: 0.9381411173697167 \n Time: 55.96132493019104\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9062013804554176 \n Precision: 0.935518081295448 \n Time: 55.738783836364746\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 
0.9095000734449653 \n Precision: 0.9377411870745318 \n Time: 57.100637912750244\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9120726713466922 \n Precision: 0.9390882365386275 \n Time: 56.10579061508179\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.907219161718077 \n Precision: 0.9372976039827934 \n Time: 55.94601035118103\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9064257934932716 \n Precision: 0.9360242879280068 \n Time: 55.70659637451172\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9034538155299929 \n Precision: 0.9344917174179408 \n Time: 56.10788536071777\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9108711140911898 \n Precision: 0.9377514328342349 \n Time: 58.27480864524841\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9138261246260126 \n Precision: 0.9395831913724304 \n Time: 55.46875858306885\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9057240460644335 \n Precision: 0.9355714867569727 \n Time: 57.03104496002197\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 32}\n Results: \n AUC_PR: 0.9072044215575952 \n Precision: 0.9355450465228115 \n Time: 55.90582513809204\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9084228906997995 \n Precision: 0.9379386151883247 \n Time: 55.2075355052948\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9102263722595144 \n Precision: 0.9374668572716127 \n Time: 54.885825634002686\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9088106576346813 \n Precision: 0.9376780163558259 \n Time: 55.703728675842285\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 50, 
'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.910473628422831 \n Precision: 0.9381976952297972 \n Time: 56.20583152770996\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9109760570847253 \n Precision: 0.938260080542233 \n Time: 55.501144886016846\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.908869390276214 \n Precision: 0.938051040464901 \n Time: 55.544503927230835\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9108519963063748 \n Precision: 0.9380594269519248 \n Time: 55.90962076187134\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9048853552699847 \n Precision: 0.9351634932225805 \n Time: 55.369338274002075\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9079469639025062 \n Precision: 0.9364553242648918 \n Time: 56.17408323287964\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9049491689639739 \n Precision: 0.935480173267798 \n Time: 56.363786697387695\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.905982932309932 \n Precision: 0.9361953624265245 \n Time: 56.33200550079346\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9047585480920106 \n Precision: 0.9354768745099028 \n Time: 56.227062702178955\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.909751289332603 \n Precision: 0.9369689603239428 \n Time: 56.132670402526855\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9103505779672474 \n Precision: 0.9382066546766321 \n Time: 56.679200410842896\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9079576709057281 \n Precision: 0.9359407069365111 \n Time: 57.18750977516174\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9067241975606486 \n Precision: 0.9357424666813254 \n Time: 55.883161306381226\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9065644752065273 \n Precision: 0.9367406514070256 \n Time: 56.42419362068176\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9059320098144441 \n Precision: 0.9352555382021448 \n Time: 56.709876537323\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9068954333752883 \n Precision: 0.9361152635827996 \n Time: 56.657965898513794\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9076551632358384 \n Precision: 0.9363204076883737 \n Time: 55.73127055168152\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9072877060786135 \n Precision: 0.9371761952558527 \n Time: 56.59487199783325\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9089661735611947 \n Precision: 0.9371477934854893 \n Time: 56.819740533828735\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9123926760551224 \n Precision: 0.9391772924670054 \n Time: 57.310163497924805\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9026508159866575 \n Precision: 0.9337894033598501 \n Time: 56.16026186943054\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9110142778184801 \n Precision: 0.9385181042238314 \n Time: 57.728036642074585\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9072612230709975 \n Precision: 0.9378755559121076 \n Time: 56.28585648536682\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9061985274316128 \n Precision: 0.9352853528564372 \n Time: 
56.674442291259766\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9073600614740838 \n Precision: 0.9364271697545539 \n Time: 57.91284942626953\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9065134274814878 \n Precision: 0.9357712828845216 \n Time: 56.00025749206543\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9105457418075945 \n Precision: 0.9385811447809846 \n Time: 56.49970531463623\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9050122546948096 \n Precision: 0.9342968431586038 \n Time: 55.87150859832764\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9079560374084547 \n Precision: 0.9376221635511846 \n Time: 56.75919198989868\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9090776310393355 \n Precision: 0.9373837592963025 \n Time: 57.17625093460083\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9056306034667004 \n Precision: 0.9360928170142073 \n Time: 55.49699831008911\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9042522524587218 \n Precision: 0.9357535696954347 \n Time: 56.06321620941162\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9110117552591787 \n Precision: 0.9384709379360088 \n Time: 56.27649474143982\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9056274801672131 \n Precision: 0.9359174101580716 \n Time: 56.88566708564758\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.905589477407264 \n Precision: 0.9354223162414541 \n Time: 55.93983840942383\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 
0.9039514229814253 \n Precision: 0.933889248087687 \n Time: 56.216747999191284\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9088467040378059 \n Precision: 0.9378834666902006 \n Time: 56.81517148017883\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9068400895552076 \n Precision: 0.9365014983454527 \n Time: 56.7933394908905\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.908779910989662 \n Precision: 0.9371584787723553 \n Time: 56.195876121520996\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.908143832564329 \n Precision: 0.9375464551137038 \n Time: 56.85780143737793\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9037143440912179 \n Precision: 0.9327299752976977 \n Time: 56.998450756073\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9065225467647038 \n Precision: 0.9359312179313197 \n Time: 56.251545667648315\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9055092283328947 \n Precision: 0.9347660784397556 \n Time: 58.16247582435608\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9077906099116975 \n Precision: 0.9370314782643876 \n Time: 55.99722957611084\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9070318840825147 \n Precision: 0.9362464543588123 \n Time: 56.905410289764404\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9091454121173738 \n Precision: 0.9377591234278462 \n Time: 56.16766333580017\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 50, 'maxDepth': 7, 'maxBins': 64}\n Results: \n AUC_PR: 0.9054153092166779 \n Precision: 0.9357559784248315 \n Time: 56.96745228767395\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 100, 
'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9023110579871585 \n Precision: 0.9337109079518686 \n Time: 48.36055326461792\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.903369622724069 \n Precision: 0.9342229068884176 \n Time: 48.69347834587097\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9068936393139158 \n Precision: 0.9367041742766847 \n Time: 48.70393466949463\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9050503718718875 \n Precision: 0.9354319710259558 \n Time: 48.46686506271362\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9077800124556923 \n Precision: 0.9367569541035445 \n Time: 48.53447461128235\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9011998438487249 \n Precision: 0.9323109366110768 \n Time: 48.38573431968689\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9020626338154796 \n Precision: 0.9334635579882729 \n Time: 48.770554542541504\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9016114598065853 \n Precision: 0.9341433190098871 \n Time: 48.113677740097046\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9054665704746325 \n Precision: 0.9362822183204614 \n Time: 48.73346972465515\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9049773441602089 \n Precision: 0.9348807305204628 \n Time: 48.58719182014465\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment10.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9003046499559276 \n Precision: 0.9328659380562129 \n Time: 48.94209265708923\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment11.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9024411565553757 \n Precision: 0.9335046378052624 \n Time: 48.39376997947693\n=========================== \n\n
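(Editor's note: with 50 repetitions per configuration, the per-experiment lines are hard to compare by eye. A small, self-contained sketch follows — assuming the raw sweep output has been captured in a string named `log`, a hypothetical variable not defined elsewhere in this notebook — that parses the printed blocks and condenses each configuration to mean and standard deviation:)

```python
# Sketch: condense the raw sweep log into one summary row per configuration.
# `log` is a hypothetical string holding the full stdout shown above.
import re

import pandas as pd

BLOCK = re.compile(
    r"Parameters ==> \{'maxIter': (\d+), 'maxDepth': (\d+), 'maxBins': (\d+)\}"
    r"[\s\S]*?AUC_PR: ([\d.]+)[\s\S]*?Precision: ([\d.]+)[\s\S]*?Time: ([\d.]+)"
)

rows = [
    {
        "maxIter": int(it), "maxDepth": int(depth), "maxBins": int(bins),
        "AUC_PR": float(auc), "Precision": float(prec), "Time": float(sec),
    }
    for it, depth, bins, auc, prec, sec in BLOCK.findall(log)
]

# Mean and standard deviation of each metric over the 50 repetitions.
summary = (
    pd.DataFrame(rows)
    .groupby(["maxIter", "maxDepth", "maxBins"])
    .agg(["mean", "std"])
)
print(summary.round(4))
```

This yields one row per (maxIter, maxDepth, maxBins) triple, which makes the small differences between configurations (all AUC_PR values here sit in roughly the 0.90–0.91 band) much easier to see than the raw blocks.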
read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment12.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9055704284662922 \n Precision: 0.9340936722042885 \n Time: 48.58267164230347\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment13.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9000297061465186 \n Precision: 0.9317786533624324 \n Time: 48.06942701339722\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment14.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.908173943385323 \n Precision: 0.9361595245667165 \n Time: 48.56058621406555\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment15.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9027800977726441 \n Precision: 0.9336890805907947 \n Time: 49.139023542404175\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment16.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9003112075636042 \n Precision: 0.9316405781287267 \n Time: 50.49056529998779\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment17.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.903786333646213 \n Precision: 0.933552060059397 \n Time: 48.9074547290802\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment18.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9028494153802307 \n Precision: 0.9335529640170794 \n Time: 48.203879594802856\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment19.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9073796614341958 \n Precision: 0.9354878262516354 \n Time: 47.826894760131836\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment20.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9044758901478984 \n Precision: 0.9345998924581358 \n Time: 49.09787893295288\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment21.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9016294310034538 \n Precision: 0.9322458542380337 \n Time: 48.269028663635254\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment22.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9054701293326165 \n Precision: 0.9349022242132798 \n Time: 48.30893516540527\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment23.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9021833917774662 \n Precision: 0.9334030900499953 \n 
Time: 49.358280420303345\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment24.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9051159768987151 \n Precision: 0.9355346647311782 \n Time: 48.60317420959473\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment25.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9003368398698104 \n Precision: 0.9329719039919905 \n Time: 48.61043405532837\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment26.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9060578058312927 \n Precision: 0.9346657298898368 \n Time: 48.0559504032135\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment27.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9054466448954613 \n Precision: 0.9348283993445037 \n Time: 48.762046337127686\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment28.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8994152139173311 \n Precision: 0.9320813118530584 \n Time: 48.295854330062866\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment29.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9070912913191854 \n Precision: 0.9357480328611181 \n Time: 47.8585205078125\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment30.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9044521919472709 \n Precision: 0.9348758957374153 \n Time: 47.85276484489441\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment31.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9062629362268749 \n Precision: 0.9362412598981682 \n Time: 48.35135293006897\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment32.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9045482763348273 \n Precision: 0.9348485968088204 \n Time: 49.24479341506958\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment33.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9017347711040502 \n Precision: 0.933314618608877 \n Time: 48.44390296936035\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment34.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.905868877515127 \n Precision: 0.9350275547738156 \n Time: 48.30623650550842\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment35.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n 
AUC_PR: 0.9038740881726671 \n Precision: 0.9335541380137775 \n Time: 48.23018836975098\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment36.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9034450748329584 \n Precision: 0.9338205669497812 \n Time: 48.308462142944336\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment37.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9030955232176965 \n Precision: 0.9337020619554086 \n Time: 59.33990979194641\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment38.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9012455966863041 \n Precision: 0.932041667535247 \n Time: 49.12489914894104\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment39.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9017962901906325 \n Precision: 0.9330157426712156 \n Time: 49.23694920539856\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment40.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9029604536923754 \n Precision: 0.9333013571340953 \n Time: 47.94307565689087\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment41.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9055257134655855 \n Precision: 0.9362454778763567 \n Time: 48.68632912635803\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment42.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.8985833549465948 \n Precision: 0.9319973939346389 \n Time: 50.200539112091064\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment43.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9072612874982166 \n Precision: 0.9361952364197497 \n Time: 48.755908727645874\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment44.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.902878619165481 \n Precision: 0.9335098760648748 \n Time: 49.17009615898132\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment45.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9015970934370483 \n Precision: 0.9328719562721837 \n Time: 48.104990005493164\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment46.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9064969829496083 \n Precision: 0.9363062012674024 \n Time: 49.967408180236816\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment47.parquet\nParameters ==> 
{'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9050363544742459 \n Precision: 0.9339010712995106 \n Time: 49.24367952346802\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment48.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.9072077216335998 \n Precision: 0.9360397741970643 \n Time: 49.01566934585571\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment49.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 32}\n Results: \n AUC_PR: 0.900173951361193 \n Precision: 0.9324240931515366 \n Time: 48.56814646720886\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9062514306269347 \n Precision: 0.9353191082939707 \n Time: 48.815414905548096\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment1.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.898315102626379 \n Precision: 0.931985676614746 \n Time: 51.784165143966675\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment2.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.8988144297083078 \n Precision: 0.9323778923254327 \n Time: 51.52915143966675\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment3.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9027047992283331 \n Precision: 0.9330968228339392 \n Time: 63.81003260612488\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment4.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9055083687391862 \n Precision: 0.9339790814824753 \n Time: 48.610398292541504\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment5.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.904851860861166 \n Precision: 0.9349695698585824 \n Time: 47.985679149627686\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment6.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9014500152997587 \n Precision: 0.9328221859361259 \n Time: 48.035062313079834\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment7.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.904627812827826 \n Precision: 0.9349748020672424 \n Time: 48.21759247779846\n=========================== \n\nread gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment8.parquet\nParameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}\n Results: \n AUC_PR: 0.9039060252654088 \n Precision: 0.9341382836919805 \n Time: 48.21502470970154\n=========================== \n\nread 
gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment9.parquet
Parameters ==> {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64}
 Results:
 AUC_PR: 0.9025934037031039
 Precision: 0.9328341957198925
 Time: 48.21175813674927
===========================

[... experiments 10-49 with {'maxIter': 100, 'maxDepth': 3, 'maxBins': 64} omitted: AUC_PR ~0.897-0.909, Precision ~0.931-0.937, Time ~48-52 s per run ...]

read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet
Parameters ==> {'maxIter': 100, 'maxDepth': 5, 'maxBins': 32}
 Results:
 AUC_PR: 0.9061792579917394
 Precision: 0.9368783618268628
 Time: 72.49802041053772
===========================

[... experiments 1-49 with {'maxIter': 100, 'maxDepth': 5, 'maxBins': 32} omitted: AUC_PR ~0.902-0.914, Precision ~0.933-0.939, Time ~71-78 s per run ...]

read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet
Parameters ==> {'maxIter': 100, 'maxDepth': 5, 'maxBins': 64}
 Results:
 AUC_PR: 0.904747926198673
 Precision: 0.9355692311677647
 Time: 71.68836402893066
===========================

[... experiments 1-49 with {'maxIter': 100, 'maxDepth': 5, 'maxBins': 64} omitted: AUC_PR ~0.901-0.913, Precision ~0.933-0.940, Time ~71-77 s per run ...]

read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet
Parameters ==> {'maxIter': 100, 'maxDepth': 7, 'maxBins': 32}
 Results:
 AUC_PR: 0.9071379402108671
 Precision: 0.9366423655589222
 Time: 129.10522079467773
===========================

[... experiments 1-49 with {'maxIter': 100, 'maxDepth': 7, 'maxBins': 32} omitted: AUC_PR ~0.903-0.913, Precision ~0.935-0.940, Time ~128-136 s per run ...]

read gs://ai-covid19-datalake/trusted/experiment_map/03-STRSAMP-AG/ds-3/cols_set_3/experiment0.parquet
Parameters ==> {'maxIter': 100, 'maxDepth': 7, 'maxBins': 64}
 Results:
 AUC_PR: 0.9092694536423291
 Precision: 0.9383950404914037
 Time: 130.15820479393005
===========================

[... experiments 1-49 with {'maxIter': 100, 'maxDepth': 7, 'maxBins': 64} omitted: AUC_PR ~0.901-0.911, Precision ~0.933-0.939, Time ~124-133 s per run ...]
"
]
],
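[
[
"The loop above prints AUC_PR, precision and wall-clock time for each of the stratified-sampling experiments. A minimal post-processing sketch, assuming the printed output has been captured into a string `log` (this notebook does not capture it), condenses the log into summary statistics:",
"_____no_output_____"
]
],
[
[
"import re\nimport pandas as pd\n\n# Hedged sketch: parse the experiment log into a tidy frame and summarise it.\n# `log` is assumed to hold the captured stdout of the experiment loop above.\nrows = re.findall(\n    r'experiment([0-9]+)[.]parquet.*?AUC_PR: ([0-9.]+).*?Precision: ([0-9.]+).*?Time: ([0-9.]+)',\n    log,\n    flags=re.S,\n)\nsummary = pd.DataFrame(rows, columns=['experiment', 'auc_pr', 'precision', 'time_s']).astype(float)\nprint(summary[['auc_pr', 'precision', 'time_s']].agg(['mean', 'std', 'min', 'max']))",
"_____no_output_____"
]
],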
[
[
"<hr />\n<hr />\n<hr />",
"_____no_output_____"
]
],
[
[
"for i in range(len(experiments)):\n for d in list(experiments[i].keys()):\n experiments[i][d] = str(experiments[i][d])",
"_____no_output_____"
],
[
"# experiments",
"_____no_output_____"
],
[
"cols = ['experiment_filter', 'undersampling_method', 'filename', 'experiment_id', 'n_covid', 'n_not_covid', 'model_name', 'params', 'model_AUC_ROC', 'model_AUC_PR', 'model_covid_precision', 'model_covid_recall', 'model_covid_f1', 'model_not_covid_precision', 'model_not_covid_recall', 'model_not_covid_f1', 'model_avg_precision', 'model_avg_recall', 'model_avg_f1', 'model_avg_acc', 'model_TP', 'model_TN', 'model_FN', 'model_FP', 'model_time_exec', 'model_col_set']",
"_____no_output_____"
],
[
"intermed_results = spark.createDataFrame(data=experiments).select(cols)\nintermed_results.toPandas()",
"/usr/lib/spark/python/pyspark/sql/session.py:346: UserWarning: inferring schema from dict is deprecated,please use pyspark.sql.Row instead\n warnings.warn(\"inferring schema from dict is deprecated,\"\n"
],
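[
"# The UserWarning above comes from inferring a Spark schema from plain dicts.\n# A minimal sketch of the fix the warning suggests (assumption: every dict in\n# `experiments` shares the same keys, which holds after the str() cast above):\nfrom pyspark.sql import Row\n\nintermed_results = spark.createDataFrame([Row(**exp) for exp in experiments]).select(cols)",
"_____no_output_____"
],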
[
"intermed_results.write.parquet('gs://ai-covid19-datalake/trusted/intermed_results/STRSAMP/GBT_experiments-AG-ds3-cs3.parquet', mode='overwrite')",
"_____no_output_____"
],
[
"print('finished')",
"finished\n"
],
[
"intermed_results.show()",
"+-----------------+--------------------+--------------------+-------------+-------+-----------+----------+--------------------+------------------+------------------+---------------------+------------------+------------------+-------------------------+----------------------+------------------+-------------------+------------------+------------------+------------------+--------+--------+--------+--------+------------------+-------------+\n|experiment_filter|undersampling_method| filename|experiment_id|n_covid|n_not_covid|model_name| params| model_AUC_ROC| model_AUC_PR|model_covid_precision|model_covid_recall| model_covid_f1|model_not_covid_precision|model_not_covid_recall|model_not_covid_f1|model_avg_precision| model_avg_recall| model_avg_f1| model_avg_acc|model_TP|model_TN|model_FN|model_FP| model_time_exec|model_col_set|\n+-----------------+--------------------+--------------------+-------------+-------+-----------+----------+--------------------+------------------+------------------+---------------------+------------------+------------------+-------------------------+----------------------+------------------+-------------------+------------------+------------------+------------------+--------+--------+--------+--------+------------------+-------------+\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 0| 76008| 44538| GBT|{'maxIter': 20, '...|0.9105833299606098| 0.902442743520048| 0.9162362081077731|0.9708380768893948|0.9427471990135426| 0.94532917419143| 0.8503285830318246|0.8953158406219631| 0.9307826911496015|0.9105833299606096|0.9190315198177529|0.9259778455787362| 22172| 11516| 666| 2027|27.197532415390015| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 1| 76098| 44538| GBT|{'maxIter': 20, '...|0.9143934759337773|0.8981630098121935| 0.9225887791552847|0.9654338361405514|0.9435251643959256| 0.9367353509336768| 0.863353115727003|0.8985484867201977| 0.9296620650444807|0.9143934759337773|0.9210368255580617|0.9274413981611861| 21953| 11638| 786| 1842|19.599364042282104| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 2| 76542| 44538| GBT|{'maxIter': 20, '...|0.9105458623056624|0.9010682316934415| 0.9179215621380109|0.9709859524747276|0.9437083980179912| 0.9443558539655896| 0.8501057721365971|0.8947556757186593| 0.9311387080518003|0.9105458623056624|0.9192320368683253|0.9266494859644747| 22188| 11252| 663| 1984|19.544635772705078| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 3| 75793| 44538| GBT|{'maxIter': 20, '...|0.9134151248672557|0.9007662934392351| 0.9215367965367965|0.9692233604763155|0.9447787308496565| 0.9416887856668879| 0.8576068892581961| 0.897683245038349| 0.9316127911018421|0.9134151248672557|0.9212309879440027| 0.928270509977827| 22139| 11353| 703| 1885|20.516762018203735| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 4| 74465| 44538| GBT|{'maxIter': 20, '...|0.9131731288023294|0.9056199549941559| 0.9179255633386784|0.9714630225080386|0.9439357778259927| 0.9471769860295941| 0.8548832350966201|0.8986666666666666| 0.9325512746841362|0.9131731288023294|0.9213012222463297|0.9278111468082134| 21753| 11458| 639| 1945|18.563634872436523| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 5| 74906| 44538| GBT|{'maxIter': 20, '...|0.9128032413624818| 0.903309365465197| 0.9191516169676607|0.9707252162341983|0.9442347103872291| 0.945| 0.8548812664907651|0.8976845438353453| 0.9320758084838303|0.9128032413624817|0.9209596271112872|0.9278134599273946| 21885| 11340| 660| 1925| 18.39739418029785| cols_set_3|\n| ds-3| 
03-STRSAMP-AG|gs://ai-covid19-d...| 6| 75869| 44538| GBT|{'maxIter': 20, '...|0.9128815873021318| 0.906922869766107| 0.9178099173553719|0.9732275874156515| 0.944706733018587| 0.9495458298926507| 0.8525355871886121|0.8984295648097508| 0.9336778736240112|0.9128815873021319|0.9215681489141689|0.9283943817130267| 22211| 11499| 611| 1989| 18.88933277130127| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 7| 75764| 44538| GBT|{'maxIter': 20, '...|0.9158260051336311|0.8996913679122375| 0.9236869858679604|0.9656023990121715|0.9441797287682455| 0.9374799615261302| 0.8660496112550907|0.9003502559562758| 0.9305834736970453|0.9158260051336311|0.9222649923622607|0.9284431054973605| 21896| 11696| 780| 1809|17.993168830871582| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 8| 75929| 44538| GBT|{'maxIter': 20, '...|0.9097219908331698|0.8947527143057171| 0.9183648059763436| 0.965950759559979|0.9415569218986022| 0.9362693030476346| 0.8534932221063608|0.8929670757841417| 0.9273170545119891| 0.90972199083317|0.9172619988413719|0.9243958826443551| 22128| 11459| 780| 1967|17.730310678482056| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 9| 75880| 44538| GBT|{'maxIter': 20, '...|0.9112688925105754|0.8995229828227803| 0.9181908879281676| 0.968304765244838|0.9425822006955854| 0.9410084856396866| 0.8542330197763128|0.8955235469969327| 0.9295996867839271|0.9112688925105754|0.9190528738462591|0.9258922670191673| 22088| 11533| 723| 1968| 17.63637614250183| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 10| 76773| 44538| GBT|{'maxIter': 20, '...| 0.912711093607274| 0.903200534012848| 0.9194458271113324|0.9710430630393831|0.9445403217215298| 0.9451679800912485| 0.8543791241751649|0.8974833602457564| 0.9323069036012904| 0.912711093607274|0.9210118409836431|0.9280203522937809| 22166| 11394| 661| 1942|17.997276067733765| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 11| 75938| 44538| GBT|{'maxIter': 20, '...|0.9101312470405588|0.8971882959738281| 0.9183937823834197|0.9679820311811856|0.9425361293365926| 0.939391413088787| 0.852280462899932|0.8937182741116753| 0.9288925977361033|0.9101312470405588| 0.918127201724134|0.9254042920366299| 21979| 11268| 727| 1953| 17.63829016685486| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 12| 74219| 44538| GBT|{'maxIter': 20, '...|0.9080197194941112|0.8961129794537617| 0.9145061067066638| 0.966616841056303|0.9398396899497931| 0.9385372362605288| 0.8494225979319194|0.8917591125198098| 0.9265216714835962|0.9080197194941112|0.9157994012348014|0.9226631942478628| 21340| 11254| 737| 1995| 18.06064224243164| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 13| 75346| 44538| GBT|{'maxIter': 20, '...|0.9115477842312691| 0.895926062157061| 0.9203476952651595|0.9659634194482214|0.9426040061633282| 0.9364507411350422| 0.8571321490143168|0.8950375704445838| 0.9283992182001009|0.9115477842312691| 0.918820788303956|0.9257885998893193| 22023| 11435| 776| 1906| 18.4149808883667| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 14| 75248| 44538| GBT|{'maxIter': 20, '...|0.9111634468617408|0.9004735132977751| 0.917694955091077|0.9690643974648762|0.9426803768134688| 0.942318816626725| 0.8532624962586052|0.8955821716080895| 0.930006885858901|0.9111634468617407|0.9191312742107791|0.9259888106438054| 21865| 11403| 698| 1961|18.096524715423584| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 15| 74762| 44538| GBT|{'maxIter': 20, '...|0.9125924004999537|0.9006079752688219| 0.9187666765490661| 
0.967659916138817|0.9425796780290687| 0.9409079794604287| 0.8575248848610905|0.8972834324355835| 0.9298373280047474|0.9125924004999537|0.9199315552323262|0.9263377926421404| 21693| 11544| 725| 1918| 19.13778829574585| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 16| 75621| 44538| GBT|{'maxIter': 20, '...| 0.911502697907092|0.8988034979213085| 0.9185163304920246|0.9673792275898849|0.9423147687179376| 0.9397273772376417| 0.855626168224299|0.8957069620005479| 0.9291218538648331|0.9115026979070919|0.9190108653592428|0.9257163563384992| 21767| 11444| 734| 1931|18.854344129562378| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 17| 75031| 44538| GBT|{'maxIter': 20, '...|0.9107857352905782| 0.896517888166792| 0.9186380533964177|0.9662312272282947| 0.941833773658452| 0.9374382614422128| 0.8553402433528616| 0.894509465085225| 0.9280381574193153|0.9107857352905782|0.9181716193718386|0.9250139586823004| 21746| 11388| 760| 1926|18.726903200149536| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 18| 75151| 44538| GBT|{'maxIter': 20, '...|0.9128453541935022|0.8981317759693237| 0.920433384120535|0.9660625444207533|0.9426961421759862| 0.9377698134723467| 0.8596281639662511|0.8970003895597976| 0.9291015987964408|0.9128453541935022|0.9198482658678919|0.9263612310263195| 21748| 11513| 764| 1880| 17.8125057220459| cols_set_3|\n| ds-3| 03-STRSAMP-AG|gs://ai-covid19-d...| 19| 75145| 44538| GBT|{'maxIter': 20, '...| 0.911902687412907|0.8981243585518983| 0.9195585581805211|0.9669505361161365| 0.94265926786252| 0.9387270942408377| 0.8568548387096774|0.8959244222361025| 0.9291428262106793|0.9119026874129069|0.9192918450493113|0.9260574122867841| 21914| 11475| 749| 1917|19.196642875671387| cols_set_3|\n+-----------------+--------------------+--------------------+-------------+-------+-----------+----------+--------------------+------------------+------------------+---------------------+------------------+------------------+-------------------------+----------------------+------------------+-------------------+------------------+------------------+------------------+--------+--------+--------+--------+------------------+-------------+\nonly showing top 20 rows\n\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52a5fa6aa2b50089d5d61abe07d3745acea7b41
| 130,824 |
ipynb
|
Jupyter Notebook
|
Copy_of_Disco_Diffusion_v4_1_[w_Video_Inits,_Recovery_&_DDIM_Sharpen].ipynb
|
cdosrunwild/glide-text2im
|
d6a1f6a55e72d92a60d5428d4964e26398b3609b
|
[
"MIT"
] | 1 |
2022-03-08T02:25:37.000Z
|
2022-03-08T02:25:37.000Z
|
Copy_of_Disco_Diffusion_v4_1_[w_Video_Inits,_Recovery_&_DDIM_Sharpen].ipynb
|
cdosrunwild/glide-text2im
|
d6a1f6a55e72d92a60d5428d4964e26398b3609b
|
[
"MIT"
] | null | null | null |
Copy_of_Disco_Diffusion_v4_1_[w_Video_Inits,_Recovery_&_DDIM_Sharpen].ipynb
|
cdosrunwild/glide-text2im
|
d6a1f6a55e72d92a60d5428d4964e26398b3609b
|
[
"MIT"
] | null | null | null | 49.818736 | 368 | 0.485224 |
[
[
[
"<a href=\"https://colab.research.google.com/github/cdosrunwild/glide-text2im/blob/main/Copy_of_Disco_Diffusion_v4_1_%5Bw_Video_Inits%2C_Recovery_%26_DDIM_Sharpen%5D.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Disco Diffusion v4.1 - Now with Video Inits, Recovery, DDIM Sharpen and improved UI\n\nIn case of confusion, Disco is the name of this notebook edit. The diffusion model in use is Katherine Crowson's fine-tuned 512x512 model\n\nFor issues, message [@Somnai_dreams](https://twitter.com/Somnai_dreams) or Somnai#6855\n\nCredits & Changelog ⬇️\n",
"_____no_output_____"
],
[
"Original notebook by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses either OpenAI's 256x256 unconditional ImageNet or Katherine Crowson's fine-tuned 512x512 diffusion model (https://github.com/openai/guided-diffusion), together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images.\n\nModified by Daniel Russell (https://github.com/russelldc, https://twitter.com/danielrussruss) to include (hopefully) optimal params for quick generations in 15-100 timesteps rather than 1000, as well as more robust augmentations.\n\nFurther improvements from Dango233 and nsheppard helped improve the quality of diffusion in general, and especially so for shorter runs like this notebook aims to achieve.\n\nVark added code to load in multiple Clip models at once, which all prompts are evaluated against, which may greatly improve accuracy.\n\nThe latest zoom, pan, rotation, and keyframes features were taken from Chigozie Nri's VQGAN Zoom Notebook (https://github.com/chigozienri, https://twitter.com/chigozienri)\n\nAdvanced DangoCutn Cutout method is also from Dango223.\n\n--\n\nI, Somnai (https://twitter.com/Somnai_dreams), have added Diffusion Animation techniques, QoL improvements and various implementations of tech and techniques, mostly listed in the changelog below.",
"_____no_output_____"
]
],
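[
[
"To make the CLIP-guidance idea above concrete: CLIP embeds an image and a text prompt into a shared space, and the gradient of their spherical distance with respect to the image pixels is what steers each diffusion step. Below is a minimal, hedged sketch of that core loop only; the notebook's full `cond_fn` in section 1.4 adds cutouts, the secondary model and auxiliary losses. It assumes the openai/CLIP package from section 1.3 is installed, so run it after that cell.",
"_____no_output_____"
]
],
[
[
"# Hedged sketch of the guidance idea described above; run after section 1.3.\nimport torch\nimport torch.nn.functional as F\nimport clip\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclip_model = clip.load('ViT-B/32', device=device)[0].float().eval()\n\ndef clip_text_loss(image_batch, prompt):\n  # Squared spherical distance between image and text embeddings,\n  # mirroring spherical_dist_loss defined later in this notebook.\n  text_embed = clip_model.encode_text(clip.tokenize(prompt).to(device)).float()\n  image_embed = clip_model.encode_image(image_batch).float()\n  x = F.normalize(image_embed, dim=-1)\n  y = F.normalize(text_embed, dim=-1)\n  return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2).mean()\n\nx = torch.randn(1, 3, 224, 224, device=device, requires_grad=True)\ngrad = torch.autograd.grad(clip_text_loss(x, 'a lighthouse at dusk'), x)[0]\n# cond_fn (section 1.4) applies a scaled, clamped -grad at each sampling step.",
"_____no_output_____"
]
],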
[
[
"# @title Licensed under the MIT License\n\n# Copyright (c) 2021 Katherine Crowson \n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.",
"_____no_output_____"
],
[
"#@title <- View Changelog\n\nskip_for_run_all = True #@param {type: 'boolean'}\n\nif skip_for_run_all == False:\n print(\n '''\n v1 Update: Oct 29th 2021\n\n QoL improvements added by Somnai (@somnai_dreams), including user friendly UI, settings+prompt saving and improved google drive folder organization.\n\n v1.1 Update: Nov 13th 2021\n\n Now includes sizing options, intermediate saves and fixed image prompts and perlin inits. unexposed batch option since it doesn't work\n\n v2 Update: Nov 22nd 2021\n\n Initial addition of Katherine Crowson's Secondary Model Method (https://colab.research.google.com/drive/1mpkrhOjoyzPeSWy2r7T8EYRaU7amYOOi#scrollTo=X5gODNAMEUCR)\n\n Noticed settings were saving with the wrong name so corrected it. Let me know if you preferred the old scheme.\n\n v3 Update: Dec 24th 2021\n\n Implemented Dango's advanced cutout method\n\n Added SLIP models, thanks to NeuralDivergent\n\n Fixed issue with NaNs resulting in black images, with massive help and testing from @Softology\n\n Perlin now changes properly within batches (not sure where this perlin_regen code came from originally, but thank you)\n\n v4 Update: Jan 2021\n\n Implemented Diffusion Zooming\n\n Added Chigozie keyframing\n\n Made a bunch of edits to processes\n \n v4.1 Update: Jan 14th 2021\n\n Added video input mode\n\n Added license that somehow went missing\n\n Added improved prompt keyframing, fixed image_prompts and multiple prompts\n\n Improved UI\n\n Significant under the hood cleanup and improvement\n\n Refined defaults for each mode\n\n Added latent-diffusion SuperRes for sharpening\n\n Added resume run mode\n\n '''\n )",
"_____no_output_____"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"_____no_output_____"
]
],
[
[
"#Tutorial",
"_____no_output_____"
],
[
"**Diffusion settings**\n---\n\nThis section is outdated as of v2\n\nSetting | Description | Default\n--- | --- | ---\n**Your vision:**\n`text_prompts` | A description of what you'd like the machine to generate. Think of it like writing the caption below your image on a website. | N/A\n`image_prompts` | Think of these images more as a description of their contents. | N/A\n**Image quality:**\n`clip_guidance_scale` | Controls how much the image should look like the prompt. | 1000\n`tv_scale` | Controls the smoothness of the final output. | 150\n`range_scale` | Controls how far out of range RGB values are allowed to be. | 150\n`sat_scale` | Controls how much saturation is allowed. From nshepperd's JAX notebook. | 0\n`cutn` | Controls how many crops to take from the image. | 16\n`cutn_batches` | Accumulate CLIP gradient from multiple batches of cuts | 2\n**Init settings:**\n`init_image` | URL or local path | None\n`init_scale` | This enhances the effect of the init image, a good value is 1000 | 0\n`skip_steps Controls the starting point along the diffusion timesteps | 0\n`perlin_init` | Option to start with random perlin noise | False\n`perlin_mode` | ('gray', 'color') | 'mixed'\n**Advanced:**\n`skip_augs` |Controls whether to skip torchvision augmentations | False\n`randomize_class` |Controls whether the imagenet class is randomly changed each iteration | True\n`clip_denoised` |Determines whether CLIP discriminates a noisy or denoised image | False\n`clamp_grad` |Experimental: Using adaptive clip grad in the cond_fn | True\n`seed` | Choose a random seed and print it at end of run for reproduction | random_seed\n`fuzzy_prompt` | Controls whether to add multiple noisy prompts to the prompt losses | False\n`rand_mag` |Controls the magnitude of the random noise | 0.1\n`eta` | DDIM hyperparameter | 0.5\n\n..\n\n**Model settings**\n---\n\nSetting | Description | Default\n--- | --- | ---\n**Diffusion:**\n`timestep_respacing` | Modify this value to decrease the number of timesteps. | ddim100\n`diffusion_steps` || 1000\n**Diffusion:**\n`clip_models` | Models of CLIP to load. Typically the more, the better but they all come at a hefty VRAM cost. | ViT-B/32, ViT-B/16, RN50x4",
"_____no_output_____"
],
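[
"As a hedged illustration of the table above only, its listed defaults collected into one Python dict (names mirror the table; the notebook itself sets these values in its own settings cells):\n\n```python\ndefault_settings = {\n    'clip_guidance_scale': 1000,   # prompt adherence\n    'tv_scale': 150,               # smoothness\n    'range_scale': 150,            # out-of-range RGB penalty\n    'sat_scale': 0,                # saturation penalty\n    'cutn': 16,\n    'cutn_batches': 2,\n    'init_scale': 0,\n    'skip_steps': 0,\n    'eta': 0.5,\n    'timestep_respacing': 'ddim100',\n    'diffusion_steps': 1000,\n}\n```",
"_____no_output_____"
],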
[
"# 1. Set Up",
"_____no_output_____"
]
],
[
[
"#@title 1.1 Check GPU Status\n!nvidia-smi -L",
"_____no_output_____"
],
[
"from google.colab import drive\n#@title 1.2 Prepare Folders\n#@markdown If you connect your Google Drive, you can save the final image of each run on your drive.\n\ngoogle_drive = True #@param {type:\"boolean\"}\n\n#@markdown Click here if you'd like to save the diffusion model checkpoint file to (and/or load from) your Google Drive:\nyes_please = True #@param {type:\"boolean\"}\n\nif google_drive is True:\n drive.mount('/content/drive')\n root_path = '/content/drive/MyDrive/AI/Disco_Diffusion'\nelse:\n root_path = '/content'\n\nimport os\nfrom os import path\n#Simple create paths taken with modifications from Datamosh's Batch VQGAN+CLIP notebook\ndef createPath(filepath):\n if path.exists(filepath) == False:\n os.makedirs(filepath)\n print(f'Made {filepath}')\n else:\n print(f'filepath {filepath} exists.')\n\ninitDirPath = f'{root_path}/init_images'\ncreatePath(initDirPath)\noutDirPath = f'{root_path}/images_out'\ncreatePath(outDirPath)\n\nif google_drive and not yes_please or not google_drive:\n model_path = '/content/models'\n createPath(model_path)\nif google_drive and yes_please:\n model_path = f'{root_path}/models'\n createPath(model_path)\n# libraries = f'{root_path}/libraries'\n# createPath(libraries)\n\n",
"_____no_output_____"
],
[
"#@title ### 1.3 Install and import dependencies\n\nif google_drive is not True:\n root_path = f'/content'\n model_path = '/content/models' \n\nmodel_256_downloaded = False\nmodel_512_downloaded = False\nmodel_secondary_downloaded = False\n\n!git clone https://github.com/openai/CLIP\n# !git clone https://github.com/facebookresearch/SLIP.git\n!git clone https://github.com/crowsonkb/guided-diffusion\n!git clone https://github.com/assafshocher/ResizeRight.git\n!pip install -e ./CLIP\n!pip install -e ./guided-diffusion\n!pip install lpips datetime timm\n!apt install imagemagick\n\n\nimport sys\n# sys.path.append('./SLIP')\nsys.path.append('./ResizeRight')\nfrom dataclasses import dataclass\nfrom functools import partial\nimport cv2\nimport pandas as pd\nimport gc\nimport io\nimport math\nimport timm\nfrom IPython import display\nimport lpips\nfrom PIL import Image, ImageOps\nimport requests\nfrom glob import glob\nimport json\nfrom types import SimpleNamespace\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as TF\nfrom tqdm.notebook import tqdm\nsys.path.append('./CLIP')\nsys.path.append('./guided-diffusion')\nimport clip\nfrom resize_right import resize\n# from models import SLIP_VITB16, SLIP, SLIP_VITL16\nfrom guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom ipywidgets import Output\nimport hashlib\n\n#SuperRes\n!git clone https://github.com/CompVis/latent-diffusion.git\n!git clone https://github.com/CompVis/taming-transformers\n!pip install -e ./taming-transformers\n!pip install ipywidgets omegaconf>=2.0.0 pytorch-lightning>=1.0.8 torch-fidelity einops wandb\n\n#SuperRes\nimport ipywidgets as widgets\nimport os\nsys.path.append(\".\")\nsys.path.append('./taming-transformers')\nfrom taming.models import vqgan # checking correct import from taming\nfrom torchvision.datasets.utils import download_url\n%cd '/content/latent-diffusion'\nfrom functools import partial\nfrom ldm.util import instantiate_from_config\nfrom ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like\n# from ldm.models.diffusion.ddim import DDIMSampler\nfrom ldm.util import ismap\n%cd '/content'\nfrom google.colab import files\nfrom IPython.display import Image as ipyimg\nfrom numpy import asarray\nfrom einops import rearrange, repeat\nimport torch, torchvision\nimport time\nfrom omegaconf import OmegaConf\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n\nimport torch\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint('Using device:', device)\n\nif torch.cuda.get_device_capability(device) == (8,0): ## A100 fix thanks to Emad\n print('Disabling CUDNN for A100 gpu', file=sys.stderr)\n torch.backends.cudnn.enabled = False",
"_____no_output_____"
],
[
"#@title 1.4 Define necessary functions\n\n# https://gist.github.com/adefossez/0646dbe9ed4005480a2407c62aac8869\n\ndef interp(t):\n return 3 * t**2 - 2 * t ** 3\n\ndef perlin(width, height, scale=10, device=None):\n gx, gy = torch.randn(2, width + 1, height + 1, 1, 1, device=device)\n xs = torch.linspace(0, 1, scale + 1)[:-1, None].to(device)\n ys = torch.linspace(0, 1, scale + 1)[None, :-1].to(device)\n wx = 1 - interp(xs)\n wy = 1 - interp(ys)\n dots = 0\n dots += wx * wy * (gx[:-1, :-1] * xs + gy[:-1, :-1] * ys)\n dots += (1 - wx) * wy * (-gx[1:, :-1] * (1 - xs) + gy[1:, :-1] * ys)\n dots += wx * (1 - wy) * (gx[:-1, 1:] * xs - gy[:-1, 1:] * (1 - ys))\n dots += (1 - wx) * (1 - wy) * (-gx[1:, 1:] * (1 - xs) - gy[1:, 1:] * (1 - ys))\n return dots.permute(0, 2, 1, 3).contiguous().view(width * scale, height * scale)\n\ndef perlin_ms(octaves, width, height, grayscale, device=device):\n out_array = [0.5] if grayscale else [0.5, 0.5, 0.5]\n # out_array = [0.0] if grayscale else [0.0, 0.0, 0.0]\n for i in range(1 if grayscale else 3):\n scale = 2 ** len(octaves)\n oct_width = width\n oct_height = height\n for oct in octaves:\n p = perlin(oct_width, oct_height, scale, device)\n out_array[i] += p * oct\n scale //= 2\n oct_width *= 2\n oct_height *= 2\n return torch.cat(out_array)\n\ndef create_perlin_noise(octaves=[1, 1, 1, 1], width=2, height=2, grayscale=True):\n out = perlin_ms(octaves, width, height, grayscale)\n if grayscale:\n out = TF.resize(size=(side_y, side_x), img=out.unsqueeze(0))\n out = TF.to_pil_image(out.clamp(0, 1)).convert('RGB')\n else:\n out = out.reshape(-1, 3, out.shape[0]//3, out.shape[1])\n out = TF.resize(size=(side_y, side_x), img=out)\n out = TF.to_pil_image(out.clamp(0, 1).squeeze())\n\n out = ImageOps.autocontrast(out)\n return out\n\ndef regen_perlin():\n if perlin_mode == 'color':\n init = create_perlin_noise([1.5**-i*0.5 for i in range(12)], 1, 1, False)\n init2 = create_perlin_noise([1.5**-i*0.5 for i in range(8)], 4, 4, False)\n elif perlin_mode == 'gray':\n init = create_perlin_noise([1.5**-i*0.5 for i in range(12)], 1, 1, True)\n init2 = create_perlin_noise([1.5**-i*0.5 for i in range(8)], 4, 4, True)\n else:\n init = create_perlin_noise([1.5**-i*0.5 for i in range(12)], 1, 1, False)\n init2 = create_perlin_noise([1.5**-i*0.5 for i in range(8)], 4, 4, True)\n\n init = TF.to_tensor(init).add(TF.to_tensor(init2)).div(2).to(device).unsqueeze(0).mul(2).sub(1)\n del init2\n return init.expand(batch_size, -1, -1, -1)\n\ndef fetch(url_or_path):\n if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):\n r = requests.get(url_or_path)\n r.raise_for_status()\n fd = io.BytesIO()\n fd.write(r.content)\n fd.seek(0)\n return fd\n return open(url_or_path, 'rb')\n\ndef read_image_workaround(path):\n \"\"\"OpenCV reads images as BGR, Pillow saves them as RGB. 
Work around\n this incompatibility to avoid colour inversions.\"\"\"\n im_tmp = cv2.imread(path)\n return cv2.cvtColor(im_tmp, cv2.COLOR_BGR2RGB)\n\ndef parse_prompt(prompt):\n if prompt.startswith('http://') or prompt.startswith('https://'):\n vals = prompt.rsplit(':', 2)\n vals = [vals[0] + ':' + vals[1], *vals[2:]]\n else:\n vals = prompt.rsplit(':', 1)\n vals = vals + ['', '1'][len(vals):]\n return vals[0], float(vals[1])\n\ndef sinc(x):\n return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))\n\ndef lanczos(x, a):\n cond = torch.logical_and(-a < x, x < a)\n out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))\n return out / out.sum()\n\ndef ramp(ratio, width):\n n = math.ceil(width / ratio + 1)\n out = torch.empty([n])\n cur = 0\n for i in range(out.shape[0]):\n out[i] = cur\n cur += ratio\n return torch.cat([-out[1:].flip([0]), out])[1:-1]\n\ndef resample(input, size, align_corners=True):\n n, c, h, w = input.shape\n dh, dw = size\n\n input = input.reshape([n * c, 1, h, w])\n\n if dh < h:\n kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)\n pad_h = (kernel_h.shape[0] - 1) // 2\n input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')\n input = F.conv2d(input, kernel_h[None, None, :, None])\n\n if dw < w:\n kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)\n pad_w = (kernel_w.shape[0] - 1) // 2\n input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')\n input = F.conv2d(input, kernel_w[None, None, None, :])\n\n input = input.reshape([n, c, h, w])\n return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)\n\nclass MakeCutouts(nn.Module):\n def __init__(self, cut_size, cutn, skip_augs=False):\n super().__init__()\n self.cut_size = cut_size\n self.cutn = cutn\n self.skip_augs = skip_augs\n self.augs = T.Compose([\n T.RandomHorizontalFlip(p=0.5),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomAffine(degrees=15, translate=(0.1, 0.1)),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomPerspective(distortion_scale=0.4, p=0.7),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomGrayscale(p=0.15),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n # T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n ])\n\n def forward(self, input):\n input = T.Pad(input.shape[2]//4, fill=0)(input)\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n\n cutouts = []\n for ch in range(self.cutn):\n if ch > self.cutn - self.cutn//4:\n cutout = input.clone()\n else:\n size = int(max_size * torch.zeros(1,).normal_(mean=.8, std=.3).clip(float(self.cut_size/max_size), 1.))\n offsetx = torch.randint(0, abs(sideX - size + 1), ())\n offsety = torch.randint(0, abs(sideY - size + 1), ())\n cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]\n\n if not self.skip_augs:\n cutout = self.augs(cutout)\n cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))\n del cutout\n\n cutouts = torch.cat(cutouts, dim=0)\n return cutouts\n\ncutout_debug = False\npadargs = {}\n\nclass MakeCutoutsDango(nn.Module):\n def __init__(self, cut_size,\n Overview=4, \n InnerCrop = 0, IC_Size_Pow=0.5, IC_Grey_P = 0.2\n ):\n super().__init__()\n self.cut_size = cut_size\n self.Overview = Overview\n self.InnerCrop = InnerCrop\n self.IC_Size_Pow = IC_Size_Pow\n self.IC_Grey_P = IC_Grey_P\n if args.animation_mode == 'None':\n self.augs = T.Compose([\n T.RandomHorizontalFlip(p=0.5),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n 
T.RandomAffine(degrees=10, translate=(0.05, 0.05), interpolation = T.InterpolationMode.BILINEAR),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomGrayscale(p=0.1),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n ])\n elif args.animation_mode == 'Video Input':\n self.augs = T.Compose([\n T.RandomHorizontalFlip(p=0.5),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomAffine(degrees=15, translate=(0.1, 0.1)),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomPerspective(distortion_scale=0.4, p=0.7),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomGrayscale(p=0.15),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n # T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n ])\n elif args.animation_mode == '2D':\n self.augs = T.Compose([\n T.RandomHorizontalFlip(p=0.4),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomAffine(degrees=10, translate=(0.05, 0.05), interpolation = T.InterpolationMode.BILINEAR),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.RandomGrayscale(p=0.1),\n T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),\n T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.3),\n ])\n \n\n def forward(self, input):\n cutouts = []\n gray = T.Grayscale(3)\n sideY, sideX = input.shape[2:4]\n max_size = min(sideX, sideY)\n min_size = min(sideX, sideY, self.cut_size)\n l_size = max(sideX, sideY)\n output_shape = [1,3,self.cut_size,self.cut_size] \n output_shape_2 = [1,3,self.cut_size+2,self.cut_size+2]\n pad_input = F.pad(input,((sideY-max_size)//2,(sideY-max_size)//2,(sideX-max_size)//2,(sideX-max_size)//2), **padargs)\n cutout = resize(pad_input, out_shape=output_shape)\n\n if self.Overview>0:\n if self.Overview<=4:\n if self.Overview>=1:\n cutouts.append(cutout)\n if self.Overview>=2:\n cutouts.append(gray(cutout))\n if self.Overview>=3:\n cutouts.append(TF.hflip(cutout))\n if self.Overview==4:\n cutouts.append(gray(TF.hflip(cutout)))\n else:\n cutout = resize(pad_input, out_shape=output_shape)\n for _ in range(self.Overview):\n cutouts.append(cutout)\n\n if cutout_debug:\n TF.to_pil_image(cutouts[0].clamp(0, 1).squeeze(0)).save(\"/content/cutout_overview0.jpg\",quality=99)\n \n if self.InnerCrop >0:\n for i in range(self.InnerCrop):\n size = int(torch.rand([])**self.IC_Size_Pow * (max_size - min_size) + min_size)\n offsetx = torch.randint(0, sideX - size + 1, ())\n offsety = torch.randint(0, sideY - size + 1, ())\n cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]\n if i <= int(self.IC_Grey_P * self.InnerCrop):\n cutout = gray(cutout)\n cutout = resize(cutout, out_shape=output_shape)\n cutouts.append(cutout)\n if cutout_debug:\n TF.to_pil_image(cutouts[-1].clamp(0, 1).squeeze(0)).save(\"/content/cutout_InnerCrop.jpg\",quality=99)\n cutouts = torch.cat(cutouts)\n if skip_augs is not True: cutouts=self.augs(cutouts)\n return cutouts\n\ndef spherical_dist_loss(x, y):\n x = F.normalize(x, dim=-1)\n y = F.normalize(y, dim=-1)\n return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) \n\ndef tv_loss(input):\n \"\"\"L2 total variation loss, as in Mahendran et al.\"\"\"\n input = F.pad(input, (0, 1, 0, 1), 'replicate')\n x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]\n y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]\n return (x_diff**2 + y_diff**2).mean([1, 2, 3])\n\n\ndef range_loss(input):\n return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 
3])\n\nstop_on_next_loop = False # Make sure GPU memory doesn't get corrupted from cancelling the run mid-way through, allow a full frame to complete\n\ndef do_run():\n seed = args.seed\n print(range(args.start_frame, args.max_frames))\n for frame_num in range(args.start_frame, args.max_frames):\n if stop_on_next_loop:\n break\n \n display.clear_output(wait=True)\n\n # Print Frame progress if animation mode is on\n if args.animation_mode != \"None\":\n batchBar = tqdm(range(args.max_frames), desc =\"Frames\")\n batchBar.n = frame_num\n batchBar.refresh()\n\n \n # Inits if not video frames\n if args.animation_mode != \"Video Input\":\n if args.init_image == '':\n init_image = None\n else:\n init_image = args.init_image\n init_scale = args.init_scale\n skip_steps = args.skip_steps\n\n if args.animation_mode == \"2D\":\n if args.key_frames:\n angle = args.angle_series[frame_num]\n zoom = args.zoom_series[frame_num]\n translation_x = args.translation_x_series[frame_num]\n translation_y = args.translation_y_series[frame_num]\n print(\n f'angle: {angle}',\n f'zoom: {zoom}',\n f'translation_x: {translation_x}',\n f'translation_y: {translation_y}',\n )\n \n if frame_num > 0:\n seed = seed + 1 \n if resume_run and frame_num == start_frame:\n img_0 = cv2.imread(batchFolder+f\"/{batch_name}({batchNum})_{start_frame-1:04}.png\")\n else:\n img_0 = cv2.imread('prevFrame.png')\n center = (1*img_0.shape[1]//2, 1*img_0.shape[0]//2)\n trans_mat = np.float32(\n [[1, 0, translation_x],\n [0, 1, translation_y]]\n )\n rot_mat = cv2.getRotationMatrix2D( center, angle, zoom )\n trans_mat = np.vstack([trans_mat, [0,0,1]])\n rot_mat = np.vstack([rot_mat, [0,0,1]])\n transformation_matrix = np.matmul(rot_mat, trans_mat)\n img_0 = cv2.warpPerspective(\n img_0,\n transformation_matrix,\n (img_0.shape[1], img_0.shape[0]),\n borderMode=cv2.BORDER_WRAP\n )\n cv2.imwrite('prevFrameScaled.png', img_0)\n init_image = 'prevFrameScaled.png'\n init_scale = args.frames_scale\n skip_steps = args.calc_frames_skip_steps\n\n if args.animation_mode == \"Video Input\":\n seed = seed + 1 \n init_image = f'{videoFramesFolder}/{frame_num+1:04}.jpg'\n init_scale = args.frames_scale\n skip_steps = args.calc_frames_skip_steps\n\n loss_values = []\n \n if seed is not None:\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n \n target_embeds, weights = [], []\n \n if args.prompts_series is not None and frame_num >= len(args.prompts_series):\n frame_prompt = args.prompts_series[-1]\n elif args.prompts_series is not None:\n frame_prompt = args.prompts_series[frame_num]\n else:\n frame_prompt = []\n \n print(args.image_prompts_series)\n if args.image_prompts_series is not None and frame_num >= len(args.image_prompts_series):\n image_prompt = args.image_prompts_series[-1]\n elif args.image_prompts_series is not None:\n image_prompt = args.image_prompts_series[frame_num]\n else:\n image_prompt = []\n\n print(f'Frame Prompt: {frame_prompt}')\n\n model_stats = []\n for clip_model in clip_models:\n cutn = 16\n model_stat = {\"clip_model\":None,\"target_embeds\":[],\"make_cutouts\":None,\"weights\":[]}\n model_stat[\"clip_model\"] = clip_model\n \n \n for prompt in frame_prompt:\n txt, weight = parse_prompt(prompt)\n txt = clip_model.encode_text(clip.tokenize(prompt).to(device)).float()\n \n if args.fuzzy_prompt:\n for i in range(25):\n model_stat[\"target_embeds\"].append((txt + torch.randn(txt.shape).cuda() * args.rand_mag).clamp(0,1))\n 
model_stat[\"weights\"].append(weight)\n else:\n model_stat[\"target_embeds\"].append(txt)\n model_stat[\"weights\"].append(weight)\n \n if image_prompt:\n model_stat[\"make_cutouts\"] = MakeCutouts(clip_model.visual.input_resolution, cutn, skip_augs=skip_augs) \n for prompt in image_prompt:\n path, weight = parse_prompt(prompt)\n img = Image.open(fetch(path)).convert('RGB')\n img = TF.resize(img, min(side_x, side_y, *img.size), T.InterpolationMode.LANCZOS)\n batch = model_stat[\"make_cutouts\"](TF.to_tensor(img).to(device).unsqueeze(0).mul(2).sub(1))\n embed = clip_model.encode_image(normalize(batch)).float()\n if fuzzy_prompt:\n for i in range(25):\n model_stat[\"target_embeds\"].append((embed + torch.randn(embed.shape).cuda() * rand_mag).clamp(0,1))\n weights.extend([weight / cutn] * cutn)\n else:\n model_stat[\"target_embeds\"].append(embed)\n model_stat[\"weights\"].extend([weight / cutn] * cutn)\n \n model_stat[\"target_embeds\"] = torch.cat(model_stat[\"target_embeds\"])\n model_stat[\"weights\"] = torch.tensor(model_stat[\"weights\"], device=device)\n if model_stat[\"weights\"].sum().abs() < 1e-3:\n raise RuntimeError('The weights must not sum to 0.')\n model_stat[\"weights\"] /= model_stat[\"weights\"].sum().abs()\n model_stats.append(model_stat)\n \n init = None\n if init_image is not None:\n init = Image.open(fetch(init_image)).convert('RGB')\n init = init.resize((args.side_x, args.side_y), Image.LANCZOS)\n init = TF.to_tensor(init).to(device).unsqueeze(0).mul(2).sub(1)\n \n if args.perlin_init:\n if args.perlin_mode == 'color':\n init = create_perlin_noise([1.5**-i*0.5 for i in range(12)], 1, 1, False)\n init2 = create_perlin_noise([1.5**-i*0.5 for i in range(8)], 4, 4, False)\n elif args.perlin_mode == 'gray':\n init = create_perlin_noise([1.5**-i*0.5 for i in range(12)], 1, 1, True)\n init2 = create_perlin_noise([1.5**-i*0.5 for i in range(8)], 4, 4, True)\n else:\n init = create_perlin_noise([1.5**-i*0.5 for i in range(12)], 1, 1, False)\n init2 = create_perlin_noise([1.5**-i*0.5 for i in range(8)], 4, 4, True)\n # init = TF.to_tensor(init).add(TF.to_tensor(init2)).div(2).to(device)\n init = TF.to_tensor(init).add(TF.to_tensor(init2)).div(2).to(device).unsqueeze(0).mul(2).sub(1)\n del init2\n \n cur_t = None\n \n def cond_fn(x, t, y=None):\n with torch.enable_grad():\n x_is_NaN = False\n x = x.detach().requires_grad_()\n n = x.shape[0]\n if use_secondary_model is True:\n alpha = torch.tensor(diffusion.sqrt_alphas_cumprod[cur_t], device=device, dtype=torch.float32)\n sigma = torch.tensor(diffusion.sqrt_one_minus_alphas_cumprod[cur_t], device=device, dtype=torch.float32)\n cosine_t = alpha_sigma_to_t(alpha, sigma)\n out = secondary_model(x, cosine_t[None].repeat([n])).pred\n fac = diffusion.sqrt_one_minus_alphas_cumprod[cur_t]\n x_in = out * fac + x * (1 - fac)\n x_in_grad = torch.zeros_like(x_in)\n else:\n my_t = torch.ones([n], device=device, dtype=torch.long) * cur_t\n out = diffusion.p_mean_variance(model, x, my_t, clip_denoised=False, model_kwargs={'y': y})\n fac = diffusion.sqrt_one_minus_alphas_cumprod[cur_t]\n x_in = out['pred_xstart'] * fac + x * (1 - fac)\n x_in_grad = torch.zeros_like(x_in)\n for model_stat in model_stats:\n for i in range(args.cutn_batches):\n t_int = int(t.item())+1 #errors on last step without +1, need to find source\n #when using SLIP Base model the dimensions need to be hard coded to avoid AttributeError: 'VisionTransformer' object has no attribute 'input_resolution'\n try:\n input_resolution=model_stat[\"clip_model\"].visual.input_resolution\n 
except:\n input_resolution=224\n\n cuts = MakeCutoutsDango(input_resolution,\n Overview= args.cut_overview[1000-t_int], \n InnerCrop = args.cut_innercut[1000-t_int], IC_Size_Pow=args.cut_ic_pow, IC_Grey_P = args.cut_icgray_p[1000-t_int]\n )\n clip_in = normalize(cuts(x_in.add(1).div(2)))\n image_embeds = model_stat[\"clip_model\"].encode_image(clip_in).float()\n dists = spherical_dist_loss(image_embeds.unsqueeze(1), model_stat[\"target_embeds\"].unsqueeze(0))\n dists = dists.view([args.cut_overview[1000-t_int]+args.cut_innercut[1000-t_int], n, -1])\n losses = dists.mul(model_stat[\"weights\"]).sum(2).mean(0)\n loss_values.append(losses.sum().item()) # log loss, probably shouldn't do per cutn_batch\n x_in_grad += torch.autograd.grad(losses.sum() * clip_guidance_scale, x_in)[0] / cutn_batches\n tv_losses = tv_loss(x_in)\n if use_secondary_model is True:\n range_losses = range_loss(out)\n else:\n range_losses = range_loss(out['pred_xstart'])\n sat_losses = torch.abs(x_in - x_in.clamp(min=-1,max=1)).mean()\n loss = tv_losses.sum() * tv_scale + range_losses.sum() * range_scale + sat_losses.sum() * sat_scale\n if init is not None and args.init_scale:\n init_losses = lpips_model(x_in, init)\n loss = loss + init_losses.sum() * args.init_scale\n x_in_grad += torch.autograd.grad(loss, x_in)[0]\n if torch.isnan(x_in_grad).any()==False:\n grad = -torch.autograd.grad(x_in, x, x_in_grad)[0]\n else:\n # print(\"NaN'd\")\n x_is_NaN = True\n grad = torch.zeros_like(x)\n if args.clamp_grad and x_is_NaN == False:\n magnitude = grad.square().mean().sqrt()\n return grad * magnitude.clamp(max=args.clamp_max) / magnitude #min=-0.02, min=-clamp_max, \n return grad\n \n if model_config['timestep_respacing'].startswith('ddim'):\n sample_fn = diffusion.ddim_sample_loop_progressive\n else:\n sample_fn = diffusion.p_sample_loop_progressive\n \n\n image_display = Output()\n for i in range(args.n_batches):\n if args.animation_mode == 'None':\n display.clear_output(wait=True)\n batchBar = tqdm(range(args.n_batches), desc =\"Batches\")\n batchBar.n = i\n batchBar.refresh()\n print('')\n display.display(image_display)\n gc.collect()\n torch.cuda.empty_cache()\n cur_t = diffusion.num_timesteps - skip_steps - 1\n total_steps = cur_t\n\n if perlin_init:\n init = regen_perlin()\n\n if model_config['timestep_respacing'].startswith('ddim'):\n samples = sample_fn(\n model,\n (batch_size, 3, args.side_y, args.side_x),\n clip_denoised=clip_denoised,\n model_kwargs={},\n cond_fn=cond_fn,\n progress=True,\n skip_timesteps=skip_steps,\n init_image=init,\n randomize_class=randomize_class,\n eta=eta,\n )\n else:\n samples = sample_fn(\n model,\n (batch_size, 3, args.side_y, args.side_x),\n clip_denoised=clip_denoised,\n model_kwargs={},\n cond_fn=cond_fn,\n progress=True,\n skip_timesteps=skip_steps,\n init_image=init,\n randomize_class=randomize_class,\n )\n \n \n # with run_display:\n # display.clear_output(wait=True)\n imgToSharpen = None\n for j, sample in enumerate(samples): \n cur_t -= 1\n intermediateStep = False\n if args.steps_per_checkpoint is not None:\n if j % steps_per_checkpoint == 0 and j > 0:\n intermediateStep = True\n elif j in args.intermediate_saves:\n intermediateStep = True\n with image_display:\n if j % args.display_rate == 0 or cur_t == -1 or intermediateStep == True:\n for k, image in enumerate(sample['pred_xstart']):\n # tqdm.write(f'Batch {i}, step {j}, output {k}:')\n current_time = datetime.now().strftime('%y%m%d-%H%M%S_%f')\n percent = math.ceil(j/total_steps*100)\n if args.n_batches > 0:\n #if 
intermediates are saved to the subfolder, don't append a step or percentage to the name\n                if cur_t == -1 and args.intermediates_in_subfolder is True:\n                  save_num = f'{frame_num:04}' if animation_mode != \"None\" else i\n                  filename = f'{args.batch_name}({args.batchNum})_{save_num}.png'\n                else:\n                  # If we're working with percentages, append it\n                  if args.steps_per_checkpoint is not None:\n                    filename = f'{args.batch_name}({args.batchNum})_{i:04}-{percent:02}%.png'\n                  # Or else, if we're working with specific steps, append those\n                  else:\n                    filename = f'{args.batch_name}({args.batchNum})_{i:04}-{j:03}.png'\n                image = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))\n                if j % args.display_rate == 0 or cur_t == -1:\n                  image.save('progress.png')\n                  display.clear_output(wait=True)\n                  display.display(display.Image('progress.png'))\n                if args.steps_per_checkpoint is not None:\n                  if j % args.steps_per_checkpoint == 0 and j > 0:\n                    if args.intermediates_in_subfolder is True:\n                      image.save(f'{partialFolder}/{filename}')\n                    else:\n                      image.save(f'{batchFolder}/{filename}')\n                else:\n                  if j in args.intermediate_saves:\n                    if args.intermediates_in_subfolder is True:\n                      image.save(f'{partialFolder}/{filename}')\n                    else:\n                      image.save(f'{batchFolder}/{filename}')\n                if cur_t == -1:\n                  if frame_num == 0:\n                    save_settings()\n                  if args.animation_mode != \"None\":\n                    image.save('prevFrame.png')\n                  if args.sharpen_preset != \"Off\" and animation_mode == \"None\":\n                    imgToSharpen = image\n                    if args.keep_unsharp is True:\n                      image.save(f'{unsharpenFolder}/{filename}')\n                  else:\n                    image.save(f'{batchFolder}/{filename}')\n          # if frame_num != args.max_frames-1:\n          #   display.clear_output()\n\n    with image_display:\n      if args.sharpen_preset != \"Off\" and animation_mode == \"None\":\n        print('Starting Diffusion Sharpening...')\n        do_superres(imgToSharpen, f'{batchFolder}/{filename}')\n        display.clear_output()\n\n    plt.plot(np.array(loss_values), 'r')\n\ndef save_settings():\n  setting_list = {\n    'text_prompts': text_prompts,\n    'image_prompts': image_prompts,\n    'clip_guidance_scale': clip_guidance_scale,\n    'tv_scale': tv_scale,\n    'range_scale': range_scale,\n    'sat_scale': sat_scale,\n    # 'cutn': cutn,\n    'cutn_batches': cutn_batches,\n    'max_frames': max_frames,\n    'interp_spline': interp_spline,\n    # 'rotation_per_frame': rotation_per_frame,\n    'init_image': init_image,\n    'init_scale': init_scale,\n    'skip_steps': skip_steps,\n    # 'zoom_per_frame': zoom_per_frame,\n    'frames_scale': frames_scale,\n    'frames_skip_steps': frames_skip_steps,\n    'perlin_init': perlin_init,\n    'perlin_mode': perlin_mode,\n    'skip_augs': skip_augs,\n    'randomize_class': randomize_class,\n    'clip_denoised': clip_denoised,\n    'clamp_grad': clamp_grad,\n    'clamp_max': clamp_max,\n    'seed': seed,\n    'fuzzy_prompt': fuzzy_prompt,\n    'rand_mag': rand_mag,\n    'eta': eta,\n    'width': width_height[0],\n    'height': width_height[1],\n    'diffusion_model': diffusion_model,\n    'use_secondary_model': use_secondary_model,\n    'steps': steps,\n    'diffusion_steps': diffusion_steps,\n    'ViTB32': ViTB32,\n    'ViTB16': ViTB16,\n    'ViTL14': ViTL14,\n    'RN101': RN101,\n    'RN50': RN50,\n    'RN50x4': RN50x4,\n    'RN50x16': RN50x16,\n    'RN50x64': RN50x64,\n    'cut_overview': str(cut_overview),\n    'cut_innercut': str(cut_innercut),\n    'cut_ic_pow': cut_ic_pow,\n    'cut_icgray_p': str(cut_icgray_p),\n    'key_frames': key_frames,\n    'angle': angle,\n    'zoom': zoom,\n    'translation_x': translation_x,\n    'translation_y': translation_y,\n    'video_init_path': video_init_path,\n    'extract_nth_frame': extract_nth_frame,\n  }\n  # print('Settings:', 
setting_list)\n with open(f\"{batchFolder}/{batch_name}({batchNum})_settings.txt\", \"w+\") as f: #save settings\n json.dump(setting_list, f, ensure_ascii=False, indent=4)\n ",
"_____no_output_____"
],
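[
"# Quick reference for parse_prompt defined above; the expected values follow\n# directly from its rsplit logic (an optional ':weight' suffix is split off,\n# and URLs keep the colon in their scheme intact).\nprint(parse_prompt('a castle on a hill'))               # ('a castle on a hill', 1.0)\nprint(parse_prompt('a castle on a hill:2'))             # ('a castle on a hill', 2.0)\nprint(parse_prompt('https://example.com/img.png:0.5'))  # ('https://example.com/img.png', 0.5)",
"_____no_output_____"
],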
[
"#@title 1.5 Define the secondary diffusion model\n\ndef append_dims(x, n):\n return x[(Ellipsis, *(None,) * (n - x.ndim))]\n\n\ndef expand_to_planes(x, shape):\n return append_dims(x, len(shape)).repeat([1, 1, *shape[2:]])\n\n\ndef alpha_sigma_to_t(alpha, sigma):\n return torch.atan2(sigma, alpha) * 2 / math.pi\n\n\ndef t_to_alpha_sigma(t):\n return torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2)\n\n\n@dataclass\nclass DiffusionOutput:\n v: torch.Tensor\n pred: torch.Tensor\n eps: torch.Tensor\n\n\nclass ConvBlock(nn.Sequential):\n def __init__(self, c_in, c_out):\n super().__init__(\n nn.Conv2d(c_in, c_out, 3, padding=1),\n nn.ReLU(inplace=True),\n )\n\n\nclass SkipBlock(nn.Module):\n def __init__(self, main, skip=None):\n super().__init__()\n self.main = nn.Sequential(*main)\n self.skip = skip if skip else nn.Identity()\n\n def forward(self, input):\n return torch.cat([self.main(input), self.skip(input)], dim=1)\n\n\nclass FourierFeatures(nn.Module):\n def __init__(self, in_features, out_features, std=1.):\n super().__init__()\n assert out_features % 2 == 0\n self.weight = nn.Parameter(torch.randn([out_features // 2, in_features]) * std)\n\n def forward(self, input):\n f = 2 * math.pi * input @ self.weight.T\n return torch.cat([f.cos(), f.sin()], dim=-1)\n\n\nclass SecondaryDiffusionImageNet(nn.Module):\n def __init__(self):\n super().__init__()\n c = 64 # The base channel count\n\n self.timestep_embed = FourierFeatures(1, 16)\n\n self.net = nn.Sequential(\n ConvBlock(3 + 16, c),\n ConvBlock(c, c),\n SkipBlock([\n nn.AvgPool2d(2),\n ConvBlock(c, c * 2),\n ConvBlock(c * 2, c * 2),\n SkipBlock([\n nn.AvgPool2d(2),\n ConvBlock(c * 2, c * 4),\n ConvBlock(c * 4, c * 4),\n SkipBlock([\n nn.AvgPool2d(2),\n ConvBlock(c * 4, c * 8),\n ConvBlock(c * 8, c * 4),\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),\n ]),\n ConvBlock(c * 8, c * 4),\n ConvBlock(c * 4, c * 2),\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),\n ]),\n ConvBlock(c * 4, c * 2),\n ConvBlock(c * 2, c),\n nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),\n ]),\n ConvBlock(c * 2, c),\n nn.Conv2d(c, 3, 3, padding=1),\n )\n\n def forward(self, input, t):\n timestep_embed = expand_to_planes(self.timestep_embed(t[:, None]), input.shape)\n v = self.net(torch.cat([input, timestep_embed], dim=1))\n alphas, sigmas = map(partial(append_dims, n=v.ndim), t_to_alpha_sigma(t))\n pred = input * alphas - v * sigmas\n eps = input * sigmas + v * alphas\n return DiffusionOutput(v, pred, eps)\n\n\nclass SecondaryDiffusionImageNet2(nn.Module):\n def __init__(self):\n super().__init__()\n c = 64 # The base channel count\n cs = [c, c * 2, c * 2, c * 4, c * 4, c * 8]\n\n self.timestep_embed = FourierFeatures(1, 16)\n self.down = nn.AvgPool2d(2)\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)\n\n self.net = nn.Sequential(\n ConvBlock(3 + 16, cs[0]),\n ConvBlock(cs[0], cs[0]),\n SkipBlock([\n self.down,\n ConvBlock(cs[0], cs[1]),\n ConvBlock(cs[1], cs[1]),\n SkipBlock([\n self.down,\n ConvBlock(cs[1], cs[2]),\n ConvBlock(cs[2], cs[2]),\n SkipBlock([\n self.down,\n ConvBlock(cs[2], cs[3]),\n ConvBlock(cs[3], cs[3]),\n SkipBlock([\n self.down,\n ConvBlock(cs[3], cs[4]),\n ConvBlock(cs[4], cs[4]),\n SkipBlock([\n self.down,\n ConvBlock(cs[4], cs[5]),\n ConvBlock(cs[5], cs[5]),\n ConvBlock(cs[5], cs[5]),\n ConvBlock(cs[5], cs[4]),\n self.up,\n ]),\n ConvBlock(cs[4] * 2, cs[4]),\n ConvBlock(cs[4], cs[3]),\n self.up,\n ]),\n ConvBlock(cs[3] * 2, cs[3]),\n 
ConvBlock(cs[3], cs[2]),\n self.up,\n ]),\n ConvBlock(cs[2] * 2, cs[2]),\n ConvBlock(cs[2], cs[1]),\n self.up,\n ]),\n ConvBlock(cs[1] * 2, cs[1]),\n ConvBlock(cs[1], cs[0]),\n self.up,\n ]),\n ConvBlock(cs[0] * 2, cs[0]),\n nn.Conv2d(cs[0], 3, 3, padding=1),\n )\n\n def forward(self, input, t):\n timestep_embed = expand_to_planes(self.timestep_embed(t[:, None]), input.shape)\n v = self.net(torch.cat([input, timestep_embed], dim=1))\n alphas, sigmas = map(partial(append_dims, n=v.ndim), t_to_alpha_sigma(t))\n pred = input * alphas - v * sigmas\n eps = input * sigmas + v * alphas\n return DiffusionOutput(v, pred, eps)\n",
"_____no_output_____"
],
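A quick standalone check of the v-objective parameterization returned by the secondary models above. This is a sketch under the assumption alpha = cos(t*pi/2) and sigma = sin(t*pi/2), matching `t_to_alpha_sigma`; since alpha^2 + sigma^2 = 1, the predicted image and predicted noise recombine exactly to the network input.

```python
import math
import torch

def t_to_alpha_sigma(t):
    # Same schedule as the cell above: cosine/sine split of signal and noise.
    return torch.cos(t * math.pi / 2), torch.sin(t * math.pi / 2)

x = torch.randn(4, 3, 8, 8)   # stand-in for noisy latents
v = torch.randn_like(x)       # stand-in for the network's v output
t = torch.rand(4)             # one timestep per batch element
alpha, sigma = (s[:, None, None, None] for s in t_to_alpha_sigma(t))

pred = x * alpha - v * sigma  # predicted clean image, as in forward()
eps = x * sigma + v * alpha   # predicted noise, as in forward()
# alpha**2 + sigma**2 == 1, so pred and eps recombine exactly to the input:
assert torch.allclose(alpha * pred + sigma * eps, x, atol=1e-5)
```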
[
"#@title 1.6 SuperRes Define\nclass DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sharpening with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sharpening', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n e_t = self.model.apply_model(x, t, c)\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n\ndef download_models(mode):\n\n if mode == \"superresolution\":\n # this is the small bsr light model\n url_conf = 'https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1'\n url_ckpt = 'https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1'\n\n path_conf = f'{model_path}/superres/project.yaml'\n path_ckpt = f'{model_path}/superres/last.ckpt'\n\n download_url(url_conf, path_conf)\n download_url(url_ckpt, path_ckpt)\n\n path_conf = path_conf + '/?dl=1' # fix it\n path_ckpt = path_ckpt + '/?dl=1' # fix it\n return path_conf, path_ckpt\n\n else:\n raise NotImplementedError\n\n\ndef load_model_from_config(config, ckpt):\n print(f\"Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n global_step = pl_sd[\"global_step\"]\n sd = pl_sd[\"state_dict\"]\n model = instantiate_from_config(config.model)\n m, u = model.load_state_dict(sd, strict=False)\n model.cuda()\n model.eval()\n return {\"model\": model}, global_step\n\n\ndef get_model(mode):\n path_conf, path_ckpt = download_models(mode)\n config = OmegaConf.load(path_conf)\n model, step = load_model_from_config(config, path_ckpt)\n return model\n\n\ndef get_custom_cond(mode):\n dest = \"data/example_conditioning\"\n\n if mode == \"superresolution\":\n uploaded_img = files.upload()\n filename = next(iter(uploaded_img))\n name, filetype = filename.split(\".\") # todo assumes just one dot in name !\n os.rename(f\"{filename}\", f\"{dest}/{mode}/custom_{name}.{filetype}\")\n\n elif mode == \"text_conditional\":\n w = widgets.Text(value='A cake with cream!', disabled=True)\n display.display(w)\n\n with open(f\"{dest}/{mode}/custom_{w.value[:20]}.txt\", 'w') as f:\n f.write(w.value)\n\n elif mode == \"class_conditional\":\n w = widgets.IntSlider(min=0, max=1000)\n display.display(w)\n with open(f\"{dest}/{mode}/custom.txt\", 'w') as f:\n f.write(w.value)\n\n else:\n raise NotImplementedError(f\"cond not implemented for mode{mode}\")\n\n\ndef get_cond_options(mode):\n path = \"data/example_conditioning\"\n path = os.path.join(path, mode)\n onlyfiles = [f for f in sorted(os.listdir(path))]\n return path, onlyfiles\n\n\ndef select_cond_path(mode):\n path = \"data/example_conditioning\" # todo\n path = os.path.join(path, mode)\n onlyfiles = [f for f in sorted(os.listdir(path))]\n\n selected = widgets.RadioButtons(\n options=onlyfiles,\n description='Select conditioning:',\n disabled=False\n )\n display.display(selected)\n selected_path = os.path.join(path, selected.value)\n return selected_path\n\n\ndef get_cond(mode, img):\n example = dict()\n if mode == \"superresolution\":\n up_f = 4\n # visualize_cond_img(selected_path)\n\n c = img\n c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)\n c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]], antialias=True)\n c_up = rearrange(c_up, '1 c h w -> 1 h w c')\n c = rearrange(c, '1 c h w -> 1 h w c')\n c = 2. 
* c - 1.\n\n c = c.to(torch.device(\"cuda\"))\n example[\"LR_image\"] = c\n example[\"image\"] = c_up\n\n return example\n\n\ndef visualize_cond_img(path):\n display.display(ipyimg(filename=path))\n\n\ndef sr_run(model, img, task, custom_steps, eta, resize_enabled=False, classifier_ckpt=None, global_step=None):\n # global stride\n\n example = get_cond(task, img)\n\n save_intermediate_vid = False\n n_runs = 1\n masked = False\n guider = None\n ckwargs = None\n mode = 'ddim'\n ddim_use_x0_pred = False\n temperature = 1.\n eta = eta\n make_progrow = True\n custom_shape = None\n\n height, width = example[\"image\"].shape[1:3]\n split_input = height >= 128 and width >= 128\n\n if split_input:\n ks = 128\n stride = 64\n vqf = 4 #\n model.split_input_params = {\"ks\": (ks, ks), \"stride\": (stride, stride),\n \"vqf\": vqf,\n \"patch_distributed_vq\": True,\n \"tie_braker\": False,\n \"clip_max_weight\": 0.5,\n \"clip_min_weight\": 0.01,\n \"clip_max_tie_weight\": 0.5,\n \"clip_min_tie_weight\": 0.01}\n else:\n if hasattr(model, \"split_input_params\"):\n delattr(model, \"split_input_params\")\n\n invert_mask = False\n\n x_T = None\n for n in range(n_runs):\n if custom_shape is not None:\n x_T = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)\n x_T = repeat(x_T, '1 c h w -> b c h w', b=custom_shape[0])\n\n logs = make_convolutional_sample(example, model,\n mode=mode, custom_steps=custom_steps,\n eta=eta, swap_mode=False , masked=masked,\n invert_mask=invert_mask, quantize_x0=False,\n custom_schedule=None, decode_interval=10,\n resize_enabled=resize_enabled, custom_shape=custom_shape,\n temperature=temperature, noise_dropout=0.,\n corrector=guider, corrector_kwargs=ckwargs, x_T=x_T, save_intermediate_vid=save_intermediate_vid,\n make_progrow=make_progrow,ddim_use_x0_pred=ddim_use_x0_pred\n )\n return logs\n\n\[email protected]_grad()\ndef convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,\n mask=None, x0=None, quantize_x0=False, img_callback=None,\n temperature=1., noise_dropout=0., score_corrector=None,\n corrector_kwargs=None, x_T=None, log_every_t=None\n ):\n\n ddim = DDIMSampler(model)\n bs = shape[0] # dont know where this comes from but wayne\n shape = shape[1:] # cut batch dim\n # print(f\"Sampling with eta = {eta}; steps: {steps}\")\n samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,\n normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,\n mask=mask, x0=x0, temperature=temperature, verbose=False,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs, x_T=x_T)\n\n return samples, intermediates\n\n\[email protected]_grad()\ndef make_convolutional_sample(batch, model, mode=\"vanilla\", custom_steps=None, eta=1.0, swap_mode=False, masked=False,\n invert_mask=True, quantize_x0=False, custom_schedule=None, decode_interval=1000,\n resize_enabled=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,\n corrector_kwargs=None, x_T=None, save_intermediate_vid=False, make_progrow=True,ddim_use_x0_pred=False):\n log = dict()\n\n z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=not (hasattr(model, 'split_input_params')\n and model.cond_stage_key == 'coordinates_bbox'),\n return_original_cond=True)\n\n log_every_t = 1 if save_intermediate_vid else None\n\n if custom_shape is not None:\n z = torch.randn(custom_shape)\n # print(f\"Generating 
{custom_shape[0]} samples of shape {custom_shape[1:]}\")\n\n z0 = None\n\n log[\"input\"] = x\n log[\"reconstruction\"] = xrec\n\n if ismap(xc):\n log[\"original_conditioning\"] = model.to_rgb(xc)\n if hasattr(model, 'cond_stage_key'):\n log[model.cond_stage_key] = model.to_rgb(xc)\n\n else:\n log[\"original_conditioning\"] = xc if xc is not None else torch.zeros_like(x)\n if model.cond_stage_model:\n log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)\n if model.cond_stage_key =='class_label':\n log[model.cond_stage_key] = xc[model.cond_stage_key]\n\n with model.ema_scope(\"Plotting\"):\n t0 = time.time()\n img_cb = None\n\n sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,\n eta=eta,\n quantize_x0=quantize_x0, img_callback=img_cb, mask=None, x0=z0,\n temperature=temperature, noise_dropout=noise_dropout,\n score_corrector=corrector, corrector_kwargs=corrector_kwargs,\n x_T=x_T, log_every_t=log_every_t)\n t1 = time.time()\n\n if ddim_use_x0_pred:\n sample = intermediates['pred_x0'][-1]\n\n x_sample = model.decode_first_stage(sample)\n\n try:\n x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)\n log[\"sample_noquant\"] = x_sample_noquant\n log[\"sample_diff\"] = torch.abs(x_sample_noquant - x_sample)\n except:\n pass\n\n log[\"sample\"] = x_sample\n log[\"time\"] = t1 - t0\n\n return log\n\nsr_diffMode = 'superresolution'\nsr_model = get_model('superresolution')\n\n\n\n\n\n\ndef do_superres(img, filepath):\n\n if args.sharpen_preset == 'Faster':\n sr_diffusion_steps = \"25\" \n sr_pre_downsample = '1/2' \n if args.sharpen_preset == 'Fast':\n sr_diffusion_steps = \"100\" \n sr_pre_downsample = '1/2' \n if args.sharpen_preset == 'Slow':\n sr_diffusion_steps = \"25\" \n sr_pre_downsample = 'None' \n if args.sharpen_preset == 'Very Slow':\n sr_diffusion_steps = \"100\" \n sr_pre_downsample = 'None' \n\n\n sr_post_downsample = 'Original Size'\n sr_diffusion_steps = int(sr_diffusion_steps)\n sr_eta = 1.0 \n sr_downsample_method = 'Lanczos' \n\n gc.collect()\n torch.cuda.empty_cache()\n\n im_og = img\n width_og, height_og = im_og.size\n\n #Downsample Pre\n if sr_pre_downsample == '1/2':\n downsample_rate = 2\n elif sr_pre_downsample == '1/4':\n downsample_rate = 4\n else:\n downsample_rate = 1\n\n width_downsampled_pre = width_og//downsample_rate\n height_downsampled_pre = height_og//downsample_rate\n\n if downsample_rate != 1:\n # print(f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')\n im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)\n # im_og.save('/content/temp.png')\n # filepath = '/content/temp.png'\n\n logs = sr_run(sr_model[\"model\"], im_og, sr_diffMode, sr_diffusion_steps, sr_eta)\n\n sample = logs[\"sample\"]\n sample = sample.detach().cpu()\n sample = torch.clamp(sample, -1., 1.)\n sample = (sample + 1.) / 2. 
* 255\n  sample = sample.numpy().astype(np.uint8)\n  sample = np.transpose(sample, (0, 2, 3, 1))\n  a = Image.fromarray(sample[0])\n\n  #Downsample Post\n  if sr_post_downsample == '1/2':\n    downsample_rate = 2\n  elif sr_post_downsample == '1/4':\n    downsample_rate = 4\n  else:\n    downsample_rate = 1\n\n  width, height = a.size\n  width_downsampled_post = width//downsample_rate\n  height_downsampled_post = height//downsample_rate\n\n  if sr_downsample_method == 'Lanczos':\n    aliasing = Image.LANCZOS\n  else:\n    aliasing = Image.NEAREST\n\n  if downsample_rate != 1:\n    # print(f'Downsampling from [{width}, {height}] to [{width_downsampled_post}, {height_downsampled_post}]')\n    a = a.resize((width_downsampled_post, height_downsampled_post), aliasing)\n  elif sr_post_downsample == 'Original Size':\n    # print(f'Downsampling from [{width}, {height}] to Original Size [{width_og}, {height_og}]')\n    a = a.resize((width_og, height_og), aliasing)\n\n  display.display(a)\n  a.save(filepath)\n  print(f'Processing finished!')\n  return\n",
"_____no_output_____"
]
],
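To make the update inside `p_sample_ddim` easier to follow, here is a minimal sketch of one deterministic DDIM step (eta = 0, so the `sigma_t` noise term vanishes). The scalar cumulative alphas are made-up stand-ins, not values from the real schedule.

```python
import torch

x = torch.randn(1, 3, 16, 16)   # current noisy sample x_t
e_t = torch.randn_like(x)       # stand-in for the model's noise prediction
a_t = torch.tensor(0.5)         # toy alpha_cumprod at this step
a_prev = torch.tensor(0.7)      # toy alpha_cumprod at the previous step

pred_x0 = (x - (1 - a_t).sqrt() * e_t) / a_t.sqrt()  # current estimate of x_0
dir_xt = (1 - a_prev).sqrt() * e_t                   # direction pointing back to x_t
x_prev = a_prev.sqrt() * pred_x0 + dir_xt            # eta = 0: no added noise
print(x_prev.shape)                                  # torch.Size([1, 3, 16, 16])
```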
[
[
"# 2. Diffusion and CLIP model settings",
"_____no_output_____"
]
],
[
[
"#@markdown ####**Models Settings:**\ndiffusion_model = \"512x512_diffusion_uncond_finetune_008100\" #@param [\"256x256_diffusion_uncond\", \"512x512_diffusion_uncond_finetune_008100\"]\nuse_secondary_model = True #@param {type: 'boolean'}\n\ntimestep_respacing = '50' # param ['25','50','100','150','250','500','1000','ddim25','ddim50', 'ddim75', 'ddim100','ddim150','ddim250','ddim500','ddim1000'] \ndiffusion_steps = 1000 # param {type: 'number'}\nuse_checkpoint = True #@param {type: 'boolean'}\nViTB32 = True #@param{type:\"boolean\"}\nViTB16 = True #@param{type:\"boolean\"}\nViTL14 = False #@param{type:\"boolean\"}\nRN101 = False #@param{type:\"boolean\"}\nRN50 = True #@param{type:\"boolean\"}\nRN50x4 = False #@param{type:\"boolean\"}\nRN50x16 = False #@param{type:\"boolean\"}\nRN50x64 = False #@param{type:\"boolean\"}\nSLIPB16 = False # param{type:\"boolean\"}\nSLIPL16 = False # param{type:\"boolean\"}\n\n#@markdown If you're having issues with model downloads, check this to compare SHA's:\ncheck_model_SHA = False #@param{type:\"boolean\"}\n\nmodel_256_SHA = '983e3de6f95c88c81b2ca7ebb2c217933be1973b1ff058776b970f901584613a'\nmodel_512_SHA = '9c111ab89e214862b76e1fa6a1b3f1d329b1a88281885943d2cdbe357ad57648'\nmodel_secondary_SHA = '983e3de6f95c88c81b2ca7ebb2c217933be1973b1ff058776b970f901584613a'\n\nmodel_256_link = 'https://openaipublic.blob.core.windows.net/diffusion/jul-2021/256x256_diffusion_uncond.pt'\nmodel_512_link = 'https://v-diffusion.s3.us-west-2.amazonaws.com/512x512_diffusion_uncond_finetune_008100.pt'\nmodel_secondary_link = 'https://v-diffusion.s3.us-west-2.amazonaws.com/secondary_model_imagenet_2.pth'\n\nmodel_256_path = f'{model_path}/256x256_diffusion_uncond.pt'\nmodel_512_path = f'{model_path}/512x512_diffusion_uncond_finetune_008100.pt'\nmodel_secondary_path = f'{model_path}/secondary_model_imagenet_2.pth'\n\n# Download the diffusion model\nif diffusion_model == '256x256_diffusion_uncond':\n if os.path.exists(model_256_path) and check_model_SHA:\n print('Checking 256 Diffusion File')\n with open(model_256_path,\"rb\") as f:\n bytes = f.read() \n hash = hashlib.sha256(bytes).hexdigest();\n if hash == model_256_SHA:\n print('256 Model SHA matches')\n model_256_downloaded = True\n else: \n print(\"256 Model SHA doesn't match, redownloading...\")\n !wget --continue {model_256_link} -P {model_path}\n model_256_downloaded = True\n elif os.path.exists(model_256_path) and not check_model_SHA or model_256_downloaded == True:\n print('256 Model already downloaded, check check_model_SHA if the file is corrupt')\n else: \n !wget --continue {model_256_link} -P {model_path}\n model_256_downloaded = True\nelif diffusion_model == '512x512_diffusion_uncond_finetune_008100':\n if os.path.exists(model_512_path) and check_model_SHA:\n print('Checking 512 Diffusion File')\n with open(model_512_path,\"rb\") as f:\n bytes = f.read() \n hash = hashlib.sha256(bytes).hexdigest();\n if hash == model_512_SHA:\n print('512 Model SHA matches')\n model_512_downloaded = True\n else: \n print(\"512 Model SHA doesn't match, redownloading...\")\n !wget --continue {model_512_link} -P {model_path}\n model_512_downloaded = True\n elif os.path.exists(model_512_path) and not check_model_SHA or model_512_downloaded == True:\n print('512 Model already downloaded, check check_model_SHA if the file is corrupt')\n else: \n !wget --continue {model_512_link} -P {model_path}\n model_512_downloaded = True\n\n\n# Download the secondary diffusion model v2\nif use_secondary_model == True:\n if 
os.path.exists(model_secondary_path) and check_model_SHA:\n print('Checking Secondary Diffusion File')\n with open(model_secondary_path,\"rb\") as f:\n bytes = f.read() \n hash = hashlib.sha256(bytes).hexdigest();\n if hash == model_secondary_SHA:\n print('Secondary Model SHA matches')\n model_secondary_downloaded = True\n else: \n print(\"Secondary Model SHA doesn't match, redownloading...\")\n !wget --continue {model_secondary_link} -P {model_path}\n model_secondary_downloaded = True\n elif os.path.exists(model_secondary_path) and not check_model_SHA or model_secondary_downloaded == True:\n print('Secondary Model already downloaded, check check_model_SHA if the file is corrupt')\n else: \n !wget --continue {model_secondary_link} -P {model_path}\n model_secondary_downloaded = True\n\nmodel_config = model_and_diffusion_defaults()\nif diffusion_model == '512x512_diffusion_uncond_finetune_008100':\n model_config.update({\n 'attention_resolutions': '32, 16, 8',\n 'class_cond': False,\n 'diffusion_steps': diffusion_steps,\n 'rescale_timesteps': True,\n 'timestep_respacing': timestep_respacing,\n 'image_size': 512,\n 'learn_sigma': True,\n 'noise_schedule': 'linear',\n 'num_channels': 256,\n 'num_head_channels': 64,\n 'num_res_blocks': 2,\n 'resblock_updown': True,\n 'use_checkpoint': use_checkpoint,\n 'use_fp16': True,\n 'use_scale_shift_norm': True,\n })\nelif diffusion_model == '256x256_diffusion_uncond':\n model_config.update({\n 'attention_resolutions': '32, 16, 8',\n 'class_cond': False,\n 'diffusion_steps': diffusion_steps,\n 'rescale_timesteps': True,\n 'timestep_respacing': timestep_respacing,\n 'image_size': 256,\n 'learn_sigma': True,\n 'noise_schedule': 'linear',\n 'num_channels': 256,\n 'num_head_channels': 64,\n 'num_res_blocks': 2,\n 'resblock_updown': True,\n 'use_checkpoint': use_checkpoint,\n 'use_fp16': True,\n 'use_scale_shift_norm': True,\n })\n\nsecondary_model_ver = 2\nmodel_default = model_config['image_size']\n\n\n\nif secondary_model_ver == 2:\n secondary_model = SecondaryDiffusionImageNet2()\n secondary_model.load_state_dict(torch.load(f'{model_path}/secondary_model_imagenet_2.pth', map_location='cpu'))\nsecondary_model.eval().requires_grad_(False).to(device)\n\nclip_models = []\nif ViTB32 is True: clip_models.append(clip.load('ViT-B/32', jit=False)[0].eval().requires_grad_(False).to(device)) \nif ViTB16 is True: clip_models.append(clip.load('ViT-B/16', jit=False)[0].eval().requires_grad_(False).to(device) ) \nif ViTL14 is True: clip_models.append(clip.load('ViT-L/14', jit=False)[0].eval().requires_grad_(False).to(device) ) \nif RN50 is True: clip_models.append(clip.load('RN50', jit=False)[0].eval().requires_grad_(False).to(device))\nif RN50x4 is True: clip_models.append(clip.load('RN50x4', jit=False)[0].eval().requires_grad_(False).to(device)) \nif RN50x16 is True: clip_models.append(clip.load('RN50x16', jit=False)[0].eval().requires_grad_(False).to(device)) \nif RN50x64 is True: clip_models.append(clip.load('RN50x64', jit=False)[0].eval().requires_grad_(False).to(device)) \nif RN101 is True: clip_models.append(clip.load('RN101', jit=False)[0].eval().requires_grad_(False).to(device)) \n\nif SLIPB16:\n SLIPB16model = SLIP_VITB16(ssl_mlp_dim=4096, ssl_emb_dim=256)\n if not os.path.exists(f'{model_path}/slip_base_100ep.pt'):\n !wget https://dl.fbaipublicfiles.com/slip/slip_base_100ep.pt -P {model_path}\n sd = torch.load(f'{model_path}/slip_base_100ep.pt')\n real_sd = {}\n for k, v in sd['state_dict'].items():\n real_sd['.'.join(k.split('.')[1:])] = v\n del sd\n 
SLIPB16model.load_state_dict(real_sd)\n SLIPB16model.requires_grad_(False).eval().to(device)\n\n clip_models.append(SLIPB16model)\n\nif SLIPL16:\n SLIPL16model = SLIP_VITL16(ssl_mlp_dim=4096, ssl_emb_dim=256)\n if not os.path.exists(f'{model_path}/slip_large_100ep.pt'):\n !wget https://dl.fbaipublicfiles.com/slip/slip_large_100ep.pt -P {model_path}\n sd = torch.load(f'{model_path}/slip_large_100ep.pt')\n real_sd = {}\n for k, v in sd['state_dict'].items():\n real_sd['.'.join(k.split('.')[1:])] = v\n del sd\n SLIPL16model.load_state_dict(real_sd)\n SLIPL16model.requires_grad_(False).eval().to(device)\n\n clip_models.append(SLIPL16model)\n\nnormalize = T.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])\nlpips_model = lpips.LPIPS(net='vgg').to(device)",
"_____no_output_____"
]
],
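The SHA checks above read each multi-gigabyte checkpoint fully into memory before hashing. A streaming variant avoids that; this is a sketch in which `sha256_of` is a hypothetical helper and the checkpoint path is illustrative, while the expected digest is the `model_512_SHA` constant from the cell.

```python
import hashlib
from pathlib import Path

def sha256_of(path, chunk=1 << 20):
    """Hash a file in 1 MiB chunks so large checkpoints never load into RAM."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

expected = "9c111ab89e214862b76e1fa6a1b3f1d329b1a88281885943d2cdbe357ad57648"
ckpt = Path("models/512x512_diffusion_uncond_finetune_008100.pt")  # illustrative path
if ckpt.exists() and sha256_of(ckpt) != expected:
    print("Checksum mismatch: redownload the checkpoint")
```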
[
[
"# 3. Settings",
"_____no_output_____"
]
],
[
[
"#@markdown ####**Basic Settings:**\nbatch_name = 'TimeToDisco60' #@param{type: 'string'}\nsteps = 250#@param [25,50,100,150,250,500,1000]{type: 'raw', allow-input: true}\nwidth_height = [1280, 768]#@param{type: 'raw'}\nclip_guidance_scale = 5000 #@param{type: 'number'}\ntv_scale = 0#@param{type: 'number'}\nrange_scale = 150#@param{type: 'number'}\nsat_scale = 0#@param{type: 'number'}\ncutn_batches = 1 #@param{type: 'number'}\nskip_augs = False#@param{type: 'boolean'}\n\n#@markdown ---\n\n#@markdown ####**Init Settings:**\ninit_image = None #@param{type: 'string'}\ninit_scale = 1000 #@param{type: 'integer'}\nskip_steps = 0 #@param{type: 'integer'}\n#@markdown *Make sure you set skip_steps to ~50% of your steps if you want to use an init image.*\n\n#Get corrected sizes\nside_x = (width_height[0]//64)*64;\nside_y = (width_height[1]//64)*64;\nif side_x != width_height[0] or side_y != width_height[1]:\n print(f'Changing output size to {side_x}x{side_y}. Dimensions must by multiples of 64.')\n\n#Update Model Settings\ntimestep_respacing = f'ddim{steps}'\ndiffusion_steps = (1000//steps)*steps if steps < 1000 else steps\nmodel_config.update({\n 'timestep_respacing': timestep_respacing,\n 'diffusion_steps': diffusion_steps,\n})\n\n#Make folder for batch\nbatchFolder = f'{outDirPath}/{batch_name}'\ncreatePath(batchFolder)\n",
"_____no_output_____"
]
],
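The `side_x`/`side_y` lines above silently floor the requested resolution to a multiple of 64, the stride the diffusion U-Net expects. A tiny helper makes that behaviour explicit; `snap_to_multiple` is a name introduced here for illustration.

```python
def snap_to_multiple(size, base=64):
    """Round each dimension down to the nearest multiple of `base`."""
    return tuple((dim // base) * base for dim in size)

print(snap_to_multiple((1280, 768)))  # (1280, 768): already aligned
print(snap_to_multiple((1000, 700)))  # (960, 640): floored, as the cell warns
```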
[
[
"###Animation Settings",
"_____no_output_____"
]
],
[
[
"#@markdown ####**Animation Mode:**\nanimation_mode = \"None\" #@param['None', '2D', 'Video Input']\n#@markdown *For animation, you probably want to turn `cutn_batches` to 1 to make it quicker.*\n\n\n#@markdown ---\n\n#@markdown ####**Video Input Settings:**\nvideo_init_path = \"/content/training.mp4\" #@param {type: 'string'}\nextract_nth_frame = 2 #@param {type:\"number\"} \n\nif animation_mode == \"Video Input\":\n videoFramesFolder = f'/content/videoFrames'\n createPath(videoFramesFolder)\n print(f\"Exporting Video Frames (1 every {extract_nth_frame})...\")\n try:\n !rm {videoFramesFolder}/*.jpg\n except:\n print('')\n vf = f'\"select=not(mod(n\\,{extract_nth_frame}))\"'\n !ffmpeg -i {video_init_path} -vf {vf} -vsync vfr -q:v 2 -loglevel error -stats {videoFramesFolder}/%04d.jpg\n\n\n#@markdown ---\n\n#@markdown ####**2D Animation Settings:**\n#@markdown `zoom` is a multiplier of dimensions, 1 is no zoom.\n\nkey_frames = True #@param {type:\"boolean\"}\nmax_frames = 100#@param {type:\"number\"}\n\nif animation_mode == \"Video Input\":\n max_frames = len(glob(f'{videoFramesFolder}/*.jpg'))\n\ninterp_spline = 'Linear' #Do not change, currently will not look good. param ['Linear','Quadratic','Cubic']{type:\"string\"}\nangle = \"0:(0)\"#@param {type:\"string\"}\nzoom = \"0: (1), 10: (1.05)\"#@param {type:\"string\"}\ntranslation_x = \"0: (0)\"#@param {type:\"string\"}\ntranslation_y = \"0: (0)\"#@param {type:\"string\"}\n\n#@markdown ---\n\n#@markdown ####**Coherency Settings:**\n#@markdown `frame_scale` tries to guide the new frame to looking like the old one. A good default is 1500.\nframes_scale = 1500 #@param{type: 'integer'}\n#@markdown `frame_skip_steps` will blur the previous frame - higher values will flicker less but struggle to add enough new detail to zoom into.\nframes_skip_steps = '60%' #@param ['40%', '50%', '60%', '70%', '80%'] {type: 'string'}\n\n\ndef parse_key_frames(string, prompt_parser=None):\n \"\"\"Given a string representing frame numbers paired with parameter values at that frame,\n return a dictionary with the frame numbers as keys and the parameter values as the values.\n\n Parameters\n ----------\n string: string\n Frame numbers paired with parameter values at that frame number, in the format\n 'framenumber1: (parametervalues1), framenumber2: (parametervalues2), ...'\n prompt_parser: function or None, optional\n If provided, prompt_parser will be applied to each string of parameter values.\n \n Returns\n -------\n dict\n Frame numbers as keys, parameter values at that frame number as values\n\n Raises\n ------\n RuntimeError\n If the input string does not match the expected format.\n \n Examples\n --------\n >>> parse_key_frames(\"10:(Apple: 1| Orange: 0), 20: (Apple: 0| Orange: 1| Peach: 1)\")\n {10: 'Apple: 1| Orange: 0', 20: 'Apple: 0| Orange: 1| Peach: 1'}\n\n >>> parse_key_frames(\"10:(Apple: 1| Orange: 0), 20: (Apple: 0| Orange: 1| Peach: 1)\", prompt_parser=lambda x: x.lower()))\n {10: 'apple: 1| orange: 0', 20: 'apple: 0| orange: 1| peach: 1'}\n \"\"\"\n import re\n pattern = r'((?P<frame>[0-9]+):[\\s]*[\\(](?P<param>[\\S\\s]*?)[\\)])'\n frames = dict()\n for match_object in re.finditer(pattern, string):\n frame = int(match_object.groupdict()['frame'])\n param = match_object.groupdict()['param']\n if prompt_parser:\n frames[frame] = prompt_parser(param)\n else:\n frames[frame] = param\n\n if frames == {} and len(string) != 0:\n raise RuntimeError('Key Frame string not correctly formatted')\n return frames\n\ndef get_inbetweens(key_frames, 
integer=False):\n \"\"\"Given a dict with frame numbers as keys and a parameter value as values,\n return a pandas Series containing the value of the parameter at every frame from 0 to max_frames.\n Any values not provided in the input dict are calculated by linear interpolation between\n the values of the previous and next provided frames. If there is no previous provided frame, then\n the value is equal to the value of the next provided frame, or if there is no next provided frame,\n then the value is equal to the value of the previous provided frame. If no frames are provided,\n all frame values are NaN.\n\n Parameters\n ----------\n key_frames: dict\n A dict with integer frame numbers as keys and numerical values of a particular parameter as values.\n integer: Bool, optional\n If True, the values of the output series are converted to integers.\n Otherwise, the values are floats.\n \n Returns\n -------\n pd.Series\n A Series with length max_frames representing the parameter values for each frame.\n \n Examples\n --------\n >>> max_frames = 5\n >>> get_inbetweens({1: 5, 3: 6})\n 0 5.0\n 1 5.0\n 2 5.5\n 3 6.0\n 4 6.0\n dtype: float64\n\n >>> get_inbetweens({1: 5, 3: 6}, integer=True)\n 0 5\n 1 5\n 2 5\n 3 6\n 4 6\n dtype: int64\n \"\"\"\n key_frame_series = pd.Series([np.nan for a in range(max_frames)])\n\n for i, value in key_frames.items():\n key_frame_series[i] = value\n key_frame_series = key_frame_series.astype(float)\n \n interp_method = interp_spline\n\n if interp_method == 'Cubic' and len(key_frames.items()) <=3:\n interp_method = 'Quadratic'\n \n if interp_method == 'Quadratic' and len(key_frames.items()) <= 2:\n interp_method = 'Linear'\n \n \n key_frame_series[0] = key_frame_series[key_frame_series.first_valid_index()]\n key_frame_series[max_frames-1] = key_frame_series[key_frame_series.last_valid_index()]\n # key_frame_series = key_frame_series.interpolate(method=intrp_method,order=1, limit_direction='both')\n key_frame_series = key_frame_series.interpolate(method=interp_method.lower(),limit_direction='both')\n if integer:\n return key_frame_series.astype(int)\n return key_frame_series\n\ndef split_prompts(prompts):\n prompt_series = pd.Series([np.nan for a in range(max_frames)])\n for i, prompt in prompts.items():\n prompt_series[i] = prompt\n # prompt_series = prompt_series.astype(str)\n prompt_series = prompt_series.ffill().bfill()\n return prompt_series\n\nif key_frames:\n try:\n angle_series = get_inbetweens(parse_key_frames(angle))\n except RuntimeError as e:\n print(\n \"WARNING: You have selected to use key frames, but you have not \"\n \"formatted `angle` correctly for key frames.\\n\"\n \"Attempting to interpret `angle` as \"\n f'\"0: ({angle})\"\\n'\n \"Please read the instructions to find out how to use key frames \"\n \"correctly.\\n\"\n )\n angle = f\"0: ({angle})\"\n angle_series = get_inbetweens(parse_key_frames(angle))\n\n try:\n zoom_series = get_inbetweens(parse_key_frames(zoom))\n except RuntimeError as e:\n print(\n \"WARNING: You have selected to use key frames, but you have not \"\n \"formatted `zoom` correctly for key frames.\\n\"\n \"Attempting to interpret `zoom` as \"\n f'\"0: ({zoom})\"\\n'\n \"Please read the instructions to find out how to use key frames \"\n \"correctly.\\n\"\n )\n zoom = f\"0: ({zoom})\"\n zoom_series = get_inbetweens(parse_key_frames(zoom))\n\n try:\n translation_x_series = get_inbetweens(parse_key_frames(translation_x))\n except RuntimeError as e:\n print(\n \"WARNING: You have selected to use key frames, but you have not \"\n 
\"formatted `translation_x` correctly for key frames.\\n\"\n \"Attempting to interpret `translation_x` as \"\n f'\"0: ({translation_x})\"\\n'\n \"Please read the instructions to find out how to use key frames \"\n \"correctly.\\n\"\n )\n translation_x = f\"0: ({translation_x})\"\n translation_x_series = get_inbetweens(parse_key_frames(translation_x))\n\n try:\n translation_y_series = get_inbetweens(parse_key_frames(translation_y))\n except RuntimeError as e:\n print(\n \"WARNING: You have selected to use key frames, but you have not \"\n \"formatted `translation_y` correctly for key frames.\\n\"\n \"Attempting to interpret `translation_y` as \"\n f'\"0: ({translation_y})\"\\n'\n \"Please read the instructions to find out how to use key frames \"\n \"correctly.\\n\"\n )\n translation_y = f\"0: ({translation_y})\"\n translation_y_series = get_inbetweens(parse_key_frames(translation_y))\n\nelse:\n angle = float(angle)\n zoom = float(zoom)\n translation_x = float(translation_x)\n translation_y = float(translation_y)\n",
"_____no_output_____"
]
],
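A compact, self-contained sketch of what `parse_key_frames` plus `get_inbetweens` compute for a schedule such as `zoom = "0: (1), 10: (1.05)"`, simplified here to purely numeric values and linear interpolation.

```python
import re
import numpy as np
import pandas as pd

def parse_key_frames(s):
    # Same "frame: (value)" pattern as the cell above, but values cast to float.
    return {int(m.group("frame")): float(m.group("param"))
            for m in re.finditer(r"(?P<frame>\d+):\s*\((?P<param>[^)]*)\)", s)}

max_frames = 11
key_frames = parse_key_frames("0: (1), 10: (1.05)")
series = pd.Series(np.nan, index=range(max_frames))
for frame, value in key_frames.items():
    series[frame] = value
series = series.interpolate(method="linear", limit_direction="both")
print(series.round(3).tolist())  # 1.0, 1.005, ..., 1.05 in equal steps
```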
[
[
"### Extra Settings\n Partial Saves, Diffusion Sharpening, Advanced Settings, Cutn Scheduling",
"_____no_output_____"
]
],
[
[
"#@markdown ####**Saving:**\n\nintermediate_saves = 150#@param{type: 'raw'}\nintermediates_in_subfolder = True #@param{type: 'boolean'}\n#@markdown Intermediate steps will save a copy at your specified intervals. You can either format it as a single integer or a list of specific steps \n\n#@markdown A value of `2` will save a copy at 33% and 66%. 0 will save none.\n\n#@markdown A value of `[5, 9, 34, 45]` will save at steps 5, 9, 34, and 45. (Make sure to include the brackets)\n\n\nif type(intermediate_saves) is not list:\n if intermediate_saves:\n steps_per_checkpoint = math.floor((steps - skip_steps - 1) // (intermediate_saves+1))\n steps_per_checkpoint = steps_per_checkpoint if steps_per_checkpoint > 0 else 1\n print(f'Will save every {steps_per_checkpoint} steps')\n else:\n steps_per_checkpoint = steps+10\nelse:\n steps_per_checkpoint = None\n\nif intermediate_saves and intermediates_in_subfolder is True:\n partialFolder = f'{batchFolder}/partials'\n createPath(partialFolder)\n\n #@markdown ---\n\n#@markdown ####**SuperRes Sharpening:**\n#@markdown *Sharpen each image using latent-diffusion. Does not run in animation mode. `keep_unsharp` will save both versions.*\nsharpen_preset = 'Fast' #@param ['Off', 'Faster', 'Fast', 'Slow', 'Very Slow']\nkeep_unsharp = True #@param{type: 'boolean'}\n\nif sharpen_preset != 'Off' and keep_unsharp is True:\n unsharpenFolder = f'{batchFolder}/unsharpened'\n createPath(unsharpenFolder)\n\n\n #@markdown ---\n\n#@markdown ####**Advanced Settings:**\n#@markdown *There are a few extra advanced settings available if you double click this cell.*\n\n#@markdown *Perlin init will replace your init, so uncheck if using one.*\n\nperlin_init = False #@param{type: 'boolean'}\nperlin_mode = 'mixed' #@param ['mixed', 'color', 'gray']\nset_seed = 'random_seed' #@param{type: 'string'}\neta = 0.8#@param{type: 'number'}\nclamp_grad = True #@param{type: 'boolean'}\nclamp_max = 0.05 #@param{type: 'number'}\n\n\n### EXTRA ADVANCED SETTINGS:\nrandomize_class = True\nclip_denoised = False\nfuzzy_prompt = False\nrand_mag = 0.05\n\n\n #@markdown ---\n\n#@markdown ####**Cutn Scheduling:**\n#@markdown Format: `[40]*400+[20]*600` = 40 cuts for the first 400 /1000 steps, then 20 for the last 600/1000\n\n#@markdown cut_overview and cut_innercut are cumulative for total cutn on any given step. Overview cuts see the entire image and are good for early structure, innercuts are your standard cutn.\n\ncut_overview = \"[12]*400+[4]*600\" #@param {type: 'string'} \ncut_innercut =\"[4]*400+[12]*600\"#@param {type: 'string'} \ncut_ic_pow = 1#@param {type: 'number'} \ncut_icgray_p = \"[0.2]*400+[0]*600\"#@param {type: 'string'} \n\n",
"_____no_output_____"
]
],
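The cutn schedules above are Python expressions that the run code later expands with `eval`. As an alternative sketch, the same `"[12]*400+[4]*600"` format can be expanded with a small regex parser; `parse_cut_schedule` is hypothetical, not part of the notebook.

```python
import re

def parse_cut_schedule(spec):
    """Expand strings like '[12]*400+[4]*600' into a per-step list without eval."""
    out = []
    for count, reps in re.findall(r"\[([\d.]+)\]\s*\*\s*(\d+)", spec):
        out += [float(count)] * int(reps)
    return out

schedule = parse_cut_schedule("[12]*400+[4]*600")
print(len(schedule), schedule[0], schedule[500])  # 1000 12.0 4.0
```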
[
[
"###Prompts\n`animation_mode: None` will only use the first set. `animation_mode: 2D / Video` will run through them per the set frames and hold on the last one.",
"_____no_output_____"
]
],
[
[
"text_prompts = {\n 0: [\"A scary painting of a man with the head of a lizard, and blood dripping from its fangs.\", \"red and blue color scheme\"],\n 100: [\"This set of prompts start at frame 100\",\"This prompt has weight five:5\"],\n}\n\nimage_prompts = {\n # 0:['ImagePromptsWorkButArentVeryGood.png:2',],\n}",
"_____no_output_____"
]
],
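The example prompt `"This prompt has weight five:5"` uses a trailing `:weight` suffix. A minimal sketch of splitting that suffix off (the `split_weight` helper is illustrative, not the notebook's actual parser):

```python
def split_weight(prompt, default=1.0):
    """Split a 'text:weight' prompt into (text, weight); no suffix means `default`."""
    text, sep, tail = prompt.rpartition(":")
    if sep and tail.replace(".", "", 1).lstrip("-").isdigit():
        return text, float(tail)
    return prompt, default

print(split_weight("This prompt has weight five:5"))  # ('This prompt has weight five', 5.0)
print(split_weight("red and blue color scheme"))      # ('red and blue color scheme', 1.0)
```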
[
[
"# 4. Diffuse!",
"_____no_output_____"
]
],
[
[
"#@title Do the Run!\n#@markdown `n_batches` ignored with animation modes.\ndisplay_rate = 50 #@param{type: 'number'}\nn_batches = 1 #@param{type: 'number'}\n\nbatch_size = 1 \n\ndef move_files(start_num, end_num, old_folder, new_folder):\n for i in range(start_num, end_num):\n old_file = old_folder + f'/{batch_name}({batchNum})_{i:04}.png'\n new_file = new_folder + f'/{batch_name}({batchNum})_{i:04}.png'\n os.rename(old_file, new_file)\n\n#@markdown ---\n\n\nresume_run = True #@param{type: 'boolean'}\nrun_to_resume = 'latest' #@param{type: 'string'}\nresume_from_frame = 'latest' #@param{type: 'string'}\nretain_overwritten_frames = True #@param{type: 'boolean'}\nif retain_overwritten_frames is True:\n retainFolder = f'{batchFolder}/retained'\n createPath(retainFolder)\n\n\nskip_step_ratio = int(frames_skip_steps.rstrip(\"%\")) / 100\ncalc_frames_skip_steps = math.floor(steps * skip_step_ratio)\n\n\nif steps <= calc_frames_skip_steps:\n sys.exit(\"ERROR: You can't skip more steps than your total steps\")\n\nif resume_run:\n if run_to_resume == 'latest':\n try:\n batchNum\n except:\n batchNum = len(glob(f\"{batchFolder}/{batch_name}(*)_settings.txt\"))-1\n else:\n batchNum = int(run_to_resume)\n if resume_from_frame == 'latest':\n start_frame = len(glob(batchFolder+f\"/{batch_name}({batchNum})_*.png\"))\n else:\n start_frame = int(resume_from_frame)+1\n if retain_overwritten_frames is True:\n existing_frames = len(glob(batchFolder+f\"/{batch_name}({batchNum})_*.png\"))\n frames_to_save = existing_frames - start_frame\n print(f'Moving {frames_to_save} frames to the Retained folder')\n move_files(start_frame, existing_frames, batchFolder, retainFolder)\nelse:\n start_frame = 0\n batchNum = len(glob(batchFolder+\"/*.txt\"))\n while path.isfile(f\"{batchFolder}/{batch_name}({batchNum})_settings.txt\") is True or path.isfile(f\"{batchFolder}/{batch_name}-{batchNum}_settings.txt\") is True:\n batchNum += 1\n\nprint(f'Starting Run: {batch_name}({batchNum}) at frame {start_frame}')\n\nif set_seed == 'random_seed':\n random.seed()\n seed = random.randint(0, 2**32)\n # print(f'Using seed: {seed}')\nelse:\n seed = int(set_seed)\n\nargs = {\n 'batchNum': batchNum,\n 'prompts_series':split_prompts(text_prompts) if text_prompts else None,\n 'image_prompts_series':split_prompts(image_prompts) if image_prompts else None,\n 'seed': seed,\n 'display_rate':display_rate,\n 'n_batches':n_batches if animation_mode == 'None' else 1,\n 'batch_size':batch_size,\n 'batch_name': batch_name,\n 'steps': steps,\n 'width_height': width_height,\n 'clip_guidance_scale': clip_guidance_scale,\n 'tv_scale': tv_scale,\n 'range_scale': range_scale,\n 'sat_scale': sat_scale,\n 'cutn_batches': cutn_batches,\n 'init_image': init_image,\n 'init_scale': init_scale,\n 'skip_steps': skip_steps,\n 'sharpen_preset': sharpen_preset,\n 'keep_unsharp': keep_unsharp,\n 'side_x': side_x,\n 'side_y': side_y,\n 'timestep_respacing': timestep_respacing,\n 'diffusion_steps': diffusion_steps,\n 'animation_mode': animation_mode,\n 'video_init_path': video_init_path,\n 'extract_nth_frame': extract_nth_frame,\n 'key_frames': key_frames,\n 'max_frames': max_frames if animation_mode != \"None\" else 1,\n 'interp_spline': interp_spline,\n 'start_frame': start_frame,\n 'angle': angle,\n 'zoom': zoom,\n 'translation_x': translation_x,\n 'translation_y': translation_y,\n 'angle_series':angle_series,\n 'zoom_series':zoom_series,\n 'translation_x_series':translation_x_series,\n 'translation_y_series':translation_y_series,\n 'frames_scale': frames_scale,\n 
'calc_frames_skip_steps': calc_frames_skip_steps,\n  'skip_step_ratio': skip_step_ratio,\n  'text_prompts': text_prompts,\n  'image_prompts': image_prompts,\n  'cut_overview': eval(cut_overview),\n  'cut_innercut': eval(cut_innercut),\n  'cut_ic_pow': cut_ic_pow,\n  'cut_icgray_p': eval(cut_icgray_p),\n  'intermediate_saves': intermediate_saves,\n  'intermediates_in_subfolder': intermediates_in_subfolder,\n  'steps_per_checkpoint': steps_per_checkpoint,\n  'perlin_init': perlin_init,\n  'perlin_mode': perlin_mode,\n  'set_seed': set_seed,\n  'eta': eta,\n  'clamp_grad': clamp_grad,\n  'clamp_max': clamp_max,\n  'skip_augs': skip_augs,\n  'randomize_class': randomize_class,\n  'clip_denoised': clip_denoised,\n  'fuzzy_prompt': fuzzy_prompt,\n  'rand_mag': rand_mag,\n}\n\nargs = SimpleNamespace(**args)\n\nprint('Prepping model...')\nmodel, diffusion = create_model_and_diffusion(**model_config)\nmodel.load_state_dict(torch.load(f'{model_path}/{diffusion_model}.pt', map_location='cpu'))\nmodel.requires_grad_(False).eval().to(device)\nfor name, param in model.named_parameters():\n    if 'qkv' in name or 'norm' in name or 'proj' in name:\n        param.requires_grad_()\nif model_config['use_fp16']:\n    model.convert_to_fp16()\n\ngc.collect()\ntorch.cuda.empty_cache()\ntry:\n  do_run()\nexcept KeyboardInterrupt:\n    pass\nfinally:\n    print('Seed used:', seed)\n    gc.collect()\n    torch.cuda.empty_cache()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52a673ef1fbb899d620aeaf22ac85fc689e372c
| 6,131 |
ipynb
|
Jupyter Notebook
|
nbs/exec.parse_data.ipynb
|
Cris140/uberduck-ml-dev
|
7349480210cdee40d6df494ecec5a62207d8ee72
|
[
"Apache-2.0"
] | 167 |
2021-10-18T22:04:17.000Z
|
2022-03-21T19:44:21.000Z
|
nbs/exec.parse_data.ipynb
|
Cris140/uberduck-ml-dev
|
7349480210cdee40d6df494ecec5a62207d8ee72
|
[
"Apache-2.0"
] | 18 |
2021-10-19T02:33:57.000Z
|
2022-03-28T17:25:52.000Z
|
nbs/exec.parse_data.ipynb
|
Cris140/uberduck-ml-dev
|
7349480210cdee40d6df494ecec5a62207d8ee72
|
[
"Apache-2.0"
] | 24 |
2021-10-22T02:16:53.000Z
|
2022-03-30T18:22:43.000Z
| 24.426295 | 131 | 0.496167 |
[
[
[
"# default_exp exec.parse_data",
"_____no_output_____"
]
],
[
[
"# uberduck_ml_dev.exec.parse_data",
"_____no_output_____"
],
[
"Log a speech dataset to the filelist database\n\nUsage:\n\n```\npython -m uberduck_ml_dev.exec.parse_data \\\n --input ~/multispeaker-root \\\n --format standard-multispeaker \\\n --ouput list.txt\n```\n\n### Supported formats:\n\n### `standard-multispeaker`\n\n```\nroot\n speaker1\n list.txt\n wavs\n speaker2\n list.txt\n wavs\n```\n\n### `standard-singlespeaker`\n\n```\nroot\n list.txt\n wavs\n```\n\n### Unsupported formats (yet):\n\n\n### `vctk`\n\nFormat of the VCTK dataset as downloaded from the [University of Edinburgh](https://datashare.ed.ac.uk/handle/10283/3443).\n\n```\nroot\n wav48_silence_trimmed\n p228\n p228_166_mic1.flac\n ...\n txt\n p228\n p228_166.txt\n ...\n```",
"_____no_output_____"
]
],
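A sketch of walking the `standard-multispeaker` layout described above. `iter_multispeaker` is a hypothetical helper used only for illustration; the real parsing lives in `uberduck_ml_dev.data.parse`.

```python
from pathlib import Path

def iter_multispeaker(root):
    """Yield (speaker_name, filelist, wav_dir) for each speaker folder under root."""
    for speaker_dir in sorted(Path(root).expanduser().iterdir()):
        filelist = speaker_dir / "list.txt"
        if speaker_dir.is_dir() and filelist.exists():
            yield speaker_dir.name, filelist, speaker_dir / "wavs"

for name, filelist, wavs in iter_multispeaker("~/multispeaker-root"):
    print(name, filelist, wavs)
```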
[
[
"# export\n\nimport argparse\nimport os\nfrom pathlib import Path\n\nimport sqlite3\nfrom tqdm import tqdm\n\nfrom uberduck_ml_dev.data.cache import ensure_speaker_table, CACHE_LOCATION\nfrom uberduck_ml_dev.data.parse import (\n _cache_filelists,\n _write_db_to_csv,\n STANDARD_MULTISPEAKER,\n STANDARD_SINGLESPEAKER,\n)\n\nFORMATS = [\n STANDARD_MULTISPEAKER,\n STANDARD_SINGLESPEAKER,\n]",
"_____no_output_____"
],
[
"CACHE_LOCATION.parent.exists()",
"_____no_output_____"
],
[
"# export\nfrom typing import List\nimport sys\n\n\ndef _parse_args(args: List[str]):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\", \"--input\", help=\"Path to input dataset file or directory\", required=True\n )\n parser.add_argument(\n \"-f\", \"--format\", help=\"Input dataset format\", default=STANDARD_MULTISPEAKER\n )\n parser.add_argument(\n \"-n\", \"--name\", help=\"Dataset name\", default=STANDARD_MULTISPEAKER\n )\n parser.add_argument(\n \"-d\", \"--database\", help=\"Output database\", default=CACHE_LOCATION\n )\n parser.add_argument(\"--csv_path\", help=\"Path to save csv\", default=None)\n return parser.parse_args(args)\n\n\ntry:\n from nbdev.imports import IN_NOTEBOOK\nexcept:\n IN_NOTEBOOK = False\n\nif __name__ == \"__main__\" and not IN_NOTEBOOK:\n\n args = _parse_args(sys.argv[1:])\n ensure_speaker_table(args.database)\n conn = sqlite3.connect(args.database)\n _cache_filelists(\n folder=args.input, fmt=args.format, conn=conn, dataset_name=args.name\n )\n if args.csv_path is not None:\n _write_db_to_csv(conn, args.csv_path)",
"_____no_output_____"
],
[
"# skip\npython -m uberduck_ml_dev.exec.parse_data -i /mnt/disks/uberduck-experiments-v0/data/eminem/ \\\n\t-f standard-singlespeaker \\\n\t-d /home/s_uberduck_ai/.cache/uberduck/uberduck-ml-exp.db \\\n\t--csv_path $UBMLEXP/filelist_list \\\n\t-n eminem",
"_____no_output_____"
],
[
"# skip\nfrom tempfile import NamedTemporaryFile, TemporaryFile\n\nwith NamedTemporaryFile(\"w\") as f:\n _generate_filelist(\n str(Path(\"/Users/zwf/data/voice/dvc-managed/uberduck-multispeaker/\").resolve()),\n \"standard-multispeaker\",\n f.name,\n )\n\nwith TemporaryFile(\"w\") as f:\n _convert_to_multispeaker(\n f,\n str(Path(\"/Users/zwf/data/voice/dvc-managed/uberduck-multispeaker/\").resolve()),\n \"standard-multispeaker\",\n )",
" 5%|▍ | 4/85 [00:00<00:02, 35.22it/s]"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
c52a741eb1768dbd19667430f071ffe8d61c7bb1
| 6,435 |
ipynb
|
Jupyter Notebook
|
data_download_2019.ipynb
|
twdatascience/irs990
|
cadddd0fe434c9f29ab328a0fc5f56efec953ac6
|
[
"MIT"
] | null | null | null |
data_download_2019.ipynb
|
twdatascience/irs990
|
cadddd0fe434c9f29ab328a0fc5f56efec953ac6
|
[
"MIT"
] | null | null | null |
data_download_2019.ipynb
|
twdatascience/irs990
|
cadddd0fe434c9f29ab328a0fc5f56efec953ac6
|
[
"MIT"
] | null | null | null | 20.428571 | 98 | 0.498213 |
[
[
[
"import numpy as np\nimport pandas as pd\nimport requests\nimport re\nimport json\nfrom os import walk\nfrom multiprocessing.pool import ThreadPool",
"_____no_output_____"
],
[
"URL = \"https://s3.amazonaws.com/irs-form-990/index_2019.json\"\n\nresponse = requests.get(URL)\nwith open(\"index_2019.json\", 'wb') as f:\n f.write(response.content)",
"_____no_output_____"
],
[
"with open(\"index_2019.json\") as f:\n data = json.load(f)\n data = data[list(data.keys())[0]]",
"_____no_output_____"
],
[
"df = pd.DataFrame.from_dict(data)\ndf.tail(5)",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"def download_url(url):\n file_name_start = url.rfind('/') + 1\n file_name = url[file_name_start:]\n output_dir = \"/home/meso/git_repo/irs990/data/data_2019/\"\n file_name = output_dir + file_name\n \n r = requests.get(url, stream=True)\n if r.status_code == requests.codes.ok:\n with open(file_name, 'wb') as f:\n f.write(r.content)\n return url\n\nurls = []\nfor i in range(df.shape[0]): \n urls.append(data[i]['URL'])\n \nresults = ThreadPool(100).imap_unordered(download_url, urls)",
"_____no_output_____"
],
[
"f = []\nfor (dirpath, dirnames, filenames) in walk('/home/meso/git_repo/irs990/data/data_2019/'):\n f.extend(filenames)\n break",
"_____no_output_____"
],
[
"u = list(df['URL'])\nlen(u)",
"_____no_output_____"
],
[
"file_names_list = []\n\nfor i in range(len(u)):\n file_loc = u[i].rfind(\"/\") + 1\n file_name = u[i][file_loc:]\n file_names_list.append(file_name)",
"_____no_output_____"
],
[
"print(len(file_names_list), str(\":\"), len(f))",
"_____no_output_____"
],
[
"diff = list(set(file_names_list) - set(f))\nlen(diff)",
"_____no_output_____"
],
[
"file_loc = u[i].rfind(\"/\") + 1\nurl_pre = u[0][:file_loc]",
"_____no_output_____"
],
[
"urls = []\nfor i in range(len(diff)):\n url = url_pre + diff[i]\n urls.append(url)",
"_____no_output_____"
],
[
"for url in urls:\n download_url(url)",
"_____no_output_____"
],
[
"f = []\nfor (dirpath, dirnames, filenames) in walk('/home/meso/git_repo/irs990/data/data_2019/'):\n f.extend(filenames)\n break",
"_____no_output_____"
],
[
"len(f)",
"_____no_output_____"
],
[
"diff = list(set(file_names_list) - set(f))\nlen(diff)",
"_____no_output_____"
],
[
"def list_duplicates(seq):\n seen = set()\n seen_add = seen.add\n return [idx for idx, item in enumerate(seq) if item in seen or seen_add(item)]",
"_____no_output_____"
],
[
"print(list_duplicates(file_names_list))",
"_____no_output_____"
],
[
"dup_df = df.iloc[list_duplicates(file_names_list)]",
"_____no_output_____"
],
[
"dup_ein = pd.DataFrame(dup_df['EIN'])",
"_____no_output_____"
],
[
"dup_ein",
"_____no_output_____"
],
[
"duplicates = pd.DataFrame([])\n\nfor dup in range(dup_ein.shape[0]):\n ein = dup_ein.iloc[dup]['EIN']\n duplicates = duplicates.append(df[df['EIN'] == ein])",
"_____no_output_____"
],
[
"duplicates",
"_____no_output_____"
],
[
"duplicates['URL'].iloc[0]",
"_____no_output_____"
],
[
"duplicates['URL'].iloc[1]",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52a7813227b1631674b6fd79c4bb4c33277a37c
| 14,088 |
ipynb
|
Jupyter Notebook
|
scraping_wiki_dumps.ipynb
|
mattiapocci/PhilosoperRank
|
fa515e2aec8212e941b5f328bbdc60bf6ba106cb
|
[
"MIT"
] | null | null | null |
scraping_wiki_dumps.ipynb
|
mattiapocci/PhilosoperRank
|
fa515e2aec8212e941b5f328bbdc60bf6ba106cb
|
[
"MIT"
] | null | null | null |
scraping_wiki_dumps.ipynb
|
mattiapocci/PhilosoperRank
|
fa515e2aec8212e941b5f328bbdc60bf6ba106cb
|
[
"MIT"
] | 1 |
2020-07-04T12:57:30.000Z
|
2020-07-04T12:57:30.000Z
| 30.962637 | 243 | 0.447757 |
[
[
[
"<a href=\"https://colab.research.google.com/github/mattiapocci/PhilosopherRank/blob/master/scrapingWikiDumps.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Import",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\nimport os\nimport json\nimport re\ndrive.mount('/content/drive')",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
]
],
[
[
"## Download and extract Wikipedia dump\n",
"_____no_output_____"
]
],
[
[
"!wget -P \"/content/drive/My Drive/Wiki_dump/\" \"https://dumps.wikimedia.org/enwiki/20200120/enwiki-20200120-pages-articles-multistream.xml.bz2\"",
"_____no_output_____"
],
[
"!bunzip2 -d -k -s /content/drive/My\\ Drive/Wiki_dump/enwiki-20200120-pages-articles-multistream.xml.bz2",
"_____no_output_____"
],
[
"!python3 WikiExtractor.py \"enwiki-20200120-pages-articles-multistream.xml\" --json --processes 2",
"_____no_output_____"
]
],
[
[
"# Parsing",
"_____no_output_____"
],
[
"## Parsing utility",
"_____no_output_____"
]
],
[
[
"def create_valid_json(filename):\n \"\"\"\n Create a valid json with the commas and the square brackets\n :param string:\n :return: None \n \"\"\"\n with open(filename, 'r+') as f:\n data = f.read().replace('}', '},')\n data = data[:-2]\n f.seek(0, 0)\n f.write('['.rstrip('\\r\\n') + '\\n' + data)\n \n with open(filename, 'a') as f:\n f.write(\"]\")\n\n\ndef find_matches(filename,word,phil_list):\n \"\"\"\n Find matches inside the articles with the given word and add it to the given phil_list\n :param string:\n :param string:\n :param list:\n :return: list of the articles containg the word philosopher\n \"\"\"\n #phil_list = []\n with open(filename, 'r', encoding='utf-8') as file:\n try:\n data = json.loads(file.read())\n for article in data:\n if word in article['text']:\n phil_list.append(article)\n return phil_list\n except:\n print(\"Error with: \", filename)\n\n\ndef write_json(data, name):\n \"\"\"\n Write into a file the data given with the name given\n :param lst of json:\n :param string:\n :return: None\n \"\"\"\n with open('/content/drive/My Drive/Wiki_dump/'+name, 'w') as outfile:\n json.dump(data, outfile)\n\n\n",
"_____no_output_____"
]
],
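A tiny end-to-end check of the helpers above (it assumes the cell defining them has been run). WikiExtractor emits one JSON object per line with no commas or brackets, which `create_valid_json` turns into a parseable array; the two fake articles here are made up for the demo.

```python
import os
import tempfile

raw = ('{"id": "1", "title": "Plato", "text": "Plato was a philosopher."}\n'
       '{"id": "2", "title": "Bridge", "text": "A bridge spans water."}\n')
path = os.path.join(tempfile.mkdtemp(), "wiki_00")
with open(path, "w") as f:
    f.write(raw)

create_valid_json(path)                        # wraps the lines into a JSON array
matches = find_matches(path, "philosopher", [])
print([a["title"] for a in matches])           # ['Plato']
```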
[
[
"## Executing the parsing",
"_____no_output_____"
],
[
"If the wikipedia articles are yet parsed with the create_valid_json function, do NOT redo the parsing, otherwise you will mess all the articles!!! To avoid this, comment the indicated line.",
"_____no_output_____"
]
],
[
[
"def parse_data(rootdir=\"/home/luigi/Downloads/wir/wikiextractor-master/text/\"):\n \"\"\"\n Parse all the files presents in the rootdir and its subdirectories\n :param string:\n :return: None \n \"\"\"\n lst = []\n for subdir, dirs, files in os.walk(rootdir):\n for file in files:\n filename_fullpath = os.path.join(subdir, file)\n create_valid_json(filename_fullpath) #THIS LINE TO BE COMMENTED according to what is written above\n lst = find_matches(filename_fullpath,\"philosopher\",lst)\n write_json(lst,file+\".json\")\n",
"_____no_output_____"
]
],
[
[
"Formatting the data to be uniform with the beautiful soup data.",
"_____no_output_____"
]
],
[
[
"lst = []\nwith open(\"/content/drive/My Drive/Wiki_dump/wiki_75.json\", 'r', encoding='utf-8') as file:\n data = json.loads(file.read())\n for article in data:\n if \"philosopher\" in article['text']:\n var = prototype = {\n \"philosopher\":\"\" ,\n \"article\": \"\",\n \"pageid\": \"\",\n \"table_influenced\": [],\n \"table_influences\": []\n }\n var['philosopher'] = article['title']\n var['article'] = article['text']\n var['pageid'] = article['id']\n lst.append(var)\n write_json(lst,\"uniformat.json\")",
"_____no_output_____"
]
],
[
[
"# Compare with the category dump",
"_____no_output_____"
],
[
"Construct lists from the entire dump",
"_____no_output_____"
]
],
[
[
"reg_a_phil_dump = []\nborn_lived_dump = []\n\nwith open(\"/content/drive/My Drive/Wiki_dump/uniformat.json\", 'r', encoding='utf-8') as file:\n data = json.loads(file.read())\n for article in data:\n if \"born\" in article['article'] or \"lived\" in article['article']:\n born_lived_dump.append(article)\n if re.match(r\".*a.*philosopher\",article['article']):\n reg_a_phil_dump.append(article)",
"_____no_output_____"
]
],
[
[
"construct lists from the category dump",
"_____no_output_____"
]
],
[
[
"born_lived = []\nphil = []\nreg_a_phil = []\nwith open(\"/content/drive/My Drive/Wiki_dump/mattia_ground_t.json\", 'r', encoding='utf-8') as file:\n data_cat = json.loads(file.read())\nfor a in data_cat:\n if 'philosopher' in a['article']:\n phil.append(a)\n if \"born\" in a['article'] or \"lived\" in a['article']:\n born_lived.append(a)\n if re.match(r\".*a.*philosopher\",a['article']):\n reg_a_phil.append(a)",
"_____no_output_____"
]
],
[
[
"Finding the articles presents in both the dumps",
"_____no_output_____"
]
],
[
[
"match = 0\ntrovato = False\nfor cat_art in phil:\n trovato = False\n for dump_art in data:\n if not trovato and cat_art['pageid'] == dump_art['pageid']:\n match = match + 1\n trovato = True\n if not trovato:\n print(cat_art)\n",
"_____no_output_____"
]
],
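[
[
"The nested loop above is quadratic in the number of articles. A faster equivalent (illustrative sketch only, reusing the same `phil` and `data` variables) builds a set of page ids once and checks membership in constant time:",
"_____no_output_____"
]
],
[
[
"# Set-based variant of the matching loop above (same result, O(n + m) instead of O(n * m)).\ndump_ids = {dump_art['pageid'] for dump_art in data}\nmatch_fast = sum(1 for cat_art in phil if cat_art['pageid'] in dump_ids)\nmissing = [cat_art for cat_art in phil if cat_art['pageid'] not in dump_ids]\nprint(match_fast, len(missing))",
"_____no_output_____"
]
],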
[
[
"Printing the scores",
"_____no_output_____"
]
],
[
[
"print(\"===============DATA ANALYSIS OF CATEGORY DUMP ============\")\nprint(\"Length of category dump: \",len(data_cat))\nprint(\"Length with 'philosopher' in article: \",len(phil))\nprint(\"Length with 'born' or 'lived' in article: \",len(born_lived))\nprint(\"Length with regex '.* a.* philosopher' in article: \",len(reg_a_phil))\nprint(\"\\n\")\nprint(\"===============DATA ANALYSIS OF ENTIRE DUMP ============\")\nprint(\"Length of dump: 62000000\")\nprint(\"Length with 'philosopher' in article: \",len(data))\nprint(\"Length with 'born' or 'lived' in article: \",len(born_lived_dump))\nprint(\"Length with regex '.* a.* philosopher' in article: \",len(reg_a_phil_dump))\nprint(\"\\n\")\nprint(\"Matched \",match,\" articles between category dump and all wiki\")\nprint(\"Missing\",len(phil)-match,\" articles from all dump\")",
"===============DATA ANALYSIS OF CATEGORY DUMP ============\nLength of category dump: 1712\nLength with 'philosopher' in article: 1161\nLength with 'born' or 'lived' in article: 968\nLength with regex '.* a.* philosopher' in article: 996\n\n\n===============DATA ANALYSIS OF ENTIRE DUMP ============\nLength of dump: 62000000\nLength with 'philosopher' in article: 26312\nLength with 'born' or 'lived' in article: 14272\nLength with regex '.* a.* philosopher' in article: 309\n\n\nMatched 1131 articles between category dump and all wiki\nMissing 30 articles from all dump\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52a99e4184536b2795864f6fe5d2e1326ac8c03
| 411,860 |
ipynb
|
Jupyter Notebook
|
loan_prediction/loan_prediction.ipynb
|
rojaAchary/Machine-Learning-Projects
|
216021a197d869702652a3face8ba4b61b874a33
|
[
"MIT"
] | null | null | null |
loan_prediction/loan_prediction.ipynb
|
rojaAchary/Machine-Learning-Projects
|
216021a197d869702652a3face8ba4b61b874a33
|
[
"MIT"
] | null | null | null |
loan_prediction/loan_prediction.ipynb
|
rojaAchary/Machine-Learning-Projects
|
216021a197d869702652a3face8ba4b61b874a33
|
[
"MIT"
] | 1 |
2021-08-24T10:48:42.000Z
|
2021-08-24T10:48:42.000Z
| 200.711501 | 48,072 | 0.656177 |
[
[
[
"#problem - given an csv/excel format file >> find you from present customers who should be given the loan\r\n",
"_____no_output_____"
],
[
"#Data load",
"_____no_output_____"
],
[
"import pandas as pd ",
"_____no_output_____"
],
[
"import numpy as np\r\nimport matplotlib as plt\r\n%matplotlib inline",
"_____no_output_____"
],
[
"df = pd.read_csv('train_loan_predict.csv')",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"#Quick Data Exploration",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df['Property_Area'].value_counts()",
"_____no_output_____"
],
[
"df['Property_Area'].hist(bins=10)",
"_____no_output_____"
],
[
"df['ApplicantIncome'].hist(bins=10)",
"_____no_output_____"
],
[
"df.boxplot(column = 'ApplicantIncome', by = 'Education')",
"_____no_output_____"
],
[
"df['LoanAmount'].hist(bins=50)",
"_____no_output_____"
],
[
"temp1 = df['Credit_History'].value_counts(ascending=True)\r\ntemp2 = df.pivot_table(values='Loan_Status',index=['Credit_History'],aggfunc=lambda x: x.map({'Y':1,'N':0}).mean())\r\nprint ('Frequency Table for Credit History:') \r\nprint (temp1)\r\n\r\nprint ('\\nProbility of getting loan for each Credit History class:')\r\nprint (temp2)",
"Frequency Table for Credit History:\n0.0 89\n1.0 475\nName: Credit_History, dtype: int64\n\nProbility of getting loan for each Credit History class:\n Loan_Status\nCredit_History \n0.0 0.078652\n1.0 0.795789\n"
],
[
"df['Gender'].value_counts()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\r\nfig = plt.figure(figsize=(8,4))\r\nax1 = fig.add_subplot(121)\r\nax1.set_xlabel('Credit_History')\r\nax1.set_ylabel('Count of Applicants')\r\nax1.set_title(\"Applicants by Credit_History\")\r\ntemp1.plot(kind='bar')\r\n\r\nax2 = fig.add_subplot(122)\r\ntemp2.plot(kind = 'bar')\r\nax2.set_xlabel('Credit_History')\r\nax2.set_ylabel('Probability of getting loan')\r\nax2.set_title(\"Probability of getting loan by credit history\")",
"_____no_output_____"
],
[
"temp3 = pd.crosstab(df['Credit_History'], df['Loan_Status'])\r\ntemp3.plot(kind='bar', stacked=True, color=['red','blue'], grid=False)",
"_____no_output_____"
],
[
"df.apply(lambda x: sum(x.isnull()),axis=0)",
"_____no_output_____"
],
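[
"# Editorial note: pandas provides an equivalent one-liner for the null counts above;\r\n# df.isnull().sum() gives the same result as the apply/lambda form and is more idiomatic.\r\ndf.isnull().sum()",
"_____no_output_____"
],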
[
"df['Gender'].fillna('Female',inplace=True)",
"_____no_output_____"
],
[
"df['LoanAmount'].fillna(df['LoanAmount'].mean(), inplace=True)",
"_____no_output_____"
],
[
"df['Self_Employed'].fillna('No',inplace=True)",
"_____no_output_____"
],
[
"df.apply(lambda x: sum(x.isnull()),axis=0)",
"_____no_output_____"
],
[
"df['LoanAmount_log'] = np.log(df['LoanAmount'])\r\ndf['LoanAmount_log'].hist(bins=20)",
"_____no_output_____"
],
[
"df['TotalIncome'] = df['ApplicantIncome'] + df['CoapplicantIncome']\r\ndf['TotalIncome_log'] = np.log(df['TotalIncome'])\r\ndf['LoanAmount_log'].hist(bins=20)",
"_____no_output_____"
],
[
"#Missing value correction",
"_____no_output_____"
],
[
"df['Married'].value_counts()",
"_____no_output_____"
],
[
"df['Married'].fillna('No',inplace=True)",
"_____no_output_____"
],
[
"df.apply(lambda x: sum(x.isnull()),axis=0)",
"_____no_output_____"
],
[
"df['Dependents'].fillna('3+',inplace=True)",
"_____no_output_____"
],
[
"df['Dependents'].value_counts()",
"_____no_output_____"
],
[
"df.apply(lambda x: sum(x.isnull()),axis=0)",
"_____no_output_____"
],
[
"df['Loan_Amount_Term'].value_counts()",
"_____no_output_____"
],
[
"df['Loan_Amount_Term'].fillna(df['Loan_Amount_Term'].mean(), inplace=True)",
"_____no_output_____"
],
[
"df.apply(lambda x: sum(x.isnull()),axis=0)",
"_____no_output_____"
],
[
"df['Credit_History'].fillna('0.0', inplace=True)",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"# Building a Predicting model in python",
"_____no_output_____"
],
[
"df['Gender'].fillna(df['Gender'].mode()[0], inplace=True)\r\ndf['Married'].fillna(df['Married'].mode()[0], inplace=True)\r\ndf['Dependents'].fillna(df['Dependents'].mode()[0], inplace=True)\r\ndf['Loan_Amount_Term'].fillna(df['Loan_Amount_Term'].mode()[0], inplace=True)\r\ndf['Credit_History'].fillna(df['Credit_History'].mode()[0], inplace=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"from sklearn.preprocessing import LabelEncoder\r\nvar_mod = ['Gender','Married','Dependents','Education','Self_Employed','Property_Area','Loan_Status']\r\nle = LabelEncoder()\r\nfor i in var_mod:\r\n df[i] = le.fit_transform(df[i])\r\ndf.dtypes ",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"#Import models from scikit learn module:\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import KFold #For K-fold cross validation\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\r\nfrom sklearn import metrics\r\n\r\n#Generic function for making a classification model and accessing performance:\r\ndef classification_model(model, data, predictors, outcome):\r\n #Fit the model:\r\n model.fit(data[predictors],data[outcome])\r\n \r\n #Make predictions on training set:\r\n predictions = model.predict(data[predictors])\r\n \r\n #Print accuracy\r\n accuracy = metrics.accuracy_score(predictions,data[outcome])\r\n print (\"Accuracy : %s\" % \"{0:.3%}\".format(accuracy))\r\n\r\n #Perform k-fold cross-validation with 5 folds\r\n kf = KFold(n_splits=5, random_state=None)\r\n error = []\r\n for train, test in kf.split(data[predictors]):\r\n # Filter training data\r\n train_predictors = (data[predictors].iloc[train,:])\r\n \r\n # The target we're using to train the algorithm.\r\n train_target = data[outcome].iloc[train]\r\n \r\n # Training the algorithm using the predictors and target.\r\n model.fit(train_predictors, train_target)\r\n \r\n #Record error from each cross-validation run\r\n error.append(model.score(data[predictors].iloc[test,:], data[outcome].iloc[test]))\r\n \r\n print (\"Cross-Validation Score : %s\" % \"{0:.3%}\".format(np.mean(error)))\r\n\r\n #Fit the model again so that it can be refered outside the function:\r\n model.fit(data[predictors],data[outcome]) ",
"_____no_output_____"
],
[
"outcome_var = 'Loan_Status'\r\nmodel = LogisticRegression()\r\npredictor_var = ['Credit_History']\r\nclassification_model(model, df,predictor_var,outcome_var)",
"Accuracy : 77.036%\nCross-Validation Score : 77.041%\n"
],
[
"#We can try different combination of variables:\r\npredictor_var = ['Credit_History','Education','Married','Self_Employed','Property_Area']\r\nclassification_model(model, df,predictor_var,outcome_var)",
"Accuracy : 77.036%\nCross-Validation Score : 77.041%\n"
],
[
"#Decision Trees",
"_____no_output_____"
],
[
"model = DecisionTreeClassifier()\r\npredictor_var = ['Credit_History','Gender','Married','Education']\r\nclassification_model(model, df,predictor_var,outcome_var)",
"Accuracy : 77.036%\nCross-Validation Score : 76.716%\n"
],
[
"#We can try different combination of variables:\r\npredictor_var = ['Credit_History','Loan_Amount_Term','LoanAmount_log']\r\nclassification_model(model, df,predictor_var,outcome_var)",
"Accuracy : 89.577%\nCross-Validation Score : 64.164%\n"
],
[
"model = RandomForestClassifier(n_estimators=100)\r\npredictor_var = ['Gender', 'Married', 'Dependents', 'Education',\r\n 'Self_Employed', 'Loan_Amount_Term', 'Credit_History', 'Property_Area',\r\n 'LoanAmount_log','TotalIncome_log']\r\nclassification_model(model, df,predictor_var,outcome_var)",
"Accuracy : 100.000%\nCross-Validation Score : 73.455%\n"
],
[
"#Create a series with feature importances:\r\nfeatimp = pd.Series(model.feature_importances_, index=predictor_var).sort_values(ascending=False)\r\nprint (featimp)",
"TotalIncome_log 0.286896\nLoanAmount_log 0.256552\nCredit_History 0.172489\nDependents 0.062100\nProperty_Area 0.056895\nLoan_Amount_Term 0.056745\nMarried 0.029520\nEducation 0.027268\nSelf_Employed 0.026071\nGender 0.025464\ndtype: float64\n"
],
[
"model = RandomForestClassifier(n_estimators=25, min_samples_split=25, max_depth=7, max_features=1)\r\npredictor_var = ['TotalIncome_log','LoanAmount_log','Credit_History','Dependents','Property_Area']\r\nclassification_model(model, df,predictor_var,outcome_var)",
"Accuracy : 79.642%\nCross-Validation Score : 74.425%\n"
],
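[
"# Editorial sketch: the earlier 100.000% training accuracy (vs ~73% cross-validation) is a\r\n# classic overfitting signal. A held-out evaluation, reusing df, predictor_var and\r\n# outcome_var as defined above; the split parameters below are illustrative assumptions.\r\nfrom sklearn.model_selection import train_test_split\r\nX_tr, X_te, y_tr, y_te = train_test_split(df[predictor_var], df[outcome_var], test_size=0.2, random_state=42)\r\nholdout = RandomForestClassifier(n_estimators=100, random_state=42)\r\nholdout.fit(X_tr, y_tr)\r\nprint('Hold-out accuracy: %.3f' % holdout.score(X_te, y_te))",
"_____no_output_____"
],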
[
"#Loan Prediction Done",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52a9a7e33d2c080cb05f88f4d5a26b25e82944d
| 80,019 |
ipynb
|
Jupyter Notebook
|
02-Chat-Bots.ipynb
|
MOTURUPRAVEENBHARGAV/ChatBot
|
eeab53addd63bf57d30cae2a79a1cc493cce04e7
|
[
"Apache-2.0"
] | null | null | null |
02-Chat-Bots.ipynb
|
MOTURUPRAVEENBHARGAV/ChatBot
|
eeab53addd63bf57d30cae2a79a1cc493cce04e7
|
[
"Apache-2.0"
] | null | null | null |
02-Chat-Bots.ipynb
|
MOTURUPRAVEENBHARGAV/ChatBot
|
eeab53addd63bf57d30cae2a79a1cc493cce04e7
|
[
"Apache-2.0"
] | null | null | null | 41.546729 | 22,014 | 0.601857 |
[
[
[
"## Loading the Data\n\nWe will be working with the Babi Data Set from Facebook Research.\n\nFull Details: https://research.fb.com/downloads/babi/\n\n- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,\n \"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks\",\n http://arxiv.org/abs/1502.05698\n",
"_____no_output_____"
]
],
[
[
"import pickle\nimport numpy as np",
"_____no_output_____"
],
[
"pwd",
"_____no_output_____"
],
[
"with open(\"train_qa.txt\", \"rb\") as fp: # Unpickling\n train_data = pickle.load(fp)",
"_____no_output_____"
],
[
"with open(\"test_qa.txt\", \"rb\") as fp: # Unpickling\n test_data = pickle.load(fp)",
"_____no_output_____"
]
],
[
[
"----",
"_____no_output_____"
],
[
"## Exploring the Format of the Data",
"_____no_output_____"
]
],
[
[
"type(test_data)",
"_____no_output_____"
],
[
"type(train_data)",
"_____no_output_____"
],
[
"len(test_data)",
"_____no_output_____"
],
[
"len(train_data)",
"_____no_output_____"
],
[
"train_data[0]",
"_____no_output_____"
],
[
"' '.join(train_data[0][0])",
"_____no_output_____"
],
[
"' '.join(train_data[0][1])",
"_____no_output_____"
],
[
"train_data[8][2]",
"_____no_output_____"
]
],
[
[
"-----\n\n## Setting up Vocabulary of All Words",
"_____no_output_____"
]
],
[
[
"# Create a set that holds the vocab words\nvocab = set()",
"_____no_output_____"
],
[
"all_data = test_data + train_data",
"_____no_output_____"
],
[
"for story, question , answer in all_data:\n \n vocab = vocab.union(set(story))\n vocab = vocab.union(set(question))",
"_____no_output_____"
],
[
"vocab.add('no')\nvocab.add('yes')",
"_____no_output_____"
],
[
"vocab",
"_____no_output_____"
],
[
"vocab_len = len(vocab) + 1 #we add an extra space to hold a 0 for Keras's pad_sequences\nvocab_len",
"_____no_output_____"
],
[
"max_story_len = max([len(data[0]) for data in all_data])",
"_____no_output_____"
],
[
"max_story_len",
"_____no_output_____"
],
[
"max_question_len = max([len(data[1]) for data in all_data])",
"_____no_output_____"
],
[
"max_question_len",
"_____no_output_____"
]
],
[
[
"## Vectorizing the Data",
"_____no_output_____"
]
],
[
[
"vocab",
"_____no_output_____"
],
[
"# Reserve 0 for pad_sequences\nvocab_size = len(vocab) + 1",
"_____no_output_____"
]
],
[
[
"-----------",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer",
"_____no_output_____"
],
[
"# integer encode sequences of words\ntokenizer = Tokenizer(filters=[])\ntokenizer.fit_on_texts(vocab)",
"_____no_output_____"
],
[
"tokenizer.word_index",
"_____no_output_____"
],
[
"train_story_text = []\ntrain_question_text = []\ntrain_answers = []\n\nfor story,question,answer in train_data:\n train_story_text.append(story)\n train_question_text.append(question)",
"_____no_output_____"
],
[
"train_story_seq = tokenizer.texts_to_sequences(train_story_text)",
"_____no_output_____"
],
[
"len(train_story_text)",
"_____no_output_____"
],
[
"len(train_story_seq)",
"_____no_output_____"
],
[
"# word_index = tokenizer.word_index",
"_____no_output_____"
]
],
[
[
"### Functionalize Vectorization",
"_____no_output_____"
]
],
[
[
"def vectorize_stories(data, word_index=tokenizer.word_index, max_story_len=max_story_len,max_question_len=max_question_len):\n '''\n INPUT: \n \n data: consisting of Stories,Queries,and Answers\n word_index: word index dictionary from tokenizer\n max_story_len: the length of the longest story (used for pad_sequences function)\n max_question_len: length of the longest question (used for pad_sequences function)\n\n\n OUTPUT:\n \n Vectorizes the stories,questions, and answers into padded sequences. We first loop for every story, query , and\n answer in the data. Then we convert the raw words to an word index value. Then we append each set to their appropriate\n output list. Then once we have converted the words to numbers, we pad the sequences so they are all of equal length.\n \n Returns this in the form of a tuple (X,Xq,Y) (padded based on max lengths)\n '''\n \n \n # X = STORIES\n X = []\n # Xq = QUERY/QUESTION\n Xq = []\n # Y = CORRECT ANSWER\n Y = []\n \n \n for story, query, answer in data:\n \n # Grab the word index for every word in story\n x = [word_index[word.lower()] for word in story]\n # Grab the word index for every word in query\n xq = [word_index[word.lower()] for word in query]\n \n # Grab the Answers (either Yes/No so we don't need to use list comprehension here)\n # Index 0 is reserved so we're going to use + 1\n y = np.zeros(len(word_index) + 1)\n \n # Now that y is all zeros and we know its just Yes/No , we can use numpy logic to create this assignment\n #\n y[word_index[answer]] = 1\n \n # Append each set of story,query, and answer to their respective holding lists\n X.append(x)\n Xq.append(xq)\n Y.append(y)\n \n # Finally, pad the sequences based on their max length so the RNN can be trained on uniformly long sequences.\n \n # RETURN TUPLE FOR UNPACKING\n return (pad_sequences(X, maxlen=max_story_len),pad_sequences(Xq, maxlen=max_question_len), np.array(Y))",
"_____no_output_____"
],
[
"inputs_train, queries_train, answers_train = vectorize_stories(train_data)",
"_____no_output_____"
],
[
"inputs_test, queries_test, answers_test = vectorize_stories(test_data)",
"_____no_output_____"
],
[
"inputs_test",
"_____no_output_____"
],
[
"queries_test",
"_____no_output_____"
],
[
"answers_test",
"_____no_output_____"
],
[
"sum(answers_test)",
"_____no_output_____"
],
[
"tokenizer.word_index['yes']",
"_____no_output_____"
],
[
"tokenizer.word_index['no']",
"_____no_output_____"
]
],
[
[
"## Creating the Model",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Input, Activation, Dense, Permute, Dropout,Embedding, LSTM\nfrom tensorflow.keras.layers import add, dot, concatenate",
"_____no_output_____"
]
],
[
[
"### Placeholders for Inputs\n\nRecall we technically have two inputs, stories and questions. So we need to use placeholders. `Input()` is used to instantiate a Keras tensor.\n",
"_____no_output_____"
]
],
[
[
"input_sequence = Input((max_story_len,))\nquestion = Input((max_question_len,))",
"_____no_output_____"
],
[
"input_sequence",
"_____no_output_____"
],
[
"question",
"_____no_output_____"
]
],
[
[
"### Building the Networks\n\nTo understand why we chose this setup, make sure to read the paper we are using:\n\n* Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,\n \"End-To-End Memory Networks\",\n http://arxiv.org/abs/1503.08895",
"_____no_output_____"
],
[
"## Encoders\n\n### Input Encoder m",
"_____no_output_____"
]
],
[
[
"# Input gets embedded to a sequence of vectors\ninput_encoder_m = Sequential()\ninput_encoder_m.add(Embedding(input_dim=vocab_size,output_dim=64))\ninput_encoder_m.add(Dropout(0.3))\n\n# This encoder will output:\n# (samples, story_maxlen, embedding_dim)",
"_____no_output_____"
]
],
[
[
"### Input Encoder c",
"_____no_output_____"
]
],
[
[
"# embed the input into a sequence of vectors of size query_maxlen\ninput_encoder_c = Sequential()\ninput_encoder_c.add(Embedding(input_dim=vocab_size,output_dim=max_question_len))\ninput_encoder_c.add(Dropout(0.3))\n# output: (samples, story_maxlen, query_maxlen)",
"_____no_output_____"
]
],
[
[
"### Question Encoder",
"_____no_output_____"
]
],
[
[
"# embed the question into a sequence of vectors\nquestion_encoder = Sequential()\nquestion_encoder.add(Embedding(input_dim=vocab_size,\n output_dim=64,\n input_length=max_question_len))\nquestion_encoder.add(Dropout(0.3))\n# output: (samples, query_maxlen, embedding_dim)",
"_____no_output_____"
]
],
[
[
"### Encode the Sequences",
"_____no_output_____"
]
],
[
[
"# encode input sequence and questions (which are indices)\n# to sequences of dense vectors\ninput_encoded_m = input_encoder_m(input_sequence)\ninput_encoded_c = input_encoder_c(input_sequence)\nquestion_encoded = question_encoder(question)",
"_____no_output_____"
]
],
[
[
"##### Use dot product to compute the match between first input vector seq and the query",
"_____no_output_____"
]
],
[
[
"# shape: `(samples, story_maxlen, query_maxlen)`\nmatch = dot([input_encoded_m, question_encoded], axes=(2, 2))\nmatch = Activation('softmax')(match)",
"_____no_output_____"
]
],
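[
[
"To make the shapes above concrete, here is a small self-contained NumPy sketch (illustrative only, using this model's dimensions with random stand-ins for the embeddings) of the match computation: a dot product between every story position and every query position, followed by a softmax over the last axis, which mirrors the default behaviour of `Activation('softmax')`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# One sample's encoded story and question (random stand-ins for the embeddings).\nstory_enc = np.random.rand(156, 64)   # (story_maxlen, embedding_dim)\nquery_enc = np.random.rand(6, 64)     # (query_maxlen, embedding_dim)\n\nmatch_demo = story_enc @ query_enc.T  # (story_maxlen, query_maxlen)\n\n# Softmax over the last axis, as Activation('softmax') does by default.\ne = np.exp(match_demo - match_demo.max(axis=-1, keepdims=True))\nattention = e / e.sum(axis=-1, keepdims=True)\nprint(attention.shape, attention.sum(axis=-1)[:3])  # rows sum to 1",
"_____no_output_____"
]
],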
[
[
"#### Add this match matrix with the second input vector sequence",
"_____no_output_____"
]
],
[
[
"# add the match matrix with the second input vector sequence\nresponse = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen)\nresponse = Permute((2, 1))(response) # (samples, query_maxlen, story_maxlen)",
"_____no_output_____"
]
],
[
[
"#### Concatenate",
"_____no_output_____"
]
],
[
[
"# concatenate the match matrix with the question vector sequence\nanswer = concatenate([response, question_encoded])",
"_____no_output_____"
],
[
"answer",
"_____no_output_____"
],
[
"# Reduce with RNN (LSTM)\nanswer = LSTM(32)(answer) # (samples, 32)",
"_____no_output_____"
],
[
"# Regularization with Dropout\nanswer = Dropout(0.5)(answer)\nanswer = Dense(vocab_size)(answer) # (samples, vocab_size)",
"_____no_output_____"
],
[
"# we output a probability distribution over the vocabulary\nanswer = Activation('softmax')(answer)\n\n# build the final model\nmodel = Model([input_sequence, question], answer)\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
],
[
"model.summary()",
"__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) (None, 156) 0 \n__________________________________________________________________________________________________\ninput_2 (InputLayer) (None, 6) 0 \n__________________________________________________________________________________________________\nsequential_1 (Sequential) multiple 2432 input_1[0][0] \n__________________________________________________________________________________________________\nsequential_3 (Sequential) (None, 6, 64) 2432 input_2[0][0] \n__________________________________________________________________________________________________\ndot_1 (Dot) (None, 156, 6) 0 sequential_1[1][0] \n sequential_3[1][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 156, 6) 0 dot_1[0][0] \n__________________________________________________________________________________________________\nsequential_2 (Sequential) multiple 228 input_1[0][0] \n__________________________________________________________________________________________________\nadd_1 (Add) (None, 156, 6) 0 activation_1[0][0] \n sequential_2[1][0] \n__________________________________________________________________________________________________\npermute_1 (Permute) (None, 6, 156) 0 add_1[0][0] \n__________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 6, 220) 0 permute_1[0][0] \n sequential_3[1][0] \n__________________________________________________________________________________________________\nlstm_1 (LSTM) (None, 32) 32384 concatenate_1[0][0] \n__________________________________________________________________________________________________\ndropout_4 (Dropout) (None, 32) 0 lstm_1[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 38) 1254 dropout_4[0][0] \n__________________________________________________________________________________________________\nactivation_2 (Activation) (None, 38) 0 dense_1[0][0] \n==================================================================================================\nTotal params: 38,730\nTrainable params: 38,730\nNon-trainable params: 0\n__________________________________________________________________________________________________\n"
],
[
"# train\nhistory = model.fit([inputs_train, queries_train], answers_train,batch_size=32,epochs=120,validation_data=([inputs_test, queries_test], answers_test))",
"Train on 10000 samples, validate on 1000 samples\nEpoch 1/120\n10000/10000 [==============================] - 7s 701us/step - loss: 0.8846 - acc: 0.4966 - val_loss: 0.6938 - val_acc: 0.4970\nEpoch 2/120\n10000/10000 [==============================] - 4s 364us/step - loss: 0.7022 - acc: 0.4987 - val_loss: 0.6935 - val_acc: 0.5030\nEpoch 3/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.6958 - acc: 0.5042 - val_loss: 0.6937 - val_acc: 0.4970\nEpoch 4/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.6946 - acc: 0.5097 - val_loss: 0.6977 - val_acc: 0.4970\nEpoch 5/120\n10000/10000 [==============================] - 3s 345us/step - loss: 0.6943 - acc: 0.5073 - val_loss: 0.6932 - val_acc: 0.5030\nEpoch 6/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.6954 - acc: 0.4873 - val_loss: 0.6938 - val_acc: 0.4970\nEpoch 7/120\n10000/10000 [==============================] - 3s 345us/step - loss: 0.6946 - acc: 0.4970 - val_loss: 0.6953 - val_acc: 0.4970\nEpoch 8/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.6948 - acc: 0.4955 - val_loss: 0.6939 - val_acc: 0.4970\nEpoch 9/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.6944 - acc: 0.4937 - val_loss: 0.6933 - val_acc: 0.5030\nEpoch 10/120\n10000/10000 [==============================] - 4s 360us/step - loss: 0.6939 - acc: 0.5011 - val_loss: 0.6937 - val_acc: 0.4970\nEpoch 11/120\n10000/10000 [==============================] - 4s 365us/step - loss: 0.6941 - acc: 0.5051 - val_loss: 0.6934 - val_acc: 0.5030\nEpoch 12/120\n10000/10000 [==============================] - 4s 352us/step - loss: 0.6941 - acc: 0.5014 - val_loss: 0.6955 - val_acc: 0.4970\nEpoch 13/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.6936 - acc: 0.5104 - val_loss: 0.6943 - val_acc: 0.4940\nEpoch 14/120\n10000/10000 [==============================] - 3s 350us/step - loss: 0.6938 - acc: 0.5045 - val_loss: 0.6938 - val_acc: 0.4950\nEpoch 15/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.6914 - acc: 0.5216 - val_loss: 0.6944 - val_acc: 0.5030\nEpoch 16/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.6830 - acc: 0.5467 - val_loss: 0.6825 - val_acc: 0.5250\nEpoch 17/120\n10000/10000 [==============================] - 4s 356us/step - loss: 0.6663 - acc: 0.5840 - val_loss: 0.6656 - val_acc: 0.6020\nEpoch 18/120\n10000/10000 [==============================] - 4s 368us/step - loss: 0.6404 - acc: 0.6339 - val_loss: 0.6247 - val_acc: 0.6690\nEpoch 19/120\n10000/10000 [==============================] - 4s 364us/step - loss: 0.6049 - acc: 0.6829 - val_loss: 0.5708 - val_acc: 0.7210\nEpoch 20/120\n10000/10000 [==============================] - 4s 356us/step - loss: 0.5569 - acc: 0.7290 - val_loss: 0.5159 - val_acc: 0.7460\nEpoch 21/120\n10000/10000 [==============================] - 4s 358us/step - loss: 0.5180 - acc: 0.7549 - val_loss: 0.4775 - val_acc: 0.7870\nEpoch 22/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.4891 - acc: 0.7774 - val_loss: 0.4449 - val_acc: 0.7970\nEpoch 23/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.4528 - acc: 0.8020 - val_loss: 0.4142 - val_acc: 0.8190\nEpoch 24/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.4253 - acc: 0.8161 - val_loss: 0.4205 - val_acc: 0.8280\nEpoch 25/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.4009 - acc: 0.8354 
- val_loss: 0.4094 - val_acc: 0.8280\nEpoch 26/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.3815 - acc: 0.8432 - val_loss: 0.3919 - val_acc: 0.8240\nEpoch 27/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.3653 - acc: 0.8496 - val_loss: 0.3926 - val_acc: 0.8450\nEpoch 28/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.3535 - acc: 0.8549 - val_loss: 0.3939 - val_acc: 0.8430\nEpoch 29/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.3435 - acc: 0.8581 - val_loss: 0.3716 - val_acc: 0.8320\nEpoch 30/120\n10000/10000 [==============================] - 4s 350us/step - loss: 0.3403 - acc: 0.8603 - val_loss: 0.3677 - val_acc: 0.8340\nEpoch 31/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.3302 - acc: 0.8570 - val_loss: 0.3681 - val_acc: 0.8430\nEpoch 32/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.3295 - acc: 0.8593 - val_loss: 0.3476 - val_acc: 0.8380\nEpoch 33/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.3239 - acc: 0.8628 - val_loss: 0.3521 - val_acc: 0.8430\nEpoch 34/120\n10000/10000 [==============================] - 4s 350us/step - loss: 0.3171 - acc: 0.8677 - val_loss: 0.3443 - val_acc: 0.8390\nEpoch 35/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.3168 - acc: 0.8629 - val_loss: 0.3507 - val_acc: 0.8340\nEpoch 36/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.3121 - acc: 0.8664 - val_loss: 0.3558 - val_acc: 0.8310\nEpoch 37/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.3107 - acc: 0.8662 - val_loss: 0.3411 - val_acc: 0.8430\nEpoch 38/120\n10000/10000 [==============================] - 4s 355us/step - loss: 0.3061 - acc: 0.8698 - val_loss: 0.3460 - val_acc: 0.8400\nEpoch 39/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.3065 - acc: 0.8671 - val_loss: 0.3493 - val_acc: 0.8400\nEpoch 40/120\n10000/10000 [==============================] - 4s 352us/step - loss: 0.3060 - acc: 0.8688 - val_loss: 0.3446 - val_acc: 0.8410\nEpoch 41/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.3000 - acc: 0.8696 - val_loss: 0.3542 - val_acc: 0.8450\nEpoch 42/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.3039 - acc: 0.8665 - val_loss: 0.3692 - val_acc: 0.8350\nEpoch 43/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.3015 - acc: 0.8695 - val_loss: 0.3513 - val_acc: 0.8400\nEpoch 44/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.2986 - acc: 0.8694 - val_loss: 0.3577 - val_acc: 0.8320\nEpoch 45/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.2952 - acc: 0.8730 - val_loss: 0.3496 - val_acc: 0.8400\nEpoch 46/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.2969 - acc: 0.8681 - val_loss: 0.3424 - val_acc: 0.8450\nEpoch 47/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.2923 - acc: 0.8721 - val_loss: 0.3549 - val_acc: 0.8280\nEpoch 48/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.2911 - acc: 0.8732 - val_loss: 0.4681 - val_acc: 0.8140\nEpoch 49/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.2917 - acc: 0.8703 - val_loss: 0.3502 - val_acc: 0.8390\nEpoch 50/120\n10000/10000 [==============================] - 3s 344us/step - loss: 0.2900 - acc: 0.8746 - 
val_loss: 0.3515 - val_acc: 0.8400\nEpoch 51/120\n10000/10000 [==============================] - 4s 352us/step - loss: 0.2855 - acc: 0.8757 - val_loss: 0.3499 - val_acc: 0.8360\nEpoch 52/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.2864 - acc: 0.8735 - val_loss: 0.3531 - val_acc: 0.8410\nEpoch 53/120\n10000/10000 [==============================] - 4s 356us/step - loss: 0.2864 - acc: 0.8772 - val_loss: 0.3905 - val_acc: 0.8270\nEpoch 54/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.2857 - acc: 0.8752 - val_loss: 0.3618 - val_acc: 0.8390\nEpoch 55/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.2819 - acc: 0.8742 - val_loss: 0.3501 - val_acc: 0.8380\nEpoch 56/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.2853 - acc: 0.8775 - val_loss: 0.3484 - val_acc: 0.8400\nEpoch 57/120\n10000/10000 [==============================] - 4s 355us/step - loss: 0.2767 - acc: 0.8804 - val_loss: 0.3463 - val_acc: 0.8410\nEpoch 58/120\n10000/10000 [==============================] - 4s 355us/step - loss: 0.2802 - acc: 0.8780 - val_loss: 0.3763 - val_acc: 0.8350\nEpoch 59/120\n10000/10000 [==============================] - 3s 350us/step - loss: 0.2844 - acc: 0.8777 - val_loss: 0.3483 - val_acc: 0.8420\nEpoch 60/120\n10000/10000 [==============================] - 4s 350us/step - loss: 0.2759 - acc: 0.8828 - val_loss: 0.3819 - val_acc: 0.8340\nEpoch 61/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.2722 - acc: 0.8799 - val_loss: 0.3596 - val_acc: 0.8400\nEpoch 62/120\n10000/10000 [==============================] - 4s 366us/step - loss: 0.2704 - acc: 0.8845 - val_loss: 0.3751 - val_acc: 0.8400\nEpoch 63/120\n10000/10000 [==============================] - 4s 372us/step - loss: 0.2691 - acc: 0.8854 - val_loss: 0.3745 - val_acc: 0.8430\nEpoch 64/120\n10000/10000 [==============================] - 4s 356us/step - loss: 0.2698 - acc: 0.8865 - val_loss: 0.3562 - val_acc: 0.8400\nEpoch 65/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.2674 - acc: 0.8875 - val_loss: 0.3534 - val_acc: 0.8400\nEpoch 66/120\n10000/10000 [==============================] - 4s 356us/step - loss: 0.2622 - acc: 0.8888 - val_loss: 0.3763 - val_acc: 0.8390\nEpoch 67/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.2593 - acc: 0.8885 - val_loss: 0.3670 - val_acc: 0.8470\nEpoch 68/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.2567 - acc: 0.8937 - val_loss: 0.3699 - val_acc: 0.8560\nEpoch 69/120\n10000/10000 [==============================] - 4s 361us/step - loss: 0.2573 - acc: 0.8951 - val_loss: 0.3676 - val_acc: 0.8430\nEpoch 70/120\n10000/10000 [==============================] - 4s 364us/step - loss: 0.2489 - acc: 0.8962 - val_loss: 0.3564 - val_acc: 0.8510\nEpoch 71/120\n10000/10000 [==============================] - 4s 363us/step - loss: 0.2479 - acc: 0.8961 - val_loss: 0.3605 - val_acc: 0.8460\nEpoch 72/120\n10000/10000 [==============================] - 4s 353us/step - loss: 0.2406 - acc: 0.9026 - val_loss: 0.3605 - val_acc: 0.8560\nEpoch 73/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.2404 - acc: 0.9020 - val_loss: 0.3490 - val_acc: 0.8510\nEpoch 74/120\n10000/10000 [==============================] - 4s 358us/step - loss: 0.2374 - acc: 0.9045 - val_loss: 0.3400 - val_acc: 0.8470\nEpoch 75/120\n10000/10000 [==============================] - 4s 381us/step - loss: 0.2299 - acc: 0.9060 - 
val_loss: 0.3453 - val_acc: 0.8490\nEpoch 76/120\n10000/10000 [==============================] - 4s 352us/step - loss: 0.2301 - acc: 0.9046 - val_loss: 0.3372 - val_acc: 0.8490\nEpoch 77/120\n10000/10000 [==============================] - 4s 353us/step - loss: 0.2250 - acc: 0.9076 - val_loss: 0.3354 - val_acc: 0.8510\nEpoch 78/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.2147 - acc: 0.9087 - val_loss: 0.3416 - val_acc: 0.8490\nEpoch 79/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.2111 - acc: 0.9119 - val_loss: 0.3774 - val_acc: 0.8520\nEpoch 80/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.2148 - acc: 0.9139 - val_loss: 0.3209 - val_acc: 0.8650\nEpoch 81/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.2045 - acc: 0.9158 - val_loss: 0.3157 - val_acc: 0.8650\nEpoch 82/120\n10000/10000 [==============================] - 3s 345us/step - loss: 0.1916 - acc: 0.9194 - val_loss: 0.3012 - val_acc: 0.8700\nEpoch 83/120\n10000/10000 [==============================] - 3s 350us/step - loss: 0.1881 - acc: 0.9228 - val_loss: 0.2922 - val_acc: 0.8670\nEpoch 84/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.1783 - acc: 0.9266 - val_loss: 0.2849 - val_acc: 0.8770\nEpoch 85/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1933 - acc: 0.9209 - val_loss: 0.3006 - val_acc: 0.8730\nEpoch 86/120\n10000/10000 [==============================] - 4s 352us/step - loss: 0.1824 - acc: 0.9279 - val_loss: 0.2729 - val_acc: 0.8810\nEpoch 87/120\n10000/10000 [==============================] - 3s 350us/step - loss: 0.1779 - acc: 0.9282 - val_loss: 0.2774 - val_acc: 0.8840\nEpoch 88/120\n10000/10000 [==============================] - 3s 345us/step - loss: 0.1710 - acc: 0.9303 - val_loss: 0.2758 - val_acc: 0.8810\nEpoch 89/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.1658 - acc: 0.9345 - val_loss: 0.2854 - val_acc: 0.8880\nEpoch 90/120\n10000/10000 [==============================] - 4s 358us/step - loss: 0.1637 - acc: 0.9347 - val_loss: 0.2634 - val_acc: 0.8930\nEpoch 91/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.1577 - acc: 0.9358 - val_loss: 0.2546 - val_acc: 0.8910\nEpoch 92/120\n10000/10000 [==============================] - 4s 354us/step - loss: 0.1611 - acc: 0.9387 - val_loss: 0.2445 - val_acc: 0.9080\nEpoch 93/120\n10000/10000 [==============================] - 4s 368us/step - loss: 0.1560 - acc: 0.9381 - val_loss: 0.2369 - val_acc: 0.9040\nEpoch 94/120\n10000/10000 [==============================] - 4s 352us/step - loss: 0.1470 - acc: 0.9409 - val_loss: 0.2764 - val_acc: 0.8950\nEpoch 95/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1475 - acc: 0.9428 - val_loss: 0.2634 - val_acc: 0.8980\nEpoch 96/120\n10000/10000 [==============================] - 3s 350us/step - loss: 0.1418 - acc: 0.9454 - val_loss: 0.2367 - val_acc: 0.9070\nEpoch 97/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1436 - acc: 0.9453 - val_loss: 0.2460 - val_acc: 0.9120\nEpoch 98/120\n10000/10000 [==============================] - 4s 350us/step - loss: 0.1434 - acc: 0.9430 - val_loss: 0.2593 - val_acc: 0.9130\nEpoch 99/120\n10000/10000 [==============================] - 3s 348us/step - loss: 0.1348 - acc: 0.9465 - val_loss: 0.2851 - val_acc: 0.9000\nEpoch 100/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1406 - acc: 0.9431 - 
val_loss: 0.2609 - val_acc: 0.9040\nEpoch 101/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1342 - acc: 0.9478 - val_loss: 0.2705 - val_acc: 0.9050\nEpoch 102/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.1352 - acc: 0.9475 - val_loss: 0.2505 - val_acc: 0.9010\nEpoch 103/120\n10000/10000 [==============================] - 3s 345us/step - loss: 0.1291 - acc: 0.9502 - val_loss: 0.2708 - val_acc: 0.9080\nEpoch 104/120\n10000/10000 [==============================] - 4s 351us/step - loss: 0.1250 - acc: 0.9523 - val_loss: 0.2634 - val_acc: 0.9120\nEpoch 105/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.1203 - acc: 0.9519 - val_loss: 0.2725 - val_acc: 0.9070\nEpoch 106/120\n10000/10000 [==============================] - 4s 350us/step - loss: 0.1187 - acc: 0.9540 - val_loss: 0.2557 - val_acc: 0.9170\nEpoch 107/120\n10000/10000 [==============================] - 3s 347us/step - loss: 0.1182 - acc: 0.9531 - val_loss: 0.2664 - val_acc: 0.9090\nEpoch 108/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1181 - acc: 0.9530 - val_loss: 0.2334 - val_acc: 0.9130\nEpoch 109/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1158 - acc: 0.9554 - val_loss: 0.2899 - val_acc: 0.9120\nEpoch 110/120\n10000/10000 [==============================] - 3s 350us/step - loss: 0.1167 - acc: 0.9567 - val_loss: 0.2754 - val_acc: 0.9090\nEpoch 111/120\n10000/10000 [==============================] - 4s 350us/step - loss: 0.1120 - acc: 0.9561 - val_loss: 0.2898 - val_acc: 0.9100\nEpoch 112/120\n10000/10000 [==============================] - 3s 349us/step - loss: 0.1118 - acc: 0.9588 - val_loss: 0.2541 - val_acc: 0.9140\nEpoch 113/120\n10000/10000 [==============================] - 3s 346us/step - loss: 0.1083 - acc: 0.9583 - val_loss: 0.2511 - val_acc: 0.9110\nEpoch 114/120\n10000/10000 [==============================] - 4s 359us/step - loss: 0.1131 - acc: 0.9560 - val_loss: 0.2496 - val_acc: 0.9180\nEpoch 115/120\n10000/10000 [==============================] - 4s 364us/step - loss: 0.1050 - acc: 0.9599 - val_loss: 0.3021 - val_acc: 0.9170\nEpoch 116/120\n10000/10000 [==============================] - 3s 342us/step - loss: 0.1038 - acc: 0.9619 - val_loss: 0.2673 - val_acc: 0.9160\nEpoch 117/120\n"
]
],
[
[
"### Saving the Model",
"_____no_output_____"
]
],
[
[
"filename = 'chatbot_120_epochs.h5'\nmodel.save(filename)",
"_____no_output_____"
]
],
[
[
"## Evaluating the Model\n\n### Plotting Out Training History",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nprint(history.history.keys())\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()",
"dict_keys(['val_loss', 'val_acc', 'loss', 'acc'])\n"
]
],
[
[
"### Evaluating on Given Test Set",
"_____no_output_____"
]
],
[
[
"model.load_weights(filename)\npred_results = model.predict(([inputs_test, queries_test]))",
"_____no_output_____"
],
[
"test_data[0][0]",
"_____no_output_____"
],
[
"story =' '.join(word for word in test_data[0][0])\nprint(story)",
"Mary got the milk there . John moved to the bedroom .\n"
],
[
"query = ' '.join(word for word in test_data[0][1])\nprint(query)",
"Is John in the kitchen ?\n"
],
[
"print(\"True Test Answer from Data is:\",test_data[0][2])",
"True Test Answer from Data is: no\n"
],
[
"#Generate prediction from model\nval_max = np.argmax(pred_results[0])\n\nfor key, val in tokenizer.word_index.items():\n if val == val_max:\n k = key\n\nprint(\"Predicted answer is: \", k)\nprint(\"Probability of certainty was: \", pred_results[0][val_max])",
"Predicted answer is: no\nProbability of certainty was: 0.9999999\n"
]
],
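[
[
"An optional convenience (editorial sketch): inverting tokenizer.word_index once avoids re-scanning the whole dictionary for every prediction.",
"_____no_output_____"
]
],
[
[
"# Build the inverse index once, then decode predictions by direct lookup.\nindex_word = {val: key for key, val in tokenizer.word_index.items()}\nprint('Predicted answer is: ', index_word[val_max])",
"_____no_output_____"
]
],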
[
[
"## Writing Your Own Stories and Questions\n\nRemember you can only use words from the existing vocab",
"_____no_output_____"
]
],
[
[
"vocab",
"_____no_output_____"
],
[
"# Note the whitespace of the periods\nmy_story = \"John left the kitchen . Sandra dropped the football in the garden .\"\nmy_story.split()",
"_____no_output_____"
],
[
"my_question = \"Is the football in the garden ?\"",
"_____no_output_____"
],
[
"my_question.split()",
"_____no_output_____"
],
[
"mydata = [(my_story.split(),my_question.split(),'yes')]",
"_____no_output_____"
],
[
"my_story,my_ques,my_ans = vectorize_stories(mydata)",
"_____no_output_____"
],
[
"pred_results = model.predict(([ my_story, my_ques]))",
"_____no_output_____"
],
[
"#Generate prediction from model\nval_max = np.argmax(pred_results[0])\n\nfor key, val in tokenizer.word_index.items():\n if val == val_max:\n k = key\n\nprint(\"Predicted answer is: \", k)\nprint(\"Probability of certainty was: \", pred_results[0][val_max])",
"Predicted answer is: yes\nProbability of certainty was: 0.97079676\n"
]
],
[
[
"# Great Job!",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
c52a9e752c17b99c8cb377ecb9c9ecc79a0f6089
| 9,356 |
ipynb
|
Jupyter Notebook
|
02_usecases/fraud_detector/profiler/Fraud_Detector_Data_Profiling.ipynb
|
MarcusFra/workshop
|
83f16d41f5e10f9c23242066f77a14bb61ac78d7
|
[
"Apache-2.0"
] | 2,327 |
2020-03-01T09:47:34.000Z
|
2021-11-25T12:38:42.000Z
|
02_usecases/fraud_detector/profiler/Fraud_Detector_Data_Profiling.ipynb
|
MarcusFra/workshop
|
83f16d41f5e10f9c23242066f77a14bb61ac78d7
|
[
"Apache-2.0"
] | 209 |
2020-03-01T17:14:12.000Z
|
2021-11-08T20:35:42.000Z
|
02_usecases/fraud_detector/profiler/Fraud_Detector_Data_Profiling.ipynb
|
MarcusFra/workshop
|
83f16d41f5e10f9c23242066f77a14bb61ac78d7
|
[
"Apache-2.0"
] | 686 |
2020-03-03T17:24:51.000Z
|
2021-11-25T23:39:12.000Z
| 34.021818 | 431 | 0.576101 |
[
[
[
"# Amazon Fraud Detector - Data Profiler Notebook \n\n\n### Dataset Guidance\n-------\n\nAWS Fraud Detector's Online Fraud Insights(OFI) model supports a flexible schema, enabling you to train an OFI model to your specific data and business need. This notebook was developed to help you profile your data and identify potenital issues before you train an OFI model. The following summarizes the minimimum CSV File requirements:\n\n* The files are in CSV UTF-8 (comma delimited) format (*.csv).\n* The file should contain at least 10k rows and the following __four__ required fields: \n\n * Event timestamp \n * IP address \n * Email address\n * Fraud label \n \n* The maximum file size is 10 gigabytes (GB). \n\n* The following dates and datetime formats are supported:\n * Dates: YYYY-MM-DD (eg. 2019-03-21)\n * Datetime: YYYY-MM-DD HH:mm:ss (eg. 2019-03-21 12:01:32) \n * ISO 8601 Datetime: YYYY-MM-DDTHH:mm:ss+/-HH:mm (eg. 2019-03-21T20:58:41+07:00)\n\n* The decimal precision is up to four decimal places.\n* Numeric data should not contain commas and currency symbols. \n* Columns with values that could contain commas, such as address or custom text should be enclosed in double quotes. \n\n\n\n### Getting Started with Data \n-------\nThe following general guidance is provided to get the most out of your AWS Fraud Detector Online Fraud Insights Model. \n\n* Gathering Data - The OFI model requires a minimum of 10k records. We recommend that a minimum of 6 weeks of historic data is collected, though 3 - 6 months of data is preferable. As part of the process the OFI model partitions your data based on the Event Timestamp such that performance metrics are calculated on the out of sample (latest) data, thus the format of the event timestamp is important. \n\n \n* Data & Label Maturity: As part of the data gathering process we want to insure that records have had sufficient time to “mature”, i.e. that enough time has passed to insure “non-fraud\" and “fraud” records have been correctly identified. It often takes 30 - 45 days (or more) to correctly identify fraudulent events, because of this it is important to insure that the latest records are at least 30 days old or older. \n\n \n* Sampling: The OFI training process will sample and partition historic based on event timestamp. There is no need to manually sample the data and doing so may negatively influence your model’s results. \n\n \n* Fraud Labels: The OFI model requires that a minimum of 500 observations are identified and labeled as “fraud”. As noted above, fraud label maturity is important. Insure that extracted data has sufficiently matured to insure that fraudulent events have been reliably found. \n \n \n* Custom Fields: the OFI model requires 4 fields: event timestamp, IP address, email address and fraud label. The more custom fields you provide the better the OFI model can differentiate between fraud and not fraud. \n \n \n* Nulls and Missing Values: OFI model handles null and missing values, however the percentage of nulls in key fields should be limited. Especially timestamp and fraud label columns should not contain any missing values. \n\n \nIf you would like to know more, please check out the [Fraud Detector's Documentation](https://docs.aws.amazon.com/frauddetector/). \n",
"_____no_output_____"
]
],
[
[
"from IPython.core.display import display, HTML\nfrom IPython.display import clear_output\ndisplay(HTML(\"<style>.container { width:90% }</style>\"))\nfrom IPython.display import IFrame\n# ------------------------------------------------------------------\nimport numpy as np\nimport pandas as pd\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\npd.options.display.float_format = '{:.4f}'.format\n\n# -- AWS stuff -- \nimport boto3\n",
"_____no_output_____"
]
],
[
[
"### Amazon Fraud Detector Profiling \n-----\n\nfrom github download and copy the afd_profile.py python program and template directory to your notebook \n\n<div class=\"alert alert-info\"> <strong> afd_profile.py </strong>\n\n- afd_profile.py - is the python package which will generate your profile report. \n- /templates - directory contains the supporting profile templates \n\n\n</div>\n",
"_____no_output_____"
]
],
[
[
"# -- get this package from github -- \nimport afd_profile",
"_____no_output_____"
]
],
[
[
"### Intialize your S3 client \n-----\nhttps://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html",
"_____no_output_____"
]
],
[
[
"client = boto3.client('s3')",
"_____no_output_____"
]
],
[
[
"### File & Field Mapping\n-----\nSimply map your file and field names to the required config values. \n\n<div class=\"alert alert-info\"> <strong> Map the Required fields </strong>\n\n- input_file: this is your CSV file in your s3 bucket \n\n<b> required_features </b> are the minimally required freatures to run Amazon Fraud Detector \n- EVENT_TIMESTAMP: map this to your file's Date or Datetime field. \n- IP_ADDRESS: map this to your file's IP address field. \n- EMAIL_ADDRESS: map this to your file's email address field. \n- FRAUD_LABEL: map this to your file's fraud label field. \n **note: the profiler will identify the \"rare\" case and assume that it is fraud**\n \n</div>\n",
"_____no_output_____"
]
],
[
[
"# -- update your configuration -- \nconfig = { \n \"input_file\" : \"<training dataset name>.csv\",\n \"required_features\" : {\n \"EVENT_TIMESTAMP\" : \"EVENT_DATE\",\n \"EVENT_LABEL\" : \"EVENT_LABEL\",\n \"IP_ADDRESS\" : \"ip_address\",\n \"EMAIL_ADDRESS\" : \"user_email\"\n }\n}\n",
"_____no_output_____"
]
],
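[
[
"Before running the profiler, a quick sanity check can confirm that every mapped field actually exists in the CSV header. This is an editorial sketch only: `BUCKET` is a placeholder (the original recipe does not specify one), and reading the full object just to inspect the header is wasteful on large files.",
"_____no_output_____"
]
],
[
[
"import io\n\nBUCKET = '<your-s3-bucket>'  # placeholder - replace with your bucket name\n\nobj = client.get_object(Bucket=BUCKET, Key=config['input_file'])\nheader = pd.read_csv(io.BytesIO(obj['Body'].read()), nrows=0)\nmissing = [v for v in config['required_features'].values() if v not in header.columns]\nprint('Missing mapped columns:', missing if missing else 'none')",
"_____no_output_____"
]
],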
[
[
"#### Run Profiler\n-----\nThe profiler will read your file and produce an HTML file as a result which will be displayed inline within this notebook. \n \nNote: you can also open **report.html** in a separate browser tab. ",
"_____no_output_____"
]
],
[
[
"# -- generate the report object --\nreport = afd_profile.profile_report(config)",
"0\n"
],
[
"with open(\"report.html\", \"w\") as file:\n file.write(report)\n\nIFrame(src='report.html', width=1500, height=800)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
c52ab236f5dcfe6c065df0f8a5dc7e8732e0c6d6
| 834,995 |
ipynb
|
Jupyter Notebook
|
docs/source/examples-v016/simulating_a_predefined_model.ipynb
|
samirmartins/sysidentpy
|
f6667996c1c9565c9b5feb8b1d2c995f937498f8
|
[
"BSD-3-Clause"
] | 107 |
2020-05-19T12:59:56.000Z
|
2022-03-29T05:25:27.000Z
|
docs/source/examples-v016/simulating_a_predefined_model.ipynb
|
samirmartins/sysidentpy
|
f6667996c1c9565c9b5feb8b1d2c995f937498f8
|
[
"BSD-3-Clause"
] | 20 |
2020-05-24T15:56:15.000Z
|
2022-03-05T19:54:02.000Z
|
docs/source/examples-v016/simulating_a_predefined_model.ipynb
|
samirmartins/sysidentpy
|
f6667996c1c9565c9b5feb8b1d2c995f937498f8
|
[
"BSD-3-Clause"
] | 25 |
2020-05-19T14:02:17.000Z
|
2022-03-15T20:17:58.000Z
| 1,546.287037 | 113,058 | 0.808563 |
[
[
[
"# V0.1.6 - Simulate a Predefined Model\n\nExample created by Wilson Rocha Lacerda Junior",
"_____no_output_____"
]
],
[
[
"pip install sysidentpy",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sysidentpy.metrics import root_relative_squared_error\nfrom sysidentpy.utils.generate_data import get_miso_data, get_siso_data\nfrom sysidentpy.polynomial_basis.simulation import SimulatePolynomialNarmax\n",
"_____no_output_____"
]
],
[
[
"## Generating 1 input 1 output sample data \n### The data is generated by simulating the following model:\n\n$y_k = 0.2y_{k-1} + 0.1y_{k-1}x_{k-1} + 0.9x_{k-2} + e_{k}$\n\nIf *colored_noise* is set to True:\n\n$e_{k} = 0.8\\nu_{k-1} + \\nu_{k}$\n\nwhere $x$ is a uniformly distributed random variable and $\\nu$ is a gaussian distributed variable with $\\mu=0$ and $\\sigma=0.1$\n\nIn the next example we will generate a data with 1000 samples with white noise and selecting 90% of the data to train the model. ",
"_____no_output_____"
]
],
[
[
"x_train, x_test, y_train, y_test = get_siso_data(n=1000,\n colored_noise=False,\n sigma=0.001,\n train_percentage=90)",
"_____no_output_____"
]
],
[
[
"## Defining the model\n\nWe already know that the generated data is a result of the model $𝑦_𝑘=0.2𝑦_{𝑘−1}+0.1𝑦_{𝑘−1}𝑥_{𝑘−1}+0.9𝑥_{𝑘−2}+𝑒_𝑘$ . Thus, we can create a model with those regressors follwing a codification pattern:\n- $0$ is the constant term,\n- $[1001] = y_{k-1}$\n- $[100n] = y_{k-n}$\n- $[200n] = x1_{k-n}$\n- $[300n] = x2_{k-n}$\n- $[1011, 1001] = y_{k-11} \\times y_{k-1}$\n- $[100n, 100m] = y_{k-n} \\times y_{k-m}$\n- $[12001, 1003, 1001] = x11_{k-1} \\times y_{k-3} \\times y_{k-1}$\n- and so on\n\n### Importante Note\n\nThe order of the arrays matter. \n\nIf you use [2001, 1001], it will work, but [1001, 2001] will not (the regressor will be ignored). Always put the highest value first:\n- $[2003, 2001]$ **works**\n- $[2001, 2003]$ **do not work**\n\nWe will handle this limitation in upcoming update.",
"_____no_output_____"
]
],
[
[
"s = SimulatePolynomialNarmax()\n\n# the model must be a numpy array\nmodel = np.array(\n [\n [1001, 0], # y(k-1)\n [2001, 1001], # x1(k-1)y(k-1)\n [2002, 0], # x1(k-2)\n ]\n )\n# theta must be a numpy array of shape (n, 1) where n is the number of regressors\ntheta = np.array([[0.2, 0.9, 0.1]]).T",
"_____no_output_____"
]
],
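[
[
"As a sanity check on the encoding above, a tiny helper (editorial sketch, not part of sysidentpy's API) can decode each regressor code back into a human-readable term:",
"_____no_output_____"
]
],
[
[
"def decode_regressor(code):\n    \"\"\"Decode one regressor code: 0 -> constant, 100n -> y(k-n), (m+1)00n -> x_m(k-n).\"\"\"\n    if code == 0:\n        return 'constant'\n    var, lag = divmod(code, 1000)\n    name = 'y' if var == 1 else 'x{}'.format(var - 1)\n    return '{}(k-{})'.format(name, lag)\n\n# Decode the model defined above; zeros are padding within a row.\n# Expected: ['y(k-1)', 'x1(k-1) * y(k-1)', 'x1(k-2)']\nprint([' * '.join(decode_regressor(c) for c in row if c != 0) for row in model])",
"_____no_output_____"
]
],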
[
[
"## Simulating the model\n\nAfter defining the model and theta we just need to use the simulate method.\n\nThe simulate method returns the predicted values and the results where we can look at regressors,\nparameters and ERR values.",
"_____no_output_____"
]
],
[
[
"yhat, results = s.simulate(\n X_test=x_test,\n y_test=y_test,\n model_code=model,\n theta=theta,\n plot=True)",
"_____no_output_____"
],
[
"results = pd.DataFrame(results, columns=['Regressors', 'Parameters', 'ERR'])\nresults",
"_____no_output_____"
]
],
[
[
"### Options\n\nYou can set the `steps_ahead` to run the prediction/simulation:",
"_____no_output_____"
]
],
[
[
"yhat, results = s.simulate(\n X_test=x_test,\n y_test=y_test,\n model_code=model,\n theta=theta,\n plot=False,\n steps_ahead=1)\nrrse = root_relative_squared_error(y_test, yhat)\nrrse",
"_____no_output_____"
],
[
"yhat, results = s.simulate(\n X_test=x_test,\n y_test=y_test,\n model_code=model,\n theta=theta,\n plot=False,\n steps_ahead=21)\nrrse = root_relative_squared_error(y_test, yhat)\nrrse",
"_____no_output_____"
]
],
[
[
"### Estimating the parameters\n\nIf you have only the model strucuture, you can create an object with `estimate_parameter=True` and\nchoose the methed for estimation using `estimator`. In this case, you have to pass the training data\nfor parameters estimation. \n\nWhen `estimate_parameter=True`, we also computate the ERR considering only the regressors defined by the user. ",
"_____no_output_____"
]
],
[
[
"s2 = SimulatePolynomialNarmax(estimate_parameter=True, estimator='recursive_least_squares')\nyhat, results = s2.simulate(\n X_train=x_train,\n y_train=y_train,\n X_test=x_test,\n y_test=y_test,\n model_code=model,\n # theta will be estimated using the defined estimator\n plot=True)\n\nresults = pd.DataFrame(results, columns=['Regressors', 'Parameters', 'ERR'])\nresults",
"_____no_output_____"
],
[
"yhat, results = s2.simulate(\n X_train=x_train,\n y_train=y_train,\n X_test=x_test,\n y_test=y_test,\n model_code=model,\n # theta will be estimated using the defined estimator\n plot=True,\n steps_ahead=8)\n\nresults = pd.DataFrame(results, columns=['Regressors', 'Parameters', 'ERR'])\nresults",
"_____no_output_____"
],
[
"yhat, results = s2.simulate(\n X_train=x_train,\n y_train=y_train,\n X_test=x_test,\n y_test=y_test,\n model_code=model,\n # theta will be estimated using the defined estimator\n plot=True,\n steps_ahead=8)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
c52abcb0d8753dcfde5009a25aeda673db86cf95
| 7,761 |
ipynb
|
Jupyter Notebook
|
colabs/sdf_to_bigquery.ipynb
|
isabella232/starthinker
|
d6bbecce5ef4a543fa2a19ce981c3381061e003a
|
[
"Apache-2.0"
] | null | null | null |
colabs/sdf_to_bigquery.ipynb
|
isabella232/starthinker
|
d6bbecce5ef4a543fa2a19ce981c3381061e003a
|
[
"Apache-2.0"
] | 1 |
2021-06-18T14:54:19.000Z
|
2021-06-18T14:54:19.000Z
|
colabs/sdf_to_bigquery.ipynb
|
isabella232/starthinker
|
d6bbecce5ef4a543fa2a19ce981c3381061e003a
|
[
"Apache-2.0"
] | null | null | null | 41.951351 | 322 | 0.549929 |
[
[
[
"#1. Install Dependencies\nFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.\n",
"_____no_output_____"
]
],
[
[
"!pip install git+https://github.com/google/starthinker\n",
"_____no_output_____"
]
],
[
[
"#2. Get Cloud Project ID\nTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.\n",
"_____no_output_____"
]
],
[
[
"CLOUD_PROJECT = 'PASTE PROJECT ID HERE'\n\nprint(\"Cloud Project Set To: %s\" % CLOUD_PROJECT)\n",
"_____no_output_____"
]
],
[
[
"#3. Get Client Credentials\nTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.\n",
"_____no_output_____"
]
],
[
[
"CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'\n\nprint(\"Client Credentials Set To: %s\" % CLIENT_CREDENTIALS)\n",
"_____no_output_____"
]
],
[
[
"#4. Enter DV360 SDF To BigQuery Parameters\nDownload SDF reports into a BigQuery table.\n 1. Select your filter types and the filter ideas.\n 1. Enter the <a href='https://developers.google.com/bid-manager/v1.1/sdf/download' target='_blank'>file types</a> using commas.\n 1. SDF_ will be prefixed to all tables and date appended to daily tables.\n 1. File types take the following format: FILE_TYPE_CAMPAIGN, FILE_TYPE_AD_GROUP,...\nModify the values below for your use case, can be done multiple times, then click play.\n",
"_____no_output_____"
]
],
[
[
"FIELDS = {\n 'auth_write': 'service', # Credentials used for writing data.\n 'partner_id': '', # The sdf file types.\n 'file_types': [], # The sdf file types.\n 'filter_type': '', # The filter type for the filter ids.\n 'filter_ids': [], # Comma separated list of filter ids for the request.\n 'dataset': '', # Dataset to be written to in BigQuery.\n 'version': '5', # The sdf version to be returned.\n 'table_suffix': '', # Optional: Suffix string to put at the end of the table name (Must contain alphanumeric or underscores)\n 'time_partitioned_table': False, # Is the end table a time partitioned\n 'create_single_day_table': False, # Would you like a separate table for each day? This will result in an extra table each day and the end table with the most up to date SDF.\n}\n\nprint(\"Parameters Set To: %s\" % FIELDS)\n",
"_____no_output_____"
]
],
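[
[
"# Illustrative only: a hypothetical filled-in FIELDS dict. Every value below is a\n# made-up placeholder (ids, dataset name), not real account data.\nFIELDS_EXAMPLE = {\n  'auth_write': 'service',\n  'partner_id': '1234567',  # hypothetical DV360 partner id\n  'file_types': ['FILE_TYPE_CAMPAIGN', 'FILE_TYPE_AD_GROUP'],\n  'filter_type': 'FILTER_TYPE_ADVERTISER_ID',\n  'filter_ids': [7654321],  # hypothetical advertiser id\n  'dataset': 'sdf_dataset',\n  'version': '5',\n  'table_suffix': '',\n  'time_partitioned_table': False,\n  'create_single_day_table': False,\n}\n\nprint(\"Example Parameters: %s\" % FIELDS_EXAMPLE)",
"_____no_output_____"
]
],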
[
[
"#5. Execute DV360 SDF To BigQuery\nThis does NOT need to be modified unless you are changing the recipe, click play.\n",
"_____no_output_____"
]
],
[
[
"from starthinker.util.configuration import Configuration\nfrom starthinker.util.configuration import commandline_parser\nfrom starthinker.util.configuration import execute\nfrom starthinker.util.recipe import json_set_fields\n\nUSER_CREDENTIALS = '/content/user.json'\n\nTASKS = [\n {\n 'dataset': {\n 'auth': 'user',\n 'dataset': {'field': {'name': 'dataset','kind': 'string','order': 6,'default': '','description': 'Dataset to be written to in BigQuery.'}}\n }\n },\n {\n 'sdf': {\n 'auth': 'user',\n 'version': {'field': {'name': 'version','kind': 'choice','order': 6,'default': '5','description': 'The sdf version to be returned.','choices': ['SDF_VERSION_5','SDF_VERSION_5_1']}},\n 'partner_id': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'The sdf file types.'}},\n 'file_types': {'field': {'name': 'file_types','kind': 'string_list','order': 2,'default': [],'description': 'The sdf file types.'}},\n 'filter_type': {'field': {'name': 'filter_type','kind': 'choice','order': 3,'default': '','description': 'The filter type for the filter ids.','choices': ['FILTER_TYPE_ADVERTISER_ID','FILTER_TYPE_CAMPAIGN_ID','FILTER_TYPE_INSERTION_ORDER_ID','FILTER_TYPE_MEDIA_PRODUCT_ID','FILTER_TYPE_LINE_ITEM_ID']}},\n 'read': {\n 'filter_ids': {\n 'single_cell': True,\n 'values': {'field': {'name': 'filter_ids','kind': 'integer_list','order': 4,'default': [],'description': 'Comma separated list of filter ids for the request.'}}\n }\n },\n 'time_partitioned_table': {'field': {'name': 'time_partitioned_table','kind': 'boolean','order': 7,'default': False,'description': 'Is the end table a time partitioned'}},\n 'create_single_day_table': {'field': {'name': 'create_single_day_table','kind': 'boolean','order': 8,'default': False,'description': 'Would you like a separate table for each day? This will result in an extra table each day and the end table with the most up to date SDF.'}},\n 'dataset': {'field': {'name': 'dataset','kind': 'string','order': 6,'default': '','description': 'Dataset to be written to in BigQuery.'}},\n 'table_suffix': {'field': {'name': 'table_suffix','kind': 'string','order': 6,'default': '','description': 'Optional: Suffix string to put at the end of the table name (Must contain alphanumeric or underscores)'}}\n }\n }\n]\n\njson_set_fields(TASKS, FIELDS)\n\nexecute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52ac31c0942291812b949fd887e8bd016cada65
| 65,724 |
ipynb
|
Jupyter Notebook
|
05-Matplotlib/2/Activities/01-Stu_PlotsReview/Solved/plot_drills.ipynb
|
madhavinamballa/datascience_Berkely
|
15f2987a0b012b344d209df70fdcf527f1c664b2
|
[
"ADSL"
] | null | null | null |
05-Matplotlib/2/Activities/01-Stu_PlotsReview/Solved/plot_drills.ipynb
|
madhavinamballa/datascience_Berkely
|
15f2987a0b012b344d209df70fdcf527f1c664b2
|
[
"ADSL"
] | null | null | null |
05-Matplotlib/2/Activities/01-Stu_PlotsReview/Solved/plot_drills.ipynb
|
madhavinamballa/datascience_Berkely
|
15f2987a0b012b344d209df70fdcf527f1c664b2
|
[
"ADSL"
] | null | null | null | 290.814159 | 20,940 | 0.922038 |
[
[
[
"# Import Dependencies\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# DATASET 1\ngyms = [\"Crunch\", \"Planet Fitness\", \"NY Sports Club\", \"Rickie's Gym\"]\nmembers = [49, 92, 84, 53]",
"_____no_output_____"
],
[
"x_axis = np.arange(0, len(gyms))\ntick_locations = []\nfor x in x_axis:\n tick_locations.append(x)\n\nplt.title(\"NYC Gym Popularity\")\nplt.xlabel(\"Gym Name\")\nplt.ylabel(\"Number of Members\")\n\nplt.xlim(-0.75, len(gyms)-.25)\nplt.ylim(0, max(members) + 5)\n\nplt.bar(x_axis, members, facecolor=\"red\", alpha=0.75, align=\"center\")\nplt.xticks(tick_locations, gyms)\nplt.show()",
"_____no_output_____"
],
[
"# DATASET 2\nx_lim = 2 * np.pi\nx_axis = np.arange(0, x_lim, 0.1)\nsin = np.sin(x_axis)",
"_____no_output_____"
],
[
"plt.title(\"Sin from 0 to 2$\\pi$\")\nplt.xlabel(\"Real Numbers from 0 to 2$\\pi$\")\nplt.ylabel(\"sin(x)\")\n\nplt.hlines(0, 0, x_lim, alpha=0.2)\nplt.xlim(0, x_lim)\nplt.ylim(-1.25, 1.25)\n\nplt.plot(x_axis, sin, marker=\"o\", color=\"red\", linewidth=1)\nplt.show()",
"_____no_output_____"
],
[
"# DATASET 3\ngyms = [\"Crunch\", \"Planet Fitness\", \"NY Sports Club\", \"Rickie's Gym\"]\nmembers = [49, 92, 84, 53]\ncolors = [\"yellowgreen\", \"red\", \"lightcoral\", \"lightskyblue\"]\nexplode = (0, 0.05, 0, 0)",
"_____no_output_____"
],
[
"plt.title(\"NYC Gym Popularity\")\nplt.pie(members, explode=explode, labels=gyms, colors=colors,\n autopct=\"%1.1f%%\", shadow=True, startangle=90)\nplt.axis(\"equal\")\nplt.show()",
"_____no_output_____"
],
[
"# DATASET 4\nx_axis = np.arange(0, 10, 0.1)\ntimes = []\nfor x in x_axis:\n times.append(x * x + np.random.randint(0, np.ceil(max(x_axis))))",
"_____no_output_____"
],
[
"plt.title(\"Running Time of FakeSort for Sample Input Sizes\")\nplt.xlabel(\"Length of Input Array\")\nplt.ylabel(\"Time to Sort (s)\")\n\nplt.scatter(x_axis, times, marker=\"o\", color=\"red\")\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52aea2b3612d43c6cb71c4d798847ecfa2c28df
| 3,414 |
ipynb
|
Jupyter Notebook
|
How Many Human Genes Code Proteins?.ipynb
|
bio2bel/bio2bel-notebooks
|
ebd96a2676583d2e3e7a4634d2e6fe65903f2557
|
[
"Apache-2.0"
] | 1 |
2021-06-30T17:39:23.000Z
|
2021-06-30T17:39:23.000Z
|
How Many Human Genes Code Proteins?.ipynb
|
bio2bel/bio2bel-notebooks
|
ebd96a2676583d2e3e7a4634d2e6fe65903f2557
|
[
"Apache-2.0"
] | 1 |
2017-07-20T10:58:31.000Z
|
2017-07-20T10:58:31.000Z
|
How Many Human Genes Code Proteins?.ipynb
|
bio2bel/bio2bel-notebooks
|
ebd96a2676583d2e3e7a4634d2e6fe65903f2557
|
[
"Apache-2.0"
] | 2 |
2018-11-22T11:39:21.000Z
|
2021-03-10T21:21:17.000Z
| 21.745223 | 68 | 0.466315 |
[
[
[
"import sys\nimport time\n\nimport bio2bel_hgnc\n\nfrom bio2bel_hgnc.constants import encodings\nfrom collections import Counter",
"_____no_output_____"
],
[
"print(sys.version)",
"3.6.5 (default, Apr 25 2018, 14:23:58) \n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]\n"
],
[
"print(time.asctime())",
"Fri May 11 15:55:53 2018\n"
],
[
"m = bio2bel_hgnc.Manager()",
"_____no_output_____"
],
[
"encodings",
"_____no_output_____"
],
[
"Counter(\n encoding\n for gene in m.list_human_genes()\n for encoding in encodings.get(gene.locus_type, [])\n) ",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52aeeea3a43ddf5e5ecadf521d1aa039d7b1962
| 10,286 |
ipynb
|
Jupyter Notebook
|
Classification using Gensim word vectors.ipynb
|
Nimori/Classification_Models-NLP
|
64d6b539d7bcf0505a3cc96573d93bbff5d50bc7
|
[
"MIT"
] | null | null | null |
Classification using Gensim word vectors.ipynb
|
Nimori/Classification_Models-NLP
|
64d6b539d7bcf0505a3cc96573d93bbff5d50bc7
|
[
"MIT"
] | null | null | null |
Classification using Gensim word vectors.ipynb
|
Nimori/Classification_Models-NLP
|
64d6b539d7bcf0505a3cc96573d93bbff5d50bc7
|
[
"MIT"
] | null | null | null | 29.557471 | 364 | 0.504278 |
[
[
[
"import nltk\nnltk.download('averaged_perceptron_tagger')\nimport pickle as pk\nimport pandas as pd\nfrom gensim.models import KeyedVectors",
"[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /home/nimori/nltk_data...\n[nltk_data] Package averaged_perceptron_tagger is already up-to-\n[nltk_data] date!\n/home/nimori/venv_envs/tf24/lib/python3.8/site-packages/gensim/similarities/__init__.py:15: UserWarning: The gensim.similarities.levenshtein submodule is disabled, because the optional Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning.\n warnings.warn(msg)\n"
],
[
"EMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin'\ngoogle_word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True)",
"_____no_output_____"
],
[
"categories={'food':['ration', 'shop', 'community', 'kitchen'], 'jobs':['training', 'professional', 'job'], \n 'money':['invest','save','bank','donation'],\n 'utilities':['internet', 'phone', 'electricity', 'water', 'landlord', 'hotel', 'shelter', 'lpg', 'waste'],\n 'medical':['hospitals', 'facilities', 'specialists', 'blood'],\n 'education':['school', 'college', 'tuitions', 'career', 'consultations'],\n 'medical':['hospitals', 'facilities', 'specialists', 'blood'], 'security':['police', 'theft', 'army', 'guard'],\n 'infrastructure':['road', 'bridge', 'sewage', 'traffic'],\n 'buy':['shopkeeper', 'land', 'apartment', 'furniture', 'electronics', 'rental'],\n 'sell':['shopkeeper', 'land', 'apartment', 'furniture', 'electronics', 'rental'],\n 'government':['schemes', 'corruption'], 'politics':['politics'],\n 'emergency':['covid', 'blood', 'robbery', 'crime'],\n 'travel':['transport', 'cab', 'public', 'auto', 'hotel', 'traffic', 'tourism', 'tolls'],\n 'services':['business', 'legal', 'accountant', 'carpenter', 'mechanic', 'electrician', 'plumber', 'house', 'help', 'labour'],\n 'other':['parking', 'women', 'human', 'rights', 'consumer', 'sanitation'], 'technology':['technology'], 'environment':['environment', 'animals']}",
"_____no_output_____"
],
[
"sent= pk.load(open('translated.pk', 'rb'))\n#sent would be a list in which each element is another list that comprises of the following 3 things:\n##1) an ID\n##2) original text\n##3) translated text (in the form of another list)\n \n ######Example shown below########\n###[1601345851000,'मुख्यमंत्री गहलोत बोले-प्रदेश में जब भी कोई नया जिला बनेगा तो सबसे पहले ब्यावर का होगा नाम',\n### ['Chief Minister Gehlot said - whenever a new district is formed in the state, Beawar will be named first.']]",
"_____no_output_____"
],
[
"text= []\nfor i in range(len(sent)):\n text.append(sent[i][2]) #to select just the translated text\n#text",
"_____no_output_____"
]
],
[
[
"## Data Preprocessing",
"_____no_output_____"
]
],
[
[
"from nltk.tokenize import sent_tokenize\nimport re\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\ndef get_unigram(text):\n '''\n preprocessing: tokenization; stemming\n '''\n tt=[]\n for j in text:\n j=re.sub(r\"\\W\", \" \",j, flags=re.I).strip()\n \n for i in j.split():\n# i=ps.stem(i.lower())\n i=i.lower()\n \n if i.isdigit() or len(i)<=2:\n continue\n \n if i in stopwords.words('english'):\n continue\n else:\n tt.append(i)\n return list(set(tt))",
"[nltk_data] Downloading package stopwords to /home/nimori/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n"
],
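[
"# Quick sanity check of get_unigram (illustrative only; the sentence below is made up)\nget_unigram(['Chief Minister said a new district will be formed in the state'])",
"_____no_output_____"
],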
[
"text2= []\nfor txt in text:\n sentence= ' '.join([str(elem) for elem in txt])\n tagged_sentence = nltk.tag.pos_tag(sentence.split())\n edited_sentence = [word for word,tag in tagged_sentence if tag != 'NNP' and tag != 'NNPS']\n sent_final= ' '.join(edited_sentence)\n text2.append([sent_final])\n #print(' '.join(edited_sentence))\n#text2",
"_____no_output_____"
]
],
[
[
"## Sentence Tokenization",
"_____no_output_____"
]
],
[
[
"tokens= []\nfor txt in text2:\n tokens.append(get_unigram(txt))\n#tokens",
"_____no_output_____"
]
],
[
[
"## Determining Cosine Similarity using Word Vectors",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## POC for first token",
"_____no_output_____"
]
],
[
[
"tk={}\nfor i in tokens[0]:\n\n vec1=google_word2vec[i]\n cat = {}\n for j in categories.keys():\n vec2=google_word2vec[j]\n sim = cosine_similarity(vec1.reshape(1, -1),vec2.reshape(1, -1))\n if sim>0.1:\n cat[j]=sim[0][0]\n categ=sorted(cat.items(),key=lambda x:x[1])[::-1]\n if categ!=[]:\n for k in categories[categ[0][0]]:\n print(k)\n tk[i]=categ[0][0]\n#tk",
"environment\nanimals\ncovid\nblood\nrobbery\ncrime\ntechnology\ntechnology\nschemes\ncorruption\nschool\ncollege\ntuitions\ncareer\nconsultations\nparking\nwomen\nhuman\nrights\nconsumer\nsanitation\n"
],
[
"cat= list(categories.keys())\n#cat[0]\n#tk",
"_____no_output_____"
],
[
"col= []\nfor cat in tk.values():\n vec1=google_word2vec[cat]\n for token in tk.keys():\n vec2=google_word2vec[token]\n row= {}\n for subcat in categories.get(cat):\n row[subcat]= cosine_similarity(vec1.reshape(1, -1),vec2.reshape(1, -1)).tolist()\n col.append(row)\ncol",
"_____no_output_____"
],
[
"li_0= []\nli= []\nfor dic in col:\n li_0.append(list(dic.values()))\n li.append(li_0[0][0][0])",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c52af0598831c50d17122b820b15327c9f7eff1a
| 18,787 |
ipynb
|
Jupyter Notebook
|
5_word2vec.ipynb
|
basnijholt/deep-learning-udacity
|
ae2b7f044b8be7e87b00f28a87b8a37872954787
|
[
"MIT"
] | null | null | null |
5_word2vec.ipynb
|
basnijholt/deep-learning-udacity
|
ae2b7f044b8be7e87b00f28a87b8a37872954787
|
[
"MIT"
] | null | null | null |
5_word2vec.ipynb
|
basnijholt/deep-learning-udacity
|
ae2b7f044b8be7e87b00f28a87b8a37872954787
|
[
"MIT"
] | null | null | null | 30.20418 | 349 | 0.562623 |
[
[
[
"Deep Learning\n=============\n\nAssignment 5\n------------\n\nThe goal of this assignment is to train a Word2Vec skip-gram model over [Text8](http://mattmahoney.net/dc/textdata) data.",
"_____no_output_____"
]
],
[
[
"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\n%matplotlib inline\nfrom __future__ import print_function\nimport collections\nimport math\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf\nimport zipfile\nfrom matplotlib import pylab\nfrom six.moves import range\nfrom six.moves.urllib.request import urlretrieve\nfrom sklearn.manifold import TSNE",
"_____no_output_____"
]
],
[
[
"Download the data from the source website if necessary.",
"_____no_output_____"
]
],
[
[
"url = 'http://mattmahoney.net/dc/'\n\ndef maybe_download(filename, expected_bytes):\n \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename\n\nfilename = maybe_download('text8.zip', 31344016)",
"_____no_output_____"
]
],
[
[
"Read the data into a string.",
"_____no_output_____"
]
],
[
[
"def read_data(filename):\n \"\"\"Extract the first file enclosed in a zip file as a list of words\"\"\"\n with zipfile.ZipFile(filename) as f:\n data = tf.compat.as_str(f.read(f.namelist()[0])).split()\n return data\n \nwords = read_data(filename)\nprint('Data size %d' % len(words))",
"_____no_output_____"
]
],
[
[
"Build the dictionary and replace rare words with UNK token.",
"_____no_output_____"
]
],
[
[
"vocabulary_size = 50000\n\ndef build_dataset(words):\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count = unk_count + 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) \n return data, count, dictionary, reverse_dictionary\n\ndata, count, dictionary, reverse_dictionary = build_dataset(words)\nprint('Most common words (+UNK)', count[:5])\nprint('Sample data', data[:10])\ndel words # Hint to reduce memory.",
"_____no_output_____"
]
],
[
[
"Function to generate a training batch for the skip-gram model.",
"_____no_output_____"
]
],
[
[
"data_index = 0\n\ndef generate_batch(batch_size, num_skips, skip_window):\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n target = skip_window # target label at the center of the buffer\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels\n\nprint('data:', [reverse_dictionary[di] for di in data[:8]])\n\nfor num_skips, skip_window in [(2, 1), (4, 2)]:\n data_index = 0\n batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)\n print('\\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))\n print(' batch:', [reverse_dictionary[bi] for bi in batch])\n print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])",
"_____no_output_____"
]
],
[
[
"Train a skip-gram model.",
"_____no_output_____"
]
],
[
[
"batch_size = 128\nembedding_size = 128 # Dimension of the embedding vector.\nskip_window = 1 # How many words to consider left and right.\nnum_skips = 2 # How many times to reuse an input to generate a label.\n# We pick a random validation set to sample nearest neighbors. here we limit the\n# validation samples to the words that have a low numeric ID, which by\n# construction are also the most frequent. \nvalid_size = 16 # Random set of words to evaluate similarity on.\nvalid_window = 100 # Only pick dev samples in the head of the distribution.\nvalid_examples = np.array(random.sample(range(valid_window), valid_size))\nnum_sampled = 64 # Number of negative examples to sample.\n\ngraph = tf.Graph()\n\nwith graph.as_default(), tf.device('/cpu:0'):\n\n # Input data.\n train_dataset = tf.placeholder(tf.int32, shape=[batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n \n # Variables.\n embeddings = tf.Variable(\n tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n softmax_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n \n # Model.\n # Look up embeddings for inputs.\n embed = tf.nn.embedding_lookup(embeddings, train_dataset)\n # Compute the softmax loss, using a sample of the negative labels each time.\n loss = tf.reduce_mean(\n tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,\n train_labels, num_sampled, vocabulary_size))\n\n # Optimizer.\n # Note: The optimizer will optimize the softmax_weights AND the embeddings.\n # This is because the embeddings are defined as a variable quantity and the\n # optimizer's `minimize` method will by default modify all variable quantities \n # that contribute to the tensor it is passed.\n # See docs on `tf.train.Optimizer.minimize()` for more details.\n optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)\n \n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))",
"_____no_output_____"
],
[
"num_steps = 100001\n\nwith tf.Session(graph=graph) as session:\n tf.initialize_all_variables().run()\n print('Initialized')\n average_loss = 0\n for step in range(num_steps):\n batch_data, batch_labels = generate_batch(\n batch_size, num_skips, skip_window)\n feed_dict = {train_dataset : batch_data, train_labels : batch_labels}\n _, l = session.run([optimizer, loss], feed_dict=feed_dict)\n average_loss += l\n if step % 2000 == 0:\n if step > 0:\n average_loss = average_loss / 2000\n # The average loss is an estimate of the loss over the last 2000 batches.\n print('Average loss at step %d: %f' % (step, average_loss))\n average_loss = 0\n # note that this is expensive (~20% slowdown if computed every 500 steps)\n if step % 10000 == 0:\n sim = similarity.eval()\n for i in range(valid_size):\n valid_word = reverse_dictionary[valid_examples[i]]\n top_k = 8 # number of nearest neighbors\n nearest = (-sim[i, :]).argsort()[1:top_k+1]\n log = 'Nearest to %s:' % valid_word\n for k in range(top_k):\n close_word = reverse_dictionary[nearest[k]]\n log = '%s %s,' % (log, close_word)\n print(log)\n final_embeddings = normalized_embeddings.eval()",
"_____no_output_____"
],
[
"num_points = 400\n\ntsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\ntwo_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])",
"_____no_output_____"
],
[
"def plot(embeddings, labels):\n assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'\n pylab.figure(figsize=(15,15)) # in inches\n for i, label in enumerate(labels):\n x, y = embeddings[i,:]\n pylab.scatter(x, y)\n pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',\n ha='right', va='bottom')\n pylab.show()\n\nwords = [reverse_dictionary[i] for i in range(1, num_points+1)]\nplot(two_d_embeddings, words)",
"_____no_output_____"
]
],
[
[
"---\n\nProblem\n-------\n\nAn alternative to skip-gram is another Word2Vec model called [CBOW](http://arxiv.org/abs/1301.3781) (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.\n\n---",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
c52b0632c8892b8f48fc93a249c2232e5aa3fa5c
| 15,016 |
ipynb
|
Jupyter Notebook
|
docker/notebook/Lab04/Lab 04 - Polynomial Fitting.ipynb
|
barrackHa/iml_huji_2021
|
92a8f068770f0cb97cdf49896364b83ed60ff783
|
[
"MIT"
] | null | null | null |
docker/notebook/Lab04/Lab 04 - Polynomial Fitting.ipynb
|
barrackHa/iml_huji_2021
|
92a8f068770f0cb97cdf49896364b83ed60ff783
|
[
"MIT"
] | null | null | null |
docker/notebook/Lab04/Lab 04 - Polynomial Fitting.ipynb
|
barrackHa/iml_huji_2021
|
92a8f068770f0cb97cdf49896364b83ed60ff783
|
[
"MIT"
] | null | null | null | 40.149733 | 377 | 0.575053 |
[
[
[
"# Lab 04 - Polynomial Fitting\nIn the previous lab we discussed linear regression and the OLS estimator for solving the minimization of the RSS. As we\nmentioned, regression problems are a very wide family of settings and algorithms which we use to try to estimate the relation between a set of explanatory variables and a **continuous** response (i.e. $\\mathcal{Y}\\in\\mathbb{R^p}$). In the following lab we will discuss one such setting called \"Polynomial Fitting\". \n\nSometimes, the data (and the relation between the explanatory variables and response) can be described by some polynomial\nof some degree. Here, we only focus on the case where it is a polynomial of a single variable. That is: \n$$ p_k\\left(x\\right)=\\sum_{i=0}^{k}\\alpha_i x_i^k\\quad\\alpha_1,\\ldots,\\alpha_k\\in\\mathbb{R} $$\n\nSo our hypothesis class is of the form:\n$$ \\mathcal{H}^k_{poly}=\\left\\{p_k|p_k\\left(x\\right)=\\sum_{i=0}^{k}\\alpha_i x_i^k\\quad\\alpha_1,\\ldots,\\alpha_k\\in\\mathbb{R}\\right\\} $$\n\nNotice that similar to linear regression, each hypothesis in the class is defined by a coefficients vector. Below are two\nexamples (simulated and real) for datasets where the relation between the explanatory variable and response is polynomial.",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append(\"../\")\nfrom utils import *",
"_____no_output_____"
],
[
"response = lambda x: x**4 - 2*x**3 - .5*x**2 + 1\n\nx = np.linspace(-1.2, 2, 20)\ny_ = response(x)\n\ndf = pd.read_csv(\"../data/Position_Salaries.csv\", skiprows=2, index_col=0)\nx2, y2 = df.index, df.Salary\n\nmake_subplots(1, 2, subplot_titles=(r\"$\\text{Simulated Data: }y=x^4-2x^3-0.5x^2+1$\", r\"$\\text{Positions Salary}$\"))\\\n .add_traces([go.Scatter(x=x, y=y_, mode=\"markers\", marker=dict(color=\"black\", opacity=.7), showlegend=False),\n go.Scatter(x=x2, y=y2, mode=\"markers\",marker=dict(color=\"black\", opacity=.7), showlegend=False)], \n rows=[1,1], cols=[1,2])\\\n .update_layout(title=r\"$\\text{(1) Datasets For Polynomial Fitting}$\", margin=dict(t=100)).show()\n\n",
"_____no_output_____"
]
],
[
[
"As we have discussed in class, solving a polynomial fitting problem can be done by first manipulating the input data,\nsuch that we represent each sample $x_i\\in\\mathbb{R}$ as a vector $\\mathbf{x}_i=\\left(x^0,x^1,\\ldots,x^k\\right)$. Then,\nwe treat the data as a design matrix $\\mathbf{X}\\in\\mathbb{R}^{m\\times k}$ of a linear regression problem.\n\nFor the simulated dataset above, which is of a polynomial of degree 4, the design matrix looks as follows: \n",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import PolynomialFeatures\nm, k, X = 5, 4, x.reshape(-1, 1)\npd.DataFrame(PolynomialFeatures(k).fit_transform(X[:m]), \n columns=[rf\"$x^{{0}}$\".format(i) for i in range(0, k+1)],\n index=[rf\"$x_{{0}}$\".format(i) for i in range(1, m+1)])",
"_____no_output_____"
]
],
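[
[
"# Side note (not from the original lab): numpy's Vandermonde helper builds the\n# same design matrix directly; shown here only as an equivalent construction.\nnp.vander(x[:m], N=k+1, increasing=True)  # columns x^0, x^1, ..., x^k",
"_____no_output_____"
]
],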
[
[
"## Fitting A Polynomial Of Different Degrees\n\nNext, let us fit polynomials of different degrees and different noise properties to study how it influences the learned model.\nWe begin with the noise-less case where we fit for different values of $k$. As we increase $k$ we manage to fit a model\nthat describes the data in a better way, reflected by the decrease in the MSE.",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import make_pipeline\n\nks = [2, 3, 4, 5]\nfig = make_subplots(1, 4, subplot_titles=list(ks))\nfor i, k in enumerate(ks):\n y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression()).fit(X, y_).predict(X)\n \n fig.add_traces([go.Scatter(x=x, y=y_, mode=\"markers\", name=\"Real Points\", marker=dict(color=\"black\", opacity=.7), showlegend=False),\n go.Scatter(x=x, y=y_hat, mode=\"markers\", name=\"Predicted Points\", marker=dict(color=\"blue\", opacity=.7), showlegend=False)], rows=1, cols=i+1)\n fig[\"layout\"][\"annotations\"][i][\"text\"] = rf\"$k={{0}}, MSE={{1}}$\".format(k, round(np.mean((y_-y_hat)**2), 2))\n\nfig.update_layout(title=r\"$\\text{(2) Simulated Data - Fitting Polynomials of Different Degrees}$\",\n margin=dict(t=60),\n yaxis_title=r\"$\\widehat{y}$\",\n height=300).show()",
"_____no_output_____"
]
],
[
[
"Once we find the right $k$ (which in our case is 4) we managed to fit a perfect model, after which, as we increase $k$, \nthe additional coefficients will be zero.",
"_____no_output_____"
]
],
[
[
"coefs = {}\nfor k in ks:\n fit = make_pipeline(PolynomialFeatures(k), LinearRegression()).fit(X, y_)\n coefs[rf\"$k={{{k}}}$\"] = [round(c,3) for c in fit.steps[1][1].coef_]\npd.DataFrame.from_dict(coefs, orient='index', columns=[rf\"$w_{{{i}}}$\" for i in range(max(ks)+1)])\n",
"_____no_output_____"
]
],
[
[
"## Fitting Polynomial Of Different Degrees - With Sample Noise\n\nStill fitting for different values of $k$, let us add some standard Gaussian noise (i.e. $\\mathcal{N}\\left(0,1\\right)$).\nThis time we observe two things:\n- Even for the correct $k=4$ model we are not able to achieve zero MSE.\n- As we increase $4<k\\rightarrow 7$ we manage to decrease the error more and more.",
"_____no_output_____"
]
],
[
[
"y = y_ + np.random.normal(size=len(y_))\n\nks = range(2, 8)\nfig = make_subplots(2, 3, subplot_titles=list(ks))\nfor i, k in enumerate(ks):\n r,c = i//3+1, i%3+1 \n \n y_hat = make_pipeline(PolynomialFeatures(k), LinearRegression()).fit(X, y).predict(X)\n fig.add_traces([go.Scatter(x=x, y=y_, mode=\"markers\", name=\"Real Points\", marker=dict(color=\"black\", opacity=.7), showlegend=False),\n go.Scatter(x=x, y=y, mode=\"markers\", name=\"Observed Points\", marker=dict(color=\"red\", opacity=.7), showlegend=False), \n go.Scatter(x=x, y=y_hat, mode=\"markers\", name=\"Predicted Points\", marker=dict(color=\"blue\", opacity=.7), showlegend=False)], rows=r, cols=c)\n fig[\"layout\"][\"annotations\"][i][\"text\"] = rf\"$k={{0}}, MSE={{1}}$\".format(k, round(np.mean((y-y_hat)**2), 2))\n\nfig.update_layout(title=r\"$\\text{(4) Simulated Data With Noise - Fitting Polynomials of Different Degrees}$\", margin=dict(t=80)).show()",
"_____no_output_____"
]
],
[
[
"How is it that we are able to fit \"better\" models for $k$s larger than the true one? As we increase $k$ we enable the model\nmore \"degrees of freedom\" to try and adapt itself to the observed data. The higher $k$ the more the learner will \"go after\nthe noise\" and miss the real signal of the data. In other words, what we have just observed is what is known as **overfitting**.\n\nLater in the course we will learn methods for detection and avoidance of overfitting.\n\n\n## Fitting Polynomial Over Different Sample Noise Levels\n\nNext, let us set $k=4$ (the true values) and study the outputted models when training over different noise levels. Though\nwe will only be changing the scale of the noise (i.e. the variance, $\\sigma^2$), changing other properties such as its\ndistribution is interesting too. As we would expect, as we increase the scale of the noise our error increases. We can\nobserve this also in a visual manner, where the fitted polynomial (in blue) less and less resembles the actual model (in black).",
"_____no_output_____"
]
],
[
[
"scales = range(6)\nfig = make_subplots(2, 3, subplot_titles=list(map(str, scales)))\nfor i, s in enumerate(scales):\n r,c = i//3+1, i%3+1\n \n y = y_ + np.random.normal(scale=s, size=len(y_))\n y_hat = make_pipeline(PolynomialFeatures(4), LinearRegression()).fit(X, y).predict(X)\n\n fig.add_traces([go.Scatter(x=x, y=y_, mode=\"markers\", name=\"Real Points\", marker=dict(color=\"black\", opacity=.7), showlegend=False),\n go.Scatter(x=x, y=y, mode=\"markers\", name=\"Observed Points\", marker=dict(color=\"red\", opacity=.7), showlegend=False),\n go.Scatter(x=x, y=y_hat, mode=\"markers\", name=\"Predicted Points\", marker=dict(color=\"blue\", opacity=.7), showlegend=False)], rows=r, cols=c)\n fig[\"layout\"][\"annotations\"][i][\"text\"] = rf\"$\\sigma^2={{0}}, MSE={{1}}$\".format(s, round(np.mean((y-y_hat)**2), 2))\n\nfig.update_layout(title=r\"$\\text{(5) Simulated Data - Different Noise Scales}$\", margin=dict(t=80)).show()",
"_____no_output_____"
]
],
[
[
"## The Influence Of $k$ And $\\sigma^2$ On Error\n\nLastly, let us check how the error is influenced by both $k$ and $\\sigma^2$. For each value of $k$ and $\\sigma^2$ we will\nadd noise drawn from $\\mathcal{N}\\left(0,\\sigma^2\\right)$ and then, based on the noisy data, let the learner select an\nhypothesis from $\\mathcal{H}_{poly}^k$. We repeat the process for each set of $\\left(k,\\sigma^2\\right)$ 10 times and report\nthe mean MSE value. Results are seen in heatmap below: \n",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import ParameterGrid\n\ndf = []\nfor setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):\n y = y_ + np.random.normal(scale=setting[\"s\"], size=len(y_))\n y_hat = make_pipeline(PolynomialFeatures(setting[\"k\"]), LinearRegression()).fit(X, y).predict(X)\n \n df.append([setting[\"k\"], setting[\"s\"], np.mean((y-y_hat)**2)])\n \ndf = pd.DataFrame.from_records(df, columns=[\"k\", \"sigma\",\"mse\"]).groupby([\"k\",\"sigma\"]).mean().reset_index()\n\ngo.Figure(go.Heatmap(x=df.k, y=df.sigma, z=df.mse, colorscale=\"amp\"),\n layout=go.Layout(title=r\"$\\text{(6) Average Train } MSE \\text{ As Function of } \\left(k,\\sigma^2\\right)$\", \n xaxis_title=r\"$k$ - Fitted Polynomial Degree\",\n yaxis_title=r\"$\\sigma^2$ - Noise Levels\")).show()\n",
"_____no_output_____"
]
],
[
[
"# Time To Think...\n\nIn the above figure, we observe the following trends:\n- As already seen before, for the noise-free data, once we reach the correct $k$ we achieve zero MSE.\n- Across all values of $k$, as we increase $\\sigma^2$ we get higher MSE values.\n- For all noise levels, we manage to reduce MSE values by increasing $k$.\n\nSo, by choosing a **richer** hypothesis class (i.e. larger and that can express more functions - polynomials of higher\ndegree) we are able to choose an hypothesis that fits the **observed** data **better**, regardless to how noisy the data is.\nTry and think how the above heatmap would look if instead of calculating the MSE over the training samples (i.e train error)\nwe would have calculated it over a **new** set of test samples drawn from the same distribution.\n\nUse the below code to create a test set. Change the code generating figure 6 such that the reported error is a test error. Do not forget to add the noise (that depends on $\\sigma^2$) to the test data. What has changed between what we observe for the train error to the test error? What happens for high/low values of $\\sigma^2$? What happens for high/low values of $k$?",
"_____no_output_____"
]
],
[
[
"testX = np.linspace(-1.2, 2, 40)[1::2].reshape(-1,1)\ntestY = response(testX)",
"_____no_output_____"
]
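,
[
"# One possible sketch for the exercise above. Assumptions: noise of scale sigma is\n# also added to the held-out targets, and everything else mirrors the figure 6 loop.\ntest_err = []\nfor setting in ParameterGrid(dict(k=range(10), s=np.linspace(0, 5, 10), repetition=range(10))):\n    y_noisy = y_ + np.random.normal(scale=setting[\"s\"], size=len(y_))\n    fit = make_pipeline(PolynomialFeatures(setting[\"k\"]), LinearRegression()).fit(X, y_noisy)\n    y_test_noisy = testY.ravel() + np.random.normal(scale=setting[\"s\"], size=len(testY))\n    test_err.append([setting[\"k\"], setting[\"s\"], np.mean((y_test_noisy - fit.predict(testX))**2)])\ntest_err = pd.DataFrame.from_records(test_err, columns=[\"k\", \"sigma\", \"mse\"]).groupby([\"k\", \"sigma\"]).mean().reset_index()",
"_____no_output_____"
]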
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52b11feb82a67cf004d80097331bee7e3799007
| 21,245 |
ipynb
|
Jupyter Notebook
|
example/segmentation/load-segmentation.ipynb
|
ebiggerr/malaya
|
be757c793895522f80b929fe82353d90762f7fff
|
[
"MIT"
] | 1 |
2021-03-19T22:42:34.000Z
|
2021-03-19T22:42:34.000Z
|
example/segmentation/load-segmentation.ipynb
|
ebiggerr/malaya
|
be757c793895522f80b929fe82353d90762f7fff
|
[
"MIT"
] | null | null | null |
example/segmentation/load-segmentation.ipynb
|
ebiggerr/malaya
|
be757c793895522f80b929fe82353d90762f7fff
|
[
"MIT"
] | null | null | null | 25.412679 | 370 | 0.520217 |
[
[
[
"# Segmentation",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n\nThis tutorial is available as an IPython notebook at [Malaya/example/segmentation](https://github.com/huseinzol05/Malaya/tree/master/example/segmentation).\n \n</div>",
"_____no_output_____"
],
[
"<div class=\"alert alert-info\">\n\nThis module trained on both standard and local (included social media) language structures, so it is save to use for both.\n \n</div>",
"_____no_output_____"
]
],
[
[
"%%time\n\nimport malaya",
"CPU times: user 4.46 s, sys: 690 ms, total: 5.15 s\nWall time: 5.24 s\n"
]
],
[
[
"Common problem for social media texts, there are missing spaces in the text, so text segmentation can help you,\n\n1. huseinsukamakan ayam,dia sgtrisaukan -> husein suka makan ayam, dia sgt risaukan.\n2. drmahathir sangat menekankan budaya budakzamansekarang -> dr mahathir sangat menekankan budaya budak zaman sekarang.\n3. ceritatunnajibrazak -> cerita tun najib razak.\n4. TunM sukakan -> Tun M sukakan.",
"_____no_output_____"
],
[
"Segmentation only,\n\n1. Solve spacing error.\n3. Not correcting any grammar.",
"_____no_output_____"
]
],
[
[
"string1 = 'huseinsukamakan ayam,dia sgtrisaukan'\nstring2 = 'drmahathir sangat menekankan budaya budakzamansekarang'\nstring3 = 'ceritatunnajibrazak'\nstring4 = 'TunM sukakan'\nstring_hard = 'IPOH-AhliDewanUndangan Negeri(ADUN) HuluKinta, MuhamadArafat Varisai Mahamadmenafikanmesejtularmendakwa beliau akan melompatparti menyokong UMNO membentuk kerajaannegeridiPerak.BeliauyangjugaKetua Penerangan Parti Keadilan Rakyat(PKR)Perak dalam satumesejringkaskepadaSinar Harian menjelaskan perkara itutidakbenarsama sekali.'\nstring_socialmedia = 'aqxsukalah apeyg tejadidekat mamattu'",
"_____no_output_____"
]
],
[
[
"### Viterbi algorithm\n\nCommonly people use Viterbi algorithm to solve this problem, we also added viterbi using ngram from bahasa papers and wikipedia.\n\n```python\ndef viterbi(max_split_length: int = 20, **kwargs):\n \"\"\"\n Load Segmenter class using viterbi algorithm.\n\n Parameters\n ----------\n max_split_length: int, (default=20)\n max length of words in a sentence to segment\n validate: bool, optional (default=True)\n if True, malaya will check model availability and download if not available.\n\n Returns\n -------\n result : malaya.segmentation.SEGMENTER class\n \"\"\"\n```",
"_____no_output_____"
]
],
[
[
"viterbi = malaya.segmentation.viterbi()",
"_____no_output_____"
]
],
[
[
"#### Segmentize\n\n```python\ndef segment(self, strings: List[str]):\n \"\"\"\n Segment strings.\n Example, \"sayasygkan negarasaya\" -> \"saya sygkan negara saya\"\n\n Parameters\n ----------\n strings : List[str]\n\n Returns\n -------\n result: List[str]\n \"\"\"\n```",
"_____no_output_____"
]
],
[
[
"%%time\n\nviterbi.segment([string1, string2, string3, string4])",
"CPU times: user 109 ms, sys: 1.04 ms, total: 110 ms\nWall time: 110 ms\n"
],
[
"%%time\n\nviterbi.segment([string_hard, string_socialmedia])",
"CPU times: user 8.45 ms, sys: 157 µs, total: 8.6 ms\nWall time: 8.69 ms\n"
]
],
[
[
"### List available Transformer model",
"_____no_output_____"
]
],
[
[
"malaya.segmentation.available_transformer()",
"_____no_output_____"
]
],
[
[
"### Load Transformer model\n\n```python\ndef transformer(model: str = 'small', quantized: bool = False, **kwargs):\n \"\"\"\n Load transformer encoder-decoder model to Segmentize.\n\n Parameters\n ----------\n model : str, optional (default='base')\n Model architecture supported. Allowed values:\n\n * ``'small'`` - Transformer SMALL parameters.\n * ``'base'`` - Transformer BASE parameters.\n\n quantized : bool, optional (default=False)\n if True, will load 8-bit quantized model. \n Quantized model not necessary faster, totally depends on the machine.\n\n Returns\n -------\n result: malaya.model.tf.Segmentation class\n \"\"\"\n```",
"_____no_output_____"
]
],
[
[
"model = malaya.segmentation.transformer(model = 'small')\nquantized_model = malaya.segmentation.transformer(model = 'small', quantized = True)",
"WARNING:root:Load quantized model will cause accuracy drop.\n"
],
[
"model_base = malaya.segmentation.transformer(model = 'base')\nquantized_model_base = malaya.segmentation.transformer(model = 'base', quantized = True)",
"WARNING:root:Load quantized model will cause accuracy drop.\n"
]
],
[
[
"#### Predict using greedy decoder\n\n```python\ndef greedy_decoder(self, strings: List[str]):\n \"\"\"\n Segment strings using greedy decoder.\n Example, \"sayasygkan negarasaya\" -> \"saya sygkan negara saya\"\n\n Parameters\n ----------\n strings : List[str]\n\n Returns\n -------\n result: List[str]\n \"\"\"\n```",
"_____no_output_____"
]
],
[
[
"%%time\n\nmodel.greedy_decoder([string1, string2, string3, string4])",
"CPU times: user 1.12 s, sys: 432 ms, total: 1.55 s\nWall time: 959 ms\n"
],
[
"%%time\n\nquantized_model.greedy_decoder([string1, string2, string3, string4])",
"CPU times: user 1.12 s, sys: 464 ms, total: 1.58 s\nWall time: 888 ms\n"
],
[
"%%time\n\nmodel_base.greedy_decoder([string1, string2, string3, string4])",
"CPU times: user 5.58 s, sys: 2.88 s, total: 8.46 s\nWall time: 4.08 s\n"
],
[
"%%time\n\nquantized_model_base.greedy_decoder([string1, string2, string3, string4])",
"CPU times: user 5.73 s, sys: 2.96 s, total: 8.69 s\nWall time: 3.81 s\n"
],
[
"%%time\n\nmodel.greedy_decoder([string_hard, string_socialmedia])",
"CPU times: user 2.52 s, sys: 499 ms, total: 3.02 s\nWall time: 768 ms\n"
],
[
"%%time\n\nquantized_model.greedy_decoder([string_hard, string_socialmedia])",
"CPU times: user 2.62 s, sys: 447 ms, total: 3.07 s\nWall time: 756 ms\n"
],
[
"%%time\n\nmodel_base.greedy_decoder([string_hard, string_socialmedia])",
"CPU times: user 17.8 s, sys: 10.2 s, total: 28 s\nWall time: 5.84 s\n"
],
[
"%%time\n\nquantized_model_base.greedy_decoder([string_hard, string_socialmedia])",
"CPU times: user 17.6 s, sys: 9.63 s, total: 27.3 s\nWall time: 5.85 s\n"
]
],
[
[
"**Problem with batching string, short string might repeating itself, so to solve this, you need to give a single string only**,",
"_____no_output_____"
]
],
[
[
"%%time\n\nquantized_model_base.greedy_decoder([string_socialmedia])",
"CPU times: user 1.37 s, sys: 532 ms, total: 1.9 s\nWall time: 652 ms\n"
],
[
"%%time\n\nquantized_model_base.greedy_decoder([string3])",
"CPU times: user 648 ms, sys: 228 ms, total: 876 ms\nWall time: 289 ms\n"
],
[
"%%time\n\nquantized_model_base.greedy_decoder([string4])",
"CPU times: user 495 ms, sys: 202 ms, total: 697 ms\nWall time: 225 ms\n"
]
],
[
[
"#### Predict using beam decoder\n\n```python\ndef beam_decoder(self, strings: List[str]):\n \"\"\"\n Segment strings using beam decoder, beam width size 3, alpha 0.5 .\n Example, \"sayasygkan negarasaya\" -> \"saya sygkan negara saya\"\n\n Parameters\n ----------\n strings : List[str]\n\n Returns\n -------\n result: List[str]\n \"\"\"\n```",
"_____no_output_____"
]
],
[
[
"%%time\n\nquantized_model.beam_decoder([string_socialmedia])",
"CPU times: user 1.38 s, sys: 1.87 s, total: 3.25 s\nWall time: 654 ms\n"
],
[
"%%time\n\nquantized_model_base.beam_decoder([string_socialmedia])",
"CPU times: user 6.77 s, sys: 3.71 s, total: 10.5 s\nWall time: 2.43 s\n"
]
],
[
[
"**We can expect beam decoder is much more slower than greedy decoder**.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
c52b16af95893694aa9243670e98bbcd155bff40
| 725,337 |
ipynb
|
Jupyter Notebook
|
model.ipynb
|
MrGeislinger/clone-driving-behavior
|
58951a5502fce31132e31d5f17a2c2bf06fea8d9
|
[
"MIT"
] | null | null | null |
model.ipynb
|
MrGeislinger/clone-driving-behavior
|
58951a5502fce31132e31d5f17a2c2bf06fea8d9
|
[
"MIT"
] | null | null | null |
model.ipynb
|
MrGeislinger/clone-driving-behavior
|
58951a5502fce31132e31d5f17a2c2bf06fea8d9
|
[
"MIT"
] | null | null | null | 360.147468 | 611,296 | 0.926703 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Load-the-Data\" data-toc-modified-id=\"Load-the-Data-1\"><span class=\"toc-item-num\">1 </span>Load the Data</a></span><ul class=\"toc-item\"><li><span><a href=\"#Download-data-(if-needed)\" data-toc-modified-id=\"Download-data-(if-needed)-1.1\"><span class=\"toc-item-num\">1.1 </span>Download data (if needed)</a></span></li><li><span><a href=\"#Read-in-log-file\" data-toc-modified-id=\"Read-in-log-file-1.2\"><span class=\"toc-item-num\">1.2 </span>Read in log file</a></span></li></ul></li><li><span><a href=\"#Data-Augmentation\" data-toc-modified-id=\"Data-Augmentation-2\"><span class=\"toc-item-num\">2 </span>Data Augmentation</a></span><ul class=\"toc-item\"><li><span><a href=\"#Flip-the-image\" data-toc-modified-id=\"Flip-the-image-2.1\"><span class=\"toc-item-num\">2.1 </span>Flip the image</a></span></li><li><span><a href=\"#Adjust-off-center-images\" data-toc-modified-id=\"Adjust-off-center-images-2.2\"><span class=\"toc-item-num\">2.2 </span>Adjust off-center images</a></span></li><li><span><a href=\"#Don't-use-some-small-steering-values\" data-toc-modified-id=\"Don't-use-some-small-steering-values-2.3\"><span class=\"toc-item-num\">2.3 </span>Don't use some small steering values</a></span></li><li><span><a href=\"#Translate-Images\" data-toc-modified-id=\"Translate-Images-2.4\"><span class=\"toc-item-num\">2.4 </span>Translate Images</a></span></li></ul></li><li><span><a href=\"#Creating-own-generator-to-read-data\" data-toc-modified-id=\"Creating-own-generator-to-read-data-3\"><span class=\"toc-item-num\">3 </span>Creating own generator to read data</a></span><ul class=\"toc-item\"><li><span><a href=\"#Read-in-images-by-path-from-log-file\" data-toc-modified-id=\"Read-in-images-by-path-from-log-file-3.1\"><span class=\"toc-item-num\">3.1 </span>Read in images by path from log file</a></span></li><li><span><a href=\"#Split-the-data-into-training-and-validation\" data-toc-modified-id=\"Split-the-data-into-training-and-validation-3.2\"><span class=\"toc-item-num\">3.2 </span>Split the data into training and validation</a></span><ul class=\"toc-item\"><li><span><a href=\"#Generators-for-both-train-and-validation-sets\" data-toc-modified-id=\"Generators-for-both-train-and-validation-sets-3.2.1\"><span class=\"toc-item-num\">3.2.1 </span>Generators for both train and validation sets</a></span></li></ul></li></ul></li><li><span><a href=\"#Model\" data-toc-modified-id=\"Model-4\"><span class=\"toc-item-num\">4 </span>Model</a></span><ul class=\"toc-item\"><li><span><a href=\"#Using-center-images-only\" data-toc-modified-id=\"Using-center-images-only-4.1\"><span class=\"toc-item-num\">4.1 </span>Using center images only</a></span><ul class=\"toc-item\"><li><span><a href=\"#Evaluation\" data-toc-modified-id=\"Evaluation-4.1.1\"><span class=\"toc-item-num\">4.1.1 </span>Evaluation</a></span></li></ul></li></ul></li></ul></div>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport cv2\nimport sklearn\nimport sklearn.model_selection\nimport tensorflow.keras as keras\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Load the Data",
"_____no_output_____"
],
[
"We'll load the log data & also load the images (found in the log file).",
"_____no_output_____"
],
[
"## Download data (if needed)",
"_____no_output_____"
]
],
[
[
"# Download data & unzip if it doesn't already exist\nimport os.path\nfrom io import BytesIO\nfrom urllib.request import urlopen\nfrom zipfile import ZipFile",
"_____no_output_____"
],
[
"def load_ext_file(data_zip_url, data_path='data/'):\n '''Download the zip file from URL and extract it to path (if specified).\n '''\n # Check if path already exits\n if not os.path.exists(data_path):\n with urlopen(data_zip_url) as zip_resp:\n with ZipFile(BytesIO(zip_resp.read())) as zfile:\n # Extract files into the data directory\n zfile.extractall(path=None)\n ",
"_____no_output_____"
],
[
"# Use particular release for data from a simulation run\nload_ext_file('https://github.com/MrGeislinger/clone-driving-behavior/releases/download/v0.14.0/data.zip')",
"_____no_output_____"
]
],
[
[
"## Read in log file",
"_____no_output_____"
]
],
[
[
"def create_img_meas_dfs(log_csv, data_dir=None, orig_dir=None, skiprows=None):\n '''Creates DataFrames for the image paths and measurements using CSV path.\n \n Returns tuple of two DataFrames.\n '''\n data_header = [\n 'image_center',\n 'image_left',\n 'image_right',\n 'steer_angle', # [-1,1]\n 'throttle', # boolen (if accelerating)\n 'break', # boolean (if breaking)\n 'speed' # mph\n ]\n\n df = pd.read_csv(\n log_csv,\n names=data_header,\n skiprows=skiprows\n )\n\n # Replace the original directory from dataset (if specified)\n if orig_dir and data_dir:\n for col in ['image_center','image_left','image_right']:\n df[col] = df[col].str.replace(orig_dir,data_dir)\n \n # Get specifics for each DF\n df_img_paths = df.iloc[:,:3]\n df_measurments = df.iloc[:,3:]\n \n return df_img_paths,df_measurments, df",
"_____no_output_____"
],
[
"df_imgs, df_meas, df_all = create_img_meas_dfs(\n log_csv='data/driving_log.csv', \n skiprows=1)\n\ndisplay(df_imgs.head())\n\nprint('Stats for measurements:')\ndisplay(df_meas.describe())",
"_____no_output_____"
],
[
"ax = df_meas.steer_angle.hist(bins=50)",
"_____no_output_____"
]
],
[
[
"# Data Augmentation",
"_____no_output_____"
],
[
"We can do some data augmentation to the images to have more variety in the training material.",
"_____no_output_____"
],
[
"## Flip the image",
"_____no_output_____"
],
[
"We can flip the image and the steering angle to better generalize.",
"_____no_output_____"
]
],
[
[
"def flip_image(image, target):\n '''Horizontally flip image and target value.\n '''\n image_flipped = np.fliplr(image)\n target_flipped = -target\n return image_flipped, target_flipped\n ",
"_____no_output_____"
]
],
[
[
"## Adjust off-center images",
"_____no_output_____"
]
],
[
[
"def adjust_offcenter_image(image, target, correction: float = 1e-2,\n img_camera_type: str = 'center'):\n '''\n img_camera_type: The type of camera image\n target: The target value (to be adjusted)\n '''\n # TODO: Adjust the target slightly for off-center image\n if img_camera_type == 'left':\n new_target = target + correction\n elif img_camera_type == 'right':\n new_target = target - correction\n # Don't make any correction if unknown or centere\n else: \n new_target = target\n return image, new_target",
"_____no_output_____"
]
],
[
[
"## Don't use some small steering values",
"_____no_output_____"
],
[
"Since the data is biased towards to low steering (driving straight), randomly drop some of the data to discourage simply driving straight.",
"_____no_output_____"
]
],
[
[
"def skip_low_steering(steer_value, steer_threshold=0.05, drop_percent=0.2):\n '''\n '''\n # Keep value if greater than threshold or by chance\n return (steer_value < steer_threshold['left']\n or steer_value > steer_threshold['right']\n or np.random.rand() > drop_percent)\n ",
"_____no_output_____"
]
],
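[
[
"# Quick illustrative check (not part of the original pipeline): a return value of\n# True means 'keep the sample'; near-zero steering values are kept only with\n# probability 1 - drop_percent.\nthresholds = {'left': -0.05, 'right': 0.05}\nkept = sum(skip_low_steering(0.0, thresholds, drop_percent=0.5) for _ in range(1000))\nprint(f'kept {kept} of 1000 near-zero samples')",
"_____no_output_____"
]
],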
[
[
"## Translate Images",
"_____no_output_____"
]
],
[
[
"def translate_image (image, target, correction=100, scale_factor=0.2):\n '''\n '''\n # Translate the image randomly about correction factor (then scaled)\n adjustment = int(correction * np.random.uniform(-1*scale_factor, scale_factor))\n # Get a new\n target_new = target + (adjustment / correction)\n n,m,c=image.shape\n bigsquare=np.zeros((n,m+100,c),image.dtype) \n if adjustment < 0:\n bigsquare[:,:m+adjustment]=image[:,-adjustment:m]\n else:\n bigsquare[:,adjustment:m+adjustment]=image\n return bigsquare[:n,:m,:], target_new\n",
"_____no_output_____"
]
],
[
[
"# Creating own generator to read data",
"_____no_output_____"
]
],
[
[
"def data_generator(X, y ,batch_size=64, center_only=True, data_dir='data/'):\n '''\n Generate a batch of training images and targets from a DataFrame.\n \n Inputs:\n X: array-like of paths to images\n y: array-like of targerts (in order of X)\n '''\n # Loop forever so the generator never terminates\n while True:\n # Shuffle the image paths and targets\n X_final = []\n y_final = []\n X_shuffled, y_shuffled = sklearn.utils.shuffle(X, y, n_samples=batch_size)\n # We grab the first element since there is 1 column\n for img_path,target in zip(X_shuffled,y_shuffled):\n fname = data_dir+img_path\n img = cv2.imread(fname[0])\n # Skip specifically for the center image (still checks left/right)\n steer_thresh = {'left':-0.01, 'right':0.005}\n drop_ratio = 0.3\n \n if skip_low_steering(target[0], steer_thresh, drop_ratio):\n X_final.append(img)\n y_final.append(target[0])\n # Use horizontally flipped images (new target)\n img_flipped, target_flipped = flip_image(img,target)\n X_final.append(img_flipped)\n y_final.append(target_flipped[0])\n # Check if we should use all images or just center\n if not center_only:\n # Translate the image randomly\n img_trans, target_trans = translate_image(img, target[0], scale_factor=0.5)\n X_final.append(img_trans)\n y_final.append(target_trans)\n \n # Order: center, left, right\n # Corret left image target & add image with target to array\n img_l = cv2.imread(fname[1])\n target_l = adjust_offcenter_image(img, target, 0.25, 'left')\n # Corret right image target & add image with target to array \n img_r = cv2.imread(fname[2])\n target_r = adjust_offcenter_image(img, target, 0.25, 'right')\n\n X_final.append(img_l)\n y_final.append(target_l[0])\n # Use horizontally flipped images (new target)\n img_flipped, target_flipped = flip_image(img_l,target_l)\n X_final.append(img_flipped)\n y_final.append(target_flipped[0])\n\n X_final.append(img_r)\n y_final.append(target_r[0])\n # Use horizontally flipped images (new target)\n img_flipped, target_flipped = flip_image(img_r,target_r)\n X_final.append(img_flipped)\n y_final.append(target_flipped[0])\n\n \n\n \n batch_x = np.array(X_final)\n batch_y = np.array(y_final)\n yield (batch_x, batch_y)",
"_____no_output_____"
]
],
[
[
"## Read in images by path from log file",
"_____no_output_____"
],
[
"We'll use the generator created above to read in images by a batch while training (and validating). But to ensure it works, let's test it out below.",
"_____no_output_____"
]
],
[
[
"# Note the multiple images will keep the proper shape\ntemp_generator = data_generator(\n df_all[['image_center','image_left','image_right']].values,\n df_all['steer_angle'].values.reshape(-1,1)\n)",
"_____no_output_____"
],
[
"imgs,targets = next(temp_generator)",
"_____no_output_____"
],
[
"# Test to see if image reading works\nimport matplotlib.pyplot as plt\n\nf = plt.figure(figsize=(25,25))\nax_left = f.add_subplot(1, 3, 1)\nax_center = f.add_subplot(1, 3, 2)\nax_right = f.add_subplot(1, 3, 3)\n\n\n# Print out three image examples\nax_center.imshow(imgs[0])\nax_left.imshow(imgs[1])\nax_right.imshow(imgs[2])",
"_____no_output_____"
]
],
[
[
"## Split the data into training and validation",
"_____no_output_____"
]
],
[
[
"# Adjust the target for off-center images to be used in training\nX = df_all[['image_center','image_left','image_right']].values\ny = df_all[['steer_angle']].values",
"_____no_output_____"
],
[
"X_train, X_valid, y_train, y_valid = sklearn.model_selection.train_test_split(\n X, y, test_size=0.2, random_state=27)",
"_____no_output_____"
]
],
[
[
"### Generators for both train and validation sets",
"_____no_output_____"
]
],
[
[
"# Using reasobable batch size so GPU can process enough images\ntrain_generator = data_generator(X_train, y_train, batch_size=256)\nvalid_generator = data_generator(X_valid, y_valid, batch_size=256)",
"_____no_output_____"
]
],
[
[
"# Model",
"_____no_output_____"
],
[
"## Using center images only",
"_____no_output_____"
],
[
"We'll try just using center images for training the model. If we simply put in the left and right images for the camera angle, we'd likely have issues with the model learning incorrect behavior. There are some techniques that could allow us to use these other images but for simplicity's sake we'll only use the center images for now.",
"_____no_output_____"
]
],
[
[
"# Creating a resuable default convolution\nfrom functools import partial\nDefaultConv2D = partial(keras.layers.Conv2D, kernel_initializer='he_normal',\n kernel_size=3, activation='elu', padding='SAME')",
"_____no_output_____"
],
[
"input_shape = (160,320,3)",
"_____no_output_____"
],
[
"# Based on https://developer.nvidia.com/blog/deep-learning-self-driving-cars/\nmodel_list = [\n # Normalize the images\n keras.layers.Lambda(lambda x: (x/255.0) - 0.5, input_shape=input_shape),\n DefaultConv2D(filters=24, kernel_size=5),\n keras.layers.MaxPooling2D(pool_size=2),\n DefaultConv2D(filters=36, kernel_size=5),\n keras.layers.MaxPooling2D(pool_size=2),\n DefaultConv2D(filters=36, kernel_size=5),\n keras.layers.MaxPooling2D(pool_size=2), \n keras.layers.Dropout(0.4), # Dropout to regularize\n DefaultConv2D(filters=48),\n keras.layers.MaxPooling2D(pool_size=2),\n DefaultConv2D(filters=64),\n keras.layers.MaxPooling2D(pool_size=2),\n DefaultConv2D(filters=64),\n keras.layers.MaxPooling2D(pool_size=2),\n keras.layers.Dropout(0.4), # Dropout to regularize\n # Fully connected network\n keras.layers.Flatten(),\n keras.layers.Dense(units=1024, activation='relu'),\n keras.layers.Dropout(0.2), # Dropout to regularize\n keras.layers.Dense(units=128, activation='relu'),\n keras.layers.Dropout(0.2), # Dropout to regularize\n keras.layers.Dense(units=64, activation='relu'),\n keras.layers.Dense(units=16, activation='relu'),\n keras.layers.Dense(units=1)\n]",
"_____no_output_____"
],
[
"# Adding in model to crop images first\nmodel_list = (\n [model_list[0]] +\n # Crop out \"unnecessary parts of the image\"\n [keras.layers.Cropping2D(cropping=((60,20), (0,0)))] +\n model_list[1:]\n)",
"_____no_output_____"
],
[
"model = keras.models.Sequential(model_list)",
"_____no_output_____"
],
[
"model.compile(\n loss='mse', \n optimizer='nadam'\n)",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlambda (Lambda) (None, 160, 320, 3) 0 \n_________________________________________________________________\ncropping2d (Cropping2D) (None, 80, 320, 3) 0 \n_________________________________________________________________\nconv2d (Conv2D) (None, 80, 320, 24) 1824 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 40, 160, 24) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 40, 160, 36) 21636 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 20, 80, 36) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 20, 80, 36) 32436 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 10, 40, 36) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 10, 40, 36) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 10, 40, 48) 15600 \n_________________________________________________________________\nmax_pooling2d_3 (MaxPooling2 (None, 5, 20, 48) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 5, 20, 64) 27712 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 2, 10, 64) 0 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 2, 10, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 1, 5, 64) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 1, 5, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 320) 0 \n_________________________________________________________________\ndense (Dense) (None, 1024) 328704 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 128) 131200 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndense_3 (Dense) (None, 16) 1040 \n_________________________________________________________________\ndense_4 (Dense) (None, 1) 17 \n=================================================================\nTotal params: 605,353\nTrainable params: 605,353\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"# Allow early stopping after not changing\nstop_after_no_change = keras.callbacks.EarlyStopping(\n monitor='val_loss',\n patience=15,\n restore_best_weights=True\n)",
"_____no_output_____"
],
[
"history = model.fit(\n x=train_generator,\n y=None, # Since using a generator\n batch_size=None, # Since using a generator\n epochs=256, # Large since we want to ensure we stop by early stopping\n steps_per_epoch=128, # Ideal: steps*batch_size = # of images\n validation_data=valid_generator,\n validation_steps=32,\n callbacks=[stop_after_no_change]\n)",
"Epoch 1/256\n128/128 [==============================] - 86s 673ms/step - loss: 0.0569 - val_loss: 0.0247\nEpoch 2/256\n128/128 [==============================] - 74s 579ms/step - loss: 0.0270 - val_loss: 0.0223\nEpoch 3/256\n128/128 [==============================] - 69s 540ms/step - loss: 0.0270 - val_loss: 0.0233\nEpoch 4/256\n128/128 [==============================] - 71s 551ms/step - loss: 0.0256 - val_loss: 0.0253\nEpoch 5/256\n128/128 [==============================] - 71s 554ms/step - loss: 0.0237 - val_loss: 0.0230\nEpoch 6/256\n128/128 [==============================] - 70s 546ms/step - loss: 0.0242 - val_loss: 0.0218\nEpoch 7/256\n128/128 [==============================] - 70s 544ms/step - loss: 0.0229 - val_loss: 0.0232\nEpoch 8/256\n128/128 [==============================] - 70s 550ms/step - loss: 0.0224 - val_loss: 0.0208\nEpoch 9/256\n128/128 [==============================] - 70s 549ms/step - loss: 0.0227 - val_loss: 0.0207\nEpoch 10/256\n128/128 [==============================] - 71s 556ms/step - loss: 0.0224 - val_loss: 0.0214\nEpoch 11/256\n128/128 [==============================] - 69s 540ms/step - loss: 0.0213 - val_loss: 0.0226\nEpoch 12/256\n128/128 [==============================] - 71s 552ms/step - loss: 0.0212 - val_loss: 0.0208\nEpoch 13/256\n128/128 [==============================] - 70s 549ms/step - loss: 0.0201 - val_loss: 0.0226\nEpoch 14/256\n128/128 [==============================] - 69s 543ms/step - loss: 0.0196 - val_loss: 0.0208\nEpoch 15/256\n128/128 [==============================] - 70s 546ms/step - loss: 0.0194 - val_loss: 0.0178\nEpoch 16/256\n128/128 [==============================] - 70s 544ms/step - loss: 0.0186 - val_loss: 0.0212\nEpoch 17/256\n128/128 [==============================] - 70s 548ms/step - loss: 0.0175 - val_loss: 0.0198\nEpoch 18/256\n128/128 [==============================] - 72s 562ms/step - loss: 0.0169 - val_loss: 0.0215\nEpoch 19/256\n128/128 [==============================] - 75s 589ms/step - loss: 0.0165 - val_loss: 0.0205\nEpoch 20/256\n128/128 [==============================] - 71s 558ms/step - loss: 0.0158 - val_loss: 0.0183\nEpoch 21/256\n128/128 [==============================] - 72s 564ms/step - loss: 0.0148 - val_loss: 0.0207\nEpoch 22/256\n128/128 [==============================] - 73s 571ms/step - loss: 0.0143 - val_loss: 0.0188\nEpoch 23/256\n128/128 [==============================] - 73s 569ms/step - loss: 0.0133 - val_loss: 0.0208\nEpoch 24/256\n128/128 [==============================] - 72s 564ms/step - loss: 0.0125 - val_loss: 0.0200\nEpoch 25/256\n128/128 [==============================] - 71s 552ms/step - loss: 0.0120 - val_loss: 0.0183\nEpoch 26/256\n128/128 [==============================] - 71s 556ms/step - loss: 0.0118 - val_loss: 0.0195\nEpoch 27/256\n128/128 [==============================] - 70s 550ms/step - loss: 0.0116 - val_loss: 0.0190\nEpoch 28/256\n128/128 [==============================] - 71s 556ms/step - loss: 0.0106 - val_loss: 0.0196\nEpoch 29/256\n128/128 [==============================] - 71s 556ms/step - loss: 0.0100 - val_loss: 0.0181\nEpoch 30/256\n128/128 [==============================] - 71s 552ms/step - loss: 0.0098 - val_loss: 0.0199\n"
]
],
[
[
"### Evaluation",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline \n\ndef eval_model(model, model_history, X, y, show=True):\n '''\n '''\n score = model.evaluate(X, y)\n print(f'Loss: {score:.2f}')\n\n if show:\n plt.plot(model_history.history['loss'], label='Loss (training data)')\n plt.plot(model_history.history['val_loss'], label='Loss (validation data)')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(loc='upper right')\n plt.show()",
"_____no_output_____"
]
],
[
[
"Let's checkout how the previous model turned while training.",
"_____no_output_____"
]
],
[
[
"test_generator = data_generator(X_valid, y_valid, batch_size=64)\nX_test, y_test = next(test_generator)\neval_model(model, history, X_test, y_test)",
"4/4 [==============================] - 0s 20ms/step - loss: 0.0148\nLoss: 0.01\n"
],
[
"# Ignore the first epoch since it's typically very high compared to the rest\nplt.plot(history.history['loss'], label='Loss (training data)')\nplt.plot(history.history['val_loss'], label='Loss (validation data)')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.ylim(\n top=np.median(history.history['val_loss'])+np.std(history.history['val_loss']), \n bottom=0.0\n)\nplt.legend(loc='upper right')\nplt.show()",
"_____no_output_____"
],
[
"model.save('model.h5')",
"_____no_output_____"
],
[
"# Clean up the data downloaded (not needed for output)\n!rm -rf data/",
"_____no_output_____"
],
[
"!rm -rf __MACOSX/",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
c52b27ba5e2038ea6d276661b6949c857bdcb2c0
| 8,161 |
ipynb
|
Jupyter Notebook
|
fast/[20191218]06_Class_1.ipynb
|
sung429/Python
|
66813680555c04a59c320b98726b5c42e1487f36
|
[
"CC-BY-3.0"
] | null | null | null |
fast/[20191218]06_Class_1.ipynb
|
sung429/Python
|
66813680555c04a59c320b98726b5c42e1487f36
|
[
"CC-BY-3.0"
] | null | null | null |
fast/[20191218]06_Class_1.ipynb
|
sung429/Python
|
66813680555c04a59c320b98726b5c42e1487f36
|
[
"CC-BY-3.0"
] | null | null | null | 17.741304 | 55 | 0.428869 |
[
[
[
"### class : 클래스\n- 변수와 함수를 묶어 놓은 개념\n- 사용방법\n - 변수와 함수가 들어있는 클래스를 선언\n - 클래스를 객체로 만들어서 클래스 안에 선언된 변수와 함수를 사용",
"_____no_output_____"
],
[
"#### 1. 기본 클래스의 사용",
"_____no_output_____"
]
],
[
[
"# 클래스의 선언\nclass Calculator:\n num1 = 1\n num2 = 2\n \n def plus(self):\n return self.num1 + self.num2\n \n def minus(self):\n return self.num1 + self.num2",
"_____no_output_____"
],
[
"# 클래스의 사용\ncalc = Calculator()\ncalc",
"_____no_output_____"
],
[
"calc.num1",
"_____no_output_____"
],
[
"calc.num2",
"_____no_output_____"
],
[
"#메소드 : 클래스 안에 있는 펑션",
"_____no_output_____"
],
[
"calc.plus()",
"_____no_output_____"
],
[
"calc.minus()",
"_____no_output_____"
],
[
"# self의 의미 : 객체 자신\ncalc2 = Calculator()",
"_____no_output_____"
],
[
"calc2.num1 = 10",
"_____no_output_____"
],
[
"calc2.plus()",
"_____no_output_____"
]
],
[
[
"### 2. 객체지향\n- 실제 세계를 코드에 반영해서 개발하는 방법\n- 여러명의 개발자가 코드를 효율적으로 작성해서 프로젝트를 완성시키기 위한 방법\n- 설계도 작성(class) -> 실제 물건(object)\n- 사용자 정의 데이터 타입",
"_____no_output_____"
]
],
[
[
"calc2.plus()",
"_____no_output_____"
],
[
"obj = 'python'\nobj.upper()",
"_____no_output_____"
],
[
"[data for data in dir(calc2) if data[:2] != '__']",
"_____no_output_____"
],
[
"dir(calc2)",
"_____no_output_____"
]
],
[
[
"### 3. 생성자\n- 클래스가 객체로 생성될때 실행되는 함수\n- 변수(재료)를 추가할때 사용됩니다.",
"_____no_output_____"
]
],
[
[
"class Calculator:\n # 생성자 함수 : __init__\n def __init__(self, num1, num2):\n self.num1 = num1\n self.num2 = num2\n \n def plus(self):\n return self.num1 + self.num2\n \n def minus(self):\n return self.num1 + self.num2",
"_____no_output_____"
],
[
"calc1 = Calculator(3, 4)",
"_____no_output_____"
],
[
"calc1.plus()",
"_____no_output_____"
],
[
"class Calculator:\n # 생성자 함수 : __init__\n def __init__(self, num1, num2=10):\n self.num1 = num1\n self.num2 = num2\n \n def plus(self):\n return self.num1 + self.num2\n \n def minus(self):\n return self.num1 + self.num2",
"_____no_output_____"
],
[
"calc2 = Calculator(3)\ncalc2.plus()",
"_____no_output_____"
],
[
"# join\nls = [\"python\", \"is\" , \"good\"]\nsep = \" \"\nsep.join(ls)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52b3975ac61fbc064b6e79c92d4a9c6f2fb26c9
| 125,650 |
ipynb
|
Jupyter Notebook
|
4_Transformers/BERT_Tensorflow_MeanMax.ipynb
|
SextonCJ/LegalDocClassifier
|
dfbd5b2f4d0ce3cc6a4fa5e47574e5baa65127aa
|
[
"MIT"
] | null | null | null |
4_Transformers/BERT_Tensorflow_MeanMax.ipynb
|
SextonCJ/LegalDocClassifier
|
dfbd5b2f4d0ce3cc6a4fa5e47574e5baa65127aa
|
[
"MIT"
] | null | null | null |
4_Transformers/BERT_Tensorflow_MeanMax.ipynb
|
SextonCJ/LegalDocClassifier
|
dfbd5b2f4d0ce3cc6a4fa5e47574e5baa65127aa
|
[
"MIT"
] | null | null | null | 234.859813 | 56,504 | 0.901632 |
[
[
[
"# Models trained on Mean and Max DistilBert Embeddings",
"_____no_output_____"
]
],
[
[
"#Imports\n\nimport keras\nimport pandas as pd\nimport numpy as np\nimport joblib\n\nimport tensorflow as tf\nfrom keras import Model\n\nfrom sklearn.metrics import precision_recall_fscore_support, classification_report\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n# Graphing\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns ",
"_____no_output_____"
],
[
"df_test = joblib.load('BERT_embeddings/lstm_test.sav')\ndf_train = joblib.load('BERT_embeddings/lstm_train.sav')\ndf_val = joblib.load('BERT_embeddings/lstm_val.sav')",
"_____no_output_____"
]
],
[
[
"# Mean Model on outputs\n",
"_____no_output_____"
]
],
[
[
"input_ids = np.array(df_train['mean_features'].tolist())\nlabels = np.array(df_train['label'].tolist())\ntest_input_ids = np.array(df_test['mean_features'].tolist())\ntest_labels = np.array(df_test['label'].tolist())\nval_input_ids = np.array(df_val['mean_features'].tolist())\nval_labels = np.array(df_val['label'].tolist())\n\n",
"_____no_output_____"
],
[
"# Re-train model with best results\ninp = tf.keras.layers.Input(shape=(768))\nX = tf.keras.layers.Dense(1000, activation='tanh')(inp)\nX = tf.keras.layers.Dropout(0.2)(X)\nX = tf.keras.layers.Dense(2, activation='softmax')(X)\nmodel = tf.keras.Model(inputs=inp, outputs = X)\n\nopt = keras.optimizers.Adam(learning_rate=2e-5)\n\n\nmodel.compile(optimizer=opt,\n loss='sparse_categorical_crossentropy',\n metrics=['acc'])\nmodel.summary()\n\nepochs = 10\nhistory = model.fit(x = input_ids, \n y = labels,\n epochs=epochs,\n batch_size = 8,\n validation_data=(val_input_ids, val_labels))",
"Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 768)] 0 \n_________________________________________________________________\ndense (Dense) (None, 1000) 769000 \n_________________________________________________________________\ndropout (Dropout) (None, 1000) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 2) 2002 \n=================================================================\nTotal params: 771,002\nTrainable params: 771,002\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/10\n81/81 [==============================] - 0s 4ms/step - loss: 0.6875 - acc: 0.5626 - val_loss: 0.6687 - val_acc: 0.6049\nEpoch 2/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6814 - acc: 0.5796 - val_loss: 0.6777 - val_acc: 0.5926\nEpoch 3/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6763 - acc: 0.5858 - val_loss: 0.6804 - val_acc: 0.5926\nEpoch 4/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6619 - acc: 0.6105 - val_loss: 0.6670 - val_acc: 0.6049\nEpoch 5/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6635 - acc: 0.6198 - val_loss: 0.6698 - val_acc: 0.5679\nEpoch 6/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6618 - acc: 0.5966 - val_loss: 0.6649 - val_acc: 0.5926\nEpoch 7/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6588 - acc: 0.6275 - val_loss: 0.6921 - val_acc: 0.5556\nEpoch 8/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6501 - acc: 0.6383 - val_loss: 0.6950 - val_acc: 0.5679\nEpoch 9/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6441 - acc: 0.6337 - val_loss: 0.6761 - val_acc: 0.6049\nEpoch 10/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6561 - acc: 0.6167 - val_loss: 0.6699 - val_acc: 0.6049\n"
],
[
"# evaluate \nmodel.evaluate(test_input_ids, test_labels)",
"3/3 [==============================] - 0s 2ms/step - loss: 0.6073 - acc: 0.6173\n"
],
[
"y_log = model.predict(test_input_ids)\ny_pred = np.argmax(y_log, axis=1)",
"_____no_output_____"
],
[
"# Show classification report\nfrom sklearn.metrics import precision_recall_fscore_support, classification_report\nprint(\"Mean Pooling split chunks\")\nprint(classification_report(df_test['label'], y_pred))",
"Mean Pooling split chunks\n precision recall f1-score support\n\n 0 0.65 0.35 0.46 37\n 1 0.61 0.84 0.70 44\n\n accuracy 0.62 81\n macro avg 0.63 0.60 0.58 81\nweighted avg 0.63 0.62 0.59 81\n\n"
]
],
[
[
"# MAX Model on fixed outputs\n",
"_____no_output_____"
]
],
[
[
"input_ids = np.array(df_train['max_features'].tolist())\nlabels = np.array(df_train['label'].tolist())\ntest_input_ids = np.array(df_test['max_features'].tolist())\ntest_labels = np.array(df_test['label'].tolist())\nval_input_ids = np.array(df_val['max_features'].tolist())\nval_labels = np.array(df_val['label'].tolist())",
"_____no_output_____"
],
[
"# Re-train model with best results\ninp = tf.keras.layers.Input(shape=(768))\nX = tf.keras.layers.Dense(1000, activation='tanh')(inp)\nX = tf.keras.layers.Dropout(0.2)(X)\nX = tf.keras.layers.Dense(2, activation='softmax')(X)\nmodel = tf.keras.Model(inputs=inp, outputs = X)\n\nopt = keras.optimizers.Adam(learning_rate=2e-5)\n\n\nmodel.compile(optimizer=opt,\n loss='sparse_categorical_crossentropy',\n metrics=['acc'])\nmodel.summary()\n\nepochs = 10\nhistory = model.fit(x = input_ids, \n y = labels,\n epochs=epochs,\n batch_size = 8,\n validation_data=(val_input_ids, val_labels))",
"Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_2 (InputLayer) [(None, 768)] 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 1000) 769000 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 1000) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 2) 2002 \n=================================================================\nTotal params: 771,002\nTrainable params: 771,002\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.7085 - acc: 0.5410 - val_loss: 0.6661 - val_acc: 0.5926\nEpoch 2/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6844 - acc: 0.5842 - val_loss: 0.6664 - val_acc: 0.6049\nEpoch 3/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6756 - acc: 0.5935 - val_loss: 0.6619 - val_acc: 0.5802\nEpoch 4/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6634 - acc: 0.6275 - val_loss: 0.6654 - val_acc: 0.6420\nEpoch 5/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6647 - acc: 0.6012 - val_loss: 0.6617 - val_acc: 0.6049\nEpoch 6/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6488 - acc: 0.6105 - val_loss: 0.6766 - val_acc: 0.5926\nEpoch 7/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6603 - acc: 0.6213 - val_loss: 0.6899 - val_acc: 0.5802\nEpoch 8/10\n81/81 [==============================] - 0s 3ms/step - loss: 0.6500 - acc: 0.6291 - val_loss: 0.6931 - val_acc: 0.5802\nEpoch 9/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6507 - acc: 0.6306 - val_loss: 0.6664 - val_acc: 0.6049\nEpoch 10/10\n81/81 [==============================] - 0s 2ms/step - loss: 0.6430 - acc: 0.6182 - val_loss: 0.6837 - val_acc: 0.5926\n"
],
[
"model.evaluate(test_input_ids, test_labels)",
"3/3 [==============================] - 0s 1ms/step - loss: 0.6047 - acc: 0.7160\n"
],
[
"y_log = model.predict(test_input_ids)\ny_pred = np.argmax(y_log, axis=1)",
"_____no_output_____"
],
[
"# Show classification report\nfrom sklearn.metrics import precision_recall_fscore_support, classification_report\nprint(\"Max Pooled split chunks\")\nprint(classification_report(df_test['label'], y_pred))",
"Max Pooled split chunks\n precision recall f1-score support\n\n 0 0.65 0.81 0.72 37\n 1 0.80 0.64 0.71 44\n\n accuracy 0.72 81\n macro avg 0.73 0.72 0.72 81\nweighted avg 0.73 0.72 0.72 81\n\n"
],
[
"sns.set(style='darkgrid')\nsns.set(font_scale=1.5)\nplt.rcParams[\"figure.figsize\"] = (12,6)\nplt.plot(history.history['loss'], 'b-o', label=\"Training\")\nplt.plot(history.history['val_loss'], 'g-o', label=\"Validation\")\nplt.title(\"Training & Validation Loss\")\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.xticks([1, 2, 3, 4, 5, 6, 7, 8, 9,10])\n\nplt.show()",
"_____no_output_____"
],
[
"sns.set(style='darkgrid')\nsns.set(font_scale=1.5)\nplt.rcParams[\"figure.figsize\"] = (10,6)\nplt.plot(history.history['acc'], 'r-o', label=\"Train Accuracy\")\nplt.plot(history.history['val_acc'], 'y-o', label=\"Val Accuracy\")\nplt.title(\"Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.xticks([1, 2, 3, 4, 5, 6, 7, 8, 9,10])\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52b42dc630b44c44b451b7581d358638fb43e97
| 534,379 |
ipynb
|
Jupyter Notebook
|
Notebooks/Visualization.ipynb
|
ericzhou571/Co2Storage
|
738f5cd5111aef161528ae5ad1a5bd80de501c40
|
[
"MIT"
] | null | null | null |
Notebooks/Visualization.ipynb
|
ericzhou571/Co2Storage
|
738f5cd5111aef161528ae5ad1a5bd80de501c40
|
[
"MIT"
] | null | null | null |
Notebooks/Visualization.ipynb
|
ericzhou571/Co2Storage
|
738f5cd5111aef161528ae5ad1a5bd80de501c40
|
[
"MIT"
] | null | null | null | 711.556591 | 428,168 | 0.812395 |
[
[
[
"import geopandas as gpd\nimport pandas_bokeh\nimport re\nimport pandas as pd\npandas_bokeh.output_notebook()\nfrom script.function3dto2d import remove_third_dimension\n\ncapacity_list = ['conservative estimate Mt','neutral estimate Mt','optimistic estimate Mt']\nmove = ['COUNTRY','COUNTRYCOD','ID','geometry']\n\n#load data\nstorage_unit = gpd.read_file('data/storage_unit_map_lite.geojson')\nstorage_unit.geometry = storage_unit.geometry.apply(remove_third_dimension)\nstorage_unit.to_file('geodata verfication/storage_unit.geojson', driver = 'GeoJSON')\n\ntrap_unit = gpd.read_file('data/trap_map_lite.geojson')\ntrap_unit.geometry = trap_unit.geometry.apply(remove_third_dimension)\n\ncomplete_map = gpd.read_file('data/complete_map_37.geojson')\ncomplete_map.geometry = complete_map.geometry.apply(remove_third_dimension)\ncomplete_map.geometry = complete_map.geometry.buffer(0)",
"_____no_output_____"
]
],
[
[
"# Prepare Data for Visualization",
"_____no_output_____"
]
],
[
[
"# prepare data of capacity detail\nstorage_unit_summary = storage_unit.groupby('COUNTRYCOD').sum()[[i for i in storage_unit.columns if i not in move]]\nstorage_unit_summary.columns = [x+'storage_unit' for x in storage_unit_summary.columns]\n\ntrap_unit_summary = trap_unit.groupby('COUNTRYCOD').sum()[[i for i in trap_unit.columns if i not in move]]\ntrap_unit_summary.drop(capacity_list,axis=1,inplace=True)\n\nstorage_type_detail = storage_unit_summary.merge(trap_unit_summary, \n left_index= True, \n right_index= True, \n how= 'outer')\n\ndef view_storage_type_detail(estimation_type = 'conservative'):\n # columns selection\n tmp_df = storage_type_detail[[x for x in storage_type_detail.columns if re.search(estimation_type,x)]].copy()\n tmp_df.columns = ['Storage_unit (normal density, geological formation to store CO2)',\n 'Aquifer (high density storage unit)',\n 'Oil (high density storage unit)',\n 'Gas (high density storage unit)']\n for i in tmp_df.columns:\n tmp_df[i] = tmp_df[i]/1e3\n tmp_df.plot_bokeh.bar(stacked = True,figsize=(900, 500), xlabel = 'country', ylabel = 'capacity Gt',title = estimation_type + ' per country (unit: (Gt) gigaton)')",
"_____no_output_____"
],
[
"# total capacity\nsummary = complete_map.groupby('COUNTRYCOD').sum()[capacity_list]\nsummary.columns = [x.replace('Mt','') for x in summary.columns]\ncapacity_list_ = list(summary.columns)\nsummary = summary.reset_index()\nsummary = pd.melt(summary, id_vars= ['COUNTRYCOD'],value_vars = capacity_list_)\nsummary.value = summary.value/1e3 #Mt to Gt\n\n\n# print\nprint('+'*20)\nprint('total capacity of whole Europe unit: Gigaton')\nprint('+'*20)\nprint(summary.groupby('variable').sum()['value'])\n\n#---------------------------------------------------\n\n# offshore capacity\noffshore = gpd.read_file('data/offshore_shapes.geojson')\nonly_offshore = gpd.clip(complete_map, offshore)\nsummary_position_holder = summary.copy()\nsummary_position_holder.value = 0\n\nsummary_2 = only_offshore.groupby('COUNTRYCOD').sum()[capacity_list]\nsummary_2.columns = [x.replace('Mt','') for x in summary_2.columns]\ncapacity_list_ = list(summary_2.columns)\nsummary_2 = summary_2.reset_index()\nsummary_2 = pd.melt(summary_2, id_vars= ['COUNTRYCOD'],value_vars = capacity_list_)\nsummary_2.value = summary_2.value/1e3 #Mt to Gt\nsummary_2 = pd.concat([summary_2, summary_position_holder]).groupby(['COUNTRYCOD','variable']).sum().reset_index()\n\n\nprint('\\n\\n')\nprint('+'*20)\nprint('total offshore capacity of whole Europe unit: Gigaton')\nprint('+'*20)\nprint(summary_2.groupby('variable').sum()['value'])",
"++++++++++++++++++++\ntotal capacity of whole Europe unit: Gigaton\n++++++++++++++++++++\nvariable\nconservative estimate 145.623029\nneutral estimate 1103.171495\noptimistic estimate 7338.130167\nName: value, dtype: float64\n\n\n\n++++++++++++++++++++\ntotal offshore capacity of whole Europe unit: Gigaton\n++++++++++++++++++++\nvariable\nconservative estimate 108.734962\nneutral estimate 973.954263\noptimistic estimate 7147.724294\nName: value, dtype: float64\n"
]
],
[
[
"# Capacity of all European countries split by storage type",
"_____no_output_____"
]
],
[
[
"view_storage_type_detail('neutral')",
"_____no_output_____"
]
],
[
[
"**external GB dataset's capacity estimation (only for United Kingdom):** \n- total 61.768 Gt | storage_unit + aquifer: 52.749 Gt | oil: 2.678 Gt | gas: 5.997 Gt",
"_____no_output_____"
],
[
"# Total offshore capacity of each country under different estimations\n## vs\n# Total capacity of each country under different estimations",
"_____no_output_____"
]
],
[
[
"import seaborn as sns\nfrom matplotlib import pyplot as plt\nf,axes = plt.subplots(2,1,figsize = (20,12))\nsub1 = sns.barplot(x = 'COUNTRYCOD', y='value', hue = 'variable',data= summary_2, ax = axes[0] )\nsub1.set_yscale(\"log\")\nsub1.title.set_text('Offshore Capacity')\nsub1.set_ylabel('Capacity (Gt) log scale',fontsize = 20)\n#sub1.set_xlabel('Country',fontsize = 20)\n\n\nsub2 = sns.barplot(x = 'COUNTRYCOD', y='value', hue = 'variable',data= summary, ax = axes[1] )\nsub2.set_yscale(\"log\")\nsub2.title.set_text('Total Capacity')\nsub2.set_ylabel('Capacity (Gt) log scale',fontsize = 20)\n#sub2.set_xlabel('Country',fontsize = 20)\n\nplt.show()\n# y in log scale change the name of y axis",
"_____no_output_____"
]
],
[
[
"# Storage Map",
"_____no_output_____"
]
],
[
[
"pandas_bokeh.output_notebook()\n#pandas_bokeh.output_file(\"Interactive storage unit.html\")\ncomplete_map = complete_map.sort_values('conservative estimate Mt',ascending= True)\ncomplete_map.plot_bokeh(\n figsize=(900, 600),\n simplify_shapes=5000,\n dropdown=capacity_list,\n colormap=\"Viridis\",\n hovertool_columns=capacity_list+['ID'],\n colormap_range = (0,100)\n )",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52b4c2fe99c92b1dad5575a752e6bf0c1b06318
| 12,595 |
ipynb
|
Jupyter Notebook
|
Lab07_107065518.ipynb
|
jackraken/DeepLearningLabs
|
8bb8b62429b002d45872c25d5d20b66ac9a20f0c
|
[
"MIT"
] | 1 |
2018-10-25T03:37:01.000Z
|
2018-10-25T03:37:01.000Z
|
Lab07_107065518.ipynb
|
jackraken/DeepLearningLabs
|
8bb8b62429b002d45872c25d5d20b66ac9a20f0c
|
[
"MIT"
] | null | null | null |
Lab07_107065518.ipynb
|
jackraken/DeepLearningLabs
|
8bb8b62429b002d45872c25d5d20b66ac9a20f0c
|
[
"MIT"
] | null | null | null | 34.69697 | 125 | 0.374831 |
[
[
[
"import pandas as pd\nfrom sklearn.model_selection import train_test_split\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/'\n 'mushroom/agaricus-lepiota.data', header=None, engine='python')\ncolumn_name = ['classes','cap-shape', 'cap-surface','cap-color','bruises?','odor',\n 'gill-attachment','gill-spacing','gill-size','gill-color',\n 'stalk-shape','stalk-root','stalk-surface-above-ring',\n 'stalk-surface-below-ring','stalk-color-above-ring',\n 'stalk-color-below-ring','veil-type','veil-color','ring-number',\n 'ring-type','spore-print-color','population','habitat']\ndf.columns = column_name\ndf.head()",
"_____no_output_____"
],
[
"import numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n# Todo\n# deal missing value denoted by '?'\n# encode label first\nlabel_le = LabelEncoder()\ndf['classes'] = label_le.fit_transform(df['classes'].values)\n\ncatego_le = LabelEncoder()\n\nnum_values = []\nfor i in column_name[1:]:\n df[i] = catego_le.fit_transform(df[i].values)\n classes_list = catego_le.classes_.tolist()\n \n # store the total number of values\n num_values.append(len(classes_list))\n \n # replace '?' with 'NaN'\n if '?' in classes_list:\n idx = classes_list.index('?')\n df[i] = df[i].replace(idx, np.nan)\n",
"_____no_output_____"
],
[
"from sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\n\nX = df.drop('classes', axis=1).values\ny = df['classes'].values\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n\n\ncatego_features_idx = []\nfor fea in column_name[1:]:\n catego_features_idx.append(df.columns.tolist().index(fea))\n\n\n# define pipeline with an arbitrary number of transformer in a tuple array\npipe_knn = Pipeline([('imr', Imputer(missing_values='NaN', strategy='most_frequent', axis=0)),\n ('ohe', OneHotEncoder(n_values=num_values, sparse=False)),\n ('scl', StandardScaler()),\n ('clf', KNeighborsClassifier(n_neighbors=10, p=2, metric='minkowski'))])\n\npipe_svm = Pipeline([('imr', Imputer(missing_values='NaN', strategy='most_frequent', axis=0)),\n ('ohe', OneHotEncoder(n_values=num_values, sparse=False)),\n ('scl', StandardScaler()),\n ('clf', SVC(kernel='rbf', random_state=0, gamma=0.001, C=100.0))])\n\n\n# use the pipeline model to train\npipe_knn.fit(X_train, y_train)\ny_pred = pipe_knn.predict(X_test)\nprint('[KNN]')\nprint('Misclassified samples: %d' % (y_test != y_pred).sum())\nprint('Accuracy: %.4f' % accuracy_score(y_test, y_pred))\n\npipe_svm.fit(X_train, y_train)\ny_pred = pipe_svm.predict(X_test)\nprint('\\n[SVC]')\nprint('Misclassified samples: %d' % (y_test != y_pred).sum())\nprint('Accuracy: %.4f' % accuracy_score(y_test, y_pred))",
"[KNN]\nMisclassified samples: 0\nAccuracy: 1.0000\n\n[SVC]\nMisclassified samples: 0\nAccuracy: 1.0000\n"
]
],
[
[
"### Report ###\n\nIn this homework, I tried two different models using the sklearn pipeline. \nFor the preprocessing part, I used imputer to impute the missing data, and one-hot encoding for category features. \nAnd then I use KNN classifier for the first model, SVC for the second model. \nThey both perform quite well, so I thought there's no need to do additional feature selection. ",
"_____no_output_____"
]
]
] |
[
"code",
"markdown"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
c52b4dee9052b67abb88b3420b7d96e6c90e1684
| 8,702 |
ipynb
|
Jupyter Notebook
|
chapters/00_inductive-python/02_python-as-a-calculator.ipynb
|
lperezmo/pythonic-science
|
9fa448d0c981c92cae2be2bc50cf1dbe92e063df
|
[
"Unlicense"
] | 5 |
2017-04-03T20:30:54.000Z
|
2019-04-01T16:46:27.000Z
|
chapters/00_inductive-python/02_python-as-a-calculator.ipynb
|
lperezmo/pythonic-science
|
9fa448d0c981c92cae2be2bc50cf1dbe92e063df
|
[
"Unlicense"
] | null | null | null |
chapters/00_inductive-python/02_python-as-a-calculator.ipynb
|
lperezmo/pythonic-science
|
9fa448d0c981c92cae2be2bc50cf1dbe92e063df
|
[
"Unlicense"
] | 11 |
2016-12-15T21:30:59.000Z
|
2020-04-02T01:07:47.000Z
| 18.397463 | 181 | 0.493909 |
[
[
[
"# python behaves like a calculator",
"_____no_output_____"
]
],
[
[
"8*8",
"_____no_output_____"
]
],
[
[
"#### Predict the following output",
"_____no_output_____"
]
],
[
[
"print((5+5)/25)\nprint(5 + 5/25)",
"_____no_output_____"
]
],
[
[
"python does order of operations, etc. just like a calculator",
"_____no_output_____"
],
[
"#### Most of the notation is intuitive. \n\nWrite out the following in a cell. What value to you get?\n\n$$ (5 \\times 5 + \\frac{4}{2} - 8)^{2}$$",
"_____no_output_____"
],
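[
"One possible answer, as a sketch (any equivalent expression works):\n\n```python\n# should evaluate to 361.0\n(5*5 + 4/2 - 8)**2\n```",
"_____no_output_____"
],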
[
"Most notation is intuitive. Only weirdo is that \"raise x to the power of y\" is given by `x**y` rather than `x^y`. ",
"_____no_output_____"
],
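[
"For example (a quick sketch):\n\n```python\n2**10   # 1024\n```",
"_____no_output_____"
],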
[
"What is the value of $sin(2)$?",
"_____no_output_____"
]
],
[
[
"sin(2)",
"_____no_output_____"
]
],
[
[
"Python gives very informative errors (if you know how to read them). What do you think `NameError` means?",
"_____no_output_____"
],
[
"`NameError` means that the current python \"interpreter\" does know what `sin` means",
"_____no_output_____"
],
[
"To get access to more math, you need \n\n`import math`",
"_____no_output_____"
]
],
[
[
"import math\n\nmath.sin(2)",
"_____no_output_____"
]
],
[
[
"Once you have math imported, you have access to a whole slew of math functions.",
"_____no_output_____"
]
],
[
[
"print(math.factorial(10))\nprint(math.sqrt(10))\nprint(math.sin(math.pi))\nprint(math.cos(10))\nprint(math.exp(10))\nprint(math.log(10))\nprint(math.log10(10))\n",
"_____no_output_____"
]
],
[
[
"If you want to see what the `math` module has to offer, type `math.` and then hit `TAB`. \n\n<img src=\"python-as-a-calculator/math-dropdown.png\" />",
"_____no_output_____"
]
],
[
[
"# for example\nmath.",
"_____no_output_____"
]
],
[
[
"You can also get information about a function by typing `help(FUNCTION)`. If you run the next cell, it will give you information about the `math.factorial` function.",
"_____no_output_____"
]
],
[
[
"help(math.factorial)",
"_____no_output_____"
]
],
[
[
"### Variables\n\n\n#### Predict what will happen.",
"_____no_output_____"
]
],
[
[
"x = 5\nprint(x)",
"_____no_output_____"
],
[
"x = 5\nx = x + 2\nprint(x)",
"_____no_output_____"
]
],
[
[
"In python \"`=`\" means \"assign the stuff on the right into the stuff on the left.\"",
"_____no_output_____"
],
[
"#### Predict what will happen.",
"_____no_output_____"
]
],
[
[
"x = 7 \n5*5 = x\nprint(x)",
"_____no_output_____"
]
],
[
[
"What happened? ",
"_____no_output_____"
],
[
"`SyntaxError` is something python can't interpret.\n\nIn python, `=` means store the output of the stuff on the **right** in the variable on the **left**. It's nonsensical to try to store anything in `5*5`, so the command fails.",
"_____no_output_____"
],
[
"#### Predict what will happen. ",
"_____no_output_____"
]
],
[
[
"this_is_a_variable = 5\nanother_variable = 2\nprint(this_is_a_variable*another_variable)",
"_____no_output_____"
]
],
[
[
"Variables can (and **should**) have descriptive names. ",
"_____no_output_____"
],
[
"#### Implement\n\nStirling's approximation says that you can approximate $n!$ using some nifty log tricks. \n\n$$ ln(n!) \\approx nln(n) - n + 1 $$\n\nWrite code to check this approximation for **any** value of $n$.",
"_____no_output_____"
],
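[
"One possible check, as a hedged sketch (`n = 10` is just an example; any positive integer works):\n\n```python\nimport math\n\nn = 10  # assumed example value\nexact = math.log(math.factorial(n))\napprox = n * math.log(n) - n + 1\nprint(exact, approx)\n```",
"_____no_output_____"
],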
[
"## Summary\n\n+ Python behaves like a calculator (order of operations, etc.). \n+ You can assign the results of calculations to variables using \"`=`\"\n+ Python does the stuff on the right first, then assigns it to the name on the left.\n+ You can access more math by `import math`",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
c52b52f7eb52c4646aadda352527aa4debb87144
| 92,694 |
ipynb
|
Jupyter Notebook
|
Unsupervise.ipynb
|
JoyceHsieh/Unsupervised-Learning
|
168310ff2d31f623e9bf161949a81c686524410a
|
[
"ADSL"
] | null | null | null |
Unsupervise.ipynb
|
JoyceHsieh/Unsupervised-Learning
|
168310ff2d31f623e9bf161949a81c686524410a
|
[
"ADSL"
] | null | null | null |
Unsupervise.ipynb
|
JoyceHsieh/Unsupervised-Learning
|
168310ff2d31f623e9bf161949a81c686524410a
|
[
"ADSL"
] | null | null | null | 50.34981 | 16,264 | 0.539107 |
[
[
[
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom pathlib import Path\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"# Loading data\nfile_path = Path(\"crypto_data.csv\")\ndf = pd.read_csv(file_path)\ndf",
"_____no_output_____"
],
[
"df.dtypes",
"_____no_output_____"
],
[
"#filter for currencies that are currently being traded\ndf_drop_trading=df.loc[(df['IsTrading']==True)]\ndf_drop_trading",
"_____no_output_____"
],
[
"df_drop_trading.count()",
"_____no_output_____"
],
[
"#drop the IsTrading column from the dataframe.\ndf_drop_trading=df_drop_trading.drop(\"IsTrading\", axis=1)\ndf_drop_trading",
"_____no_output_____"
],
[
"# Count of rows with null values\ndf_drop_trading.isnull().sum()",
"_____no_output_____"
],
[
"# Delete rows with null values\ndf_trading = df_drop_trading.dropna()",
"_____no_output_____"
],
[
"#Check data again, we drop the row which column value is null\ndf_trading.count()",
"_____no_output_____"
],
[
"#Filter for cryptocurrencies that have been mined. That is, the total coins mined should be greater than zero.\ndf_filter=df_trading.loc[(df_trading['TotalCoinsMined']>0)]\ndf_filter #the row drop from 685 to 532",
"_____no_output_____"
],
[
"#Drop the coin names do not contribute to the analysis of the data\ndf_clean=df_filter.drop([\"Unnamed: 0\", \"CoinName\"], axis=1)\ndf_clean",
"_____no_output_____"
],
[
"#convert the remaining features with text values, Algorithm and ProofType, into numerical data.\nX=pd.get_dummies(df_clean)\nX #Feature colmuns increase from 4 to 377",
"_____no_output_____"
],
[
"#Standardize dataset\nfrom sklearn.preprocessing import StandardScaler\nX_scaled = StandardScaler().fit_transform(X)\nX_scaled",
"_____no_output_____"
],
[
"# Applying PCA to reduce dimensions from 377 to 100\n# Initialize PCA model\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=0.99)\n\n# Get principal components for the X_scaled data.\nX_pca = pca.fit_transform(X_scaled)\nX_pca",
"_____no_output_____"
],
[
"# Fetch the explained variance\npca.explained_variance_ratio_",
"_____no_output_____"
],
[
"#reduce the dataset dimensions with t-SNE and visually inspect the results. \nfrom sklearn.manifold import TSNE\n# Initialize t-SNE model\ntsne = TSNE(learning_rate=35)\n# Reduce dimensions\ntsne_features = tsne.fit_transform(X_pca)",
"_____no_output_____"
],
[
"# The dataset has 2 columns\ntsne_features.shape",
"_____no_output_____"
],
[
"tsne_features",
"_____no_output_____"
],
[
"# Prepare to plot the dataset\n# The first column of transformed features\nX['x'] = tsne_features[:,0]\n\n# The second column of transformed features\nX['y'] = tsne_features[:,1]",
"_____no_output_____"
],
[
"# Visualize the clusters\nplt.scatter(X['x'], X['y'])\nplt.show()",
"_____no_output_____"
],
[
"#Create an elbow plot to identify the best number of clusters. \nfrom sklearn.cluster import KMeans\ninertia = []\n# Same as k = list(range(1, 11))\nk = [1,2,3,4,5,6,7,8,9,10]\n\n\n# Looking for the best k\nfor i in k:\n km = KMeans(n_clusters=i, random_state=0)\n km.fit(X_pca)\n inertia.append(km.inertia_)\n\n# Define a DataFrame to plot the Elbow Curve using hvPlot\nelbow_data = {\"k\": k, \"inertia\": inertia}\ndf_elbow = pd.DataFrame(elbow_data)\n\nplt.plot(df_elbow['k'], df_elbow['inertia'])\nplt.xticks(range(1,11))\nplt.xlabel('Number of clusters')\nplt.ylabel('Inertia')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## By using t-SNE\nI got 2 cluster which it's really interesting!! Like moon outside and sun inside the moon, which may mean there has lots of noise in the dataset.\n \nI run t-SNE several times, I got different chart, it's really interesting but the chart are similar like one cluster in the center, and other point around the center.\n \n## KMeans\nAfter that using KMeans by fit the pca dataset, we got elbow plot but hrad to siad that which point is the best n_clusters? Maybe we should run the model more than 10 which mean range (1,20), and see that if we can get better result.\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
c52b5a18f3cbfd7c2906b47ca4d2a29c226d0c5e
| 2,424 |
ipynb
|
Jupyter Notebook
|
Lab1.ipynb
|
MichaelColcol/CPEN-21A-CPE-1-1
|
27d98907928c0e060f1fc011236c4b6050828b12
|
[
"Apache-2.0"
] | null | null | null |
Lab1.ipynb
|
MichaelColcol/CPEN-21A-CPE-1-1
|
27d98907928c0e060f1fc011236c4b6050828b12
|
[
"Apache-2.0"
] | null | null | null |
Lab1.ipynb
|
MichaelColcol/CPEN-21A-CPE-1-1
|
27d98907928c0e060f1fc011236c4b6050828b12
|
[
"Apache-2.0"
] | null | null | null | 24.989691 | 231 | 0.457921 |
[
[
[
"<a href=\"https://colab.research.google.com/github/MichaelColcol/CPEN-21A-CPE-1-1/blob/main/Lab1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Labaratory 1",
"_____no_output_____"
]
],
[
[
"a = \"Welcome to \"\nb = \"Python Programming\"\nc = a+b\nprint(c)",
"Welcome to Python Programming\n"
],
[
"Name = \"Michael S.Colcol\"\nAge = \"18 yrs old\"\nAddress = \"Phase 2 Block 7 Lot 5,Mary Cris Complex,Pasong Camachille II, General Trias, Cavite\"\n\nprint(\"I am \"\"\"+ Name)\nprint(Age)\nprint(\"from \" + Address)",
"I am Michael S.Colcol\n18 yrs old\nfrom Phase 2 Block 7 Lot 5,Mary Cris Complex,Pasong Camachille II, General Trias, Cavite\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
c52b60537b68377b30e5a7901ae38eace4746e4a
| 4,983 |
ipynb
|
Jupyter Notebook
|
openmdao/docs/openmdao_book/features/core_features/working_with_derivatives/approx_flags.ipynb
|
tong0711/OpenMDAO
|
d8496a0e606df405b2472f1c96b3c543eacaca5a
|
[
"Apache-2.0"
] | 451 |
2015-07-20T11:52:35.000Z
|
2022-03-28T08:04:56.000Z
|
openmdao/docs/openmdao_book/features/core_features/working_with_derivatives/approx_flags.ipynb
|
tong0711/OpenMDAO
|
d8496a0e606df405b2472f1c96b3c543eacaca5a
|
[
"Apache-2.0"
] | 1,096 |
2015-07-21T03:08:26.000Z
|
2022-03-31T11:59:17.000Z
|
openmdao/docs/openmdao_book/features/core_features/working_with_derivatives/approx_flags.ipynb
|
tong0711/OpenMDAO
|
d8496a0e606df405b2472f1c96b3c543eacaca5a
|
[
"Apache-2.0"
] | 301 |
2015-07-16T20:02:11.000Z
|
2022-03-28T08:04:39.000Z
| 25.423469 | 311 | 0.527795 |
[
[
[
"try:\n from openmdao.utils.notebook_utils import notebook_mode\nexcept ImportError:\n !python -m pip install openmdao[notebooks]",
"_____no_output_____"
]
],
[
[
"# How to know if a System is under FD or CS\n\nAll Systems (Components and Groups) have two flags that indicate whether the System is running under finite difference or complex step. The `under_finite_difference` flag is True if the System is being finite differenced and the `under_complex_step` flag is True if the System is being complex stepped.\n",
"_____no_output_____"
],
[
"## Usage\n\n\nFirst we'll show how to detect when a component is being finite differenced:",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nimport openmdao.api as om\n\n\nclass MyFDPartialComp(om.ExplicitComponent):\n def setup(self):\n self.num_fd_computes = 0\n\n self.add_input('x')\n self.add_output('y')\n\n def setup_partials(self):\n self.declare_partials('y', 'x', method='fd')\n\n def compute(self, inputs, outputs):\n outputs['y'] = 1.5 * inputs['x']\n if self.under_finite_difference:\n self.num_fd_computes += 1\n print(f\"{self.pathname} is being finite differenced!\")\n\n \np = om.Problem()\np.model.add_subsystem('comp', MyFDPartialComp())\n\np.setup()\np.run_model()\n\n# there shouldn't be any finite difference computes yet\nprint(\"Num fd calls = \", p.model.comp.num_fd_computes)\n\ntotals = p.compute_totals(['comp.y'], ['comp.x'])\n\n# since we're doing forward difference, there should be 1 call to compute under fd\nprint(\"Num fd calls =\", p.model.comp.num_fd_computes)\n",
"_____no_output_____"
],
[
"assert p.model.comp.num_fd_computes == 1\n",
"_____no_output_____"
]
],
[
[
"Now we'll do the same thing for a complex stepped component:",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nimport openmdao.api as om\n\nclass MyCSPartialComp(om.ExplicitComponent):\n def setup(self):\n self.num_cs_computes = 0\n\n self.add_input('x')\n self.add_output('y')\n\n def setup_partials(self):\n self.declare_partials('y', 'x', method='cs')\n\n def compute(self, inputs, outputs):\n outputs['y'] = 1.5 * inputs['x']\n if self.under_complex_step:\n self.num_cs_computes += 1\n print(f\"{self.pathname} is being complex stepped!\")\n\n \np = om.Problem()\np.model.add_subsystem('comp', MyCSPartialComp())\n\np.setup()\np.run_model()\n\n# there shouldn't be any complex step computes yet\nprint(\"Num cs calls =\", p.model.comp.num_cs_computes)\n\n\ntotals = p.compute_totals(['comp.y'], ['comp.x'])\n\n# there should be 1 call to compute under cs\nprint(\"Num cs calls =\", p.model.comp.num_cs_computes)\n",
"_____no_output_____"
],
[
"assert p.model.comp.num_cs_computes == 1\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
c52b67ee380ea012d9aea9cd9a5be8436990e203
| 968,406 |
ipynb
|
Jupyter Notebook
|
Correlations/Correlation-BC-All-Traffic.ipynb
|
varsha2509/hyperlocal-aq-prediction
|
3579974cc6c36ab82b216b37d5d573eb7bde1ab4
|
[
"MIT"
] | 11 |
2021-01-30T15:02:30.000Z
|
2021-12-06T04:50:07.000Z
|
Correlations/.ipynb_checkpoints/Correlation-BC-All-Traffic-checkpoint.ipynb
|
varsha2509/hyperlocal-aq-prediction
|
3579974cc6c36ab82b216b37d5d573eb7bde1ab4
|
[
"MIT"
] | null | null | null |
Correlations/.ipynb_checkpoints/Correlation-BC-All-Traffic-checkpoint.ipynb
|
varsha2509/hyperlocal-aq-prediction
|
3579974cc6c36ab82b216b37d5d573eb7bde1ab4
|
[
"MIT"
] | 4 |
2021-02-02T17:44:28.000Z
|
2022-02-09T01:31:22.000Z
| 515.383715 | 905,292 | 0.929279 |
[
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport descartes\nimport geopandas as gpd\nfrom shapely.geometry import Point, Polygon\nfrom shapely.ops import nearest_points\n\nimport seaborn as sns\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport math\n\nimport time\n\nfrom matplotlib import cm\n\nimport matplotlib.lines as mlines\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"### AIR POLLUTION MONITORING DATA FROM EDF",
"_____no_output_____"
]
],
[
[
" df = pd.read_csv('EDF_Data.csv', header = 1)\n df['TimePeriod'] = 'Jun2015-May2016'\n df.tail()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"geometry = [Point(xy) for xy in zip(df['Longitude'], df['Latitude'])]",
"_____no_output_____"
]
],
[
[
"### Split the dataset into BC and NO2 since we are interested only in those two pollutants",
"_____no_output_____"
]
],
[
[
"BC_df = df[['Longitude', 'Latitude', 'BC Value', 'TimePeriod']]",
"_____no_output_____"
],
[
"NO2_df = df[['Longitude', 'Latitude', 'NO2 Value', 'TimePeriod']]",
"_____no_output_____"
],
[
"crs = {'init': 'epsg:4326'}\ngeo_df = gpd.GeoDataFrame(df, crs = crs, geometry = geometry)",
"_____no_output_____"
]
],
[
[
"## TRAFFIC DATA",
"_____no_output_____"
]
],
[
[
"### Load Annual Average Daily Traffic (AADT) file from Caltrans\ntraffic = pd.read_csv('Data/Traffic_Oakland_AADT.csv', header = 0)",
"_____no_output_____"
],
[
"# Drop columns that are unneccessary and choose only Ahead_AADT, along with N/E latitude and longitude\ntraffic.drop(columns = ['OBJECTID','District','Route','County', 'Postmile', \n 'Back_pk_h', 'Back_pk_m', 'Ahead_pk_h', 'Ahead_pk_m','Back_AADT','Lat_S_or_W', 'Lon_S_or_W'], inplace=True)",
"_____no_output_____"
],
[
"traffic.rename(columns={\"Ahead_AADT\":\"AADT\", \"Lat_N_or_E\":\"Latitude\", \"Lon_N_or_E\":\"Longitude\", \"Descriptn\":\"Description\"}, inplace=True)",
"_____no_output_____"
],
[
"traffic.head()",
"_____no_output_____"
],
[
"# Taking a closer look at the traffic data, there are some intersections where the AADT is zero, or the latitude and longitude are zero. We want to drop these rows\ntraffic = traffic[(traffic['Longitude']<-1) & (traffic['AADT']>1)]",
"_____no_output_____"
],
[
"traffic.shape",
"_____no_output_____"
]
],
[
[
"## Converting facility and traffic dataframe into a geopandas dataframe for plotting",
"_____no_output_____"
]
],
[
[
"# Create a geopandas dataframe with traffic data\ngeometry_traffic = [Point(xy) for xy in zip(traffic['Longitude'], traffic['Latitude'])]\ngeo_df_traffic = gpd.GeoDataFrame(traffic, crs = crs, geometry = geometry_traffic)",
"_____no_output_____"
],
[
"# Create a list of x and y coordinates for the Black Carbon concentration data using geopandas\ngeometry_df_BC = [Point(xy) for xy in zip(BC_df['Longitude'], BC_df['Latitude'])]\ngeo_df_BC = gpd.GeoDataFrame(BC_df, crs = crs, geometry = geometry_df_BC)",
"_____no_output_____"
]
],
[
[
"### Calculate distance between point of measurement and each facility and add it to the _dist column",
"_____no_output_____"
]
],
[
[
"### Defining a function to calculate the distance between two GPS coordinates (latitude and longitude)\ndef distance(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2-lat1)\n dlon = math.radians(lon2-lon1)\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n d = radius * c\n\n return d",
"_____no_output_____"
]
],
[
[
"#### Loading Traffic Data",
"_____no_output_____"
]
],
[
[
"traffic.head()",
"_____no_output_____"
],
[
"## Assign an intersection number to each traffic intersection instead of using description\ntraffic.reset_index(inplace=True)",
"_____no_output_____"
],
[
"#Rename index as Intersection\ntraffic.rename(columns={\"index\":\"Intersection\"}, inplace=True)",
"_____no_output_____"
],
[
"#Drop the description column\ntraffic.drop(columns=['Description'], inplace=True)",
"_____no_output_____"
],
[
"### Add an empty column for distance\ntraffic['dist'] = 0\ntraffic['dist'].astype(float)",
"_____no_output_____"
],
[
"traffic_lat = traffic[['Intersection', 'Latitude']].T\ntraffic_long = traffic[['Intersection', 'Longitude']].T\ntraffic_AADT = traffic[['Intersection', 'AADT']].T\ntraffic_dist = traffic[['Intersection', 'dist']].T\ntraffic_geo = traffic[['Intersection', 'geometry']].T\n",
"_____no_output_____"
],
[
"traffic_lat.head()",
"_____no_output_____"
],
[
"## Make the header as the first row in each transposed dataframe\ntraffic_lat = traffic_lat.rename(columns=traffic_lat.iloc[0].astype(int)).drop(traffic_lat.index[0])\ntraffic_long = traffic_long.rename(columns=traffic_long.iloc[0].astype(int)).drop(traffic_long.index[0])\ntraffic_AADT = traffic_AADT.rename(columns=traffic_AADT.iloc[0].astype(int)).drop(traffic_AADT.index[0])\ntraffic_dist = traffic_dist.rename(columns=traffic_dist.iloc[0].astype(int)).drop(traffic_dist.index[0])\ntraffic_geo = traffic_geo.rename(columns=traffic_geo.iloc[0].astype(int)).drop(traffic_geo.index[0])",
"_____no_output_____"
],
[
"## Add suffix to column header based on the dataframe type\ntraffic_lat.columns = [str(col) + '_latitude' for col in traffic_lat.columns]\ntraffic_long.columns = [str(col) + '_longitude' for col in traffic_long.columns]\ntraffic_AADT.columns = [str(col) + '_AADT' for col in traffic_AADT.columns]\ntraffic_dist.columns = [str(col) + '_traf_dist' for col in traffic_dist.columns]\ntraffic_geo.columns = [str(col) + '_geo' for col in traffic_geo.columns]",
"_____no_output_____"
],
[
"## Remove index for each dataframe\ntraffic_lat.reset_index(drop=True, inplace=True)\ntraffic_long.reset_index(drop=True, inplace=True)\ntraffic_AADT.reset_index(drop=True, inplace=True)\ntraffic_dist.reset_index(drop=True, inplace=True)\ntraffic_geo.reset_index(drop=True, inplace=True)",
"_____no_output_____"
],
[
"traffic_combined = traffic_lat.join(traffic_long).join(traffic_AADT).join(traffic_dist).join(traffic_geo)\n",
"_____no_output_____"
],
[
"traffic_combined",
"_____no_output_____"
],
[
"traffic_combined = traffic_combined.reindex(columns=sorted(traffic_combined.columns))",
"_____no_output_____"
],
[
"#Create a datafram where each row contains emissions of PM10 and PM2.5 for each facility\ntraffic_combined = traffic_combined.loc[traffic_combined.index.repeat(21488)].reset_index(drop=True)",
"_____no_output_____"
],
[
"BC_Traffic = BC_df.join(traffic_combined)",
"_____no_output_____"
],
[
"BC_Traffic.head()",
"_____no_output_____"
],
[
"# Convert distance column to float type\nfor idx, col in enumerate(BC_Traffic.columns):\n if \"_traf_dist\" in col:\n BC_Traffic[col] = pd.to_numeric(BC_Traffic[col], downcast=\"float\")\n",
"_____no_output_____"
]
],
[
[
"#### Calculate distance between each traffic intersection and point of measurement and store this in the _dist column",
"_____no_output_____"
]
],
[
[
"for index, row in BC_Traffic.iterrows():\n for idx, col in enumerate(BC_Traffic.columns):\n if \"_traf_dist\" in col: \n BC_Traffic.at[index,col] = float(distance((row.iloc[1], row.iloc[0]), (row.iloc[idx-2], row.iloc[idx-1])))*0.621\n #BC_Facility_Traffic.at[index,col] = float(row.iloc[idx])\n\n",
"_____no_output_____"
],
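[
"# Optional vectorized sketch (an added illustration, not part of the original analysis):\n# NumPy broadcasting computes every measurement-to-intersection distance at once,\n# which is far faster than the row-by-row iterrows() loop above. Assumes numpy is\n# imported as np (it is used later in this notebook) and that the Latitude/Longitude\n# column names match those used earlier.\ndef haversine_miles(lat1, lon1, lat2, lon2):\n    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))\n    a = np.sin((lat2 - lat1) / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2)**2\n    return 6371 * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) * 0.621  # km -> miles\n\n# dists = haversine_miles(BC_df['Latitude'].values[:, None], BC_df['Longitude'].values[:, None],\n#                         traffic['Latitude'].values[None, :], traffic['Longitude'].values[None, :])",
"_____no_output_____"
],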
[
"BC_Traffic.head()",
"_____no_output_____"
],
[
"#### Write this to a dataframe\nBC_Traffic.to_csv(\"Data/BC_Traffic_ALL.csv\")",
"_____no_output_____"
]
],
[
[
"#### Similar to the facility dataframe, drop latitude and longitude since its captured in the distance column. Also drop AADT",
"_____no_output_____"
]
],
[
[
"BC_Traffic.drop(list(BC_Traffic.filter(regex = '_latitude')), axis = 1, inplace = True)\nBC_Traffic.drop(list(BC_Traffic.filter(regex = '_longitude')), axis = 1, inplace = True)\nBC_Traffic.drop(list(BC_Traffic.filter(regex = '_AADT')), axis = 1, inplace = True)\nBC_Traffic.drop(list(BC_Traffic.filter(regex = '_geo')), axis = 1, inplace = True)\nBC_Traffic.drop(list(BC_Traffic.filter(regex = 'Longitude')), axis = 1, inplace = True)\nBC_Traffic.drop(list(BC_Traffic.filter(regex = 'Latitude')), axis = 1, inplace = True)\nBC_Traffic.drop(list(BC_Traffic.filter(regex = 'TimePeriod')), axis = 1, inplace = True)",
"_____no_output_____"
],
[
"BC_Traffic.drop(list(BC_Traffic.filter(regex = 'geometry')), axis = 1, inplace = True)",
"_____no_output_____"
],
[
"BC_Traffic.head()",
"_____no_output_____"
],
[
"corr = BC_Traffic.corr()",
"_____no_output_____"
],
[
"arr_corr = corr.as_matrix()",
"/home/shridhar/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"arr_corr[0]",
"_____no_output_____"
]
],
[
[
"#### Plotting correlation between all features as a heatmap - but this visualization is not easy to follow....\n\nfig, ax = plt.subplots(figsize=(100, 100))\nax = sns.heatmap(\n corr, \n vmin=-1, vmax=1, center=0,\n cmap=sns.diverging_palette(20, 220, n=500),\n square=False\n)\nax.set_xticklabels(\n ax.get_xticklabels(),\n rotation=45,\n horizontalalignment='right'\n);\nplt.show()",
"_____no_output_____"
]
],
[
[
"print(plt.get_backend())\n\n# close any existing plots\nplt.close(\"all\")\n\n# mask out the top triangle\narr_corr[np.triu_indices_from(arr_corr)] = np.nan\n\nfig, ax = plt.subplots(figsize=(50, 50))\n\nhm = sns.heatmap(arr_corr, cbar=True, vmin = -1, vmax = 1, center = 0,\n fmt='.2f', annot_kws={'size': 8}, annot=True, \n square=False, cmap = 'coolwarm')\n#cmap=plt.cm.Blues\n\nticks = np.arange(corr.shape[0]) + 0.5\nax.set_xticks(ticks)\nax.set_xticklabels(corr.columns, rotation=90, fontsize=8)\nax.set_yticks(ticks)\nax.set_yticklabels(corr.index, rotation=360, fontsize=8)\n\nax.set_title('correlation matrix')\nplt.tight_layout()\n#plt.savefig(\"corr_matrix_incl_anno_double.png\", dpi=300)",
"module://ipykernel.pylab.backend_inline\n"
]
],
[
[
"#### Once again there doesn't seem to be much correlation between BC concentrations and the closest major traffic intersection. Next option is to identify all the traffic intersections in the area.\n",
"_____no_output_____"
],
[
"\nimport chart_studio.plotly as py\nimport plotly.graph_objs as go",
"_____no_output_____"
],
[
"import chart_studio\nchart_studio.tools.set_credentials_file(username='varsha2509', api_key='QLfBsWWLPKoLjY5hW0Fu')\n",
"_____no_output_____"
],
[
"heatmap = go.Heatmap(z=arr_corr, x=BC_Facility_Traffic_Met.columns, y=BC_Facility_Traffic_Met.index)\ndata = [heatmap]\npy.iplot(data, filename='basic-heatmap')\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
c52b6e136b2e6b2214138461642a49d85c79a799
| 14,883 |
ipynb
|
Jupyter Notebook
|
Datasets/Vectors/world_database_on_protected_areas.ipynb
|
dmendelo/earthengine-py-notebooks
|
515567fa2702b436daf449fff02f5c690003cf94
|
[
"MIT"
] | 2 |
2020-02-05T02:36:18.000Z
|
2021-03-23T11:02:39.000Z
|
Datasets/Vectors/world_database_on_protected_areas.ipynb
|
Fernigithub/earthengine-py-notebooks
|
32689dc5da4a86e46ea30d8b22241866c1f7cf61
|
[
"MIT"
] | null | null | null |
Datasets/Vectors/world_database_on_protected_areas.ipynb
|
Fernigithub/earthengine-py-notebooks
|
32689dc5da4a86e46ea30d8b22241866c1f7cf61
|
[
"MIT"
] | 3 |
2021-01-06T17:33:08.000Z
|
2022-02-18T02:14:18.000Z
| 82.226519 | 9,088 | 0.839817 |
[
[
[
"<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Vectors/world_database_on_protected_areas.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/world_database_on_protected_areas.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Vectors/world_database_on_protected_areas.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/world_database_on_protected_areas.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>",
"_____no_output_____"
],
[
"## Install Earth Engine API\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.\nThe magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.",
"_____no_output_____"
]
],
[
[
"# %%capture\n# !pip install earthengine-api\n# !pip install geehydro",
"_____no_output_____"
]
],
[
[
"Import libraries",
"_____no_output_____"
]
],
[
[
"import ee\nimport folium\nimport geehydro",
"_____no_output_____"
]
],
[
[
"Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()` \nif you are running this notebook for the first time or if you are getting an authentication error. ",
"_____no_output_____"
]
],
[
[
"# ee.Authenticate()\nee.Initialize()",
"_____no_output_____"
]
],
[
[
"## Create an interactive map \nThis step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. \nThe optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.",
"_____no_output_____"
]
],
[
[
"Map = folium.Map(location=[40, -100], zoom_start=4)\nMap.setOptions('HYBRID')",
"_____no_output_____"
]
],
[
[
"## Add Earth Engine Python script ",
"_____no_output_____"
]
],
[
[
"dataset = ee.FeatureCollection('WCMC/WDPA/current/polygons')\nvisParams = {\n 'palette': ['2ed033', '5aff05', '67b9ff', '5844ff', '0a7618', '2c05ff'],\n 'min': 0.0,\n 'max': 1550000.0,\n 'opacity': 0.8,\n}\nimage = ee.Image().float().paint(dataset, 'REP_AREA')\nMap.setCenter(41.104, -17.724, 6)\nMap.addLayer(image, visParams, 'WCMC/WDPA/current/polygons')\n# Map.addLayer(dataset, {}, 'for Inspector', False)\n\n\ndataset = ee.FeatureCollection('WCMC/WDPA/current/points')\nstyleParams = {\n 'color': '#4285F4',\n 'width': 1,\n}\nprotectedAreaPoints = dataset.style(**styleParams)\n# Map.setCenter(110.57, 0.88, 4)\nMap.addLayer(protectedAreaPoints, {}, 'WCMC/WDPA/current/points')\n",
"_____no_output_____"
]
],
[
[
"## Display Earth Engine data layers ",
"_____no_output_____"
]
],
[
[
"Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)\nMap",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52b84dc344db657cdf1b3b4ef7220d9e49a257c
| 148,115 |
ipynb
|
Jupyter Notebook
|
archive/P1_original.ipynb
|
matttpj/CarND-LaneLines-P1
|
108b23febde207c1104a4a990a8b82aa1c341c3d
|
[
"MIT"
] | null | null | null |
archive/P1_original.ipynb
|
matttpj/CarND-LaneLines-P1
|
108b23febde207c1104a4a990a8b82aa1c341c3d
|
[
"MIT"
] | null | null | null |
archive/P1_original.ipynb
|
matttpj/CarND-LaneLines-P1
|
108b23febde207c1104a4a990a8b82aa1c341c3d
|
[
"MIT"
] | null | null | null | 253.188034 | 114,836 | 0.895304 |
[
[
[
"# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---",
"_____no_output_____"
],
[
"**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>",
"_____no_output_____"
],
[
"**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** ",
"_____no_output_____"
],
[
"## Import Packages",
"_____no_output_____"
]
],
[
[
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Read in an Image",
"_____no_output_____"
]
],
[
[
"#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')",
"This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n"
]
],
[
[
"## Ideas for Lane Detection Pipeline",
"_____no_output_____"
],
[
"**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images \n`cv2.cvtColor()` to grayscale or change color \n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**",
"_____no_output_____"
],
[
"## Helper Functions",
"_____no_output_____"
],
[
"Below are some helper functions to help get you started. They should look familiar from the lesson!",
"_____no_output_____"
]
],
[
[
"import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. \n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)",
"_____no_output_____"
]
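,
[
"# Hedged sketch (illustrative only) of how the helpers above chain into a pipeline.\n# The graded implementation belongs in the TODO cell further below, and every\n# parameter value here is just a starting guess to be tuned:\n# gray   = grayscale(image)\n# blur   = gaussian_blur(gray, kernel_size=5)\n# edges  = canny(blur, low_threshold=50, high_threshold=150)\n# lines  = hough_lines(edges, rho=2, theta=np.pi/180, threshold=15, min_line_len=40, max_line_gap=20)\n# result = weighted_img(lines, image)",
"_____no_output_____"
]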
],
[
[
"## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**",
"_____no_output_____"
]
],
[
[
"import os\nos.listdir(\"test_images/\")",
"_____no_output_____"
]
],
[
[
"## Build a Lane Finding Pipeline\n\n",
"_____no_output_____"
],
[
"Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.",
"_____no_output_____"
]
],
[
[
"# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.",
"_____no_output_____"
]
],
[
[
"## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**",
"_____no_output_____"
]
],
[
[
"# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML",
"_____no_output_____"
],
[
"def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n\n return result",
"_____no_output_____"
]
],
[
[
"Let's try the one with the solid white lane on the right first ...",
"_____no_output_____"
]
],
[
[
"white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)",
"_____no_output_____"
]
],
[
[
"Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.",
"_____no_output_____"
]
],
[
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))",
"_____no_output_____"
]
],
[
[
"## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**",
"_____no_output_____"
],
[
"Now for the one with the solid yellow lane on the left. This one's more tricky!",
"_____no_output_____"
]
],
[
[
"yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)",
"_____no_output_____"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))",
"_____no_output_____"
]
],
[
[
"## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n",
"_____no_output_____"
],
[
"## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!",
"_____no_output_____"
]
],
[
[
"challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)",
"_____no_output_____"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
c52b8ebf176088512e57e2fab842b5adf732f8f4
| 1,427 |
ipynb
|
Jupyter Notebook
|
examples.ipynb
|
dkagramanyan/dkagramanyan-WC-Co_computer_vision
|
74eece7ded37d33ff6cb38cf50d1c99ea27d7468
|
[
"MIT"
] | null | null | null |
examples.ipynb
|
dkagramanyan/dkagramanyan-WC-Co_computer_vision
|
74eece7ded37d33ff6cb38cf50d1c99ea27d7468
|
[
"MIT"
] | null | null | null |
examples.ipynb
|
dkagramanyan/dkagramanyan-WC-Co_computer_vision
|
74eece7ded37d33ff6cb38cf50d1c99ea27d7468
|
[
"MIT"
] | null | null | null | 20.385714 | 135 | 0.553609 |
[
[
[
"from src.utils import grainPreprocess, grainShow, grainMark, grainDraw, grainApprox, grainStats, grainMorphology,grainGenerate\nfrom src.cfg import CfgAnglesNames, CfgBeamsNames, CfgDataset\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Downloading samples",
"_____no_output_____"
]
],
[
[
"names=CfgDataset.images_names\nprint(names)\nimages=grainPreprocess.get_example_images()\n\nplt.figure(figsize=(10,10))\nplt.imshow(images[2,0],cmap='gray')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52b9507c95c8c84c490471b7fc8f656b7b6a2e9
| 334,683 |
ipynb
|
Jupyter Notebook
|
content/ch-states/unique-properties-qubits.ipynb
|
delkouss/qiskit-textbook
|
16490a969becb7075513b821b5bdb778d3f5f3dc
|
[
"Apache-2.0"
] | null | null | null |
content/ch-states/unique-properties-qubits.ipynb
|
delkouss/qiskit-textbook
|
16490a969becb7075513b821b5bdb778d3f5f3dc
|
[
"Apache-2.0"
] | null | null | null |
content/ch-states/unique-properties-qubits.ipynb
|
delkouss/qiskit-textbook
|
16490a969becb7075513b821b5bdb778d3f5f3dc
|
[
"Apache-2.0"
] | null | null | null | 42.531834 | 599 | 0.495451 |
[
[
[
"# The Unique Properties of Qubits",
"_____no_output_____"
]
],
[
[
"from qiskit import *\nfrom qiskit.visualization import plot_histogram\n%config InlineBackend.figure_format = 'svg' # Makes the images look nice",
"_____no_output_____"
]
],
[
[
"You now know something about bits, and about how our familiar digital computers work. All the complex variables, objects and data structures used in modern software are basically all just big piles of bits. Those of us who work on quantum computing call these *classical variables.* The computers that use them, like the one you are using to read this article, we call *classical computers*.\n\nIn quantum computers, our basic variable is the _qubit_: a quantum variant of the bit. These are quantum objects, obeying the laws of quantum mechanics. Unlike any classical variable, these cannot be represented by some number of classical bits. They are fundamentally different.\n\n\nThe purpose of this section is to give you your first taste of what a qubit is, and how they are unique. We'll do this in a way that requires essentially no math. This means leaving terms like 'superposition' and 'entanglement' until future sections, since it is difficult to properly convey their meaning without pointing at an equation.\n\nInstead, we will use another well-known feature of quantum mechanics: the uncertainty principle.",
"_____no_output_____"
],
[
"### Heisenberg's uncertainty principle",
"_____no_output_____"
],
[
"The most common formulation of the uncertainty principle refers to the position and momentum of a particle: the more precisely its position is defined, the more uncertainty there is in its momentum, and vice-versa.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"This is a common feature of quantum objects, though it need not always refer to position and momentum. There are many possible sets of parameters for different quantum objects, where certain knowledge of one means that our observations of the others will be completely random.\n\nTo see how the uncertainty principle affects qubits, we need to look at measurement. As we saw in the last section, this is the method by which we extract a bit from a qubit.",
"_____no_output_____"
]
],
[
[
"measure_z = QuantumCircuit(1,1)\nmeasure_z.measure(0,0)\n\nmeasure_z.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"On the [Circuit Composer](https://quantum-computing.ibm.com/composer), the same operation looks like this.\n\n\n\nThis version has a small ‘z’ written in the box that represents the operation. This hints at the fact that this kind of measurement is not the only one. In fact, it is only one of an infinite number of possible ways to extract a bit from a qubit. Specifically, it is known as a *z measurement*.\n\nAnother commonly used measurement is the *x measurement*. It can be performed using the following sequence of gates.",
"_____no_output_____"
]
],
[
[
"measure_x = QuantumCircuit(1,1)\nmeasure_x.h(0)\nmeasure_x.measure(0,0)\n\nmeasure_x.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"Later chapters will explain why this sequence of operations performs a new kind of measurement. For now, you'll need to trust us.\n\nLike the position and momentum of a quantum particle, the z and x measurements of a qubit are governed by the uncertainty principle. Below we'll look at results from a few different circuits to see this effect in action.\n\n#### Results for an empty circuit\n\nThe easiest way to see an example is to take a freshly initialized qubit.",
"_____no_output_____"
]
],
[
[
"qc_0 = QuantumCircuit(1)\n\nqc_0.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"Qubits are always initialized such that they are certain to give the result `0` for a z measurement. The resulting histogram will therefore simply have a single column, showing the 100% probability of getting a `0`.",
"_____no_output_____"
]
],
[
[
"qc = qc_0 + measure_z\n\nprint('Results for z measurement:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"Results for z measurement:\n"
]
],
[
[
"If we instead do an x measurement, the results will be completely random.",
"_____no_output_____"
]
],
[
[
"qc = qc_0 + measure_x\n\nprint('Results for x measurement:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"Results for x measurement:\n"
]
],
[
[
"Note that the reason why the results are not split exactly 50/50 here is because we take samples by repeating the circuit a finite number of times, and so there will always be statistical noise. In this case, the default of `shots=1024` was used.",
"_____no_output_____"
],
[
"#### Results for a single Hadamard\n\nNow we'll try a different circuit. This has a single gate called a Hadamard, which we will learn more about in future sections.",
"_____no_output_____"
]
],
[
[
"qc_plus = QuantumCircuit(1)\nqc_plus.h(0)\n\nqc_plus.draw(output='mpl')",
"_____no_output_____"
]
],
[
[
"To see what effect it has, let's first try the z measurement.",
"_____no_output_____"
]
],
[
[
"qc = qc_plus + measure_z\n\nqc.draw()\n\nprint('Results for z measurement:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"Results for z measurement:\n"
]
],
[
[
"Here we see that it is the results of the z measurement that are random for this circuit.\n\nNow let's see what happens for an x measurement.",
"_____no_output_____"
]
],
[
[
"qc = qc_plus + measure_x\n\nprint('Results for x measurement:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"Results for x measurement:\n"
]
],
[
[
"For the x measurement, it is certain that the output for this circuit is `0`. The results here are therefore very different to what we saw for the empty circuit. The Hadamard has lead to an entirely opposite set of outcomes.\n\n#### Results for a y rotation\n\nUsing other circuits we can manipulate the results in different ways. Here is an example with an `ry` gate.",
"_____no_output_____"
]
],
[
[
"qc_y = QuantumCircuit(1)\nqc_y.ry( -3.14159/4,0)\n\nqc_y.draw(output='mpl')",
"_____no_output_____"
]
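,
[
"# Hedged aside (an added illustration): for ry(theta) acting on |0>, the z-measurement\n# probabilities are cos^2(theta/2) for 0 and sin^2(theta/2) for 1. With theta = -pi/4\n# this predicts P(0) = cos^2(pi/8) ~ 0.85, consistent with the z-measurement histogram below.\nimport numpy as np\nprint(np.cos(np.pi/8)**2, np.sin(np.pi/8)**2)",
"_____no_output_____"
]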
],
[
[
"We will learn more about `ry` in future sections. For now, just notice the effect it has for the z and x measurements.",
"_____no_output_____"
]
],
[
[
"qc = qc_y + measure_z\n\nprint('Results for z measurement:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"Results for z measurement:\n"
]
],
[
[
"Here we have a case that we have not seen before. The z measurement is most likely to output `0`, but it is not completely certain. A similar effect is seen below for the x measurement: it is most likely, but not certain, to output `1`.",
"_____no_output_____"
]
],
[
[
"qc = qc_y + measure_x\n\nprint('\\nResults for x measurement:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"\nResults for x measurement:\n"
]
],
[
[
"These results hint at an important principle: Qubits have a limited amount of certainty that they can hold. This ensures that, despite the different ways we can extract outputs from a qubit, it can only be used to store a single bit of information. In the case of the blank circuit, this certainty was dedicated entirely to the outcomes of z measurements. For the circuit with a single Hadamard, it was dedicated entirely to x measurements. In this case, it is shared between the two.",
"_____no_output_____"
],
[
"### Einstein vs. Bell",
"_____no_output_____"
],
[
"We have now played with some of the features of qubits, but we haven't done anything that couldn't be reproduced by a few bits and a random number generator. You can therefore be forgiven for thinking that quantum variables are just classical variables with some randomness bundled in.\n\nThis is essentially the claim made by Einstein, Podolsky and Rosen back in 1935. They objected to the uncertainty seen in quantum mechanics, and thought it meant that the theory was incomplete. They thought that a qubit should always know what output it would give for both kinds of measurement, and that it only seems random because some information is hidden from us. As Einstein said: God does not play dice with the universe.\n\nNo one spoke of qubits back then, and people hardly spoke of computers. But if we translate their arguments into modern language, they essentially claimed that qubits can indeed be described by some form of classical variable. They didn’t know how to do it, but they were sure it could be done. Then quantum mechanics could be replaced by a much nicer and more sensible theory.\n\nIt took until 1964 to show that they were wrong. J. S. Bell proved that quantum variables behaved in a way that was fundamentally unique. Since then, many new ways have been found to prove this, and extensive experiments have been done to show that this is exactly the way the universe works. We'll now consider a simple demonstration, using a variant of _Hardy’s paradox_.\n\nFor this we need two qubits, set up in such a way that their results are correlated. Specifically, we want to set them up such that we see the following properties.\n\n1. If z measurements are made on both qubits, they never both output `0`.\n2. If an x measurement of one qubit outputs `1`, a z measurement of the other will output `0`.\n\nIf we have qubits that satisfy these properties, what can we infer about the remaining case: an x measurement of both?\n\nFor example, let's think about the case where both qubits output `1` for an x measurement. By applying property 2 we can deduce what the result would have been if we had made z measurements instead: We would have gotten an output of `0` for both. However, this result is impossible according to property 1. We can therefore conclude that an output of `1` for x measurements of both qubits must also be impossible.\n\nThe paragraph you just read contains all the math in this section. Don't feel bad if you need to read it a couple more times!\n\nNow let's see what actually happens. Here is a circuit, composed of gates you will learn about in later sections. It prepares a pair of qubits that will satisfy the above properties.",
"_____no_output_____"
]
],
[
[
"qc_hardy = QuantumCircuit(2)\nqc_hardy.ry(1.911,1)\nqc_hardy.cx(1,0)\nqc_hardy.ry(0.785,0)\nqc_hardy.cx(1,0)\nqc_hardy.ry(2.356,0)\n\nqc_hardy.draw(output='mpl')",
"_____no_output_____"
]
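,
[
"# Hedged check (an added illustration): inspect the exact amplitudes of qc_hardy with\n# Aer's statevector simulator instead of sampled counts. Assumes the\n# 'statevector_simulator' backend available in this version of Qiskit.\nstate = execute(qc_hardy, Aer.get_backend('statevector_simulator')).result().get_statevector()\nprint(state)  # the amplitude of |00> (the first entry) should be ~0, matching property 1",
"_____no_output_____"
]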
],
[
[
"Let's see it in action. First a z measurement of both qubits.",
"_____no_output_____"
]
],
[
[
"measurements = QuantumCircuit(2,2)\n# z measurement on both qubits\nmeasurements.measure(0,0)\nmeasurements.measure(1,1)\n\nqc = qc_hardy + measurements\n\nprint('\\nResults for two z measurements:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"\nResults for two z measurements:\n"
]
],
[
[
"The probability of `00` is zero, and so these qubits do indeed satisfy property 1.\n\nNext, let's see the results of an x measurement of one and a z measurement of the other.",
"_____no_output_____"
]
],
[
[
"measurements = QuantumCircuit(2,2)\n# x measurement on qubit 0\nmeasurements.h(0)\nmeasurements.measure(0,0)\n# z measurement on qubit 1\nmeasurements.measure(1,1)\n\nqc = qc_hardy + measurements\n\nprint('\\nResults for two x measurement on qubit 0 and z measurement on qubit 1:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"\nResults for two x measurement on qubit 0 and z measurement on qubit 1:\n"
]
],
[
[
"The probability of `11` is zero. You'll see the same if you swap around the measurements. These qubits therefore also satisfy property 2.\n \n\nFinally, let's look at an x measurement of both.",
"_____no_output_____"
]
],
[
[
"measurements = QuantumCircuit(2,2)\nmeasurements.h(0)\nmeasurements.measure(0,0)\nmeasurements.h(1)\nmeasurements.measure(1,1)\n\nqc = qc_hardy + measurements\n\nprint('\\nResults for two x measurement on both qubits:')\ncounts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()\nplot_histogram(counts)",
"\nResults for two x measurement on both qubits:\n"
]
],
[
[
"We reasoned that, given properties 1 and 2, it would be impossible to get the output `11`. From the results above, we see that our reasoning was not correct: one in every dozen results will have this 'impossible' result.\n\nSo where did we go wrong? Our mistake was in the following piece of reasoning.\n\n> By applying property 2 we can deduce what the result would have been if we had made z measurements instead\n\nWe used our knowledge of the x outputs to work out what the z outputs were. Once we’d done that, we assumed that we were certain about the value of both. More certain than the uncertainty principle allows us to be. And so we were wrong.\n\nOur logic would be completely valid if we weren’t reasoning about quantum objects. If it was some non-quantum variable, that we initialized by some random process, the x and z outputs would indeed both be well defined. They would just be based on some pre-determined list of random numbers in our computer, or generated by some deterministic process. Then there would be no reason why we shouldn't use one to deduce the value of the other, and our reasoning would be perfectly valid. The restriction it predicts would apply, and it would be impossible for both x measurements to output `1`.\n\nBut our qubits behave differently. The uncertainty of quantum mechanics allows qubits to dodge restrictions placed on classical variables. It allows them to do things that would otherwise be impossible. Indeed, this is the main thing to take away from this section:\n\n> A physical system in a definite state can still behave randomly.\n\nThis is the first of the key principles of the quantum world. It needs to become your new intuition, as it is what makes quantum systems different to classical systems. It's what makes quantum computers able to outperform classical computers. It leads to effects that allow programs made with quantum variables to solve problems in ways that those with normal variables cannot. But just because qubits don’t follow the same logic as normal computers, it doesn’t mean they defy logic entirely. They obey the definite rules laid out by quantum mechanics.\n\nIf you’d like to learn these rules, we’ll use the remainder of this chapter to guide you through them. We'll also show you how to express them using math. This will provide a foundation for later chapters, in which we'll explain various quantum algorithms and techniques.",
"_____no_output_____"
]
],
[
[
"import qiskit\nqiskit.__qiskit_version__",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52b95cb9f4fab8e77838dd5e9838ef43d033e2e
| 69,535 |
ipynb
|
Jupyter Notebook
|
02-data-x-signals/m250-theory-signal-spectral-lti-ffts/fourier.ipynb
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 117 |
2019-09-02T06:08:46.000Z
|
2022-03-09T18:15:26.000Z
|
02-data-x-signals/m250-theory-signal-spectral-lti-ffts/fourier.ipynb
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 4 |
2020-06-24T22:20:31.000Z
|
2022-02-28T01:37:36.000Z
|
02-data-x-signals/m250-theory-signal-spectral-lti-ffts/fourier.ipynb
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 78 |
2020-06-19T09:41:01.000Z
|
2022-02-05T00:13:29.000Z
| 99.052707 | 17,622 | 0.759344 |
[
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"t = np.arange(256)",
"_____no_output_____"
],
[
"sp = np.fft.fft(np.sin(2.0 * np.pi * t/256.0))",
"_____no_output_____"
],
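[
"# Illustrative addition (not in the original notebook): locate the dominant FFT bin.\n# For a sine completing exactly one cycle over 256 samples, the magnitude should peak\n# at bins 1 and 255 (the positive- and negative-frequency components).\nfreqs = np.fft.fftfreq(t.shape[-1])\npeak = np.argmax(np.abs(sp))\nprint(peak, freqs[peak])  # expect bin 1, i.e. a frequency of 1/256 cycles per sample",
"_____no_output_____"
],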
[
"print t, sp",
"[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17\n 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35\n 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53\n 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71\n 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89\n 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107\n 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125\n 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143\n 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161\n 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179\n 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197\n 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215\n 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233\n 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251\n 252 253 254 255] [ 1.25934127e-16 +0.00000000e+00j -6.77697413e-14 -1.28000000e+02j\n -2.41638411e-16 -7.33856684e-15j -1.23268770e-15 +4.70179451e-14j\n 6.86986247e-16 -2.88038304e-15j 4.59689368e-16 -1.99536176e-14j\n 1.31685036e-15 -1.08878945e-15j -3.17933903e-15 +1.97382462e-14j\n 8.97593277e-16 -7.94631222e-18j 6.76149902e-16 -1.12638859e-14j\n 1.28705883e-15 -6.69063246e-16j 1.43991360e-15 +1.66553219e-14j\n 4.11839155e-16 -7.32144556e-16j 9.23702204e-16 -6.12427409e-15j\n 8.66029826e-16 +8.27222458e-16j -1.37782079e-14 +6.35047570e-14j\n 1.02038343e-15 -2.52787619e-16j 1.22519290e-15 -4.68861548e-15j\n -1.03363892e-16 -7.61621593e-16j 4.85380573e-16 +1.00042382e-14j\n 1.23619404e-15 -7.94535613e-16j 3.49605994e-15 -7.06070863e-15j\n 5.72279258e-15 +1.36507395e-15j -5.81119942e-16 +1.33941934e-14j\n -3.74057181e-16 +2.65290645e-15j 9.28885390e-16 +3.27872981e-15j\n -4.63313861e-15 +2.70440023e-15j -5.69903430e-15 +5.23738734e-15j\n -2.20012509e-16 +3.31625061e-16j -8.94994885e-16 -1.94552827e-15j\n -1.74306717e-15 -5.24686093e-16j -1.54440889e-14 +2.57770706e-14j\n -1.54508382e-15 -4.38303670e-16j -2.61386993e-16 -3.11315152e-15j\n -1.31146536e-15 -1.50855959e-16j -1.71766332e-15 +2.45221311e-15j\n 2.61064583e-16 -3.60466556e-17j 1.61710045e-16 -1.51667319e-15j\n -3.33840192e-16 -8.39108865e-16j -4.23718810e-15 +3.87532443e-15j\n -8.88534200e-16 -3.51298714e-16j 1.68956571e-15 -3.03233946e-15j\n 3.43710430e-16 +9.10717085e-16j 5.61463750e-17 +3.23717757e-15j\n 2.16680960e-16 +1.00047984e-15j -2.99055202e-15 -1.24694952e-15j\n -3.33611042e-15 -4.24260971e-15j -9.02329729e-15 +2.09314848e-14j\n -2.87539260e-16 -6.65386199e-15j 5.52203953e-16 -1.44471680e-17j\n 2.84116096e-15 -4.37810270e-15j 1.26790743e-15 +5.35054698e-15j\n -4.00811865e-16 -5.05851959e-16j 2.94522106e-15 -2.82772552e-15j\n 1.65245502e-15 -5.68817277e-17j -1.97401559e-15 +3.53924681e-15j\n 1.07909519e-15 -1.15934755e-15j 5.42819086e-16 -8.81586026e-16j\n 7.97234009e-16 -9.91793796e-17j -2.68508549e-15 +5.09489759e-15j\n 3.38254921e-16 +4.45981231e-16j 1.91650182e-15 +4.05663264e-16j\n 1.14534984e-16 -4.14709150e-16j -4.30880957e-14 +5.86197757e-14j\n 5.87370571e-16 -1.05124243e-15j 9.60002769e-16 -4.23272528e-16j\n -1.59321520e-16 -5.55801652e-16j -8.44109646e-16 +2.22044605e-15j\n -5.84579751e-16 -4.03417461e-16j 2.37957339e-15 -9.36064145e-16j\n 2.24152532e-15 +9.93825699e-16j 9.71379103e-16 +3.31983087e-15j\n 4.70319670e-16 +2.54708845e-15j 1.20611416e-15 +3.24111448e-16j\n -1.31331116e-15 +1.40183459e-15j -1.52939849e-15 +5.26513442e-15j\n -2.59033304e-16 +4.51142594e-16j -2.26648919e-15 
-1.75697425e-16j\n -7.16422806e-16 +1.85567116e-16j -1.06695834e-14 +2.30926389e-14j\n -5.85933387e-16 -9.45828247e-16j 1.30555580e-15 -6.07655512e-16j\n -1.20847442e-15 -3.63239335e-16j -3.61127324e-15 +7.13895725e-15j\n 6.06869421e-16 -5.59224740e-16j 7.51098715e-17 -6.17056783e-16j\n -5.04320850e-16 -1.50941892e-15j -2.65177918e-15 -1.91293812e-15j\n 1.75624981e-16 -1.19021061e-15j 1.52698895e-15 -1.72629001e-15j\n 6.03785307e-16 +1.25764673e-16j -3.15235988e-16 +5.59971753e-15j\n -1.87799364e-16 -4.19324229e-16j 1.84248922e-16 +1.95292727e-16j\n -1.45215814e-15 +6.13875145e-16j -2.52830495e-14 +4.36075536e-15j\n 6.91674904e-17 +1.93479804e-15j -9.45646693e-17 -4.00132994e-15j\n 1.35372274e-15 +8.60471213e-17j -5.93093166e-15 +2.22690031e-15j\n 2.63308147e-16 -9.16640620e-16j 2.52266400e-15 -1.21235200e-15j\n 1.42152066e-15 +2.12662205e-16j -5.41328552e-15 -1.88071889e-15j\n 4.77760398e-16 +7.58605813e-16j 7.90510604e-16 +1.74647289e-16j\n 4.16717417e-16 +4.89349709e-16j -4.06859516e-15 +7.60967010e-18j\n 2.92438952e-16 +7.92529745e-16j 1.15908888e-15 +4.63592774e-16j\n 8.79320021e-16 +1.52816431e-15j -1.63442329e-14 +3.40191235e-15j\n 9.88102648e-18 +1.79151015e-15j 8.68080722e-16 -2.83933317e-15j\n -1.33139333e-15 +5.82874180e-16j -3.46204823e-16 +4.62885792e-17j\n 5.48764729e-16 -2.84225935e-16j 1.15001535e-15 -3.27502098e-16j\n 2.15779981e-15 -8.28085638e-16j -6.13190126e-16 +1.21214878e-15j\n -8.58084692e-16 +2.00610302e-16j 8.95109824e-16 +1.60773013e-15j\n -4.24886272e-16 +4.01614534e-16j -3.88077055e-15 +9.17080002e-16j\n 5.25627355e-16 +6.31168493e-16j 1.63476470e-15 -2.13542086e-16j\n -1.28443596e-15 -2.83216248e-16j -6.61807346e-14 +7.10542736e-15j\n -2.55705038e-16 +0.00000000e+00j 1.63307548e-15 +0.00000000e+00j\n -1.28443596e-15 +2.83216248e-16j -3.78620066e-15 -4.99600361e-16j\n 5.25627355e-16 -6.31168493e-16j 1.14813927e-15 -4.24056363e-16j\n -4.24886272e-16 -4.01614534e-16j -3.20265075e-15 -2.93047799e-15j\n -8.58084692e-16 -2.00610302e-16j 1.63360319e-15 -2.10117562e-15j\n 2.15779981e-15 +8.28085638e-16j -4.89607700e-15 -2.94821347e-15j\n 5.48764729e-16 +2.84225935e-16j 2.19538843e-15 +2.26033715e-15j\n -1.33139333e-15 -5.82874180e-16j -1.20018510e-14 -6.66133815e-15j\n 9.88102648e-18 -1.79151015e-15j 8.68080722e-16 +2.83933317e-15j\n 8.79320021e-16 -1.52816431e-15j -1.27290941e-15 +5.55392548e-16j\n 2.92438952e-16 -7.92529745e-16j 4.27200943e-16 -1.08353315e-16j\n 4.16717417e-16 -4.89349709e-16j -5.69460086e-15 +3.16049106e-15j\n 4.77760398e-16 -7.58605813e-16j 9.63785618e-16 +6.96465032e-16j\n 1.42152066e-15 -2.12662205e-16j -4.87360746e-15 +7.25985840e-16j\n 2.63308147e-16 +9.16640620e-16j 2.93458276e-15 +1.28663293e-15j\n 1.35372274e-15 -8.60471213e-17j -1.49292494e-14 -8.19203508e-15j\n 6.91674904e-17 -1.93479804e-15j -9.45646693e-17 +4.00132994e-15j\n -1.45215814e-15 -6.13875145e-16j -3.52326965e-15 -5.00572607e-15j\n -1.87799364e-16 +4.19324229e-16j 1.19205877e-15 -1.55699469e-15j\n 6.03785307e-16 -1.25764673e-16j -3.74750734e-15 +1.60616505e-15j\n 1.75624981e-16 +1.19021061e-15j 6.55876625e-16 +1.55301499e-15j\n -5.04320850e-16 +1.50941892e-15j -2.96859208e-15 -6.92703365e-15j\n 6.06869421e-16 +5.59224740e-16j 3.24622940e-16 +2.44747075e-15j\n -1.20847442e-15 +3.63239335e-16j -9.65133428e-15 -1.73787712e-14j\n -5.85933387e-16 +9.45828247e-16j 1.30555580e-15 +6.07655512e-16j\n -7.16422806e-16 -1.85567116e-16j -4.44213541e-15 -6.27873669e-15j\n -2.59033304e-16 -4.51142594e-16j -8.13591931e-16 +9.67372493e-16j\n -1.31331116e-15 -1.40183459e-15j -4.96317807e-16 
-2.53305464e-15j\n 4.70319670e-16 -2.54708845e-15j 1.69955966e-15 -1.06260481e-15j\n 2.24152532e-15 -9.93825699e-16j 1.76268875e-15 -2.66008631e-15j\n -5.84579751e-16 +4.03417461e-16j 1.47281595e-15 +1.63389592e-15j\n -1.59321520e-16 +5.55801652e-16j -4.30880957e-14 -5.15143483e-14j\n 5.87370571e-16 +1.05124243e-15j 9.60002769e-16 +4.23272528e-16j\n 1.14534984e-16 +4.14709150e-16j -1.28819886e-15 -2.10942375e-15j\n 3.38254921e-16 -4.45981231e-16j 1.20774923e-15 -9.49477887e-16j\n 7.97234009e-16 +9.91793796e-17j -2.88202862e-15 -3.57328654e-15j\n 1.07909519e-15 +1.15934755e-15j 1.03626458e-15 +1.62007939e-15j\n 1.65245502e-15 +5.68817277e-17j -3.35401083e-16 -3.69309088e-15j\n -4.00811865e-16 +5.05851959e-16j 1.85759050e-15 +8.12313165e-17j\n 2.84116096e-15 +4.37810270e-15j -8.00504815e-15 -2.30926389e-14j\n -2.87539260e-16 +6.65386199e-15j 5.52203953e-16 +1.44471680e-17j\n -3.33611042e-15 +4.24260971e-15j -4.08466242e-15 -3.48773330e-15j\n 2.16680960e-16 -1.00047984e-15j 9.50532842e-16 +6.05376474e-16j\n 3.43710430e-16 -9.10717085e-16j -1.30360963e-15 -1.82048659e-15j\n -8.88534200e-16 +3.51298714e-16j 8.18453390e-16 +3.20561448e-15j\n -3.33840192e-16 +8.39108865e-16j -4.78645160e-15 -5.66751812e-15j\n 2.61064583e-16 +3.60466556e-17j 3.04697198e-15 +2.58475735e-15j\n -1.31146536e-15 +1.50855959e-16j -1.01145847e-14 -2.19457909e-14j\n -1.54508382e-15 +4.38303670e-16j -2.61386993e-16 +3.11315152e-15j\n -1.74306717e-15 +5.24686093e-16j -6.63747327e-15 -8.99926076e-15j\n -2.20012509e-16 -3.31625061e-16j -3.05362826e-15 +5.06207883e-15j\n -4.63313861e-15 -2.70440023e-15j -4.74149671e-15 -1.51307872e-14j\n -3.74057181e-16 -2.65290645e-15j 1.10216040e-15 -4.14984213e-15j\n 5.72279258e-15 -1.36507395e-15j -2.24033066e-16 -8.04419189e-15j\n 1.23619404e-15 +7.94535613e-16j 1.04638217e-15 +4.94033663e-15j\n -1.03363892e-16 +7.61621593e-16j -9.43582605e-15 -6.37980449e-14j\n 1.02038343e-15 +2.52787619e-16j 1.22519290e-15 +4.68861548e-15j\n 8.66029826e-16 -8.27222458e-16j 1.25421724e-15 -1.51053104e-14j\n 4.11839155e-16 +7.32144556e-16j 1.55562964e-15 +4.21040187e-15j\n 1.28705883e-15 +6.69063246e-16j -3.06889209e-15 -2.00638965e-14j\n 8.97593277e-16 +7.94631222e-18j 1.41464327e-15 +1.17573314e-14j\n 1.31685036e-15 +1.08878945e-15j -2.70668127e-15 -4.83273143e-14j\n 6.86986247e-16 +2.88038304e-15j 3.25950837e-15 +2.00521332e-14j\n -2.41638411e-16 +7.33856684e-15j 4.13435612e-13 +1.28000000e+02j]\n"
],
[
"print t, sp",
"[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17\n 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35\n 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53\n 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71\n 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89\n 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107\n 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125\n 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143\n 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161\n 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179\n 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197\n 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215\n 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233\n 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251\n 252 253 254 255] [ 1.25934127e-16 +0.00000000e+00j -6.77697413e-14 -1.28000000e+02j\n -2.41638411e-16 -7.33856684e-15j -1.23268770e-15 +4.70179451e-14j\n 6.86986247e-16 -2.88038304e-15j 4.59689368e-16 -1.99536176e-14j\n 1.31685036e-15 -1.08878945e-15j -3.17933903e-15 +1.97382462e-14j\n 8.97593277e-16 -7.94631222e-18j 6.76149902e-16 -1.12638859e-14j\n 1.28705883e-15 -6.69063246e-16j 1.43991360e-15 +1.66553219e-14j\n 4.11839155e-16 -7.32144556e-16j 9.23702204e-16 -6.12427409e-15j\n 8.66029826e-16 +8.27222458e-16j -1.37782079e-14 +6.35047570e-14j\n 1.02038343e-15 -2.52787619e-16j 1.22519290e-15 -4.68861548e-15j\n -1.03363892e-16 -7.61621593e-16j 4.85380573e-16 +1.00042382e-14j\n 1.23619404e-15 -7.94535613e-16j 3.49605994e-15 -7.06070863e-15j\n 5.72279258e-15 +1.36507395e-15j -5.81119942e-16 +1.33941934e-14j\n -3.74057181e-16 +2.65290645e-15j 9.28885390e-16 +3.27872981e-15j\n -4.63313861e-15 +2.70440023e-15j -5.69903430e-15 +5.23738734e-15j\n -2.20012509e-16 +3.31625061e-16j -8.94994885e-16 -1.94552827e-15j\n -1.74306717e-15 -5.24686093e-16j -1.54440889e-14 +2.57770706e-14j\n -1.54508382e-15 -4.38303670e-16j -2.61386993e-16 -3.11315152e-15j\n -1.31146536e-15 -1.50855959e-16j -1.71766332e-15 +2.45221311e-15j\n 2.61064583e-16 -3.60466556e-17j 1.61710045e-16 -1.51667319e-15j\n -3.33840192e-16 -8.39108865e-16j -4.23718810e-15 +3.87532443e-15j\n -8.88534200e-16 -3.51298714e-16j 1.68956571e-15 -3.03233946e-15j\n 3.43710430e-16 +9.10717085e-16j 5.61463750e-17 +3.23717757e-15j\n 2.16680960e-16 +1.00047984e-15j -2.99055202e-15 -1.24694952e-15j\n -3.33611042e-15 -4.24260971e-15j -9.02329729e-15 +2.09314848e-14j\n -2.87539260e-16 -6.65386199e-15j 5.52203953e-16 -1.44471680e-17j\n 2.84116096e-15 -4.37810270e-15j 1.26790743e-15 +5.35054698e-15j\n -4.00811865e-16 -5.05851959e-16j 2.94522106e-15 -2.82772552e-15j\n 1.65245502e-15 -5.68817277e-17j -1.97401559e-15 +3.53924681e-15j\n 1.07909519e-15 -1.15934755e-15j 5.42819086e-16 -8.81586026e-16j\n 7.97234009e-16 -9.91793796e-17j -2.68508549e-15 +5.09489759e-15j\n 3.38254921e-16 +4.45981231e-16j 1.91650182e-15 +4.05663264e-16j\n 1.14534984e-16 -4.14709150e-16j -4.30880957e-14 +5.86197757e-14j\n 5.87370571e-16 -1.05124243e-15j 9.60002769e-16 -4.23272528e-16j\n -1.59321520e-16 -5.55801652e-16j -8.44109646e-16 +2.22044605e-15j\n -5.84579751e-16 -4.03417461e-16j 2.37957339e-15 -9.36064145e-16j\n 2.24152532e-15 +9.93825699e-16j 9.71379103e-16 +3.31983087e-15j\n 4.70319670e-16 +2.54708845e-15j 1.20611416e-15 +3.24111448e-16j\n -1.31331116e-15 +1.40183459e-15j -1.52939849e-15 +5.26513442e-15j\n -2.59033304e-16 +4.51142594e-16j -2.26648919e-15 
-1.75697425e-16j\n -7.16422806e-16 +1.85567116e-16j -1.06695834e-14 +2.30926389e-14j\n -5.85933387e-16 -9.45828247e-16j 1.30555580e-15 -6.07655512e-16j\n -1.20847442e-15 -3.63239335e-16j -3.61127324e-15 +7.13895725e-15j\n 6.06869421e-16 -5.59224740e-16j 7.51098715e-17 -6.17056783e-16j\n -5.04320850e-16 -1.50941892e-15j -2.65177918e-15 -1.91293812e-15j\n 1.75624981e-16 -1.19021061e-15j 1.52698895e-15 -1.72629001e-15j\n 6.03785307e-16 +1.25764673e-16j -3.15235988e-16 +5.59971753e-15j\n -1.87799364e-16 -4.19324229e-16j 1.84248922e-16 +1.95292727e-16j\n -1.45215814e-15 +6.13875145e-16j -2.52830495e-14 +4.36075536e-15j\n 6.91674904e-17 +1.93479804e-15j -9.45646693e-17 -4.00132994e-15j\n 1.35372274e-15 +8.60471213e-17j -5.93093166e-15 +2.22690031e-15j\n 2.63308147e-16 -9.16640620e-16j 2.52266400e-15 -1.21235200e-15j\n 1.42152066e-15 +2.12662205e-16j -5.41328552e-15 -1.88071889e-15j\n 4.77760398e-16 +7.58605813e-16j 7.90510604e-16 +1.74647289e-16j\n 4.16717417e-16 +4.89349709e-16j -4.06859516e-15 +7.60967010e-18j\n 2.92438952e-16 +7.92529745e-16j 1.15908888e-15 +4.63592774e-16j\n 8.79320021e-16 +1.52816431e-15j -1.63442329e-14 +3.40191235e-15j\n 9.88102648e-18 +1.79151015e-15j 8.68080722e-16 -2.83933317e-15j\n -1.33139333e-15 +5.82874180e-16j -3.46204823e-16 +4.62885792e-17j\n 5.48764729e-16 -2.84225935e-16j 1.15001535e-15 -3.27502098e-16j\n 2.15779981e-15 -8.28085638e-16j -6.13190126e-16 +1.21214878e-15j\n -8.58084692e-16 +2.00610302e-16j 8.95109824e-16 +1.60773013e-15j\n -4.24886272e-16 +4.01614534e-16j -3.88077055e-15 +9.17080002e-16j\n 5.25627355e-16 +6.31168493e-16j 1.63476470e-15 -2.13542086e-16j\n -1.28443596e-15 -2.83216248e-16j -6.61807346e-14 +7.10542736e-15j\n -2.55705038e-16 +0.00000000e+00j 1.63307548e-15 +0.00000000e+00j\n -1.28443596e-15 +2.83216248e-16j -3.78620066e-15 -4.99600361e-16j\n 5.25627355e-16 -6.31168493e-16j 1.14813927e-15 -4.24056363e-16j\n -4.24886272e-16 -4.01614534e-16j -3.20265075e-15 -2.93047799e-15j\n -8.58084692e-16 -2.00610302e-16j 1.63360319e-15 -2.10117562e-15j\n 2.15779981e-15 +8.28085638e-16j -4.89607700e-15 -2.94821347e-15j\n 5.48764729e-16 +2.84225935e-16j 2.19538843e-15 +2.26033715e-15j\n -1.33139333e-15 -5.82874180e-16j -1.20018510e-14 -6.66133815e-15j\n 9.88102648e-18 -1.79151015e-15j 8.68080722e-16 +2.83933317e-15j\n 8.79320021e-16 -1.52816431e-15j -1.27290941e-15 +5.55392548e-16j\n 2.92438952e-16 -7.92529745e-16j 4.27200943e-16 -1.08353315e-16j\n 4.16717417e-16 -4.89349709e-16j -5.69460086e-15 +3.16049106e-15j\n 4.77760398e-16 -7.58605813e-16j 9.63785618e-16 +6.96465032e-16j\n 1.42152066e-15 -2.12662205e-16j -4.87360746e-15 +7.25985840e-16j\n 2.63308147e-16 +9.16640620e-16j 2.93458276e-15 +1.28663293e-15j\n 1.35372274e-15 -8.60471213e-17j -1.49292494e-14 -8.19203508e-15j\n 6.91674904e-17 -1.93479804e-15j -9.45646693e-17 +4.00132994e-15j\n -1.45215814e-15 -6.13875145e-16j -3.52326965e-15 -5.00572607e-15j\n -1.87799364e-16 +4.19324229e-16j 1.19205877e-15 -1.55699469e-15j\n 6.03785307e-16 -1.25764673e-16j -3.74750734e-15 +1.60616505e-15j\n 1.75624981e-16 +1.19021061e-15j 6.55876625e-16 +1.55301499e-15j\n -5.04320850e-16 +1.50941892e-15j -2.96859208e-15 -6.92703365e-15j\n 6.06869421e-16 +5.59224740e-16j 3.24622940e-16 +2.44747075e-15j\n -1.20847442e-15 +3.63239335e-16j -9.65133428e-15 -1.73787712e-14j\n -5.85933387e-16 +9.45828247e-16j 1.30555580e-15 +6.07655512e-16j\n -7.16422806e-16 -1.85567116e-16j -4.44213541e-15 -6.27873669e-15j\n -2.59033304e-16 -4.51142594e-16j -8.13591931e-16 +9.67372493e-16j\n -1.31331116e-15 -1.40183459e-15j -4.96317807e-16 
-2.53305464e-15j\n 4.70319670e-16 -2.54708845e-15j 1.69955966e-15 -1.06260481e-15j\n 2.24152532e-15 -9.93825699e-16j 1.76268875e-15 -2.66008631e-15j\n -5.84579751e-16 +4.03417461e-16j 1.47281595e-15 +1.63389592e-15j\n -1.59321520e-16 +5.55801652e-16j -4.30880957e-14 -5.15143483e-14j\n 5.87370571e-16 +1.05124243e-15j 9.60002769e-16 +4.23272528e-16j\n 1.14534984e-16 +4.14709150e-16j -1.28819886e-15 -2.10942375e-15j\n 3.38254921e-16 -4.45981231e-16j 1.20774923e-15 -9.49477887e-16j\n 7.97234009e-16 +9.91793796e-17j -2.88202862e-15 -3.57328654e-15j\n 1.07909519e-15 +1.15934755e-15j 1.03626458e-15 +1.62007939e-15j\n 1.65245502e-15 +5.68817277e-17j -3.35401083e-16 -3.69309088e-15j\n -4.00811865e-16 +5.05851959e-16j 1.85759050e-15 +8.12313165e-17j\n 2.84116096e-15 +4.37810270e-15j -8.00504815e-15 -2.30926389e-14j\n -2.87539260e-16 +6.65386199e-15j 5.52203953e-16 +1.44471680e-17j\n -3.33611042e-15 +4.24260971e-15j -4.08466242e-15 -3.48773330e-15j\n 2.16680960e-16 -1.00047984e-15j 9.50532842e-16 +6.05376474e-16j\n 3.43710430e-16 -9.10717085e-16j -1.30360963e-15 -1.82048659e-15j\n -8.88534200e-16 +3.51298714e-16j 8.18453390e-16 +3.20561448e-15j\n -3.33840192e-16 +8.39108865e-16j -4.78645160e-15 -5.66751812e-15j\n 2.61064583e-16 +3.60466556e-17j 3.04697198e-15 +2.58475735e-15j\n -1.31146536e-15 +1.50855959e-16j -1.01145847e-14 -2.19457909e-14j\n -1.54508382e-15 +4.38303670e-16j -2.61386993e-16 +3.11315152e-15j\n -1.74306717e-15 +5.24686093e-16j -6.63747327e-15 -8.99926076e-15j\n -2.20012509e-16 -3.31625061e-16j -3.05362826e-15 +5.06207883e-15j\n -4.63313861e-15 -2.70440023e-15j -4.74149671e-15 -1.51307872e-14j\n -3.74057181e-16 -2.65290645e-15j 1.10216040e-15 -4.14984213e-15j\n 5.72279258e-15 -1.36507395e-15j -2.24033066e-16 -8.04419189e-15j\n 1.23619404e-15 +7.94535613e-16j 1.04638217e-15 +4.94033663e-15j\n -1.03363892e-16 +7.61621593e-16j -9.43582605e-15 -6.37980449e-14j\n 1.02038343e-15 +2.52787619e-16j 1.22519290e-15 +4.68861548e-15j\n 8.66029826e-16 -8.27222458e-16j 1.25421724e-15 -1.51053104e-14j\n 4.11839155e-16 +7.32144556e-16j 1.55562964e-15 +4.21040187e-15j\n 1.28705883e-15 +6.69063246e-16j -3.06889209e-15 -2.00638965e-14j\n 8.97593277e-16 +7.94631222e-18j 1.41464327e-15 +1.17573314e-14j\n 1.31685036e-15 +1.08878945e-15j -2.70668127e-15 -4.83273143e-14j\n 6.86986247e-16 +2.88038304e-15j 3.25950837e-15 +2.00521332e-14j\n -2.41638411e-16 +7.33856684e-15j 4.13435612e-13 +1.28000000e+02j]\n"
],
[
"x = np.array([1,1,1,1,1,1,1,1])\nprint x\nabs(np.fft.fft(x))",
"[1 1 1 1 1 1 1 1]\n"
],
[
"x = np.array([1,0,0,0,0,0,0,0])\nprint x\nprint abs(np.fft.fft(x))",
"[1 0 0 0 0 0 0 0]\n[ 1. 1. 1. 1. 1. 1. 1. 1.]\n"
],
[
"x = np.array([1,1,-2,-2,1,1,-1,-1])\nx = x/8.0\nprint x\nabs(np.fft.fft(x))",
"[ 0.125 0.125 -0.25 -0.25 0.125 0.125 -0.125 -0.125]\n"
],
[
"a = np.mgrid[:5, :5][0]\n\na",
"_____no_output_____"
],
[
"np.fft.fft2(a)",
"_____no_output_____"
],
[
"t = np.arange(256)",
"_____no_output_____"
],
[
"t.shape[0]",
"_____no_output_____"
],
[
"np.fft.fftfreq(t.shape[-1], 0.01)",
"_____no_output_____"
],
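[
"# Added example (not from the original notebook): pairing np.fft.fft with the\n# np.fft.fftfreq call above so the spectrum is plotted against physical\n# frequencies in Hz, assuming the same 0.01 s sample spacing as above.\nsig = np.sin(2 * np.pi * 5.0 * t * 0.01)  # a 5 Hz sine sampled every 0.01 s\nfreqs = np.fft.fftfreq(t.shape[-1], 0.01)\nplt.plot(freqs, np.abs(np.fft.fft(sig)))",
"_____no_output_____"
],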
[
"x = np.array([1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0])\nx = x/16.0\nprint (x)\nprint(np.fft.fft(x))\nplt.plot(abs(np.fft.fft(x)))",
"[ 0.0625 0.0625 0.0625 0.0625 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. ]\n[ 0.25000000+0.j 0.18835436-0.12585436j 0.06250000-0.15088835j\n -0.01551893-0.07801893j 0.00000000+0.j 0.05213058+0.01036942j\n 0.06250000-0.02588835j 0.02503399-0.03746601j 0.00000000+0.j\n 0.02503399+0.03746601j 0.06250000+0.02588835j 0.05213058-0.01036942j\n 0.00000000+0.j -0.01551893+0.07801893j 0.06250000+0.15088835j\n 0.18835436+0.12585436j]\n"
],
[
"plt.plot(abs(np.fft.fft(x)))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52b996016d76fa7fb9a9500fdab577751268447
| 48,937 |
ipynb
|
Jupyter Notebook
|
coverage-sdap/coverage-sdap.ipynb
|
ceos-coverage/coverage-jupyter-examples
|
00aef62347e18a6065e616e142f338f396419a2c
|
[
"Apache-2.0"
] | 1 |
2022-03-24T02:18:30.000Z
|
2022-03-24T02:18:30.000Z
|
coverage-sdap/coverage-sdap.ipynb
|
ceos-coverage/coverage-jupyter-examples
|
00aef62347e18a6065e616e142f338f396419a2c
|
[
"Apache-2.0"
] | null | null | null |
coverage-sdap/coverage-sdap.ipynb
|
ceos-coverage/coverage-jupyter-examples
|
00aef62347e18a6065e616e142f338f396419a2c
|
[
"Apache-2.0"
] | null | null | null | 40.985762 | 436 | 0.5635 |
[
[
[
"<table><tr>\n <td><img src=\"logos/JPL-NASA-logo_583x110.png\" alt=\"JPL/NASA logo\" style=\"height: 75px\"/></td>\n <td><img src=\"logos/CEOS-LOGO.png\" alt=\"CEOS logo\" style=\"height: 75px\"/></td>\n <td><img src=\"logos/CoverageLogoFullClear.png\" alt=\"COVERAGE logo\" style=\"height: 100px\"/></td>\n</tr></table>",
"_____no_output_____"
],
[
"# _Analytics Examples for COVERAGE_",
"_____no_output_____"
],
[
"# Important Notes \nWhen you first connected you should have seen two notebook folders, `coverage` and `work`. The original version of this notebook is in the `coverage` folder and is read-only. If you would like to modify the code samples in this notebook, please first click `File`->`Save as...` to save your own copy in the `work` folder instead, and make your modifications to that copy. \n\nWe don't yet have resources in place to support a true multi-user environment for notebooks. This means that all saved notebooks are visible to all users. Thus, it would help to include your own unique identifier in the notebook name to avoid conflicts with others. \n\nFurthermore, we do not guarantee to preserve the saved notebooks for any period of time. If you would like to keep your notebook, please remember to click `File`->`Download as`->`Notebook (.ipynb)` to download your own copy of the notebook at the end of each editting session.",
"_____no_output_____"
],
[
"# Notebook Setup\n\nIn the cell below are a few functions that help with plotting data using matplotlib. You shouldn't need to modify or pay much attention to this cell. Just run the cell to define the functions so that they can be used in the rest of the notebook.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n#######################################################################################\n# In some jupyter deployments you will get an error about PROJ_LIB not being defined.\n# In that case, uncomment these lines and set the directory to the location of your\n# proj folder.\n# import os\n# import sys\n# # Find where we are on the computer and make sure it is the pyICM directory\n# HomeDir = os.path.expanduser('~') # get the home directory\n\n# ICMDir = HomeDir + \"Desktop/AIST_Project/SDAP_Jupyter\"\n# # Navigate to the home directory\n# os.chdir(ICMDir)\n# print('Moved to Directory',ICMDir)\n\n# module_path = os.path.join(ICMDir,'code')\n# print('Code Directory is',module_path)\n# print('Adding to the system path')\n# if module_path not in sys.path:\n# sys.path.append(module_path)\n#######################################################################################\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.colors as mcolors\nimport matplotlib.ticker as mticker\n# from mpl_toolkits.basemap import Basemap\nimport cartopy.crs as ccrs #added by B Clark because Basemap is deprecated\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nimport numpy as np\nimport types\nimport math\nimport sys\nimport time\nimport requests\nfrom datetime import datetime\nimport itertools\nfrom shapely.geometry import box\nfrom pprint import pprint, pformat\nimport textwrap\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# ISO-8601 date format\ndt_format = \"%Y-%m-%dT%H:%M:%SZ\"\n\ndef show_plot(x_data, y_data, x_label=None, y_label=None, title=None):\n \"\"\"\n Display a simple line plot.\n \n :param x_data: Numpy array containing data for the X axis\n :param y_data: Numpy array containing data for the Y axis\n :param x_label: Label applied to X axis\n :param y_label: Label applied to Y axis\n \"\"\"\n plt.figure(figsize=(6,3), dpi=100)\n plt.plot([datetime.fromtimestamp(x_val) for x_val in x_data], y_data, 'b-', marker='|', markersize=2.0, mfc='b')\n plt.grid(b=True, which='major', color='k', linestyle='-')\n if title is not None:\n plt.title(title)\n if x_label is not None:\n plt.xlabel(x_label)\n if y_label is not None:\n plt.ylabel (y_label)\n plt.xticks(rotation=45)\n ts_range = x_data[-1] - x_data[0]\n\n # Define the time formatting\n if ts_range > 189216000: # 6 years\n dtFmt = mdates.DateFormatter('%Y')\n elif ts_range > 15552000: # 6 months\n dtFmt = mdates.DateFormatter('%b %Y')\n else: # < 6 months\n dtFmt = mdates.DateFormatter('%b %-d, %Y')\n\n plt.gca().xaxis.set_major_formatter(dtFmt)\n plt.show()\n\ndef plot_box(bbox):\n \"\"\"\n Display a Green bounding box on an image of the blue marble.\n \n :param bbox: Shapely Polygon that defines the bounding box to display\n \"\"\"\n min_lon, min_lat, max_lon, max_lat = bbox.bounds\n import matplotlib.pyplot as plt1\n import cartopy.crs as ccrs #added by B Clark because Basemap is deprecated\n # modified 11/30/2021 to use Cartopy toolbox B Clark NASA GSFC\n # from matplotlib.patches import Polygon\n # from mpl_toolkits.basemap import Basemap\n from shapely.geometry.polygon import Polygon\n # map = Basemap()\n # map.bluemarble(scale=0.5)\n # poly = Polygon([(min_lon,min_lat),(min_lon,max_lat),(max_lon,max_lat),(max_lon,min_lat)],\n # facecolor=(0,0,0,0.0),edgecolor='green',linewidth=2)\n # plt1.gca().add_patch(poly)\n # plt1.gcf().set_size_inches(10,15)\n ax = plt1.axes(projection=ccrs.PlateCarree())\n ax.stock_img()\n # plt.show() \n poly = 
Polygon(((min_lon,min_lat),(min_lon,max_lat),(max_lon,max_lat),(max_lon,min_lat),(min_lon,min_lat)))\n ax.add_geometries([poly],crs=ccrs.PlateCarree(),facecolor='b', edgecolor='red', alpha=0.8)\n # ax.fill(x, y, color='coral', alpha=0.4)\n # plt1.gca().add_patch(poly)\n # plt1.gcf().set_size_inches(10,15)\n plt1.show()\n\ndef show_plot_two_series(x_data_a, x_data_b, y_data_a, y_data_b, x_label, \n y_label_a, y_label_b, series_a_label, series_b_label,\n title=''):\n \"\"\"\n Display a line plot of two series\n \n :param x_data_a: Numpy array containing data for the Series A X axis\n :param x_data_b: Numpy array containing data for the Series B X axis\n :param y_data_a: Numpy array containing data for the Series A Y axis\n :param y_data_b: Numpy array containing data for the Series B Y axis\n :param x_label: Label applied to X axis\n :param y_label_a: Label applied to Y axis for Series A\n :param y_label_b: Label applied to Y axis for Series B\n :param series_a_label: Name of Series A\n :param series_b_label: Name of Series B\n \"\"\" \n font_size=12\n plt.rc('font', size=font_size) # controls default text sizes\n plt.rc('axes', titlesize=font_size) # fontsize of the axes title\n plt.rc('axes', labelsize=font_size) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=font_size) # fontsize of the tick labels\n plt.rc('ytick', labelsize=font_size) # fontsize of the tick labels\n plt.rc('legend', fontsize=font_size) # legend fontsize\n plt.rc('figure', titlesize=font_size) # fontsize of the figure title\n fig, ax1 = plt.subplots(figsize=(10,5), dpi=100)\n series_a, = ax1.plot(x_data_a, y_data_a, 'b-', marker='|', markersize=2.0, mfc='b', label=series_a_label)\n ax1.set_ylabel(y_label_a, color='b')\n ax1.tick_params('y', colors='b')\n ax1.set_ylim(min(0, *y_data_a), max(y_data_a)+.1*max(y_data_a))\n ax1.set_xlabel(x_label)\n \n ax2 = ax1.twinx()\n series_b, = ax2.plot(x_data_b, y_data_b, 'r-', marker='|', markersize=2.0, mfc='r', label=series_b_label)\n ax2.set_ylabel(y_label_b, color='r')\n ax2.set_ylim(min(0, *y_data_b), max(y_data_b)+.1*max(y_data_b))\n ax2.tick_params('y', colors='r')\n \n plt.grid(b=True, which='major', color='k', linestyle='-')\n plt.legend(handles=(series_a, series_b), bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)\n plt.title(title)\n plt.show()\n\ndef ts_plot_two(ts_json1, ts_json2, dataset1, dataset2, units1, units2,\n title='', t_name='time', val_name='mean'):\n t1 = np.array([ts[0][t_name] for ts in ts_json1[\"data\"]])\n t2 = np.array([ts[0][t_name] for ts in ts_json2[\"data\"]])\n vals1 = np.array([ts[0][val_name] for ts in ts_json1[\"data\"]])\n vals2 = np.array([ts[0][val_name] for ts in ts_json2[\"data\"]])\n show_plot_two_series(t1, t2, vals1, vals2, \"time (sec since 1970-01-01T00:00:00)\",\n units1, units2, dataset1, dataset2, title=title)\n\ndef scatter_plot(ts_json1, ts_json2, t_name=\"time\", val_name=\"mean\",\n title=\"\", xlabel=\"\", ylabel=\"\"):\n times1 = np.array([ts[0][t_name] for ts in ts_json1[\"data\"]])\n times2 = np.array([ts[0][t_name] for ts in ts_json2[\"data\"]])\n vals1 = np.array([ts[0][val_name] for ts in ts_json1[\"data\"]])\n vals2 = np.array([ts[0][val_name] for ts in ts_json2[\"data\"]])\n vals_x = []\n vals_y = []\n for i1,t1 in enumerate(times1):\n i = (np.abs(times2-times1[i1])).argmin()\n if np.abs(times1[i1]-times2[i]) < 86400: # 24 hrs\n vals_x.append(vals1[i1])\n vals_y.append(vals2[i])\n plt.scatter(vals_x, vals_y)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\ndef 
roundBorders(borders, borderSlop=10.):\n b0 = roundBorder(borders[0], 'down', borderSlop, 0.)\n b1 = roundBorder(borders[1], 'down', borderSlop, -90.)\n b2 = roundBorder(borders[2], 'up', borderSlop, 360.)\n b3 = roundBorder(borders[3], 'up', borderSlop, 90.)\n return [b0, b1, b2, b3]\n\ndef roundBorder(val, direction, step, end):\n if direction == 'up':\n rounder = math.ceil\n slop = step\n else:\n rounder = math.floor\n slop = -step\n### v = rounder(val/step) * step + slop \n v = rounder(val/step) * step\n if abs(v - end) < step+1.: v = end\n return v\n\ndef normalizeLon(lon):\n if lon < 0.: return lon + 360.\n if lon > 360.: return lon - 360.\n return lon\n\ndef normalizeLons(lons):\n return np.array([normalizeLon(lon) for lon in lons])\n\ndef ensureItems(d1, d2):\n for key in d2.keys():\n if key not in d1: d1[key] = d2[key]\n\nCmdOptions = {'MCommand': ['title', 'xlabel', 'ylabel', 'xlim', 'ylim', 'show\\\n'],\n \t 'plot': ['label', 'linewidth', 'legend', 'axis'],\n 'map.plot': ['label', 'linewidth', 'axis'],\n 'map.scatter': ['norm', 'alpha', 'linewidths', 'faceted', 'hold'\\\n],\n 'savefig': ['dpi', 'orientation']\n }\n\ndef die(*s): warn('Error,', *s); sys.exit()\n \ndef evalKeywordCmds(options, cmdOptions=CmdOptions):\n for option in options:\n if option in cmdOptions['MCommand']:\n args = options[option]\n if args:\n if args is True:\n args = ''\n else:\n args = \"'\" + args + \"'\"\n if option in cmdOptions:\n args += dict2kwargs( validCmdOptions(options, cmdOptions[option]) )\n try:\n eval('plt.' + option + '(%s)' % args)\n except:\n die('failed eval of keyword command option failed: %s=%s' % (option, args))\n\ndef validCmdOptions(options, cmd, possibleOptions=CmdOptions):\n return dict([(option, options[option]) for option in options.keys()\n if option in possibleOptions[cmd]])\n\ndef dict2kwargs(d):\n args = [',%s=%s' % (kw, d[kw]) for kw in d]\n return ', '.join(args)\n\ndef imageMap(lons, lats, vals, vmin=None, vmax=None, \n imageWidth=None, imageHeight=None, outFile=None,\n projection='cyl', cmap=plt.cm.jet, logColors=False, makeFigure=False,\n borders=[0., -90., 360., 90.], autoBorders=True, borderSlop=10.,\n meridians=[0, 360, 60], parallels=[-60, 90, 30], title='', normalizeLongs=True,\n **options):\n if normalizeLongs:\n lons = normalizeLons(lons)\n if vmin == 'auto': vmin = None\n if vmax == 'auto': vmax = None\n if imageWidth is not None: makeFigure = True\n if projection is None or projection == '': projection = 'cyl'\n if cmap is None or cmap == '': cmap = plt.cm.jet\n #if isinstance(cmap, types.StringType) and cmap != '':\n if isinstance(cmap, str) and cmap != '':\n try:\n cmap = eval('plt.cm.' 
+ cmap)\n except:\n cmap = plt.cm.jet\n\n ensureItems(options, { \\\n 'title': title, 'dpi': 100,\n 'imageWidth': imageWidth or 1024, 'imageHeight': imageHeight or 768})\n if autoBorders:\n borders = [min(lons), min(lats), max(lons), max(lats)]\n borders = roundBorders(borders, borderSlop)\n\n #m = Basemap(borders[0], borders[1], borders[2], borders[3], \\\n # projection=projection, lon_0=np.average([lons[0], lons[-1]]))\n\n if makeFigure:\n dpi = float(options['dpi'])\n width = float(imageWidth) / dpi\n height = width\n #if imageHeight is None:\n # height = width * m.aspect\n #else:\n # height = float(imageHeight) / dpi\n #plt.figure(figsize=(width,height)).add_axes([0.1,0.1,0.8,0.8], frameon=True)\n plt.figure(figsize=(width,height))\n m = plt.axes(projection=ccrs.PlateCarree())\n #m.set_extent([meridians[0], meridians[1], parallels[0], parallels[1]],\n # crs=ccrs.PlateCarree())\n gl = m.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = True\n gl.ylabels_right = False\n gl.xlines = True\n gl.ylines = True\n gl.xlocator = mticker.FixedLocator(np.arange(meridians[0], meridians[1]+meridians[2], meridians[2]))\n gl.ylocator = mticker.FixedLocator(np.arange(parallels[0], parallels[1]+parallels[2], parallels[2]))\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 12, 'color': 'black'}\n gl.ylabel_style = {'size': 12, 'color': 'black'}\n \n if vmin is not None or vmax is not None: \n if vmin is None:\n vmin = np.min(vals)\n else:\n vmin = float(vmin)\n if vmax is None:\n vmax = np.max(vals)\n else:\n vmax = float(vmax)\n #vrange = (vmax - vmin) / 255.\n #levels = np.arange(vmin, vmax, vrange/30.)\n levels = np.linspace(vmin, vmax, 256)\n else:\n levels = 30\n\n if logColors:\n norm = mcolors.LogNorm(vmin=vmin, vmax=vmax)\n else:\n norm = None\n # x, y = m(*np.meshgrid(lons,lats))\n x, y = np.meshgrid(lons,lats)\n c = m.contourf(x, y, vals, levels, cmap=cmap, colors=None, norm=norm)\n # m.drawcoastlines()\n m.coastlines()\n #m.drawmeridians(range(meridians[0], meridians[1], meridians[2]), labels=[0,0,0,1])\n #m.drawparallels(range(parallels[0], parallels[1], parallels[2]), labels=[1,1,1,1])\n plt.colorbar(c, ticks=np.linspace(vmin,vmax,7), shrink=0.6)\n evalKeywordCmds(options)\n if outFile:\n plt.savefig(outFile, **validCmdOptions(options, 'savefig'))\n \ndef arr2d_from_json(js, var_name):\n return np.array([[js[i][j][var_name] for j in range(len(js[0]))] for i in range(len(js))])\n\ndef arr1d_from_json(js, var_name):\n return np.array([js[i][var_name] for i in range(len(js))])\n\ndef plot_map(map, val_key=\"mean\", cnt_key=\"cnt\", lon_key=\"lon\", lat_key=\"lat\", fill=-9999, grid_line_sep=10,\n border_slop=1, log_colors=False, title='', vmin=None, vmax=None, \n normalize_lons=False, image_width=1000, **options):\n # Parse values, longitudes and latitudes from JSON response.\n vals = arr2d_from_json(map, val_key)\n cnts = arr2d_from_json(map, cnt_key)\n lons = arr1d_from_json(map[0], lon_key)\n lats = arr1d_from_json([map[i][0] for i in range(len(map))], lat_key)\n \n # If cnt is 0, set value to fill\n vals[cnts==0] = fill\n \n # Plot time time-averaged map as an image.\n print(\"Creating plot of the results.\")\n print(\"This will take a minute. 
Please wait...\")\n min_val = np.min(vals[vals != fill])\n if vmin is None:\n vmin = min_val\n max_val = np.max(vals[vals != fill])\n if vmax is None:\n vmax = max_val\n min_lon = math.floor(np.min(lons)) - grid_line_sep\n max_lon = math.ceil(np.max(lons))\n min_lat = math.floor(np.min(lats)) - grid_line_sep\n max_lat = math.ceil(np.max(lats))\n imageMap(lons, lats, vals, imageWidth=image_width, vmin=vmin, vmax=vmax, logColors=log_colors,\n meridians=[min_lon, max_lon, grid_line_sep], \n parallels=[min_lat, max_lat, grid_line_sep], borderSlop=border_slop, \n title=title, normalizeLongs=normalize_lons, **options)\n \ndef ts_plot(ts_json, t_name='time', val_name='mean', \n title='', units=''):\n t = np.array([ts[0][t_name] for ts in ts_json[\"data\"]])\n vals = np.array([ts[0][val_name] for ts in ts_json[\"data\"]])\n show_plot(t, vals, title=textwrap.fill(title,64),\n y_label=textwrap.fill(units,32))\n\ndef plot_hovmoller(hm, time_key=\"time\", val_key=\"mean\",\n coord_series_key=\"lats\", coord_point_key=\"latitude\",\n coord_axis_vert=True, fill=-9999.,\n hovfig=None, subplot=111, add_color_bar=True,\n title=\"\"):\n times = [d[time_key] for d in hm]\n times = mdates.epoch2num(times)\n coords = [[d[coord_point_key] for d in hvals[coord_series_key]]\n for hvals in hm]\n coords_flat = np.array(sorted(list(set(itertools.chain(*coords)))))\n coords_delta = np.median(coords_flat[1:] - coords_flat[:-1])\n coords_min = np.amin(coords_flat)\n coords_max = np.amax(coords_flat)\n vals_fill = np.full((len(hm),len(coords_flat)), fill, dtype=np.float64)\n t_ind = 0\n for hvals in hm:\n cur_vals = np.array([d[val_key] for d in hvals[coord_series_key]])\n coords = np.array([d[coord_point_key] for d in hvals[coord_series_key]])\n coords_inds = np.round((coords - coords_min) /\n coords_delta).astype(int)\n vals_fill[t_ind, coords_inds] = cur_vals\n t_ind += 1\n vals = np.ma.array(data=vals_fill, mask=vals_fill == fill)\n extent = [np.min(times), np.max(times), coords_min, coords_max]\n dtFmt = mdates.DateFormatter('%b %Y') # define the formatting\n if hovfig is None:\n fig = plt.figure(figsize=(16,6))\n else:\n fig = hovfig\n ax = fig.add_subplot(subplot)\n ax.set_title(title)\n if coord_axis_vert:\n vals = np.transpose(vals)\n ax.xaxis.set_major_formatter(dtFmt)\n ax.set_ylabel(coord_point_key)\n plt.xticks(rotation=45)\n else:\n extent = [extent[2], extent[3], extent[0], extent[1]]\n ax.yaxis.set_major_formatter(dtFmt)\n ax.set_xlabel(coord_point_key)\n cax = ax.imshow(vals, origin='lower', extent=extent)\n ax.set_aspect('auto')\n if add_color_bar:\n fig.colorbar(cax, ticks=np.linspace(np.min(vals), np.max(vals), 7),\n orientation='vertical')\n return fig\n\ndef compute_ts_and_tam_no_plot(dataset, bbox, start_time, end_time, base_url=\"localhost\",\n seasonal_filter=\"false\"):\n url_params = 'ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}'.\\\n format(dataset, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format))\n ts_url = '{}/timeSeriesSpark?{}&seasonalFilter={}'.format(base_url, url_params,\n seasonal_filter)\n tam_url = '{}/timeAvgMapSpark?{}'.format(base_url, url_params)\n\n # Display some information about the job\n print(ts_url); print()\n print(tam_url); print()\n\n # Query SDAP to compute the time series\n print(\"Computing time series...\")\n start = time.perf_counter()\n ts_json = requests.get(ts_url, verify=False).json()\n print(\"Area-averaged time series took {} seconds\".format(time.perf_counter() - start))\n print()\n \n # Query SDAP to 
compute the time averaged map\n print(\"Computing time averaged map...\")\n start = time.perf_counter()\n tam_json = requests.get(tam_url, verify=False).json()\n print(\"Time averaged map took {} seconds\".format(time.perf_counter() - start))\n return ts_json, tam_json\n\ndef compute_ts_and_tam(dataset, bbox, start_time, end_time, base_url=\"localhost\",\n seasonal_filter=\"false\", title='', grid_line_sep=5,\n units=None, log_colors=False, normalize_lons=False, **options):\n ts_json, tam_json = compute_ts_and_tam_no_plot(dataset, bbox, start_time, end_time,\n base_url=base_url,\n seasonal_filter=seasonal_filter)\n print()\n print(\"Plot of area-average time series:\")\n ts_plot(ts_json, val_name='mean', title=title, units=units)\n if seasonal_filter == \"true\":\n print(\"Plot of time series of difference with climatology:\")\n ts_plot(ts_json, val_name='meanSeasonal',\n title=title, units=units)\n print()\n \n # Query SDAP to compute the time averaged map\n tam = tam_json[\"data\"]\n plot_map(tam, log_colors=log_colors, grid_line_sep=grid_line_sep, title=title, \n normalize_lons=normalize_lons, **options)\n\ndef show_sdap_json(j, nh=20, nt=10):\n out_str = pformat(j)\n for line in out_str.splitlines()[:nh]:\n print(line)\n print(\"\\t\\t.\\n\"*3)\n for line in out_str.splitlines()[-nt:]:\n print(line)\n\nprint('Done with plotting setup.')",
"_____no_output_____"
]
],
[
[
"# Science Data Analytics Platform (SDAP)\nSDAP (https://sdap.apache.org/) provides advanced analytics capabilities to support NASA's New Observing Strategies (NOS) and Analytic Collaborative Frameworks (ACF) thrusts. In this demonstration we use SDAP with oceanographic datasets relevant to the CEOS Ocean Variables Enabling Research and Applications for GEO (COVERAGE) initiative.\n\nIn this demonstration, two geographically distributed SDAP cloud computing deployments are used, one on Amazon Web Services (AWS, https://aws.amazon.com/) for analytics with datasets curated in the USA (e.g., from NASA or NOAA), and one on WEkEO (https://www.wekeo.eu/) for analytics with European datasets (e.g., from CMEMS). In this way we follow the strategy of performing the computations close to the data host providers.\n\nSDAP provides web service endpoints for each analytic algorithm, and can be accessed in a web browser or from a variety of programming languages. This notebook demonstrates the Python API to access SDAP.",
"_____no_output_____"
],
[
"## Demonstration Setup\n\nIn the cell below, we specify the location of the SDAP deployments to use, a dataset to be used, the \nbounding box for an area of interest, and a time range for analysis.",
"_____no_output_____"
]
],
[
[
"# Base URLs for the USA (AWS) and European (WEkEO) SDAP deployments.\nbase_url_us = \"https://coverage.ceos.org/nexus\"\nbase_url_eu = \"https://coverage.wekeo.eu\"\n\n# Define bounding box and time period for analysis\nmin_lon = -77; max_lon = -70\nmin_lat = 35; max_lat = 42\nbbox = box(min_lon, min_lat, max_lon, max_lat)\n\n# Specify the SDAP name of the datasets\ndataset_us = \"MUR25-JPL-L4-GLOB-v4.2_analysed_sst\"\ndataset_eu = \"METOFFICE-GLO-SST-L4-NRT-OBS-GMPE-V3_analysed_sst\"\nstart_time = datetime(2018, 1, 1)\nend_time = datetime(2018, 12, 31)\n\nprint(\"dataset_us: {}\".format(dataset_us))\nprint(\"dataset_eu: {}\".format(dataset_eu))\nprint(\"spatial region {}, and time range {} to {}.\".\n format(bbox, start_time, end_time))\nplot_box(bbox)",
"_____no_output_____"
]
],
[
[
"# Cloud Analytics\n## Data Inventory\nWe begin by querying the SDAP `/list` endpoint at each of our SDAP deployments to examine what data are available in each instantiation of SDAP.",
"_____no_output_____"
]
],
[
[
"def get_sdap_inv(base_url):\n url = '{}/list'.format(base_url)\n print(\"Web Service Endpoint:\"); print(url);\n res = requests.get(url, verify=False).json()\n pprint(res)\n\nprint(\"Response from AWS SDAP:\")\nget_sdap_inv(base_url_us)\nprint()\n\nprint(\"Response from WEkEO SDAP:\")\nget_sdap_inv(base_url_eu)",
"_____no_output_____"
]
],
[
[
"## Area-Averaged Time Series\nNext we will make a simple web service call to the SDAP `/timeSeriesSpark` endpoint. This can also be done in a web browser or in a variety of programming languages.",
"_____no_output_____"
]
],
[
[
"# Compute time series using the SDAP/NEXUS web/HTTP interface\n#\n# Construct the URL\nurl = '{}/timeSeriesSpark?ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}&seasonalFilter={}'.\\\n format(base_url_us, dataset_us, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format),\n \"false\")\n\n# Display some information about the job\nprint(url); print()\n\n# Query SDAP to compute the time averaged map\nprint(\"Waiting for response from SDAP...\")\nstart = time.perf_counter()\nts_json = requests.get(url, verify=False).json()\nprint(\"Time series took {} seconds\".format(time.perf_counter() - start))",
"_____no_output_____"
]
],
[
[
"### JSON response\nThe SDAP web service calls return the result in `JSON`, a standard web services data\ninterchange format. This makes it easy for another web service component to \"consume\" the SDAP output.\nLet's view the JSON response. It is long, so we'll show just the first few time values.",
"_____no_output_____"
]
],
[
[
"show_sdap_json(ts_json, nh=33, nt=10)",
"_____no_output_____"
]
],
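[
[
"### Working with the JSON response directly\nAs a brief aside (this cell pair is an addition, not part of the original demonstration), the JSON returned by `/timeSeriesSpark` can be consumed with plain Python and NumPy, without the plotting helpers defined above. The sketch below assumes the `ts_json` structure shown above, in which each entry of `data` is a list containing a dict with `time` and `mean` keys.",
"_____no_output_____"
]
],
[
[
"# Minimal sketch of consuming the SDAP JSON directly\n# (this mirrors the extraction that ts_plot performs internally)\ntimes = np.array([entry[0][\"time\"] for entry in ts_json[\"data\"]])\nmeans = np.array([entry[0][\"mean\"] for entry in ts_json[\"data\"]])\nprint(times.shape, means.shape)\nprint(\"First timestamp (s since 1970-01-01):\", times[0])\nprint(\"First mean value:\", means[0])",
"_____no_output_____"
]
],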
[
[
"### Plot the result\nLet's check our time series result with a plot. An SDAP dataset can also be associated with its climatology (long-term average for a given time period like monthly or daily). If this is the case, we can apply a \"seasonal filter\" to compute the spatial average of the difference between the dataset and its climatology as a time series.",
"_____no_output_____"
]
],
[
[
"# Plot the result\nprint(\"Plot of area-average time series:\")\nts_plot(ts_json, val_name='mean', title=dataset_us, units='Degrees Celsius')",
"_____no_output_____"
]
],
[
[
"## Time Averaged Map\nNext we will issue an SDAP web service call to compute a time averaged map. While the time series algorithm used above averages spatially to produce a single value for each time stamp, the time average map averages over time to produce a single value at each grid cell location. While the time series produces a 1D result indexed by time, the time averaged map produces a 2D map indexed by latitude and longitude.",
"_____no_output_____"
]
],
[
[
"# Compute time-averaged map using the SDAP/NEXUS web/HTTP interface\n#\n# Construct the URL\nurl = '{}/timeAvgMapSpark?ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}'.\\\n format(base_url_us, dataset_us, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format))\n\n# Display some information about the job\nprint(url); print()\n\n# Query SDAP to compute the time averaged map\nprint(\"Waiting for response from SDAP...\")\nstart = time.perf_counter()\ntam_json = requests.get(url, verify=False).json()\nprint(\"Time averaged map took {} seconds\".format(time.perf_counter() - start))",
"_____no_output_____"
]
],
[
[
"### JSON response\nThe SDAP web service calls return the result in `JSON`, a standard web services data\ninterchange format. This makes it easy for another web service component to \"consume\" the SDAP output.\nLet's view the JSON response. It is long, so we'll show just the first few grid cells.",
"_____no_output_____"
]
],
[
[
"show_sdap_json(tam_json, nh=13, nt=10)",
"_____no_output_____"
]
],
[
[
"### Extract the actual data and plot the result\nThe actual time averaged map data is readily accessible for plotting.",
"_____no_output_____"
]
],
[
[
"# Extract the actual output data\ntam = tam_json[\"data\"]\n\n# Create a plot of the Time Averaged Map results\nplot_map(tam, title=dataset_us+\" (deg C)\", grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Hovmoller Maps\nNext we will issue an SDAP web service call to compute latitude-time and longitude-time Hovmoller maps and plot the results.",
"_____no_output_____"
]
],
[
[
"# Construct the URLs\nurl_lat = '{}/latitudeTimeHofMoellerSpark?ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}'.\\\n format(base_url_us, dataset_us, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format))\nurl_lon = '{}/longitudeTimeHofMoellerSpark?ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}'.\\\n format(base_url_us, dataset_us, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format))\n\n# Query SDAP to compute the latitude-time Hovmoller map\nprint(url_lat); print()\nprint(\"Waiting for response from SDAP...\")\nstart = time.perf_counter()\nhm_lat_json = requests.get(url_lat, verify=False).json()\nprint(\"Latitude-time Hovmoller map took {} seconds\".format(time.perf_counter() - start)); print()\n\n# Query SDAP to compute the longitude-time Hovmoller map\nprint(url_lon); print()\nprint(\"Waiting for response from SDAP...\")\nstart = time.perf_counter()\nhm_lon_json = requests.get(url_lon, verify=False).json()\nprint(\"Longitude-time Hovmoller map took {} seconds\".format(time.perf_counter() - start))\n",
"_____no_output_____"
]
],
[
[
"### JSON response\nLet's view the JSON response. It is long, so we'll show just the first few grid cells.",
"_____no_output_____"
]
],
[
[
"# Show snippet of JSON response for latitude-time Hovmoller\nshow_sdap_json(hm_lat_json, nh=19, nt=10)",
"_____no_output_____"
],
[
"# Show snippet of JSON response for longitude-time Hovmoller\nshow_sdap_json(hm_lon_json, nh=19, nt=10)",
"_____no_output_____"
]
],
[
[
"### Extract the actual data and plot the results\nThe actual map data is readily accessible for plotting.",
"_____no_output_____"
]
],
[
[
"# Extract the actual output data\nhm_lat = hm_lat_json[\"data\"]\nhm_lon = hm_lon_json[\"data\"]\n\n# Plot the Hovmoller maps\nhovfig = plot_hovmoller(hm_lat, coord_series_key=\"lats\", coord_point_key=\"latitude\", \n coord_axis_vert=True, subplot=121, \n title=\"Sea Surface Temperature (deg C)\")\nhovfig = plot_hovmoller(hm_lon, coord_series_key=\"lons\", coord_point_key=\"longitude\",\n coord_axis_vert=False, hovfig=hovfig, subplot=122,\n title=\"Sea Surface Temperature (deg C)\")",
"_____no_output_____"
]
],
[
[
"## Joint Analytics Across AWS and WEkEO SDAP Deployments\nNext we can take advantage of the two SDAP deployments and conduct joint analytics across the two platforms.\n### Compare two SST datasets, one from AWS SDAP and one from WEkEO SDAP",
"_____no_output_____"
]
],
[
[
"# Previous time series result was computed on AWS with\n# dataset \"MUR25-JPL-L4-GLOB-v4.2_analysed_sst\"\nts_mur25_json = ts_json\n\n# Let's compute a 2nd SST time series, this time computed on WEkEO with\n# dataset \"METOFFICE-GLO-SST-L4-NRT-OBS-GMPE-V3_analysed_sst\"\n#\ndataset_eu_gmpe_sst = \"METOFFICE-GLO-SST-L4-NRT-OBS-GMPE-V3_analysed_sst\"\nurl = '{}/timeSeriesSpark?ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}&seasonalFilter={}'.\\\n format(base_url_eu, dataset_eu_gmpe_sst, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format),\n \"false\")\n\n# Display some information about the job\nprint(url); print()\n\n# Query SDAP to compute the time averaged map\nprint(\"Waiting for response from SDAP...\")\nstart = time.perf_counter()\nts_gmpe_json = requests.get(url, verify=False).json()\nprint(\"Time series took {} seconds\".format(time.perf_counter() - start))",
"_____no_output_____"
],
[
"# Plot the result\nts_plot_two(ts_mur25_json, ts_gmpe_json, dataset_us, dataset_eu_gmpe_sst, \n \"Degrees Celsius\", \"Degrees Celsius\",\n title=\"SST Comparison\", val_name=\"mean\")\nscatter_plot(ts_mur25_json, ts_gmpe_json, title=\"SST Comparison\",\n xlabel=dataset_us, ylabel=dataset_eu_gmpe_sst)",
"_____no_output_____"
]
],
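[
[
"### Quantifying the agreement\nAs a short illustrative addition (not in the original notebook), the two time series can be matched in time and summarized with a correlation coefficient, complementing the visual comparison above. The matching logic below mirrors the 24-hour tolerance used in `scatter_plot`.",
"_____no_output_____"
]
],
[
[
"# Match the two series in time (within 24 hours) and compute Pearson's r\nt1 = np.array([ts[0][\"time\"] for ts in ts_mur25_json[\"data\"]])\nt2 = np.array([ts[0][\"time\"] for ts in ts_gmpe_json[\"data\"]])\nv1 = np.array([ts[0][\"mean\"] for ts in ts_mur25_json[\"data\"]])\nv2 = np.array([ts[0][\"mean\"] for ts in ts_gmpe_json[\"data\"]])\nxs, ys = [], []\nfor i, t in enumerate(t1):\n    j = (np.abs(t2 - t)).argmin()\n    if np.abs(t - t2[j]) < 86400:  # 24 hrs\n        xs.append(v1[i])\n        ys.append(v2[j])\nr = np.corrcoef(xs, ys)[0, 1]\nprint(\"Matched {} points; correlation r = {:.3f}\".format(len(xs), r))",
"_____no_output_____"
]
],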
[
[
"### Compare SST from AWS SDAP and ADT from WEkEO SDAP",
"_____no_output_____"
]
],
[
[
"dataset_eu_cmems_adt = \"CMEMS_AVISO_SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047_adt\"\nurl = '{}/timeSeriesSpark?ds={}&minLon={}&minLat={}&maxLon={}&maxLat={}&startTime={}&endTime={}&seasonalFilter={}'.\\\n format(base_url_eu, dataset_eu_cmems_adt, *bbox.bounds, \n start_time.strftime(dt_format), end_time.strftime(dt_format),\n \"false\")\n\n# Display some information about the job\nprint(url); print()\n\n# Query SDAP to compute the time averaged map\nprint(\"Waiting for response from SDAP...\")\nstart = time.perf_counter()\nts_cmems_adt_json = requests.get(url, verify=False).json()\nprint(\"Time series took {} seconds\".format(time.perf_counter() - start))",
"_____no_output_____"
],
[
"# Plot the result\nts_plot_two(ts_mur25_json, ts_cmems_adt_json, dataset_us, dataset_eu_cmems_adt, \n \"Degrees Celsius\", \"Meters Above Geoid\",\n title=\"SST vs ADT\", val_name=\"mean\")",
"_____no_output_____"
]
],
[
[
"# More SDAP Analytics\nIn the rest of this notebook we use a helper function defined in the first notebook cell above to use SDAP to compute time series and time averaged map for a variety of other relevant datasets. In these results, SDAP is used in the same way as we demonstrated above.",
"_____no_output_____"
],
[
"## Absolute Dynamic Topography (ADT) from CMEMS_AVISO_SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047",
"_____no_output_____"
]
],
[
[
"dataset = \"CMEMS_AVISO_SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047_adt\"\ncompute_ts_and_tam(dataset,\n bbox, start_time, end_time, base_url=base_url_eu, \n units=\"meters\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Sea Level Anomaly (SLA) from CMEMS_AVISO_SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047",
"_____no_output_____"
]
],
[
[
"dataset = \"CMEMS_AVISO_SEALEVEL_GLO_PHY_L4_REP_OBSERVATIONS_008_047_sla\"\ncompute_ts_and_tam(dataset, \n bbox, start_time, end_time, base_url=base_url_eu, \n units=\"meters\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Sea Surface Salinity (SSS) from Multi-Mission Optimally Interpolated Sea Surface Salinity 7-Day Global Dataset V1",
"_____no_output_____"
]
],
[
[
"start_time_oisss7d = datetime(2011, 8, 28)\nend_time_oisss7d = datetime(2021, 9, 8)\ndataset = \"OISSS_L4_multimission_global_7d_v1.0_sss\"\ncompute_ts_and_tam(dataset, \n bbox, start_time_oisss7d, end_time_oisss7d, base_url=base_url_us, \n units=\"1e-3\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Sea Surface Salinity (SSS) Uncertainty from Multi-Mission Optimally Interpolated Sea Surface Salinity 7-Day Global Dataset V1",
"_____no_output_____"
]
],
[
[
"dataset = \"OISSS_L4_multimission_global_7d_v1.0_sss_uncertainty\"\nstart_time_oisss7d = datetime(2015, 7, 1)\nend_time_oisss7d = datetime(2021, 9, 8)\ncompute_ts_and_tam(dataset, \n bbox, start_time_oisss7d, end_time_oisss7d, base_url=base_url_us, \n units=\"1e-3\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Sea Surface Salinity (SSS) from Multi-Mission Optimally Interpolated Sea Surface Salinity Monthly Global Dataset V1",
"_____no_output_____"
]
],
[
[
"start_time_oisssmo = datetime(2011, 9, 16)\nend_time_oisssmo = datetime(2021, 8, 16)\ndataset = \"OISSS_L4_multimission_global_monthly_v1.0_sss\"\ncompute_ts_and_tam(dataset, \n bbox, start_time_oisssmo, end_time_oisssmo, base_url=base_url_us, \n units=\"1e-3\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Sea Surface Salinity (SSS) Anomaly from Multi-Mission Optimally Interpolated Sea Surface Salinity Monthly Global Dataset V1",
"_____no_output_____"
]
],
[
[
"dataset = \"OISSS_L4_multimission_global_monthly_v1.0_sss_anomaly\"\ncompute_ts_and_tam(dataset, \n bbox, start_time_oisssmo, end_time_oisssmo, base_url=base_url_us, \n units=\"1e-3\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Sea Surface Temperature (SST) from MUR25-JPL-L4-GLOB-v4.2",
"_____no_output_____"
]
],
[
[
"dataset = \"MUR25-JPL-L4-GLOB-v4.2_analysed_sst\"\ncompute_ts_and_tam(dataset, \n bbox, start_time, end_time, base_url=base_url_us, \n units=\"degrees celsius\", title=dataset, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Chlorophyll-A from MODIS_Aqua_L3m_8D",
"_____no_output_____"
]
],
[
[
"# Define dataset and bounding box for analysis\ndataset = \"MODIS_Aqua_L3m_8D_chlor_a\"\ncompute_ts_and_tam(dataset, \n bbox, start_time, end_time, base_url=base_url_us,\n seasonal_filter=\"true\",\n units=\"milligram m-3\", title=dataset,\n log_colors=True, grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Chlorophyll-A from JPL-MRVA25-CHL-L4-GLOB-v3.0_CHLA",
"_____no_output_____"
]
],
[
[
"dataset = \"JPL-MRVA25-CHL-L4-GLOB-v3.0_CHLA_analysis\"\ncompute_ts_and_tam(dataset, \n bbox, start_time, end_time, base_url=base_url_us, \n units=\"milligram m-3\", title=dataset, log_colors=True,\n grid_line_sep=2)",
"_____no_output_____"
]
],
[
[
"## Chlorophyll-A from CMEMS_OCEANCOLOUR_GLO_CHL_L4_REP_OBSERVATIONS_009_082",
"_____no_output_____"
]
],
[
[
"dataset = \"CMEMS_OCEANCOLOUR_GLO_CHL_L4_REP_OBSERVATIONS_009_082_CHL\"\ncompute_ts_and_tam(dataset, \n bbox, start_time, end_time, base_url=base_url_eu, \n units=\"milligram m-3\", title=dataset, log_colors=True,\n grid_line_sep=2)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
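[
"markdown"
],
[
"code"
],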
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
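[
"markdown"
],
[
"code"
],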
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
c52ba93eeec0bf6528463f4963c4afa6afbdf1a2
| 843,831 |
ipynb
|
Jupyter Notebook
|
overfitting plots-checkpoint.ipynb
|
mottiden/scipy_2015_sklearn_tutorial-master
|
da16ecb941cf459699964b271ccdeca34fcd2b53
|
[
"CC0-1.0"
] | 659 |
2015-03-05T14:01:52.000Z
|
2022-03-14T08:18:39.000Z
|
overfitting plots-checkpoint.ipynb
|
anhlbt/scipy_2015_sklearn_tutorial
|
be0a35724b9f0f0b92ea6c325ad5137176585b1c
|
[
"CC0-1.0"
] | 42 |
2015-03-05T19:44:28.000Z
|
2017-07-18T19:53:37.000Z
|
overfitting plots-checkpoint.ipynb
|
anhlbt/scipy_2015_sklearn_tutorial
|
be0a35724b9f0f0b92ea6c325ad5137176585b1c
|
[
"CC0-1.0"
] | 398 |
2015-04-13T17:32:59.000Z
|
2022-03-10T19:39:12.000Z
| 880.825678 | 186,725 | 0.936946 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
c52bd4d1a5fce0d7eae37fd4c6421a462c60a0ea
| 15,910 |
ipynb
|
Jupyter Notebook
|
GuideToPandasVolume5.ipynb
|
jaikumardas577/projects
|
6b8c0517c10107872d814c3e9fdd1b82d89240da
|
[
"MIT"
] | null | null | null |
GuideToPandasVolume5.ipynb
|
jaikumardas577/projects
|
6b8c0517c10107872d814c3e9fdd1b82d89240da
|
[
"MIT"
] | null | null | null |
GuideToPandasVolume5.ipynb
|
jaikumardas577/projects
|
6b8c0517c10107872d814c3e9fdd1b82d89240da
|
[
"MIT"
] | null | null | null | 39.775 | 729 | 0.596103 |
[
[
[
"# Introduction\n\nYou have learned how to select relevant data from `DataFrame` and `Series` objects. Plucking the right data out of our data representation is critical to getting work done.\n\nHowever, the data does not always come in the format we want. Sometimes we have to do some more work ourselves to reformat it for our desired task.\n\nThe remainder of this tutorial will cover different operations we can apply to our data to get the input \"just right\". We'll start off in this section by looking at the most commonly looked built-in reshaping operations. Along the way we'll cover data `dtypes`, a concept essential to working with `pandas` effectively.",
"_____no_output_____"
],
[
"# Relevant Resources\n* **[Summary functions and maps](https://www.kaggle.com/residentmario/summary-functions-and-maps-reference)**\n* [Official pandas cheat sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)\n\n# Set Up\nRun the code cell below to load your data and the necessary utility functions.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\npd.set_option('max_rows', 5)\nimport numpy as np\nfrom learntools.advanced_pandas.summary_functions_maps import *\n\nreviews = pd.read_csv(\"../input/wine-reviews/winemag-data-130k-v2.csv\", index_col=0)",
"_____no_output_____"
]
],
[
[
"Look at an overview of your data by running the line below:",
"_____no_output_____"
],
[
"# Checking Answers\n\n**Check your answers in each exercise using the `check_qN` function** (replacing `N` with the number of the exercise). For example here's how you would check an incorrect answer to exercise 1:",
"_____no_output_____"
]
],
[
[
"check_q1(pd.DataFrame())",
"_____no_output_____"
]
],
[
[
"If you get stuck, **use the `answer_qN` function to see the code with the correct answer.**\n\nFor the first set of questions, running the `check_qN` on the correct answer returns `True`.\n\nFor the second set of questions, using this function to check a correct answer will present an informative graph!\n",
"_____no_output_____"
],
[
"## Exercises\n\nLook at your data by running the cell below:",
"_____no_output_____"
]
],
[
[
"reviews.head()",
"_____no_output_____"
]
],
[
[
"**Exercise 1**: What is the median of the `points` column?",
"_____no_output_____"
]
],
[
[
"reviews.describe()",
"_____no_output_____"
],
[
"# Your code here\nmedian = reviews.points.median()\nmedian\n# check_q1(pd.DataFrame())",
"_____no_output_____"
]
],
[
[
"**Exercise 2**: What countries are represented in the dataset?",
"_____no_output_____"
]
],
[
[
"# Your code here\nreviews.country.unique()",
"_____no_output_____"
]
],
[
[
"**Exercise 3**: What countries appear in the dataset most often?",
"_____no_output_____"
]
],
[
[
"# Your code here\nreviews.country.mode()\ncheck_q3(pd.DataFrame())",
"_____no_output_____"
]
],
[
[
"**Exercise 4**: Remap the `price` column by subtracting the median price. Use the `Series.map` method.",
"_____no_output_____"
]
],
[
[
"# Your code here\n\n# reviews.price.map()\nj = map(lambda x :x - median,reviews.price)",
"_____no_output_____"
],
[
"j = list(j)",
"_____no_output_____"
]
],
[
[
"**Exercise 5**: I\"m an economical wine buyer. Which wine in is the \"best bargain\", e.g., which wine has the highest points-to-price ratio in the dataset?\n\nHint: use a map and the [`argmax` function](http://pandas.pydata.org/pandas-docs/version/0.19.2/generated/pandas.Series.argmax.html).",
"_____no_output_____"
]
],
[
[
"reviews.index.values",
"_____no_output_____"
],
[
"# Your code here\n\nj = reviews.apply(lambda x:x.points - x.price,axis = 'columns')\nj.head()",
"_____no_output_____"
]
],
[
[
" Now it's time for some visual exercises. In the questions that follow, generate the data that we will need to have in order to produce the plots that follow. These exercises will use skills from this workbook as well as from previous ones. They look a lot like questions you will actually be asking when working with your own data!",
"_____no_output_____"
],
[
"<!--\n**Exercise 6**: Sometimes the `province` and `region_1` provided in the dataset is the same value. Create a `Series` whose values counts how many times this occurs (`True`) and doesn't occur (`False`).\n-->",
"_____no_output_____"
],
[
"**Exercise 6**: Is a wine more likely to be \"tropical\" or \"fruity\"? Create a `Series` counting how many times each of these two words appears in the `description` column in the dataset.\n\nHint: use a map to check each description for the string `tropical`, then count up the number of times this is `True`. Repeat this for `fruity`. Create a `Series` combining the two values at the end.",
"_____no_output_____"
]
],
[
[
"reviews[\"description\"][2]",
"_____no_output_____"
],
[
"# Your code here\ndef func(ser):\n count = {'tropical': 0,'fruity': 0}\n if 'tropical' in ser:\n count[\"tropical\"] += 1\n elif 'fruity' in ser:\n count[\"fruity\"] += 1\n# if(count['tropical'] > count['fruity']):\n# return count['tropical']\n# elif(count['tropical'] < count['fruity']):\n# return count['fruity']\n# else:\n return (count['tropical'],count['fruity'])\nreviews[\"type\"] = list(map(func,reviews.description))\nreviews[\"type\"] \n",
"_____no_output_____"
],
[
"reviews",
"_____no_output_____"
]
],
[
[
"**Exercise 7**: What combination of countries and varieties are most common?\n\nCreate a `Series` whose index consists of strings of the form `\"<Country> - <Wine Variety>\"`. For example, a pinot noir produced in the US should map to `\"US - Pinot Noir\"`. The values should be counts of how many times the given wine appears in the dataset. Drop any reviews with incomplete `country` or `variety` data.\n\nHint: you can do this in three steps. First, generate a `DataFrame` whose `country` and `variety` columns are non-null. Then use a map to create a series whose entries are a `str` concatenation of those two columns. Finally, generate a `Series` counting how many times each label appears in the dataset.",
"_____no_output_____"
]
],
[
[
"# Your code here\ndataframe = reviews[(reviews.country.notnull()) & (reviews.variety.notnull())]\ndataframe[\"country - variety\"] = dataframe.apply(lambda x:x[\"country\"] +\" \" + x[\"variety\"],axis = 'columns')\n",
"_____no_output_____"
],
[
"dataframe_new = dataframe.iloc[0:5]\ndataframe_new[\"country - variety\"].value_counts()\n\n",
"_____no_output_____"
],
[
"type(dataframe[\"country - variety\"].value_counts())",
"_____no_output_____"
],
[
"data = pd.Series(data = dataframe[\"country - variety\"].value_counts().values ,index = dataframe[\"country - variety\"].unique())",
"_____no_output_____"
],
[
"data.head()",
"_____no_output_____"
]
],
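[
[
"As an aside (not part of the original exercise), the same result can be computed in a single chained expression; `value_counts()` already returns the counts sorted in descending order, so no manual `Series` construction is needed.",
"_____no_output_____"
]
],
[
[
"# Idiomatic one-expression version of Exercise 7 (illustrative alternative)\nans = reviews.dropna(subset=[\"country\", \"variety\"])\n(ans.country + \" - \" + ans.variety).value_counts().head()",
"_____no_output_____"
]
],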
[
[
"# Keep Going\n**[Continue to grouping and sorting](https://www.kaggle.com/kernels/fork/598715).**",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
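[
"markdown"
],
[
"code"
],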
[
"markdown"
]
] |
c52bd7f0d605d6e0d0ca8ca69de87b3deb7079b0
| 13,844 |
ipynb
|
Jupyter Notebook
|
lectures/l14-mnist-pytorch-nn-msgd-inclass.ipynb
|
davidd-55/cs152fa21
|
de0876195d8da74909416aef3dece1848179b777
|
[
"CC0-1.0"
] | null | null | null |
lectures/l14-mnist-pytorch-nn-msgd-inclass.ipynb
|
davidd-55/cs152fa21
|
de0876195d8da74909416aef3dece1848179b777
|
[
"CC0-1.0"
] | null | null | null |
lectures/l14-mnist-pytorch-nn-msgd-inclass.ipynb
|
davidd-55/cs152fa21
|
de0876195d8da74909416aef3dece1848179b777
|
[
"CC0-1.0"
] | null | null | null | 31.321267 | 217 | 0.561687 |
[
[
[
"# Plan\n\n1. Read through code (~5 minutes)\n2. Get into groups and discuss code (~2 minutes)\n3. Ask questions on the sheet (~5 minutes)\n4. Work on \"Questions to answer\" (~10 minutes)\n5. Work on \"Things to explore\" (~10 minutes)\n6. Work on the \"Challenge\" (~20 minutes)\n7. Work on \"What's next?\"\n\nGetting started:\n\n- I recommend cloning this repository (or pulling changes if you already have it cloned)\n- Starting jupyter\n- Then duplicating this file so that you can alter it without confusing `git`\n\nSome tools to use:\n\n- You can create a cell above the current cell by typing \"esc\" then \"a\"\n- You can create a cell below the current cell by typing \"esc\" then \"b\"\n- You should copy code into newly created cells, alter it, print out the results, etc.\n- You can do this for single lines or you can copy, for example, the `for batch, (X, Y) in enumerate(dataloader):` loop out of `train_one_epoch` and make minor changes so that it works outside of the function\n- I will frequently put a break a the end of the for-loop so that it only iterates one time (so that I don't have to wait for every iteration)",
"_____no_output_____"
]
],
[
[
"from contextlib import contextmanager\nfrom timeit import default_timer as timer\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, SubsetRandomSampler\n\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import Compose, Normalize, ToTensor",
"_____no_output_____"
],
[
"@contextmanager\ndef stopwatch(label: str):\n start = timer()\n try:\n yield\n finally:\n print(f\"{label}: {timer() - start:6.3f}s\")",
"_____no_output_____"
],
[
"def get_mnist_data_loaders(path, batch_size, valid_batch_size):\n\n # MNIST specific transforms\n mnist_xforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n\n # Training data loader\n train_dataset = MNIST(root=path, train=True, download=True, transform=mnist_xforms)\n\n tbs = len(train_dataset) if batch_size == 0 else batch_size\n train_loader = DataLoader(train_dataset, batch_size=tbs, shuffle=True)\n\n # Validation data loader\n valid_dataset = MNIST(root=path, train=False, download=True, transform=mnist_xforms)\n\n vbs = len(valid_dataset) if valid_batch_size == 0 else valid_batch_size\n valid_loader = DataLoader(valid_dataset, batch_size=vbs, shuffle=True)\n\n return train_loader, valid_loader",
"_____no_output_____"
],
[
"class NeuralNetwork(nn.Module):\n def __init__(self, layer_sizes):\n super(NeuralNetwork, self).__init__()\n\n first_layer = nn.Flatten()\n middle_layers = [\n # nn.Sequential(nn.Linear(nlminus1, nl), nn.Tanh())\n nn.Sequential(nn.Linear(nlminus1, nl), nn.ReLU())\n # nn.Sequential(nn.Linear(nlminus1, nl), nn.Sigmoid())\n for nl, nlminus1 in zip(layer_sizes[1:-1], layer_sizes)\n ]\n last_layer = nn.Linear(layer_sizes[-2], layer_sizes[-1])\n\n all_layers = [first_layer] + middle_layers + [last_layer]\n\n self.layers = nn.Sequential(*all_layers)\n\n def forward(self, X):\n return self.layers(X)",
"_____no_output_____"
],
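[
"# (Added illustration, not from the original lecture.) NeuralNetwork starts with\n# nn.Flatten, so a batch of 1x28x28 images becomes a [batch, 784] matrix, and the\n# final layer produces one score per class.\ntiny_model = NeuralNetwork((28 * 28, 16, 10))\nfake_images = torch.randn(4, 1, 28, 28)\nprint(tiny_model(fake_images).shape)  # torch.Size([4, 10])",
"_____no_output_____"
],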
[
"def train_one_epoch(dataloader, model, loss_fn, optimizer, device):\n\n model.train()\n\n num_batches = len(train_loader)\n batches_to_print = [0, num_batches // 3, 2 * num_batches // 3, num_batches - 1]\n\n for batch, (X, Y) in enumerate(dataloader):\n\n X, Y = X.to(device), Y.to(device)\n\n output = model(X)\n\n loss = loss_fn(output, Y)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch in batches_to_print:\n print(f\"Batch {batch+1:>5} of {num_batches}: loss={loss.item():>6.3f}\")",
"_____no_output_____"
],
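[
"# (Added illustration, not from the original lecture.) A toy version of the\n# accuracy expression used in compute_validation_accuracy below: argmax(1) picks\n# the predicted class per row, == compares against the labels, and the float\n# cast plus sum counts the correct predictions.\ntoy_output = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])\ntoy_Y = torch.tensor([1, 0, 0])\nprint((toy_output.argmax(1) == toy_Y).type(torch.float).sum().item())  # 2.0",
"_____no_output_____"
],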
[
"def compute_validation_accuracy(dataloader, model, loss_fn, device):\n\n model.eval()\n\n N = len(dataloader.dataset)\n num_batches = len(dataloader)\n\n valid_loss, num_correct = 0, 0\n\n with torch.no_grad():\n\n for X, Y in dataloader:\n\n X, Y = X.to(device), Y.to(device)\n output = model(X)\n\n valid_loss += loss_fn(output, Y).item()\n num_correct += (output.argmax(1) == Y).type(torch.float).sum().item()\n\n valid_loss /= num_batches\n valid_accuracy = num_correct / N\n\n print(f\"Validation accuracy : {(100*valid_accuracy):>6.3f}%\")\n print(f\"Validation loss : {valid_loss:>6.3f}\")",
"_____no_output_____"
]
],
[
[
"# Configuration",
"_____no_output_____"
]
],
[
[
"# Configuration parameters\ndata_path = \"../data\"\nseed = 0\ntorch.manual_seed(seed)\n\n# Hyperparameters\nbatch_size = 1024\nvalid_batch_size = 0\nlearning_rate = 1e-2\nnum_epochs = 50\n\n# Training device\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(f\"Using '{device}' device.\")",
"_____no_output_____"
]
],
[
[
"# Data",
"_____no_output_____"
]
],
[
[
"# Get data loaders\ntrain_loader, valid_loader = get_mnist_data_loaders(\n data_path, batch_size, valid_batch_size\n)",
"_____no_output_____"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"# Create neural network model\nnx = train_loader.dataset.data.shape[1:].numel()\nny = len(train_loader.dataset.classes)\nlayer_sizes = (nx, 512, 50, ny)\n\nmodel = NeuralNetwork(layer_sizes).to(device)\nprint(model)",
"_____no_output_____"
]
],
[
[
"# Training Loop",
"_____no_output_____"
]
],
[
[
"# Training utilities\nloss_fn = nn.CrossEntropyLoss()\n# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)",
"_____no_output_____"
],
[
"with stopwatch(f\"\\nDone! Total time for {num_epochs} epochs\"):\n for epoch in range(num_epochs):\n print(f\"\\nEpoch {epoch+1}\\n-------------------------------\")\n with stopwatch(\"Epoch time \"):\n train_one_epoch(train_loader, model, loss_fn, optimizer, device)\n compute_validation_accuracy(valid_loader, model, loss_fn, device)",
"_____no_output_____"
]
],
[
[
"# Questions to answer\n\n(Try to answer these in your group prior to running or altering any code.)\n\n- What is the shape of `output` in the function `train_one_epoch`?\n - Shape is `[1024, 10]` (`[batch_size , num_output_features]`)\n- What values would you expect to see in `output`?\n - Rows represent predictions, columns represent possible outputs, data would be floats representing predictions where highest in a row represents the prediction by the network\n- What is the shape of `Y` in the function `train_one_epoch`?\n - Shape is `[1024, 1]` (`[batch_size , 1]`)\n- Describe each part of `(output.argmax(1) == Y).type(torch.float).sum().item()`\n - `output.argmax(1)` selects the max value in the first (zero-indexed) dimension of the `output` tensor. Analagous to selecting prediction!\n - `... == Y` compares predictions to valid data from `Y`, returns `[1024, 1]` tensor of bools representing correct/incorrect prediction\n - `.type(torch.float)` converts `False` to 0 and `True` to 1 in the tensor of bools\n - `.sum().item()` calculates the number of correctly predicted inputs. Can be divided by 1024 for accuracy! \n- What happens when you rerun the training cell for additional epoch (without rerunning any other cells)?\n - Picks up training where the last epoch left off!\n- What happens to if force device to be `\"cpu\"`?\n - Slows down on the server!",
"_____no_output_____"
],
[
"# Things to explore\n\n- change the hidden layer activation functions to sigmoid\n- change the hidden layer activation functions to [something else](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity)\n- change the optimizer from `SGD` to `Adam` and try to train the network again\n\nYou can also try these if you feel like you have plenty of time. You can also choose to come back to them after working on the Challenge below\n\n- (optional) try adding a [dropout](https://pytorch.org/docs/stable/generated/torch.nn.Dropout.html#torch.nn.Dropout) layer somewhere in your network\n- (optional) try switching the dataset to either [KMNIST](https://pytorch.org/vision/0.8/datasets.html#kmnist) or [FashionMNIST](https://pytorch.org/vision/0.8/datasets.html#fashion-mnist)",
"_____no_output_____"
],
[
"# Challenge\n\nTrain a model and get the highest accuracy possible by adjusting hyperparameters and the model architecture (i.e., the number of layers, the number of neurons per layer, etc.).",
"_____no_output_____"
],
[
"# What's next?\n\nMove the inference cells below to a new file, and then try to make them work.",
"_____no_output_____"
],
[
"# Inference",
"_____no_output_____"
]
],
[
[
"model_filename = \"l14-model.pth\"\ntorch.save(model.state_dict(), model_filename)\nprint(\"Saved PyTorch Model State to\", model_filename)",
"_____no_output_____"
],
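[
"# (Added illustration, not from the original notebook.) What actually went into\n# the saved file: the state_dict maps each parameter name to its tensor.\nfor name, tensor in model.state_dict().items():\n    print(f\"{name}: {tuple(tensor.shape)}\")",
"_____no_output_____"
],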
[
"model = NeuralNetwork(layer_sizes)\nmodel.load_state_dict(torch.load(model_filename))\n\nmodel.eval()\n\n# Index of example\ni = 0\n\n# Example input and output\nx, y = valid_loader.dataset[i][0], valid_loader.dataset[i][1]\n\nwith torch.no_grad():\n output = model(x)\n prediction = output[0].argmax(0)\n print(f\"Prediction : {prediction}\")\n print(f\"Target : {y}\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
c52bf5bfad9d188cb04cc32ad368dfe83d85b673
| 649,221 |
ipynb
|
Jupyter Notebook
|
Pokemon Picture.ipynb
|
kaiicheng/Pokemon-Photo
|
7fb93dc1ffb63bb4d653ba366f615b71bfb4ebd6
|
[
"MIT"
] | null | null | null |
Pokemon Picture.ipynb
|
kaiicheng/Pokemon-Photo
|
7fb93dc1ffb63bb4d653ba366f615b71bfb4ebd6
|
[
"MIT"
] | null | null | null |
Pokemon Picture.ipynb
|
kaiicheng/Pokemon-Photo
|
7fb93dc1ffb63bb4d653ba366f615b71bfb4ebd6
|
[
"MIT"
] | null | null | null | 57.488798 | 126 | 0.618997 |
[
[
[
"!pip install campy",
"Requirement already satisfied: campy in c:\\users\\paul\\anaconda3\\lib\\site-packages (0.0.1.dev19)\n"
],
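[
"# (Added sketch, not from the original notebook; `draw_pixel` is a hypothetical\n# helper name.) The drawing code below repeats the same four lines for every\n# pixel. A small helper like this one would express each pixel as a single call\n# while keeping the behavior identical:\nfrom campy.graphics.gobjects import GRect\n\n\ndef draw_pixel(window, x, y, color, size=5):\n    \"\"\"Add one filled size-by-size square of the given color at (x, y).\"\"\"\n    px = GRect(size, size, x=x, y=y)\n    px.filled = True\n    px.fill_color = color\n    window.add(px)",
"_____no_output_____"
],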
[
"\"\"\"\nFile: my_drawing\nName: Kai Cheng\n----------------------\nTODO:\n\"\"\"\n\nfrom campy.graphics.gobjects import GOval, GRect, GLabel, GArc\nfrom campy.graphics.gwindow import GWindow\n\n\ndef main():\n \"\"\"\n This program is to create a drawing of Pokemon first generation.\n Last Saturday I went to Adidas with Alan, and spotted their new Pokemon pixel t-shirt.\n I decided to create a drawing of Pokemon, because I'm poor and have no money to purchase the t-shirt.\n Therefore, I like to paint a drawing which is full of my favorite cartoon in my childhood.\n \"\"\"\n\n window = GWindow(width=700, height=400, title=\"Pokemon\")\n\n # Create a white background.\n cover = GRect(700, 400)\n cover.color = \"white\"\n cover.fill_color = \"white\"\n cover.filled = True\n window.add(cover)\n\n # Draw pixels to create Bulbasaur.\n # Column 1\n b = GRect(5, 5, x=150, y=220)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=150, y=225)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=150, y=230)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b4 = GRect(5, 5, x=150, y=235)\n b4.filled = True\n b4.fill_color = \"lightgreen\"\n window.add(b4)\n\n b4 = GRect(5, 5, x=150, y=240)\n b4.filled = True\n b4.fill_color = \"limegreen\"\n window.add(b4)\n\n bb = GRect(5, 5, x=150, y=245)\n bb.filled = True\n bb.fill_color = \"black\"\n window.add(bb)\n\n # Column 2\n\n b = GRect(5, 5, x=155, y=205)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=210)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=215)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=220)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=225)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=235)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=240)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=245)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=155, y=250)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 3\n\n b = GRect(5, 5, x=160, y=200)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=205)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=210)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=215)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=220)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=230)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=235)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=240)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=245)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=160, y=250)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 
4\n\n b = GRect(5, 5, x=165, y=205)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=210)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=215)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=220)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=230)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=235)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=240)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=245)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=165, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 5\n\n b = GRect(5, 5, x=170, y=205)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=210)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=215)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=220)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=230)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=240)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=245)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=170, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 6\n\n b = GRect(5, 5, x=175, y=200)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=205)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=210)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=215)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=220)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=235)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=240)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=245)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=175, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 7\n\n b = GRect(5, 5, x=180, y=195)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=200)\n b.filled = True\n b.fill_color = 
\"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=210)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=215)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=220)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=235)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=240)\n b.filled = True\n b.fill_color = \"tomato\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=245)\n b.filled = True\n b.fill_color = \"tomato\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=180, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 8\n\n b = GRect(5, 5, x=185, y=195)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=210)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=215)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=220)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=230)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=235)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=240)\n b.filled = True\n b.fill_color = \"white\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=245)\n b.filled = True\n b.fill_color = \"white\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=185, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 9\n\n b = GRect(5, 5, x=190, y=190)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=195)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=205)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=220)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=225)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=230)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=235)\n b.filled = True\n b.fill_color = \"lightgreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=240)\n b.filled = True\n b.fill_color = \"white\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=245)\n b.filled = True\n b.fill_color = \"limegreen\"\n 
window.add(b)\n\n b = GRect(5, 5, x=190, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=190, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 10\n\n b = GRect(5, 5, x=195, y=190)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=195)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=200)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=210)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=215)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=220)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=225)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=230)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=240)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=245)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=250)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=195, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 11\n\n b = GRect(5, 5, x=200, y=185)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=190)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=195)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=200)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=205)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=220)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=225)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=240)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=245)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=255)\n b.filled = True\n b.fill_color = \"white\"\n window.add(b)\n\n b = GRect(5, 5, x=200, y=260)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 12\n\n b = GRect(5, 5, x=205, y=180)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=185)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=190)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=195)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = 
GRect(5, 5, x=205, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=220)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=225)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=230)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=240)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=245)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=255)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=205, y=260)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 13\n\n b = GRect(5, 5, x=210, y=180)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=185)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=190)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=195)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=220)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=225)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=230)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=240)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=245)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=250)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=255)\n b.filled = True\n b.fill_color = \"white\"\n window.add(b)\n\n b = GRect(5, 5, x=210, y=260)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 14'\n\n b = GRect(5, 5, x=215, y=180)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=185)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=190)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=195)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=200)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b 
= GRect(5, 5, x=215, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=220)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=225)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=230)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=240)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=245)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=250)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=215, y=255)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 15\n\n b = GRect(5, 5, x=220, y=185)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=190)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=195)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=205)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=220)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=225)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=235)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=220, y=240)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 16\n\n b = GRect(5, 5, x=225, y=195)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=210)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=215)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=220)\n b.filled = True\n b.fill_color = \"green\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=225)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=235)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=225, y=240)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 17\n\n b = GRect(5, 5, x=230, y=195)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=200)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 
5, x=230, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=220)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=225)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=230)\n b.filled = True\n b.fill_color = \"limegreen\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=235)\n b.filled = True\n b.fill_color = \"white\"\n window.add(b)\n\n b = GRect(5, 5, x=230, y=240)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 18\n\n b = GRect(5, 5, x=235, y=200)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=205)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=210)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=215)\n b.filled = True\n b.fill_color = \"lawngreen\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=220)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=225)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=230)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=235, y=235)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # Column 19\n\n b = GRect(5, 5, x=240, y=205)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=240, y=210)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n b = GRect(5, 5, x=240, y=215)\n b.filled = True\n b.fill_color = \"black\"\n window.add(b)\n\n # # Draw pixels to create Charmander.\n # Column 1\n \"\"\"\n c = GRect(5, 5, x=300, y=260)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n \"\"\"\n c = GRect(5, 5, x=300, y=200)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=300, y=205)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=300, y=210)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 2\n\n c = GRect(5, 5, x=305, y=195)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=305, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=305, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=305, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=305, y=215)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 3\n\n c = GRect(5, 5, x=310, y=185)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=190)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=310, y=220)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 4\n\n c = GRect(5, 5, x=315, y=180)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=185)\n c.filled = True\n c.fill_color 
= \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=190)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=315, y=220)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 5\n\n c = GRect(5, 5, x=320, y=175)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=180)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=185)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=190)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=225)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=320, y=240)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 6\n\n c = GRect(5, 5, x=325, y=175)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=180)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=185)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=190)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=200)\n c.filled = True\n c.fill_color = \"white\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=205)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=210)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=225)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=230)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=235)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=240)\n c.filled = True\n c.fill_color = \"white\"\n window.add(c)\n\n c = GRect(5, 5, x=325, y=245)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 7\n\n c = GRect(5, 5, x=330, y=175)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=180)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=185)\n 
c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=190)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=200)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=205)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=210)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=225)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=230)\n c.filled = True\n c.fill_color = \"honeydew\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=235)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=240)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=330, y=245)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 8\n\n c = GRect(5, 5, x=335, y=175)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=180)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=185)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=190)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=230)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=235)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=240)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=335, y=245)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 9\n\n c = GRect(5, 5, x=340, y=180)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=185)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=190)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = 
GRect(5, 5, x=340, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=230)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=235)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=240)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=245)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=340, y=250)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 10\n\n c = GRect(5, 5, x=345, y=185)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=190)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=195)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=200)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=225)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=235)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=240)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=245)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=250)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=345, y=255)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 11\n\n c = GRect(5, 5, x=350, y=195)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=200)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=205)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=210)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=235)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=240)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=245)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=250)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=255)\n c.filled = True\n c.fill_color = \"white\"\n window.add(c)\n\n c = GRect(5, 5, x=350, y=260)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 12\n\n c = GRect(5, 5, x=355, y=205)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=210)\n c.filled = True\n c.fill_color = 
\"black\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=235)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=240)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=245)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=250)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=255)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=355, y=260)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 13\n\n c = GRect(5, 5, x=360, y=215)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=235)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=240)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=245)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=250)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=255)\n c.filled = True\n c.fill_color = \"white\"\n window.add(c)\n\n c = GRect(5, 5, x=360, y=260)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 14\n\n c = GRect(5, 5, x=365, y=220)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=235)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=240)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=245)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=250)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=365, y=255)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 15\n\n c = GRect(5, 5, x=370, y=220)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=370, y=225)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=370, y=230)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=370, y=235)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=370, y=240)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=370, y=245)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 16\n\n c = GRect(5, 5, x=375, y=195)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=375, y=200)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, 
x=375, y=215)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=375, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=375, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=375, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=375, y=235)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=375, y=240)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 17\n\n c = GRect(5, 5, x=380, y=180)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=185)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=190)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=195)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=200)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=205)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=210)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=225)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=230)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=380, y=235)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 18\n\n c = GRect(5, 5, x=385, y=175)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=180)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=185)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=190)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=195)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=200)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=205)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=210)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=215)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=220)\n c.filled = True\n c.fill_color = \"orange\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=225)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=385, y=230)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 19\n\n c = GRect(5, 5, x=390, y=180)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=185)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=190)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=195)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=200)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=205)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=210)\n c.filled = True\n c.fill_color = \"black\"\n 
window.add(c)\n\n c = GRect(5, 5, x=390, y=215)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=390, y=220)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 20\n\n c = GRect(5, 5, x=395, y=185)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=395, y=190)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=395, y=195)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=395, y=200)\n c.filled = True\n c.fill_color = \"yellow\"\n window.add(c)\n\n c = GRect(5, 5, x=395, y=205)\n c.filled = True\n c.fill_color = \"tomato\"\n window.add(c)\n\n c = GRect(5, 5, x=395, y=210)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Column 21\n\n c = GRect(5, 5, x=400, y=195)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=400, y=200)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n c = GRect(5, 5, x=400, y=205)\n c.filled = True\n c.fill_color = \"black\"\n window.add(c)\n\n # Draw pixels to create Squirtle.\n # Column 1\n\n s = GRect(5, 5, x=460, y=200)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=460, y=205)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=460, y=210)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 2\n\n s = GRect(5, 5, x=465, y=190)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=465, y=195)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=465, y=200)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=465, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=465, y=210)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=465, y=215)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 3\n\n s = GRect(5, 5, x=470, y=185)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=210)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=215)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=220)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=470, y=225)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 4\n\n s = GRect(5, 5, x=475, y=180)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=185)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=210)\n s.filled = 
True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=215)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=220)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=225)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=475, y=230)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 5\n\n s = GRect(5, 5, x=480, y=180)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=185)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=210)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=215)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=220)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=225)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=230)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=480, y=240)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 6\n\n s = GRect(5, 5, x=485, y=180)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=185)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=205)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=210)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=215)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=220)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=225)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=230)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=235)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=240)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=485, y=245)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 7\n\n s = GRect(5, 5, x=490, y=180)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=185)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=205)\n s.filled = True\n s.fill_color = 
\"black\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=210)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=215)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=220)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=225)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=230)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=235)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=240)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=490, y=245)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 8\n\n s = GRect(5, 5, x=495, y=185)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=210)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=215)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=220)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=225)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=230)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=235)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=240)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=495, y=245)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 9\n\n s = GRect(5, 5, x=500, y=185)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=190)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=195)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=210)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=215)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=220)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=225)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=230)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=235)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=240)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=245)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=500, y=250)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 10\n\n s = GRect(5, 5, x=505, y=190)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s 
= GRect(5, 5, x=505, y=195)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=205)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=210)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=215)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=220)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=225)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=230)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=235)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=240)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=245)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=250)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=505, y=255)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 11\n\n s = GRect(5, 5, x=510, y=190)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=195)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=200)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=205)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=210)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=215)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=220)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=225)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=230)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=235)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=240)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=245)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=250)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=255)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=510, y=260)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 12\n\n s = GRect(5, 5, x=515, y=195)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=200)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=205)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=210)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=215)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=220)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=225)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=230)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=235)\n s.filled = True\n s.fill_color = 
\"black\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=240)\n s.filled = True\n s.fill_color = \"#F0E68C\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=245)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=250)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=255)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=515, y=260)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 13\n\n s = GRect(5, 5, x=520, y=195)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=200)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=205)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=210)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=215)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=220)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=225)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=230)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=235)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=240)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=245)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=250)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=255)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=520, y=260)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 14\n\n s = GRect(5, 5, x=525, y=200)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=205)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=210)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=215)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=220)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=225)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=230)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=235)\n s.filled = True\n s.fill_color = \"peru\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=240)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=245)\n s.filled = True\n s.fill_color = \"white\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=250)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=525, y=255)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n # Column 15\n\n s = GRect(5, 5, x=530, y=190)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=530, y=195)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=530, y=200)\n s.filled = True\n s.fill_color = \"sky blue\"\n window.add(s)\n\n s = GRect(5, 5, x=530, y=205)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=530, y=210)\n s.filled = True\n s.fill_color = \"black\"\n window.add(s)\n\n s = GRect(5, 5, x=530, y=215)\n s.filled = True\n 
s.fill_color = \"peru\"\n    window.add(s)\n\n    s = GRect(5, 5, x=530, y=220)\n    s.filled = True\n    s.fill_color = \"peru\"\n    window.add(s)\n\n    s = GRect(5, 5, x=530, y=225)\n    s.filled = True\n    s.fill_color = \"peru\"\n    window.add(s)\n\n    s = GRect(5, 5, x=530, y=230)\n    s.filled = True\n    s.fill_color = \"peru\"\n    window.add(s)\n\n    s = GRect(5, 5, x=530, y=235)\n    s.filled = True\n    s.fill_color = \"peru\"\n    window.add(s)\n\n    s = GRect(5, 5, x=530, y=240)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=530, y=245)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    # Column 16\n\n    s = GRect(5, 5, x=535, y=185)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=190)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=195)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=200)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=205)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=210)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=215)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=220)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=225)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=230)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=535, y=235)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    # Column 17\n\n    s = GRect(5, 5, x=540, y=180)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=185)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=190)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=195)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=200)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=205)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=210)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=540, y=215)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    # Column 18\n\n    s = GRect(5, 5, x=545, y=180)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=545, y=185)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=545, y=190)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=545, y=195)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=545, y=200)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=545, y=205)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=545, y=210)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    # Column 19\n\n    s = GRect(5, 5, x=550, y=180)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=550, y=185)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=550, y=190)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=550, y=195)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=550, y=200)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=550, y=205)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=550, y=210)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    # Column 20\n\n    s = GRect(5, 5, x=555, y=185)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=555, y=190)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=555, y=195)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=555, y=200)\n    s.filled = True\n    s.fill_color = \"sky blue\"\n    window.add(s)\n\n    s = GRect(5, 5, x=555, y=205)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    # Column 21\n\n    s = GRect(5, 5, x=560, y=190)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=560, y=195)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    s = GRect(5, 5, x=560, y=200)\n    s.filled = True\n    s.fill_color = \"black\"\n    window.add(s)\n\n    label = GLabel(\"Pokémon\", x=200, y=80)      # Create the name of Pokémon.\n    label.font = \"Helvetica-50-italic-bold\"\n    window.add(label)\n\n    label = GLabel(\"Bulbasaur\", x=120, y=130)      # Create the name of Bulbasaur.\n    label.font = \"Helvetica-20-bold\"\n    window.add(label)\n\n    label = GLabel(\"Charmander\", x=275, y=130)      # Create the name of Charmander.\n    label.font = \"Helvetica-20-bold\"\n    window.add(label)\n\n    label = GLabel(\"Squirtle\", x=465, y=130)      # Create the name of Squirtle.\n    label.font = \"Helvetica-20-bold\"\n    window.add(label)\n\n    choose_sign1 = GLabel(\"▲\", x=170, y=330)      # Create the sign of \"▲\".\n    choose_sign1.font = \"-20\"\n    window.add(choose_sign1)\n\n    choose_sign2 = GLabel(\"▲\", x=337, y=330)      # Create the sign of \"▲\".\n    choose_sign2.font = \"-20\"\n    window.add(choose_sign2)\n\n    choose_sign3 = GLabel(\"▲\", x=500, y=330)      # Create the sign of \"▲\".\n    choose_sign3.font = \"-20\"\n    window.add(choose_sign3)\n\n\nif __name__ == '__main__':\n    main()\n",
"ERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle 
BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File 
\"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\nERROR:asyncio:Exception in callback BaseSelectorEventLoop._read_from_self()\nhandle: <Handle BaseSelectorEventLoop._read_from_self()>\nTraceback (most recent call last):\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\events.py\", line 81, in _run\n self._context.run(self._callback, *self._args)\n File \"C:\\Users\\Paul\\anaconda3\\lib\\asyncio\\selector_events.py\", line 120, in _read_from_self\n data = self._ssock.recv(4096)\nConnectionResetError: [WinError 10054] 遠端主機已強制關閉一個現存的連線。\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
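The pixel-art cell in the record above repeats the same four-line `GRect` block hundreds of times. A small helper plus a per-column color list collapses each block into a single call. This is a minimal sketch, not the notebook's code; it assumes the same campy-style `GRect`/`GWindow` API used in the cell, and `draw_pixel`, `PIXEL`, and `column_colors` are hypothetical names:

```python
from campy.graphics.gobjects import GRect
from campy.graphics.gwindow import GWindow

PIXEL = 5  # matches the 5x5 squares drawn in the cell above


def draw_pixel(window, x, y, color):
    # Add one filled PIXEL x PIXEL square at (x, y) in the given color.
    square = GRect(PIXEL, PIXEL, x=x, y=y)
    square.filled = True
    square.fill_color = color
    window.add(square)


window = GWindow(width=600, height=400)
# One column of the sprite becomes a list of colors plus a loop.
column_colors = ["black", "sky blue", "sky blue", "sky blue", "sky blue",
                 "sky blue", "sky blue", "sky blue", "black", "black",
                 "#F0E68C", "#F0E68C", "black"]
for i, color in enumerate(column_colors):
    draw_pixel(window, x=495, y=185 + i * PIXEL, color=color)
```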
c52bf97f6dbe764b67e6482b537b6f69cf00ecd4
| 123,143 |
ipynb
|
Jupyter Notebook
|
00.ipynb
|
Programmer-RD-AI/Landscape-Pictures-GAN-I-am-going-to-try
|
c9ecf2f98965c1ea742ad3a3d361e5a9aa07ac99
|
[
"Apache-2.0"
] | 2 |
2021-07-30T18:15:03.000Z
|
2021-11-17T11:01:40.000Z
|
00.ipynb
|
Programmer-RD-AI/Landscape-Pictures-GAN-I-am-going-to-try
|
c9ecf2f98965c1ea742ad3a3d361e5a9aa07ac99
|
[
"Apache-2.0"
] | null | null | null |
00.ipynb
|
Programmer-RD-AI/Landscape-Pictures-GAN-I-am-going-to-try
|
c9ecf2f98965c1ea742ad3a3d361e5a9aa07ac99
|
[
"Apache-2.0"
] | null | null | null | 310.183879 | 110,712 | 0.923698 |
[
[
[
"import cv2\nimport os\nimport torch,torchvision\nimport torch.nn as nn\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nimport torch.optim as optim\nfrom torch.nn import *\nfrom torch.utils.tensorboard import SummaryWriter\nimport matplotlib.pyplot as plt\nimport wandb\nfrom ray import tune\nimport os\ntorch.cuda.empty_cache()",
"_____no_output_____"
],
[
"device = 'cuda'\nPROJECT_NAME = 'Landscape-Pictures-GAN'\nIMG_SIZE = 224",
"_____no_output_____"
],
[
"def load_data(directory='./data/',img_size=IMG_SIZE,num_of_samples=500):\n idx = -1\n data = []\n for file in tqdm(os.listdir(directory)):\n idx += 1\n file = directory + file\n img = cv2.imread(file)\n img = cv2.resize(img,(img_size,img_size))\n data.append(img)\n print(idx)\n data = data[:num_of_samples]\n return torch.from_numpy(np.array(data))",
"_____no_output_____"
],
[
"# data = load_data()\n# torch.save(data,'./data.pt')\n# torch.save(data,'./data.pth')\ndata = torch.load('./data.pth')",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"plt.imshow(torch.tensor(data[0]).view(IMG_SIZE,IMG_SIZE,3))",
"/home/indika/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"class Desc(nn.Module):\n def __init__(self,linearactivation=nn.LeakyReLU()):\n super().__init__()\n self.linearactivation = linearactivation\n self.linear1 = nn.Linear(IMG_SIZE*IMG_SIZE*3,2)\n self.linear1batchnorm = nn.BatchNorm1d(2)\n self.linear2 = nn.Linear(2,4)\n self.linear2batchnorm = nn.BatchNorm1d(4)\n self.linear3 = nn.Linear(4,2)\n self.linear3batchnorm = nn.BatchNorm1d(2)\n self.output = nn.Linear(2,1)\n self.outputactivation = nn.Sigmoid()\n \n def forward(self,X,shape=True):\n preds = self.linearactivation(self.linear1batchnorm(self.linear1(X)))\n preds = self.linearactivation(self.linear2batchnorm(self.linear2(preds)))\n preds = self.linearactivation(self.linear3batchnorm(self.linear3(preds)))\n preds = self.outputactivation(self.output(preds))\n return preds",
"_____no_output_____"
],
[
"class Gen(nn.Module):\n def __init__(self,z_dim,linearactivation=nn.LeakyReLU()):\n super().__init__()\n self.linearactivation = linearactivation\n self.linear1 = nn.Linear(z_dim,256)\n self.linear1batchnorm = nn.BatchNorm1d(256)\n self.linear2 = nn.Linear(256,512)\n self.linear2batchnorm = nn.BatchNorm1d(512)\n self.linear3 = nn.Linear(512,256)\n self.linear3batchnorm = nn.BatchNorm1d(256)\n self.output = nn.Linear(256,IMG_SIZE*IMG_SIZE*3)\n self.outputactivation = nn.Tanh()\n \n def forward(self,X):\n preds = self.linearactivation(self.linear1batchnorm(self.linear1(X)))\n preds = self.linearactivation(self.linear2batchnorm(self.linear2(preds)))\n preds = self.linearactivation(self.linear3batchnorm(self.linear3(preds)))\n preds = self.outputactivation(self.output(preds))\n return preds",
"_____no_output_____"
],
[
"z_dim = 64\nBATCH_SIZE = 32\nlr = 3e-4\ncriterion = nn.BCELoss()\nepochs = 125\nfixed_noise = torch.randn((BATCH_SIZE,z_dim)).to(device)",
"_____no_output_____"
],
[
"gen = Gen(z_dim=z_dim).to(device)\noptimizer_gen = optim.Adam(gen.parameters(),lr=lr)",
"_____no_output_____"
],
[
"desc = Desc().to(device)\noptimizer_desc = optim.Adam(desc.parameters(),lr=lr)",
"_____no_output_____"
],
[
"def accuracy_fake(desc_fake):\n correct = 0\n total = 0\n preds = np.round(np.array(desc_fake.cpu().detach().numpy()))\n for pred in preds:\n if pred == 0:\n correct += 1\n total += 1\n return round(correct/total,3)\ndef accuracy_real(desc_real):\n correct = 0\n total = 0\n preds = np.round(np.array(desc_real.cpu().detach().numpy()))\n for pred in preds:\n if pred == 1:\n correct += 1\n total += 1\n return round(correct/total,3)",
"_____no_output_____"
],
[
"torch.cuda.empty_cache()\nmsg = input('Msg : ') # 0.1-leaky-relu-desc\nwandb.init(project=PROJECT_NAME,name=f'baseline-{msg}')\nfor epoch in tqdm(range(epochs)):\n torch.cuda.empty_cache()\n for idx in range(0,len(data),BATCH_SIZE):\n torch.cuda.empty_cache()\n X_batch = torch.tensor(np.array(data[idx:idx+BATCH_SIZE])).view(-1,IMG_SIZE*IMG_SIZE*3).to(device).float()\n batch_size = X_batch.shape[0]\n noise = torch.randn(batch_size, z_dim).to(device)\n fake = gen(noise).float()\n desc_real = desc(X_batch).view(-1)\n lossD_real = criterion(desc_real,torch.ones_like(desc_real))\n desc_fake = desc(fake).view(-1)\n lossD_fake = criterion(desc_fake,torch.zeros_like(desc_fake))\n lossD = (lossD_real+lossD_fake)/2\n desc.zero_grad()\n lossD.backward(retain_graph=True)\n wandb.log({'lossD':lossD.item()})\n optimizer_desc.step()\n output = desc(fake).view(-1)\n lossG = criterion(output, torch.ones_like(output))\n gen.zero_grad()\n wandb.log({'lossG':lossG.item()})\n lossG.backward()\n wandb.log({'lossG':lossG.item()})\n optimizer_gen.step()\n wandb.log({'accuracy_fake':accuracy_fake(desc_fake)})\n wandb.log({'accuracy_real':accuracy_real(desc_real)})\n with torch.no_grad():\n imgs = gen(fixed_noise).view(-1,3,IMG_SIZE,IMG_SIZE)\n imgs_all_grid = torchvision.utils.make_grid(imgs,normalize=True)\n wandb.log({'img':wandb.Image(imgs[0].cpu())})\n wandb.log({'imgs':wandb.Image(imgs_all_grid)})",
"Msg : gen-starter-512\n"
],
[
"\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
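One pitfall worth flagging in the GAN record above: the generator ends in `Tanh` (outputs in [-1, 1]) while the discriminator is fed raw 0-255 pixel values, so the real and fake input distributions never overlap. A minimal sketch of the usual fix; `to_model_space` and `to_image_space` are hypothetical helpers, not part of the notebook:

```python
import torch


def to_model_space(batch: torch.Tensor) -> torch.Tensor:
    # Scale uint8 images (0-255) to [-1, 1], the range of the generator's Tanh.
    return batch.float().div(127.5).sub(1.0)


def to_image_space(batch: torch.Tensor) -> torch.Tensor:
    # Invert the scaling for visualisation.
    return batch.add(1.0).mul(127.5).clamp(0, 255).byte()


# Inside the training loop, the real batch would then be built as:
# X_batch = to_model_space(data[idx:idx + BATCH_SIZE]).view(-1, IMG_SIZE * IMG_SIZE * 3).to(device)
```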
c52bfe537120f83f127521936d7f6679f9aa667a
| 293,980 |
ipynb
|
Jupyter Notebook
|
code/03.rpy2.ipynb
|
computational-class/cjc
|
1569ce7a7a85571bd2e399ab20fb950d7f8963b2
|
[
"MIT"
] | 65 |
2017-04-06T01:00:19.000Z
|
2020-11-16T15:30:30.000Z
|
code/03.rpy2.ipynb
|
AnxietyVendor/cjc
|
4bfd22ea4f360a803093a95bd9b1a2d497b7200a
|
[
"MIT"
] | 90 |
2017-05-12T10:09:06.000Z
|
2019-09-17T13:13:22.000Z
|
code/03.rpy2.ipynb
|
AnxietyVendor/cjc
|
4bfd22ea4f360a803093a95bd9b1a2d497b7200a
|
[
"MIT"
] | 48 |
2017-03-22T02:58:34.000Z
|
2020-11-16T03:08:47.000Z
| 277.601511 | 108,823 | 0.917647 |
[
[
[
"### 计算新闻传播学课程\n\n***\n***\n\n# rpy2: Using R within Jupyter Notebook\n\n***\n***\n\n王成军\n\[email protected]\n\n计算传播网 http://computational-communication.com",
"_____no_output_____"
],
[
"> conda install rpy2",
"_____no_output_____"
]
],
[
[
"%load_ext rpy2.ipython \n# conda install rpy2",
"_____no_output_____"
]
],
[
[
"# Rpush: push Python object to R",
"_____no_output_____"
]
],
[
[
"import numpy as np\nX = np.array([4.5,6.3,7.9, 10.3])\n%Rpush X\n%R mean(X)",
"_____no_output_____"
],
[
"%%R\nY = c(2,4,3,9)\nsummary(lm(Y~X))",
"_____no_output_____"
],
[
"%R plot(X, Y)",
"_____no_output_____"
],
[
"%R dat = data.frame(X, Y)",
"_____no_output_____"
]
],
[
[
"# Rpull: pull data from R to python\n\nhttps://rpy2.github.io/doc/latest/html/interactive.html?highlight=rpull#rpy2.ipython.rmagic.RMagics.Rpull\n\nNot work for Python 3.X",
"_____no_output_____"
]
],
[
[
"%R x = c(3,4,6.7); y = c(4,6,7); z = c('a',3,4)",
"_____no_output_____"
],
[
"%R x",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
],
[
"%Rpull dat",
"_____no_output_____"
],
[
"dat",
"_____no_output_____"
],
[
"# import rpy2's package module\nimport rpy2.robjects.packages as rpackages\n# import R's utility package\nutils = rpackages.importr('utils') ",
"_____no_output_____"
],
[
"# select a mirror for R packages\nutils.chooseCRANmirror() ",
"Secure CRAN mirrors\n \n\n\n\n\n\n 1: 0-Cloud [https] 2: Algeria [https] \n 3: Australia (Canberra) [https] 4: Australia (Melbourne 1) [https]\n 5: Australia (Melbourne 2) [https] 6: Australia (Perth) [https] \n 7: Austria [https] 8: Belgium (Ghent) [https] \n 9: Brazil (PR) [https] 10: Brazil (RJ) [https] \n11: Brazil (SP 1) [https] 12: Brazil (SP 2) [https] \n13: Bulgaria [https] 14: Chile 1 [https] \n15: Chile 2 [https] 16: China (Guangzhou) [https] \n17: China (Lanzhou) [https] 18: China (Shanghai) [https] \n19: Colombia (Cali) [https] 20: Czech Republic [https] \n21: Denmark [https] 22: East Asia [https] \n23: Ecuador (Cuenca) [https] 24: Ecuador (Quito) [https] \n25: Estonia [https] 26: France (Lyon 1) [https] \n27: France (Lyon 2) [https] 28: France (Marseille) [https] \n29: France (Montpellier) [https] 30: France (Paris 2) [https] \n31: Germany (Erlangen) [https] 32: Germany (Göttingen) [https] \n33: Germany (Münster) [https] 34: Greece [https] \n35: Iceland [https] 36: Indonesia (Jakarta) [https] \n37: Ireland [https] 38: Italy (Padua) [https] \n39: Japan (Tokyo) [https] 40: Japan (Yonezawa) [https] \n41: Malaysia [https] 42: Mexico (Mexico City) [https] \n43: Norway [https] 44: Philippines [https] \n45: Serbia [https] 46: Spain (A Coruña) [https] \n47: Spain (Madrid) [https] 48: Sweden [https] \n49: Switzerland [https] 50: Turkey (Denizli) [https] \n51: Turkey (Mersin) [https] 52: UK (Bristol) [https] \n53: UK (Cambridge) [https] 54: UK (London 1) [https] \n55: USA (CA 1) [https] 56: USA (IA) [https] \n57: USA (KS) [https] 58: USA (MI 1) [https] \n59: USA (NY) [https] 60: USA (OR) [https] \n61: USA (TN) [https] 62: USA (TX 1) [https] \n63: Vietnam [https] 64: (other mirrors) \n\n\n\n\n\n\nSelection: 17\n"
],
[
"# R package names\npacknames = ('ggplot2', 'hexbin')\n# R vector of strings \nfrom rpy2.robjects.vectors import StrVector\n# Selectively install what needs to be install.\n# We are fancy, just because we can.\nnames_to_install = packnames \nif len(names_to_install) > 0:\n utils.install_packages(StrVector(names_to_install))",
"/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 还安装相依关系‘colorspace’, ‘munsell’, ‘viridisLite’, ‘MASS’, ‘scales’\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/colorspace_1.3-2.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: Content type 'application/octet-stream'\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 293433 bytes (286 KB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: =\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: \n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: downloaded 286 KB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/munsell_0.4.3.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 97244 bytes (94 KB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: downloaded 94 KB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/viridisLite_0.3.0.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 44019 bytes (42 KB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: downloaded 42 KB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/MASS_7.3-49.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 487772 bytes (476 KB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: downloaded 476 KB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/scales_0.5.0.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 59867 bytes (58 KB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: 
RRuntimeWarning: downloaded 58 KB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/ggplot2_2.2.1.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 2213308 bytes (2.1 MB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: downloaded 2.1 MB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 试开URL’https://mirror.lzu.edu.cn/CRAN/src/contrib/hexbin_1.27.2.tar.gz'\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: length 491560 bytes (480 KB)\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: downloaded 480 KB\n\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: \n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 下载的程序包在\n\t‘/private/var/folders/8b/hhnbt0nd4zsg2qhxc28q23w80000gn/T/Rtmp3eywQb/downloaded_packages’里\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 更新'.Library'里的HTML程序包列表\n\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: Making 'packages.html' ...\n warnings.warn(x, RRuntimeWarning)\n/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: 做完了。\n\n warnings.warn(x, RRuntimeWarning)\n"
],
[
"import rpy2.interactive as r\nimport rpy2.interactive.packages # this can take few seconds\nr.packages.importr('ggplot2')",
"_____no_output_____"
],
[
"%%R \np = ggplot(data = dat, mapping = aes(x = X, y =Y))\np + geom_point()",
"_____no_output_____"
],
[
"%%R\nlibrary(lattice)\nattach(mtcars)\n\n# scatterplot matrix\nsplom(mtcars[c(1,3,4,5,6)], main=\"MTCARS Data\")",
"/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/rpy2/rinterface/__init__.py:145: RRuntimeWarning: The following object is masked from package:ggplot2:\n\n mpg\n\n\n warnings.warn(x, RRuntimeWarning)\n"
],
[
"%%R\ndata(diamonds) \nset.seed(42) \nsmall = diamonds[sample(nrow(diamonds), 1000), ] \nhead(small)\n\np = ggplot(data = small, mapping = aes(x = carat, y = price))\np + geom_point()",
"_____no_output_____"
],
[
"%%R \np = ggplot(data=small, mapping=aes(x=carat, y=price, shape=cut, colour=color)) \np+geom_point()",
"_____no_output_____"
],
[
"import rpy2.robjects as ro\nfrom rpy2.robjects.packages import importr\n\nbase = importr('base')\n\nfit_full = ro.r(\"lm('mpg ~ wt + cyl', data=mtcars)\")\nprint(base.summary(fit_full))",
"\nCall:\nlm(formula = \"mpg ~ wt + cyl\", data = mtcars)\n\nResiduals:\n Min 1Q Median 3Q Max \n-4.2893 -1.5512 -0.4684 1.5743 6.1004 \n\nCoefficients:\n Estimate Std. Error t value Pr(>|t|) \n(Intercept) 39.6863 1.7150 23.141 < 2e-16 ***\nwt -3.1910 0.7569 -4.216 0.000222 ***\ncyl -1.5078 0.4147 -3.636 0.001064 ** \n---\nSignif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1\n\nResidual standard error: 2.568 on 29 degrees of freedom\nMultiple R-squared: 0.8302,\tAdjusted R-squared: 0.8185 \nF-statistic: 70.91 on 2 and 29 DF, p-value: 6.809e-12\n\n\n"
],
[
"diamonds = ro.r(\"data(diamonds)\")",
"_____no_output_____"
],
[
"%R head(diamonds)",
"_____no_output_____"
],
[
"fit_dia = ro.r(\"lm('price ~ carat + cut + color + clarity + depth', data=diamonds)\")",
"_____no_output_____"
],
[
"print(base.summary(fit_dia))",
"\nCall:\nlm(formula = \"price ~ carat + cut + color + clarity + depth\", \n data = diamonds)\n\nResiduals:\n Min 1Q Median 3Q Max \n-16805.0 -680.3 -197.9 466.2 10393.4 \n\nCoefficients:\n Estimate Std. Error t value Pr(>|t|) \n(Intercept) -3264.660 232.513 -14.041 < 2e-16 ***\ncarat 8885.816 12.034 738.362 < 2e-16 ***\ncut.L 686.238 21.377 32.102 < 2e-16 ***\ncut.Q -319.729 18.383 -17.393 < 2e-16 ***\ncut.C 180.446 15.556 11.600 < 2e-16 ***\ncut^4 0.679 12.496 0.054 0.9567 \ncolor.L -1908.788 17.729 -107.667 < 2e-16 ***\ncolor.Q -627.976 16.121 -38.955 < 2e-16 ***\ncolor.C -172.431 15.072 -11.440 < 2e-16 ***\ncolor^4 21.905 13.840 1.583 0.1135 \ncolor^5 -85.781 13.076 -6.560 5.43e-11 ***\ncolor^6 -50.112 11.889 -4.215 2.50e-05 ***\nclarity.L 4214.426 30.873 136.508 < 2e-16 ***\nclarity.Q -1831.631 28.829 -63.533 < 2e-16 ***\nclarity.C 922.123 24.686 37.354 < 2e-16 ***\nclarity^4 -361.446 19.741 -18.310 < 2e-16 ***\nclarity^5 215.655 16.117 13.381 < 2e-16 ***\nclarity^6 2.606 14.039 0.186 0.8528 \nclarity^7 110.305 12.383 8.908 < 2e-16 ***\ndepth -7.160 3.727 -1.921 0.0547 . \n---\nSignif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1\n\nResidual standard error: 1157 on 53920 degrees of freedom\nMultiple R-squared: 0.9159,\tAdjusted R-squared: 0.9159 \nF-statistic: 3.092e+04 on 19 and 53920 DF, p-value: < 2.2e-16\n\n\n"
]
],
[
[
"# END",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
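The `%Rpull` magic in the rpy2 record above is flagged as not working under Python 3; the conversion API is the more robust route for moving an R `data.frame` into pandas. A minimal sketch, assuming rpy2 3.x and its documented converter pattern:

```python
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter

# Build a small data.frame on the R side.
r_df = ro.r('data.frame(X=c(4.5, 6.3, 7.9, 10.3), Y=c(2, 4, 3, 9))')

# Convert it to a pandas.DataFrame inside a local converter context.
with localconverter(ro.default_converter + pandas2ri.converter):
    pdf = ro.conversion.rpy2py(r_df)

print(pdf.describe())
```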
c52c076d7fbd7a757b76a7864887b2b1a4718fcb
| 230,990 |
ipynb
|
Jupyter Notebook
|
public/02-Mundi_Advanced_Notebooks/04_mundi_country_polygon.ipynb
|
VincentSaleh/Mundi_NotebookUsage
|
37a50631971fc58b2b18d0b0b3f020b8e06f3623
|
[
"BSD-3-Clause"
] | 3 |
2020-02-13T10:06:30.000Z
|
2020-03-12T12:03:50.000Z
|
public/02-Mundi_Advanced_Notebooks/04_mundi_country_polygon.ipynb
|
VincentSaleh/Mundi_NotebookUsage
|
37a50631971fc58b2b18d0b0b3f020b8e06f3623
|
[
"BSD-3-Clause"
] | 1 |
2020-04-09T20:27:34.000Z
|
2020-06-02T13:42:53.000Z
|
public/02-Mundi_Advanced_Notebooks/04_mundi_country_polygon.ipynb
|
VincentSaleh/Mundi_NotebookUsage
|
37a50631971fc58b2b18d0b0b3f020b8e06f3623
|
[
"BSD-3-Clause"
] | 5 |
2019-06-24T14:22:28.000Z
|
2021-02-07T12:51:06.000Z
| 1,069.398148 | 217,100 | 0.958466 |
[
[
[
"<img src='https://mundiwebservices.com/build/assets/Mundi-Logo-CMYK-colors.png' align='left' width='15%' ></img> ",
"_____no_output_____"
],
[
"# Mundi WMS Country Polygon",
"_____no_output_____"
]
],
[
[
"from utils import country_polygon_bbox\nfrom utils import display_country_on_world_map",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\nfrom shapely.geometry.polygon import Polygon\nfrom descartes import PolygonPatch",
"_____no_output_____"
]
],
[
[
"# Selection of the desired country",
"_____no_output_____"
]
],
[
[
"country_name = 'Slovenia'\npolygon, bbox = country_polygon_bbox(country_name)",
"_____no_output_____"
]
],
[
[
"# Polygon and Bbox of the selected country",
"_____no_output_____"
]
],
[
[
"print(country_name)\nprint(f'bbox = {bbox}')\nprint(polygon)",
"Slovenia\nbbox = (13, 45, 17, 47)\nPOLYGON ((13.80647545742153 46.50930613869122, 14.63247155117483 46.43181732846955, 15.13709191250499 46.65870270444703, 16.01166385261266 46.6836107448117, 16.20229821133736 46.85238597267696, 16.37050499844742 46.8413272161665, 16.56480838386486 46.50375092221983, 15.76873294440855 46.23810822202345, 15.67152957526756 45.83415355079788, 15.3239538916724 45.73178253842768, 15.32767459479743 45.45231639259333, 14.93524376797293 45.47169505470269, 14.59510949062781 45.63494090431271, 14.41196821458541 45.46616567644746, 13.71505984869722 45.50032379819238, 13.93763024257831 45.59101593686462, 13.69810997890548 46.01677806251735, 13.80647545742153 46.50930613869122))\n"
]
],
[
[
"# Display of the selected country on a static and an interactive map",
"_____no_output_____"
]
],
[
[
"display_country_on_world_map(country_name, 20, 'red') ",
"_____no_output_____"
],
[
"import folium\nm = folium.Map([50.854457, 4.377184], zoom_start=3, tiles='cartodbpositron')\nfolium.GeoJson(polygon).add_to(m)\nfolium.LatLngPopup().add_to(m)\nm",
"_____no_output_____"
]
],
[
[
"# Getting a geojson file for the polygon of the selected country and display in it in the website",
"_____no_output_____"
]
],
[
[
"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport json\nimport geojsonio\nimport geopandas as gpd\n\ncrs = {'init': 'epsg:4326'}\npolygon_file = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon])\npolygon_file.to_file(filename='/home/jovyan/work/polygon.geojson', driver='GeoJSON')\n\n\ncountry_polygon = gpd.read_file('/home/jovyan/work/polygon.geojson')\n\ncountry_polygon = country_polygon.to_json()\n\nurl=geojsonio.display(country_polygon)\n\nfrom IPython.core.display import display, HTML\ndisplay(HTML(f\"\"\"<a href={url}>{country_name}</a>\"\"\"))\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
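The Mundi record above prints `bbox = (13, 45, 17, 47)` for Slovenia without showing how `country_polygon_bbox` derives it. One plausible derivation from the shapely polygon, shown as a hypothetical sketch (the actual `utils` implementation is not in the notebook):

```python
import math
from shapely.geometry.polygon import Polygon


def bbox_from_polygon(polygon: Polygon) -> tuple:
    # Smallest integer (min_lon, min_lat, max_lon, max_lat) box enclosing the polygon.
    minx, miny, maxx, maxy = polygon.bounds  # shapely returns (minx, miny, maxx, maxy)
    return (math.floor(minx), math.floor(miny), math.ceil(maxx), math.ceil(maxy))


# For the Slovenia polygon printed above (lon 13.69..16.37, lat 45.45..46.85)
# this yields (13, 45, 17, 47), matching the notebook's output.
```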
c52c095022b5d64c48a79c220c80b3007b505a6e
| 441,989 |
ipynb
|
Jupyter Notebook
|
examples/MOEAs/.ipynb_checkpoints/NSGAII & SPEA2 (DTLZ2 problem)-checkpoint.ipynb
|
FernandoGaGu/beagle
|
b1c968ec84d560e9903a582413e6334fcf447735
|
[
"BSD-3-Clause"
] | 1 |
2020-12-27T15:58:14.000Z
|
2020-12-27T15:58:14.000Z
|
examples/MOEAs/NSGAII & SPEA2 (DTLZ2 problem).ipynb
|
FernandoGaGu/beagle
|
b1c968ec84d560e9903a582413e6334fcf447735
|
[
"BSD-3-Clause"
] | null | null | null |
examples/MOEAs/NSGAII & SPEA2 (DTLZ2 problem).ipynb
|
FernandoGaGu/beagle
|
b1c968ec84d560e9903a582413e6334fcf447735
|
[
"BSD-3-Clause"
] | null | null | null | 1,319.370149 | 292,252 | 0.958913 |
[
[
[
"Definition of **DTLZ2 problem** with 3 objective functions:\n\n$f_1(X) = (1 + g(x_3)) \\cdot cos(x_1 \\cdot \\frac{\\pi}{2}) \\cdot cos(x_2 \\cdot \\frac{\\pi}{2})$\n\n$f_2(X) = (1 + g(x_3)) \\cdot cos(x_1 \\cdot \\frac{\\pi}{2}) \\cdot sin(x_2 \\cdot \\frac{\\pi}{2})$\n\n$f_3(x) = (1 + g(x_3)) \\cdot sin(x_1 \\cdot \\frac{\\pi}{2})$\n\nwith \n\n$-10 \\leq x_1 \\leq 10$\n\n$-10 \\leq x_2 \\leq 10$\n\n$-10 \\leq x_3 \\leq 10$\n\n$g(x_3) = \\sum_{x_i \\in X_M} (x_i - 0.5)^2$\n\nadapted from \"*Scalable Test Problems for Evolutionary Multi-Objective Optimization*\" section 8.2.",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append('../..') \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport beagle as be",
"_____no_output_____"
],
[
"np.random.seed(1997)",
"_____no_output_____"
],
[
"# Problem definition\ndef func_1(values):\n const = np.pi / 2\n return (1 + g(values)) * np.cos(values[0]*const) * np.cos(values[1]*const) \n\ndef func_2(values):\n const = np.pi / 2\n return (1 + g(values)) * np.cos(values[0]*const) * np.sin(values[1]*const) \n\ndef func_3(values):\n const = np.pi / 2\n return (1 + g(values)) * np.sin(values[0]*const) \n\ndef g(values):\n result = 0.0\n for val in values:\n result += (val - 0.5)**2\n \n return result\n\n \nx_1 = x_2 = x_3 = (-10.0, 10.0)\n\nrepresentation = 'real'",
"_____no_output_____"
],
[
"# Algorithm definition\nnsga2 = be.use_algorithm(\n 'experimental.NSGA2', \n fitness=be.Fitness(func_1, func_2, func_3), \n population_size=100, \n individual_representation='real',\n bounds = [x_1, x_2, x_3],\n alg_id='nsga2',\n evaluate_in_parallel=False\n)\n\nspea2 = be.use_algorithm(\n 'experimental.SPEA2', \n fitness=be.Fitness(func_1, func_2, func_3), \n population_size=50, \n individual_representation='real',\n bounds = [x_1, x_2, x_3],\n spea2_archive=100,\n alg_id='spea2',\n evaluate_in_parallel=False\n)",
"_____no_output_____"
],
[
"wrapper = be.parallel(nsga2, spea2, generations=50)",
"(nsga2) Generations : 100%|██████████| 50/50 [00:05<00:00, 9.62it/s]\n(spea2) Generations : 100%|██████████| 50/50 [00:27<00:00, 1.84it/s]\n"
],
[
"wrapper.algorithms",
"_____no_output_____"
],
[
"# NSGA2\nbe.display(wrapper.algorithms[0], only_show=True)",
"_____no_output_____"
],
[
"# SPEA2\nbe.display(wrapper.algorithms[1], only_show=True)",
"_____no_output_____"
],
[
"# Obtain the solutions that make up the non-dominated front of each algorithm\nindices, values = be.pareto_front(wrapper.algorithms[0])\nnsga2_sols = np.array([\n wrapper.algorithms[0].population[idx].values for idx in indices['population']\n ])\n\nindices, values = be.pareto_front(wrapper.algorithms[0])\nspea2_sols = np.array([\n wrapper.algorithms[1].population['archive'][idx].values for idx in indices['population']\n ])",
"_____no_output_____"
],
[
"fig = plt.figure(2, figsize=(15, 15))\nax1 = fig.add_subplot(221, projection='3d')\nax2 = fig.add_subplot(222, projection='3d')\nax3 = fig.add_subplot(223, projection='3d')\nax4 = fig.add_subplot(224, projection='3d')\n\n# Problem definition\ndef f_1_vec(x, y, z):\n const = np.pi / 2\n values = np.array((x, y, z))\n return (1 + g_vec(values)) * np.cos(x*const) * np.cos(y*const)\n\ndef f_2_vec(x, y, z):\n const = np.pi / 2\n values = np.array((x, y, z)) \n return (1 + g_vec(values)) * np.cos(x*const) * np.sin(y*const) \n\ndef f_3_vec(x, y, z):\n const = np.pi / 2\n values = np.array((x, y, z)) \n return (1 + g_vec(values)) * np.sin(x*const) \n\ndef g_vec(values):\n result = np.power(values - 0.5, 2)\n \n return np.sum(result, axis=0)\n\nfor ax in [ax1, ax2, ax3, ax4]:\n \n # Plot the obtained Pareto's front\n ax.scatter(\n f_1_vec(spea2_sols[:, 0], nsga2_sols[:, 1], nsga2_sols[:, 2]),\n f_2_vec(nsga2_sols[:, 0], nsga2_sols[:, 1], nsga2_sols[:, 2]),\n f_3_vec(nsga2_sols[:, 0], nsga2_sols[:, 1], nsga2_sols[:, 2]),\n color='red', alpha=0.7, linewidth=0, antialiased=False, label='SPEA2')\n\n ax.scatter(\n f_1_vec(spea2_sols[:, 0], spea2_sols[:, 1], spea2_sols[:, 2]),\n f_2_vec(spea2_sols[:, 0], spea2_sols[:, 1], spea2_sols[:, 2]),\n f_3_vec(spea2_sols[:, 0], spea2_sols[:, 1], spea2_sols[:, 2]),\n color='green', alpha=0.7, linewidth=0, antialiased=False, label='NSGA2')\n \n ax.set_xlabel('f1(x)', size=15)\n ax.set_ylabel('f2(x)', size=15)\n ax.set_zlabel('f3(x)', size=15)\n\nax2.view_init(40, -20)\nax3.view_init(40, 0)\nax4.view_init(40, 30)\n\nhandles, labels = ax1.get_legend_handles_labels()\nfig.legend(handles, labels, loc='lower center', fontsize=20, markerscale=2)\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
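For the DTLZ2 record above there is a convenient convergence check: in the standard formulation the true Pareto front lies on the positive unit sphere, f1^2 + f2^2 + f3^2 = 1, reached when g(.) = 0. A minimal sketch; note the notebook's adaptation also feeds the position variables into g, so treat the measure as approximate. `distance_to_dtlz2_front` is a hypothetical helper:

```python
import numpy as np


def distance_to_dtlz2_front(objectives: np.ndarray) -> np.ndarray:
    # Distance of each (f1, f2, f3) row from the unit sphere holding the
    # true DTLZ2 Pareto front (the norm of an optimal point is exactly 1).
    return np.abs(np.linalg.norm(objectives, axis=1) - 1.0)


# objs = np.stack([f_1_vec(sols[:, 0], sols[:, 1], sols[:, 2]),
#                  f_2_vec(sols[:, 0], sols[:, 1], sols[:, 2]),
#                  f_3_vec(sols[:, 0], sols[:, 1], sols[:, 2])], axis=1)
# print(distance_to_dtlz2_front(objs).mean())  # closer to 0 is better
```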
c52c0bb0729f29ad796a6a88d983b13afc51d0e7
| 6,888 |
ipynb
|
Jupyter Notebook
|
Streamlit_Colab.ipynb
|
debparth/Colab_tips_tricks
|
13ec062a303257810a53307a9c1dfb65c79002e7
|
[
"MIT"
] | null | null | null |
Streamlit_Colab.ipynb
|
debparth/Colab_tips_tricks
|
13ec062a303257810a53307a9c1dfb65c79002e7
|
[
"MIT"
] | null | null | null |
Streamlit_Colab.ipynb
|
debparth/Colab_tips_tricks
|
13ec062a303257810a53307a9c1dfb65c79002e7
|
[
"MIT"
] | null | null | null | 31.888889 | 238 | 0.470093 |
[
[
[
"<a href=\"https://colab.research.google.com/github/debparth/Colab_tips_tricks/blob/main/Streamlit_Colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"Run all of the following to start the Streamlit demo!\n\nModify with the [docs](https://docs.streamlit.io/) to prototype your own ML apps.",
"_____no_output_____"
],
[
"**Install Streamlit**",
"_____no_output_____"
]
],
[
[
"!pip install streamlit -q",
"\u001b[K |████████████████████████████████| 7.5MB 9.3MB/s \n\u001b[K |████████████████████████████████| 81kB 9.1MB/s \n\u001b[K |████████████████████████████████| 4.5MB 40.6MB/s \n\u001b[K |████████████████████████████████| 163kB 49.5MB/s \n\u001b[K |████████████████████████████████| 112kB 39.7MB/s \n\u001b[K |████████████████████████████████| 122kB 39.9MB/s \n\u001b[K |████████████████████████████████| 71kB 8.4MB/s \n\u001b[?25h Building wheel for blinker (setup.py) ... \u001b[?25l\u001b[?25hdone\n\u001b[31mERROR: google-colab 1.0.0 has requirement ipykernel~=4.10, but you'll have ipykernel 5.4.3 which is incompatible.\u001b[0m\n"
]
],
[
[
"**Download the ngrok linux 64-bit zip file**",
"_____no_output_____"
]
],
[
[
"!wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip\n!unzip -qq ngrok-stable-linux-amd64.zip",
"--2021-02-02 08:07:59-- https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip\nResolving bin.equinox.io (bin.equinox.io)... 52.5.208.118, 52.20.36.26, 52.204.93.39, ...\nConnecting to bin.equinox.io (bin.equinox.io)|52.5.208.118|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 13773305 (13M) [application/octet-stream]\nSaving to: ‘ngrok-stable-linux-amd64.zip.1’\n\nngrok-stable-linux- 100%[===================>] 13.13M 19.1MB/s in 0.7s \n\n2021-02-02 08:08:00 (19.1 MB/s) - ‘ngrok-stable-linux-amd64.zip.1’ saved [13773305/13773305]\n\nreplace ngrok? [y]es, [n]o, [A]ll, [N]one, [r]ename: A\n"
]
],
[
[
"**Use the output of this command as the link to your Streamlit app.**",
"_____no_output_____"
]
],
[
[
"get_ipython().system_raw('./ngrok http 8501 &')\n! curl -s http://localhost:4040/api/tunnels | python3 -c \\\n \"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])\"",
"https://daf0f0cfc32d.ngrok.io\n"
]
],
[
[
"**Ignore the output of this**",
"_____no_output_____"
]
],
[
[
"!streamlit hello",
"\u001b[0m\n\u001b[34m\u001b[1m Welcome to Streamlit. Check out our demo in your browser.\u001b[0m\n\u001b[0m\n\u001b[34m Network URL: \u001b[0m\u001b[1mhttp://172.28.0.2:8501\u001b[0m\n\u001b[34m External URL: \u001b[0m\u001b[1mhttp://34.125.8.76:8501\u001b[0m\n\u001b[0m\n Ready to create your own Python apps super quickly?\u001b[0m\n Head over to \u001b[0m\u001b[1mhttps://docs.streamlit.io\u001b[0m\n\u001b[0m\n May you create awesome apps!\u001b[0m\n\u001b[0m\n2021-02-02 08:08:42.998 MediaFileManager: Missing file 25fecd1e37d02f7f54089f6acde72bd22f67643be1dbe2028f8e2c06.jpeg\n2021-02-02 08:08:43.114 MediaFileManager: Missing file f371b4e417e6a7fe417af3210f6609b568ad512851217395a95a12f9.jpeg\n2021-02-02 08:10:12.278 NumExpr defaulting to 2 threads.\n\u001b[34m Stopping...\u001b[0m\n\u001b[34m Stopping...\u001b[0m\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
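Two practical notes on the Streamlit-in-Colab record above. First, newer ngrok releases refuse to open a tunnel without an auth token, so `./ngrok authtoken <YOUR_TOKEN>` (or `ngrok config add-authtoken` on ngrok v3) may be needed before the `./ngrok http 8501 &` step. Second, `streamlit hello` can be swapped for your own script; a minimal sketch, where `app.py` is a hypothetical file name:

```python
# app.py -- a tiny Streamlit script to serve instead of `streamlit hello`
import streamlit as st

st.title("Colab Streamlit demo")
name = st.text_input("Your name", "world")

# st.write renders text, dataframes, charts, and more.
st.write(f"Hello, {name}!")
```

In the notebook this would be written out with `%%writefile app.py` and launched with `!streamlit run app.py` while ngrok forwards port 8501.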
c52c1570ca093507150ef36ebb1841c14e09ca97
| 264,262 |
ipynb
|
Jupyter Notebook
|
translator.ipynb
|
Guillemdb/nlpkungfu
|
9386fd56095502fd734720f76e395932a56c8cc2
|
[
"MIT"
] | null | null | null |
translator.ipynb
|
Guillemdb/nlpkungfu
|
9386fd56095502fd734720f76e395932a56c8cc2
|
[
"MIT"
] | null | null | null |
translator.ipynb
|
Guillemdb/nlpkungfu
|
9386fd56095502fd734720f76e395932a56c8cc2
|
[
"MIT"
] | null | null | null | 78.532541 | 34,032 | 0.715513 |
[
[
[
"from nlpkf.models.seq2seq import preprocess_catalan, Translator\nall_english, all_catalan = preprocess_catalan(\"cat.txt\")",
"_____no_output_____"
],
[
"\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\ndef filter_prefixes(lang_1, lang_2, prefixes):\n filtered_1 = []\n filtered_2 = []\n for sent_1, sent_2 in zip(lang_1, lang_2): \n if sent_1[5:].strip().lower().startswith(prefixes):\n filtered_1.append(sent_1)\n filtered_2.append(sent_2)\n return filtered_1, filtered_2\nenglish, catalan = filter_prefixes(all_english, all_catalan, eng_prefixes)",
"_____no_output_____"
],
[
"#english, catalan = all_english, all_catalan",
"_____no_output_____"
],
[
"\ntok_kwargs = dict(remove_stopwords=False, use_stems=False, \n to_lowercase=False, use_lemma=False, remove_punctuation=True, normalize_strings=True)",
"_____no_output_____"
],
[
"import time\nstart = time.time()\ntrans = Translator(16, 32, tokenizer_kwargs=tok_kwargs, vectorizer_kwargs={\"lowercase\":False}, pretrained=False)\nend = time.time()\nprint(end - start)",
"9.531507015228271\n"
],
[
"trans.target_proc.build_vocabulary(catalan)",
"_____no_output_____"
],
[
"trans.target_proc.clean_text(catalan)",
"_____no_output_____"
],
[
"trans.target_proc.vocabulary\n",
"_____no_output_____"
],
[
"%%time\nx_corpus, y_corpus = trans.fit(english, catalan)",
"Building vocabulary.\nConverting to tensors.\nInitializing model and optimizers.\nCPU times: user 3.64 s, sys: 3.97 s, total: 7.62 s\nWall time: 2.74 s\n"
],
[
"trans.encoder",
"_____no_output_____"
],
[
"trans.decoder",
"_____no_output_____"
],
[
"trans.train(x_corpus, y_corpus, print_every=1, n_iters=100, preprocess=False, plot_every=5)",
"100%|██████████| 15/15 [00:00<00:00, 50.60it/s]\n 40%|████ | 6/15 [00:00<00:00, 58.67it/s]"
],
[
"%%time\ntensors_catalan = trans.sentences_to_tensors(catalan, trans.target_proc)",
"CPU times: user 21 s, sys: 28.4 s, total: 49.4 s\nWall time: 7.15 s\n"
],
[
"%%time\n\ntensors_english = trans.sentences_to_tensors(english, trans.src_proc)",
"CPU times: user 22.8 s, sys: 31 s, total: 53.8 s\nWall time: 7.78 s\n"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\ntrans.plot_loss()\n",
"_____no_output_____"
],
[
"trans",
"_____no_output_____"
],
[
"trans.evaluate_attention(english[10])",
"input = /SOS We are sorry for the inconvenience. /EOS\noutput = /SOS es una bon la molestia /EOS\n"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\ndef plot_loss(self):\n \n #plt.switch_backend('agg')\n plt.figure()\n fig, ax = plt.subplots()\n # this locator puts ticks at regular intervals\n loc = ticker.MultipleLocator(base=0.2)\n ax.yaxis.set_major_locator(loc)\n return plt.plot(self.plot_losses)\nplot_loss(trans)\nplt.show()",
"_____no_output_____"
],
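For reference, a stand-alone toy version of the same plotting idea, with dummy losses instead of a `Translator` instance; `MultipleLocator(base=0.2)` places a y-tick every 0.2 units:

```python
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

losses = [1.0, 0.8, 0.65, 0.55, 0.5]  # dummy loss history
fig, ax = plt.subplots()
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=0.2))  # tick every 0.2
ax.plot(losses)
plt.show()
```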
[
"def show_attention(input_sentence, output_words, attentions):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(attentions.numpy(), cmap='bone')\n fig.colorbar(cax)\n # xticks = [''] + input_sentence.split(' ') + ['<EOS>']\n # yticks = [''] + output_words\n xticks = [''] + input_sentence.split(' ')\n yticks = [''] + output_words\n # Set up axes\n ax.set_xticklabels(xticks, rotation=90)\n ax.set_yticklabels(yticks)\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.show()\n\n\ndef evaluate_attention(input_sentence, trans, *args, **kwargs):\n output_words, attentions = trans.predict(input_sentence, *args, **kwargs)\n print('input =', input_sentence)\n print('output =', ' '.join(output_words))\n show_attention(input_sentence, output_words, attentions)\n return input_sentence, output_words",
"_____no_output_____"
],
[
"evaluate_attention(english[10], trans)",
"input = /SOS We are sorry for the inconvenience. /EOS\noutput = /SOS es una bon anys mes jove que el pare /EOS\n"
],
[
"trans.target_proc.clean_text(catalan)",
"_____no_output_____"
],
[
"input_sentence, output_words = evaluate_attention(english[12], trans)\nevaluate_attention(english[10], trans)",
"input = /SOS Make a wish. /EOS \noutput = /SOS on va es /EOS\n"
],
[
"output_words, attentions = trans.predict(english[0])",
"_____no_output_____"
],
[
"attentions.numpy()",
"_____no_output_____"
],
[
"output_words",
"_____no_output_____"
],
[
"english[0].split(\" \")",
"_____no_output_____"
],
[
" input_sentence.split(' ')[1:-1]",
"_____no_output_____"
],
[
"n = 2\ntrans.evaluate(english[n], catalan[n])",
"> /SOS He is a good athlete. /EOS \n= /SOS Ell és un bon atleta. /EOS \n< /SOS sos es una bona nedadora <EOS>\n\n"
],
[
"def evaluateRandomly(pairs, trans, n=1):\n for i in range(n):\n pair = random.choice(pairs)\n print('>', pair[0])\n print('=', pair[1])\n output_words, attentions = trans.evaluate(pair[0])\n output_sentence = ' '.join(output_words)\n print('<', output_sentence)\n print('')",
"_____no_output_____"
],
[
"import random\nevaluateRandomly([(english[10], catalan[10])], trans)",
"> /SOS I'm 17, too. /EOS \n= /SOS Jo també tinc 17 anys. /EOS \n< /SOS sos no no el que que <EOS>\n\n"
],
[
"%%time\nmax_seq_len(tensors_catalan, tensors_english)",
"CPU times: user 66 µs, sys: 85 µs, total: 151 µs\nWall time: 154 µs\n"
],
[
"import torch\nimport numpy as np\ntorch.from_numpy(np.arange(10)).to(\"cuda\")",
"_____no_output_____"
],
[
"from nlpkf.tokenizer import EOS_TOKEN, SOS_TOKEN\ndef add_tokens_to_sentence(text, sos_token: str=SOS_TOKEN, eos_token: str=EOS_TOKEN):\n return \" {} {} {} \".format(sos_token, text, eos_token)\n \ndef preprocess_translator(text, proc_func, sos_token: str=SOS_TOKEN, eos_token: str=EOS_TOKEN):\n source = []\n target = []\n for src, dst in proc_func(text):\n\n src, dst = add_tokens_to_sentence(src), add_tokens_to_sentence(dst)\n source.append(src)\n target.append(dst)\n return source, target\n ",
"_____no_output_____"
],
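A usage sketch of the wrapper above. `SOS_TOKEN` and `EOS_TOKEN` live in `nlpkf.tokenizer`, which is not shown here; judging by the notebook's outputs they are assumed to be `/SOS` and `/EOS`:

```python
SOS_TOKEN, EOS_TOKEN = "/SOS", "/EOS"  # assumed values, per the outputs above

def add_tokens_to_sentence(text, sos_token=SOS_TOKEN, eos_token=EOS_TOKEN):
    return " {} {} {} ".format(sos_token, text, eos_token)

print(add_tokens_to_sentence("Wow!"))
# ' /SOS Wow! /EOS '
```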
[
"[tuple(x.split(\"\\t\")) for x in str(data).split(\"\\n\")][0]",
"_____no_output_____"
],
[
"src, dst = preprocess_translator(data, lambda x: [tuple(x.split(\"\\t\")) for x in str(data).split(\"\\n\")[:-1]])",
"_____no_output_____"
],
[
"src",
"_____no_output_____"
],
[
"import torch\ntorch.tensor([[0]], device=\"cpu\")",
"_____no_output_____"
],
[
"\ntorch.cuda.is_available()",
"_____no_output_____"
],
[
"for sent in doc.sents:\n print(sent)",
"b'Wow!\\tCarai!\\nReally?\\tDe\nveritat?\\nThanks.\\tGr\\xc3\\xa0cies!\\nGoodbye!\\tAd\\xc3\\xa9u!\\nHurry up.\\tAfanya\\'t.\\nToo\nlate.\\tMassa tard.\\nThank you.\\tGr\\xc3\\xa0cies!\\nCan\nI help?\\tPuc ajudar?\\nI envy him.\\tL\\'envejo.\\nTime flies.\\tEl temps\nvola.\\nI\\'m 17, too.\\tJo\ntamb\\xc3\\xa9 tinc 17\nanys.\\nI\\'m at home.\\tEstic a casa.\\nMake\na wish.\\tDemana un desig\\nMoney talks.\\tQui paga,\nmana.\\nWe love you.\\tT\\'estimem.\\nWe love\nyou.\\tUs\nestimem.\\nWho are you?\\tQui ets\ntu?\\nWho are you?\\tQui\n\\xc3\\xa9s vost\\xc3\\xa8?\\nWho are you?\\tQui\nets?\\nWho are you?\\tQui sou?\\nCome with us.\\tVine amb nosaltres.\\nHe has a dog.\\tEll t\\xc3\\xa9 un gos.\\nShe stood up.\\tElla es va aixecar.\\nHi,\neverybody.\\tHola a tots.\\nI\\'m desperate.\\tEstic desesperat.\\nLet\nme try it.\\tDeixa\\'m intentar-ho.\\nYou look good.\\tTens bona cara.\\nYou look good.\\tFas bona cara.\\nYou look\ngood.\\tFas bon aspecte.\\nAre you insane?\\tEst\\xc3\\xa0s boig?\\nCan I help you?\\tPuc ajudar?\\nHappy New Year!\\tBon any nou!\\nI need a stamp.\\tNecessito un segell.\\nI\nsaw him jump.\\tEl vaig veure saltar.\\nLeave me\nalone!\\tDeixa\\'m en\npau!\\nWho painted it?\\tQui\nho\nha pintat?\\nHer book is red.\\tEl seu llibre \\xc3\\xa9s roig.\\nI\ndidn\\'t say it.\\tNo\nho\nhe dit pas.\\nI felt the same.\\tEm sentia igual.\\nI have two cats.\\tTinc dos gats.\\nI speak Swedish.\\tParlo suec.\\nIt\\'s cold today!\\tAvui fa fred!\\nIt\\'s your fault.\\t\\xc3\\x89s culpa\nteva.\\nWho are you all?\\tQui sou tots\nvosaltres?\\nWho are you all?\\tQui sou totes\nvosaltres?\\nHere is your bag.\\tAqu\\xc3\\xad \\xc3\\xa9s\nla teva bossa.\\nHere is your bag.\\tAc\\xc3\\xad est\\xc3\\xa0 la teua bossa.\\nHere is your bag.\\tAc\\xc3\\xad tens la teua bossa.\\nI am now on duty.\\tAra estic de servei.\\nI ate\nthe cheese.\\tEm vaig menjar el formatge.\\nI have a problem.\\tTinc un problema.\\nI have a problem.\\tTinc un maldecap.\\nI have no family.\\tNo tinc fam\\xc3\\xadlia.\\nI work in a bank.\\tJo treballo a un banc.\\nI wrote a letter.\\tVaig escriure una carta.\\nI\\'m already late.\\tJa faig tard.\\nI\\'m not a doctor.\\tJo no s\\xc3\\xb3c metge.\\nLet go of my arm.\\tDeixa\\'m anar el bra\\xc3\\xa7.\\nShe lives nearby.\\tViu\naqu\\xc3\\xad prop.\\nThey\\'re\nmy books.\\tS\\xc3\\xb3n els\nmeus\nllibres.\\nThis is your dog.\\tAquest \\xc3\\xa9s\nel teu\ngos.\\nTom isn\\'t\nhungry.\\tTom\nno\nt\\xc3\\xa9\nfam.\\nTom walked alone.\\tEn Tom caminava sol.\\nWhat is going on?\\tQu\\xc3\\xa8\nhi\nha?\\nWhat is going on?\\tQu\\xc3\\xa8\npassa?\\nWho are you with?\\tAmb\nqui\nest\\xc3\\xa0s?\\nWho are you\nwith?\\tAmb qui esteu?\\nAnswer in English.\\tContesta en angl\\xc3\\xa8s!\\nHe\nwent back\nhome.\\tEll va tornar a casa.\\nI\nhave an earache.\\tTinc otitis.\\nI have black eyes.\\tTinc els ulls negres.\\nI think he did it.\\tCrec que ho va fer\nell.\\nI\\'m a salesperson.\\tS\\xc3\\xb3c venedor.\\nLet\nme have a try.\\tDeixa\\'m intentar-ho.\\nNobody is\nperfect.\\tNing\\xc3\\xba \\xc3\\xa9s perfecte.\\nShe has\nlong hair.\\tElla t\\xc3\\xa9\nel cabell llarg.\\nTom doesn\\'t\ndrink.\\tTom\nno beu.\\nWhy is snow white?\\tPer\nqu\\xc3\\xa8 \\xc3\\xa9s blanca la neu?\\nYou need to hurry.\\tT\\'has d\\'afanyar.\\nAre you hungry\nnow?\\tTens fam, ara?\\nAre you hungry now?\\tTeniu fam, ara?\\nAre\nyou hungry\nnow?\\tAra\ntens fam?\\nCome along with me.\\tAcompanya\\'m.\\nDo it by all means.\\tFes-ho com\nsigui.\\nDo\nit by all means.\\tFes-ho peti qui\npeti.\\nEveryone loves 
him.\\tTots\nl\\'estimen.\\nHe speaks fluently.\\tParla amb soltura.\\nI\ndon\\'t have a cat.\\tNo tinc cap\ngat.\\nI don\\'t want sugar.\\tNo\nvull sucre.\\nI have lost my key.\\tHe perdut la meva clau.\\nI wrote a cookbook.\\tVaig escriure un llibre de cuina.\\nIt was a nightmare.\\tVa ser un malson.\\nShut up and listen!\\tCalla\ni escolta!\\nShut up and\nlisten.\\tCalla i escolta!\\nStay out of my way.\\tFora del meu\ncam\\xc3\\xad!\\nThis coat fits you.\\tAquest abric\net queda b\\xc3\\xa9.\\nTom is a silly man.\\tEn tom\n\\xc3\\xa9s un ximplet.\\nWhat\\'s the problem?\\tQuin problema hi ha?\\nWhat\\'s your secret?\\tQuin \\xc3\\xa9s el teu secret?\\nYou\nare a good boy.\\tEts un bon noi.\\nEurope is in crisis.\\tEuropa est\\xc3\\xa0 en crisi.\\nEverybody\nloves\nhim.\\tTots\nl\\'estimen.\\nHave\na good weekend!\\tBon cap de setmana!\\nHe\\'ll return at six.\\tTornar\\xc3\\xa0\na les sis.\\nHe\\'s the oldest son.\\tEll \\xc3\\xa9s el fill gran.\\nI can see the light.\\tPuc veure la llum.\\nI want to stay here.\\tVull\nquedar-me\naqu\\xc3\\xad.\\nI\\'ll call him later.\\tLi\ncridar\\xc3\\xa9 m\\xc3\\xa9s tard.\\nI\\'ve got a question.\\tTinc una pregunta.\\nShe has a white cat.\\tElla t\\xc3\\xa9 un gat blanc.\\nShe raised her hand.\\tElla va aixecar la m\\xc3\\xa0.\\nShe raised her hand.\\tElla va\nal\\xc3\\xa7ar la m\\xc3\\xa0.\\nShe raised her hand.\\tElla\nal\\xc3\\xa7\\xc3\\xa0 la m\\xc3\\xa0.\\nShe raised her hand.\\tVa al\\xc3\\xa7ar la m\\xc3\\xa0.\\nThe bicycle is mine.\\tLa\nbicicleta\n\\xc3\\xa9s meva.\\nThey were satisfied.\\tEstaven\nsatisfets.\\nThey were satisfied.\\tEstaven\ncofois.\\nWhere are you\ngoing?\\tA on vas?\\nYour father is tall.\\tTon pare \\xc3\\xa9s\nalt.\\nBirds fly in the sky.\\tEls ocells volen pel\ncel.\\nCan he speak English?\\tQue parla angl\\xc3\\xa8s, ell?\\nDo\nyou live in Tokyo?\\tVius\na Tokyo?\\nFish live in the sea.\\tEls peixos viuen al mar.\\nGood night,\neveryone!\\tBona nit a tothom!\\nHe is a good athlete.\\tEll \\xc3\\xa9s un bon atleta.\\nI dislike big cities.\\tNo\nm\\'agraden les ciutats grans.\\nI\ndon\\'t want to work.\\tNo vull treballar.\\nI have two daughters.\\tTinc dues filles.\\nI\\'m glad you\\'re\nhere.\\tM\\'alegra que estigues\nac\\xc3\\xad.\\nI\\'m glad you\\'re\nhere.\\tM\\'alegra que estigueu ac\\xc3\\xad.\\nIt\\'s a piece of cake.\\tAix\\xc3\\xb2 \\xc3\\xa9s bufar\ni fer ampolles.\\nIt\\'s really annoying.\\t\\xc3\\x89s\nrealment molest.\\nShe sent me a letter.\\tElla\nem\nva enviar una carta.\\nSorry for being late.\\tPerd\\xc3\\xb3 pel retard.\\nThat\\'s a bright idea.\\t\\xc3\\x89s una idea brillant.\\nThe ground seems wet.\\tEl\ns\\xc3\\xb2l sembla\nmullat.\\nThey got off the bus.\\tElls van baixar de l\\'autob\\xc3\\xbas.\\nTom always says that.\\tTom sempre\ndiu aix\\xc3\\xb2.\\nTom doesn\\'t watch TV.\\tEn Tom\nno mira\nla tele.\\nWho\\'s coming with me?\\tQui\nve amb\nmi?\\nYou can come with\nme.\\tPots venir amb\nmi.\\nYour son is a genius.\\tEl vostre fill \\xc3\\xa9s un geni.\\nDo\nyou have two books?\\tTens dos llibres?\\nDoes he speak English?\\tQue parla angl\\xc3\\xa8s, ell?\\nDoes\nhe speak English?\\tParla angl\\xc3\\xa8s?\\nDoes\nhe speak English?\\tEll parla angl\\xc3\\xa8s?\\nDoes your mother\nknow?\\tHo sap la teua mare?\\nHis house was on fire.\\tLa seva casa est\\xc3\\xa0 en flames.\\nI don\\'t have time now.\\tAra no tinc temps.\\nI have to go to sleep.\\tHe\nd\\'anar a dormir.\\nI know these students.\\tconec aquests\nestudiants.\\nI\\'d like some aspirin.\\tVoldria una aspirina.\\nI\\'d\nlike 
to go abroad.\\tM\\'agradaria\nanar\na l\\'estranger.\\nMoney opens all doors.\\tEls diners\nobren totes\nles\nportes.\\nShe is a good swimmer.\\t\\xc3\\x89s una bona nedadora.\\nThanks for everything.\\tMerc\\xc3\\xa8s per tot.\\nThe girl said nothing.\\tLa nena no va dir res.\\nThey\ndon\\'t seem happy.\\tNo semblen feli\\xc3\\xa7os.\\nThose are empty words.\\tS\\xc3\\xb3n\nparaules buides.\\nWe have two daughters.\\tTenim dues\nfilles.\\nWhat\\'s wrong with you?\\tQu\\xc3\\xa8\net passa?\\nYou should eat slower.\\tHas de menjar\nm\\xc3\\xa9s a poc a poc.\\nYou should\\'ve stopped.\\tT\\'hauries\nd\\'haver\naturat.\\nYou\\'re taller than me.\\tEts\nm\\xc3\\xa9s alt que jo.\\nCome\nwhenever you like.\\tVingui quan vulgui.\\nCome\nwhenever you like.\\tVeniu quan vulgueu.\\nCome\nwhenever you like.\\tVine quan vulguis.\\nDoes the bus stop here?\\tL\\'autob\\xc3\\xbas para ac\\xc3\\xad?\\nI\ndidn\\'t buy this book.\\tNo vaig comprar aquest llibre.\\nI think Tom isn\\'t home.\\tCrec que en Tom no est\\xc3\\xa0 a casa.\\nI think he has done it.\\tCrec que ell ho ha\nfet.\\nI\\'m not angry with you.\\tNo estic enfadada\namb tu.\\nThey will not eat\nmeat.\\tNo menjaran\ncarn.\\nTom wants to be famous.\\tTom vol ser fam\\xc3\\xb3s.\\nWe have to act\nquickly.\\tHem\nd\\'actuar\nr\\xc3\\xa0pid.\\nWhat is wrong with him?\\tQu\\xc3\\xa8 li passa?\\nAsk\nme something easier.\\tPregunta\\'m una cosa m\\xc3\\xa9s\nf\\xc3\\xa0cil.\\nDo\nyou have\na cellphone?\\tTens un m\\xc3\\xb2bil?\\nHe died three years ago.\\tVa morir fa tres anys.\\nI don\\'t like big cities.\\tNo m\\'agraden les ciutats grans.\\nI felt like I would die.\\tSentia\nque em moriria.\\nI\nhope that I can do it.\\tEspere poder-ho fer.\\nI listened to her story.\\tVaig\nescoltar la hist\\xc3\\xb2ria d\\'ella.\\nI want to see the ocean.\\tVull veure l\\'oce\\xc3\\xa0.\\nI won\\'t see him anymore.\\tNo\nel veur\\xc3\\xa9 mai m\\xc3\\xa9s.\\nI\\'ll start this evening.\\tComen\\xc3\\xa7ar\\xc3\\xa9 aquest vespre.\\nI\\'ll start this evening.\\tComen\\xc3\\xa7ar\\xc3\\xa9\nesta\nvesprada.\\nI\\'m sure of his success.\\tEstic segur del seu \\xc3\\xa8xit.\\nI\\'ve forgotten his name.\\tHe oblidat el seu nom.\\nMany sailors\ncan\\'t swim.\\tMolts mariners no saben\nnedar,\\nMy\nfather quit\ndrinking.\\tEl meu pare va aturar de beure.\\nMy father quit drinking.\\tMon pare va deixar de beure.\\nShe helped me willingly.\\tElla\nem\nva ajudar de bon gust.\\nTell me when he returns.\\tAvisa\\'m\nquan\ntorni.\\nTom is thirty years old.\\tTom t\\xc3\\xa9 trenta\nanys.\\nWhat a beautiful sunset!\\tQuina posta de sol m\\xc3\\xa9s\nbonica!\\nWhat a beautiful sunset.\\tQuina posta m\\xc3\\xa9s\nmaca!\\nWhat a beautiful sunset.\\tQuina posta de sol\nm\\xc3\\xa9s bonica.\\nWhere are our umbrellas?\\tOn estan els nostres\nparaig\\xc3\\xbces?\\nWould you draw me\na map?\\tEm far\\xc3\\xades un mapa?\\nYou will miss the train.\\tPerdr\\xc3\\xa0s el\ntren.\\nCan\nyour mom drive a car?\\tLa teva mama sap conduir?\\nCan\nyour mom drive a car?\\tTa mare sap conduir?\\nCan\nyour mom drive a car?\\tLa teua mare sap conduir\nun cotxe?\\nCan your mom drive a car?\\tLa vostra mare sap conduir?\\nDo\nyou have a cell\nphone?\\tTens\nun\nm\\xc3\\xb2bil?\\nDo\nyou have a smartphone?\\tTens un m\\xc3\\xb2bil?\\nDon\\'t\never do that again.\\tNo ho facis mai m\\xc3\\xa9s.\\nFire is always\ndangerous.\\tEl foc sempre\n\\xc3\\xa9s perill\\xc3\\xb3s.\\nHe fell and hurt his leg.\\tEll es va caure\ni es va fer mal a la cama.\\nHe has never played golf.\\tEll\nno ha jugat mai al 
golf.\\nHe looks like his father.\\tEll s\\'assembla\nal seu\npare.\\nHe speaks five languages.\\tEll parla cinc idiomes.\\nI\ncan\\'t stand that noise.\\tNo\npuc aguantar aquest soroll.\\nI\ndidn\\'t know what to do.\\tNo\nsabia\nqu\\xc3\\xa8 fer.\\nI\ndon\\'t\nknow her address.\\tNo\ns\\xc3\\xa9\nla seva adre\\xc3\\xa7a.\\nI\ndon\\'t love\nyou anymore.\\tJa no t\\'estimo.\\nI made my son a new suit.\\tHe fet un vestit nou per a mon fill.\\nI only have eyes for you.\\tAqu\\xc3\\xad no veig ning\\xc3\\xba\nm\\xc3\\xa9s que tu.\\nI recognized him at once.\\tEl\nvaig recon\\xc3\\xa8ixer de seguida.\\nI\\'m talking on the phone.\\tParlo per tel\\xc3\\xa8fon.\\nI\\'m talking on the phone.\\tEstic parlant per\ntel\\xc3\\xa8fon.\\nIt doesn\\'t\nsound natural.\\tNo sona natural.\\nIt is nice and cool here.\\tAqu\\xc3\\xad fa fresca i s\\'hi est\\xc3\\xa0 b\\xc3\\xa9.\\nMy mother is always busy.\\tLa meva mare sempre est\\xc3\\xa0 ocupada.\\nShe had a strange hat on.\\tElla portava un barret\nestrany.\\nThis is his car, I think.\\tAquest\n\\xc3\\xa9s el seu cotxe, crec.\\nTom is a history teacher.\\tEn Tom \\xc3\\xa9s un professor d\\'hist\\xc3\\xb2ria.\\nTom visited Mary\\'s\ngrave.\\tEn Tom va visitar la tomba de la Mary.\\nTurn down the TV, please.\\tBaixa\nel volum del televisor.\\nTurn the TV down,\nplease.\\tBaixa\nel volum del\ntelevisor.\\nYou are everything to me.\\tTu ets tot per mi.\\nYou don\\'t\nhave to eat\nit.\\tVost\\xc3\\xa8\nno ha de menjar-ho.\\nI have a coat, but no hat.\\tTinc un abric, per\\xc3\\xb2 cap barret.\\nI like listening to music.\\tM\\'agrada escoltar\nm\\xc3\\xbasica.\\nI like to listen to music.\\tM\\'agrada escoltar m\\xc3\\xbasica.\\nI made a careless mistake.\\tVaig cometre una neglig\\xc3\\xa8ncia.\\nI\\'m busy,\nso I\ncan\\'t\nhelp.\\tEstic ocupat, no puc ajudar-te.\\nI\\'m busy,\nso\nI\ncan\\'t\nhelp.\\tEstic ocupat, no puc ajudar\n-vos.\\nI\\'m standing in the shade.\\tM\\'estic dret a l\\'ombra.\\nIt\\'s always been that way.\\tSempre ha sigut aix\\xc3\\xad.\\nThat\\'ll put you in danger.\\tAix\\xc3\\xb2 et posar\\xc3\\xa0 en perill.\\nWe may be late for school.\\tPotser farem tard\na l\\'escola.\\nWhat a nice sounding word!\\tQu\\xc3\\xa8 b\\xc3\\xa9 sona aquesta\nparaula!\\nWhere\nwill you be staying?\\tOn\nt\\'estar\\xc3\\xa0s?\\nWhere\nwill you be staying?\\tOn\nt\\'allotjar\\xc3\\xa0s?\\nWhere will you be staying?\\tOn et quedar\\xc3\\xa0s?\\nWhere\nwill you be staying?\\tOn\nus allotjareu?\\nYou agree with Tom, right?\\tEst\\xc3\\xa0s d\\'acord\namb\nTom, oi?\\nYou agree with Tom,\nright?\\tEsteu d\\'acord\namb\nTom, veritat?\\nYou don\\'t have to do this.\\tNo\nhas de fer-ho.\\nYou don\\'t have to do this.\\tAix\\xc3\\xb2\nno ho has de fer.\\nDo\nthey have any good news?\\tTens bones\nnot\\xc3\\xadcies?\\nDo you come here\nevery day?\\tV\\xc3\\xa9ns aqu\\xc3\\xad cada dia?\\nDo\nyou come here every day?\\tVeniu\nac\\xc3\\xad tots els\ndies?\\nDo\nyou have a mobile phone?\\tTens un m\\xc3\\xb2bil?\\nDo\nyou know his birthplace?\\tSaps on va n\\xc3\\xa9ixer?\\nI have to buy one tomorrow.\\tHe de comprar-ne un dem\\xc3\\xa0.\\nI\njust want to be near\nyou.\\tNom\\xc3\\xa9s\nvull\nestar prop de tu.\\nI know\nhe likes\njazz music.\\tS\\xc3\\xa9 que li agrada el jazz.\\nI\\'d rather do it by myself.\\tPrefereixo fer-lo pel meu\ncompte.\\nI\\'m\nafraid\nI caught a cold.\\tEm sembla que\nhe agafat un constipat.\\nIt\\'s all\nyou can really\ndo.\\t\\xc3\\x89s tot el que pots fer.\\nIt\\'s always been like\nthat.\\tSempre ha sigut\naix\\xc3\\xad.\\nShe\\'s 
Tom\\'s younger sister.\\t\\xc3\\x89s la\ngermana petita d\\'en Tom.\\nShe\\'s Tom\\'s younger\nsister.\\t\\xc3\\x89s\nla germana menuda de Tom.\\nThe bird\\'s wing was broken.\\tL\\'ala de l\\'ocell estava trencada.\\nThe bird\\'s wing was broken.\\tL\\'ocell tenia una ala trencada.\\nThe bird\\'s wing was broken.\\tEl pardal tenia una\nala trencada.\\nThere were ten eggs in all.\\tHi havia deu ous en total.\\nThere\\'s no reason to worry.\\tNo\nhi ha cap\nmotiu per preocupar-se.\\nThings are not that simple.\\tLes coses no s\\xc3\\xb3n tan senzilles.\\nThis store sells old books.\\tAquesta botiga ven llibres\nvells.\\nYou\\'re not\na child anymore.\\tJa no ets un nen.\\nColumbus discovered America.\\tCol\\xc3\\xb3n va descobrir Am\\xc3\\xa8rica.\\nDon\\'t you like Chinese food?\\tNo\nt\\'agrada el menjar\nxin\\xc3\\xa8s?\\nFrance is in western Europe.\\tFran\\xc3\\xa7a \\xc3\\xa9s\na l\\'Europa Occidental.\\nHe plays baseball every day.\\tJuga al beisbol tots el dies.\\nHe wants a watch like yours.\\tVol un rellotge com el teu.\\nHe\\'s the one who touched me.\\tEll\n\\xc3\\xa9s el que em va tocar.\\nI\ndon\\'t know if he knows it.\\tNo\ns\\xc3\\xa9\nsi ho sap.\\nI\\'d like some sugar, please.\\tM\\'agradaria una mica de sucre,\nsi us\nplau.\\nI\\'ll be back in ten minutes.\\tTornar\\xc3\\xa9 en deu minuts.\\nI\\'m the one who has the key.\\tJo s\\xc3\\xb3c\nqui\nt\\xc3\\xa9\nla clau.\\nIs it going to snow\ntonight?\\tQu\\xc3\\xa8\nnevar\\xc3\\xa0 avui\nal vespre?\\nMaybe Tom asked Mary to lie.\\tPotser el Tom li va demanar\na\nla\nMary que ment\\xc3\\xads.\\nMy dog has a very long tail.\\tEl meu gos t\\xc3\\xa9 una cua molt llarga.\\nTake off your socks, please.\\tSisplau, lleva\\'t els mitjons.\\nTake off your socks, please.\\tLleva\\'t els calcetins, per favor.\\nTake off your socks, please.\\tLleveu-vos els calcetins,\nper favor.\\nThe evidence was against me.\\tL\\'evid\\xc3\\xa8ncia estava en contra\nmeua.\\nThe food was great in Italy.\\tEl menjar va ser cosa fina\na It\\xc3\\xa0lia.\\nThey say that love is blind.\\tEs\ndiu\nque\nl\\'amor \\xc3\\xa9s\ncec.\\nThey work eight hours a day.\\tTreballen vuit hores\nal dia.\\nWhat are you doing tomorrow?\\tQu\\xc3\\xa8 fas\ndem\\xc3\\xa0?\\nWhat is wrong with that guy?\\tQu\\xc3\\xa8 li passa a aquet\npaio?\\nWhere\nwill we go\nafterwards?\\tOn anirem\ndespr\\xc3\\xa9s?\\nDo you know where she\\'s gone?\\tSaps on ha anat\nella?\\nHe goes to the office by car.\\tVa al despatx amb cotxe.\\nHe is the manager of a hotel.\\t\\xc3\\x89s el director d\\'un hotel.\\nHe lost all the money he had.\\tVa\nperdre tots els\ndiners que tenia.\\nHe plays the piano very well.\\tEll\ntoca el piano molt b\\xc3\\xa9.\\nI\ndon\\'t want to go to school.\\tNo vull anar\na l\\'escola.\\nI have something to tell you.\\tT\\'he de dir una cosa.\\nI have something to tell you.\\tUs\nhe de dir una cosa.\\nI have something to tell you.\\tTinc una cosa a dir-te.\\nI must have the wrong number.\\tDec tenir el n\\xc3\\xbamero equivocat.\\nI never get tired of talking.\\tNo em canso mai de parlar.\\nI saw him tear up the letter.\\tEl vaig veure estripar la carta.\\nI will get in touch with you.\\tEm posar\\xc3\\xa9 en contacte amb tu.\\nJapan is smaller than Canada.\\tEl Jap\\xc3\\xb3 \\xc3\\xa9s\nm\\xc3\\xa9s petit que el Canad\\xc3\\xa0.\\nShe sent you her best wishes.\\tElla t\\'envia els seus millors\ndesitjos.\\nThat\\'s exactly what happened.\\tAix\\xc3\\xb2 \\xc3\\xa9s exactament\nel\nqu\\xc3\\xa8\nva passar.\\nThe girl\ndidn\\'t say anything.\\tLa\nnena no va 
dir\nres.\\nThe soldier gave water to me.\\tEl soldat m\\'ha donat aigua.\\nWe killed time playing cards.\\tMat\\xc3\\xa0vem el temps jugant\na les cartes.\\nWe must control our passions.\\tHem de controlar les nostres\npassions.\\nWhat you think is irrelevant.\\tEl que penses \\xc3\\xa9s\nirellevant.\\nDo you have\nmedical insurance?\\tTeniu asseguran\\xc3\\xa7a m\\xc3\\xa8dica?\\nHe comes here every five days.\\tVe\naqu\\xc3\\xad cada cinc dies.\\nHe left the book on the table.\\tVa\ndeixar el llibre\nsobre la\ntaula.\\nHow many children do you have?\\tQuants fills tens?\\nI believe the choice is clear.\\tCrec que l\\'elecci\\xc3\\xb3 est\\xc3\\xa0 clara.\\nI study for 3 hours every day.\\tJo estudio 3 hores\ncada dia.\\nIt was cheaper than I thought.\\t\\xc3\\x89s\nm\\xc3\\xa9s barat del que em\nvaig\npensar.\\nLet\nme know whenever you come.\\tQuan vinguis, fes-m\\'ho saber.\\nMost schools are closed\ntoday.\\tLa majoria d\\'escoles avui estan tancades.\\nMy dad died before I was\nborn.\\tMon pare va\nmorir abans\ndel meu\nnaixement.\\nNobody equals him in strength.\\tNing\\xc3\\xba\nno li fa ombra.\\nNobody equals him in strength.\\tNing\\xc3\\xba no\nli \\xc3\\xa9s\nrival.\\nOur summer is short, but warm.\\tEl nostre estiu \\xc3\\xa9s curt,\nper\\xc3\\xb2 calor\\xc3\\xb3s.\\nShe didn\\'t tell me her secret.\\tElla\nno em\nva dir el seu secret.\\nShe is giving a party tonight.\\tElla fa una festa aquesta\nnit.\\nThis is\na very strange letter.\\tAquesta\n\\xc3\\xa9s una carta molt\nestranya.\\nTom wants to change the world.\\tEn Tom vol canviar el m\\xc3\\xb3n.\\nTom\\'s arm had to be amputated.\\tVan haver\nd\\'amputar\nel bra\\xc3\\xa7 al Tom.\\nTom\\'s arm had to be amputated.\\tLi\nvan haver\nd\\'amputar el bra\\xc3\\xa7 a Tom.\\nYou agree with Tom\n, don\\'t you?\\tEst\\xc3\\xa0s d\\'acord amb Tom, no?\\nHe is a very thoughtful person.\\t\\xc3\\x89s\nuna persona molt considerada.\\nI\ndon\\'t know\nwhen he will come.\\tNo s\\xc3\\xa9\nquan vindr\\xc3\\xa0.\\nI\ndon\\'t like it when you swear.\\tNo m\\'agrada que digues paraulotes.\\nI\ndon\\'t like it\nwhen you swear.\\tNo m\\'agrada que digueu paraulotes.\\nI have breakfast every morning.\\tCada dia esmorzo.\\nI have not seen him since then.\\tNo\nl\\'he vist des d\\'aleshores.\\nI opened the box.\nIt was empty.\\tVaig obrir\nla caixa.\nEstava buida.\\nI wish I could buy that guitar.\\tCom voldria poder comprar aquesta guitarra.\\nI wish I could buy that guitar.\\tM\\'agradaria poder comprar eixa guitarra.\\nI wish I could buy that guitar.\\tTant de bo pogu\\xc3\\xa9s comprar aquesta guitarra.\\nI wish I could buy that guitar.\\tTant de bo poguera comprar eixa\nguitarra.\\nI\\'m\nvery glad to see you again.\\tEstic molt content de tornar-te a veure.\\nPlease circle the right answer.\\tEncercleu la resposta correcta, sisplau.\\nWas Tom in history class today?\\tEstava el Tom a classe d\\'hist\\xc3\\xb2ria avui?\\nHe had a firm belief in his God.\\tT\\xc3\\xa9 una creen\\xc3\\xa7a ferma en D\\xc3\\xa9u.\\nHe is getting better bit by bit.\\tEll s\\'est\\xc3\\xa0 millorant poc a poc\\nHe is getting better bit by bit.\\tEst\\xc3\\xa0 millorant poc\na poc.\\nHe told me an interesting story.\\tM\\'ha contat una hist\\xc3\\xb2ria interessant.\\nHelen Keller was deaf and\nblind.\\tHellen Keller era sorda\ni cega.\\nHow much\ndoes he earn per\nmonth?\\tQuant guanya al mes?\\nI can repeat it again and again.\\tPuc repetir-ho vint vegades.\\nI caught the ball with one hand.\\tVaig agafar\nla\npilota\namb una m\\xc3\\xa0.\\nI heard him sing 
at the concert.\\tEl vaig sentir cantant al concert.\\nI was not aware of his presence.\\tJo\nno era conscient que\nell estava al davant.\\nI wonder if he\\'ll come tomorrow.\\tEm pregunto si vindr\\xc3\\xa0 dem\\xc3\\xa0.\\nI\\'m a professional photographer.\\tJo s\\xc3\\xb3c fot\\xc3\\xb2graf professional.\\nLet me know when he will arrive.\\tJa\nem\ndir\\xc3\\xa0s quan arriba.\\nMy mother speaks little English.\\tLa meva mare parla una mica d\\'angl\\xc3\\xa8s.\\nShe made the same mistake again.\\tElla va cometre una altra vegada la mateixa errada.\\nShe will have a baby\nnext month.\\tElla vol tenir un fill el mes\nvinent.\\nThanks a lot for the invitation.\\tMoltes gr\\xc3\\xa0cies per la invitaci\\xc3\\xb3.\\nThe\nfood didn\\'t taste\nvery good.\\tEl menjar\nno tenia gaire\nbon gust.\\nThe food didn\\'t taste\nvery good.\\tEl menjar\nno feia gaire bon gust.\\nThe sun appeared on the horizon.\\tEl Sol apareix a l\\'horitzont.\\nThe sun appeared on the horizon.\\tEl sol aparegu\\xc3\\xa9 a l\\'horitz\\xc3\\xb3.\\nThe sun gives us heat and\nlight.\\tEl Sol ens d\\xc3\\xb3na calor\ni llum.\\nThe sun is larger than the moon.\\tEl Sol \\xc3\\xa9s\nm\\xc3\\xa9s gran que la Lluna.\\nThis\nsparrow\\'s wings are broken.\\tLes ales\nd\\'aquest\npardal estan trencades.\\nTom is too young to drive a car.\\tTom \\xc3\\xa9s massa jove per portar un cotxe.\\nTom says he\\'s never owned a car.\\tEl Tom diu que mai\nha tingut un cotxe.\\nTom was fired for a good reason.\\tTom va ser despedit per una causa\njusta.\\nYou don\\'t have to kick yourself.\\tNo et facis mala sang.\\nYou\nshould\\'ve told me yesterday.\\tM\\'ho hauries\nd\\'haver dit ahir.\\nYour opinion is important to me.\\tLa teua\nopini\\xc3\\xb3 \\xc3\\xa9s important per a mi.\\nAsians generally have black hair.\\tEls asi\\xc3\\xa0tics normalment tenen el cabell negre,\\nDo\nyou know who wrote this novel?\\tSaps qui va escriure aquesta\nnovela?\\nDo\nyou know who wrote this novel?\\tSabeu qui va escriure aquesta\nnovel\\xc2\\xb7la?\\nDon\\'t compare me to a movie star.\\tNo\nem comparis\namb una estrella de cinema.\\nHe went skiing during the winter.\\tSe\\'n va\nanar a esquiar a l\\'hivern.\\nI have lived in Tokyo since 1985.\\tHe viscut a Tokyo des de 1985.\\nI\nsaw the moon above the horizon.\\tVeig la lluna sobre l\\'horitzont.\\nMy brother-in-law is\na policeman.\\tEl meu cunyat \\xc3\\xa9s policia.\\nMy father died before I was\nborn.\\tMon pare va\nmorir abans\ndel meu\nnaixement.\\nMy\nfather died before I was\nborn.\\tMon pare va\nmorir abans que jo nasquera.\\nMy father died before I was born.\\tEl\nmeu pare va morir abans de n\\xc3\\xa9ixer jo.\\nThe bus arrived ten minutes\nlate.\\tEl bus va arribar\ndeu minuts tard.\\nThe bus arrived ten minutes late.\\tL\\'autob\\xc3\\xbas arrib\\xc3\\xa0\ndeu minuts tard.\\nThe bus arrived ten minutes\nlate.\\tL\\'autob\\xc3\\xbas va arribar\namb deu minuts de retard.\\nThe flood caused a lot of damage.\\tLa riada va fer molt de mal.\\nThe flood caused a lot of damage.\\tLa\ninundaci\\xc3\\xb3 va fer molt de mal.\\nThe rumor is true to some extent.\\tFins a un cert punt, el rumor \\xc3\\xa9s cert.\\nTom and Mary acted like children.\\tEn\nTom i la\nMary es portaven com\nnens.\\nTom couldn\\'t hold back his tears.\\tTom\nno va poder contenir\nles ll\\xc3\\xa0grimes.\\nTom couldn\\'t hold back his tears.\\tTom\nno podia contenir les ll\\xc3\\xa0grimes.\\nTom doesn\\'t go to school\nanymore.\\tTom ja\nno va a l\\'escola.\\nTom is no longer studying French.\\tEn Tom 
ja\nno\nestudia\nfranc\\xc3\\xa8s.\\nWhen can we see each other again?\\tOn ens podem tornar a veure?\\nAre we talking about the same Tom?\\tEstem parlant del mateix Tom?\\nAre we talking about the same Tom?\\tParlem del mateix Tom?\\nEveryone\nhoped that she would win.\\tTothom\nesperava que guany\\xc3\\xa9s.\\nHe was willing to work for others.\\tEll estava disposat a treballar per altres.\\nI burned my fingers on a hot iron.\\tEm vaig cremar els\ndits amb un ferro roent.\\nI burned my fingers on a hot iron.\\tEm vaig cremar els dits amb una planxa calenta.\\nI have nothing in common with her.\\tNo tinc res en com\\xc3\\xba amb ella.\\nI spend money as soon as I get it.\\tEm gasto els diners de seguida que en tinc.\\nI write letters that I never send.\\tEscric cartes que no envio\nmai.\\nIs Flight 123 going to be delayed?\\tEl vol 123, t\\xc3\\xa9\nretard?\\nLast night we worked until 10 p.m.\\tAhir a la nit\nv\\xc3\\xa0rem treballar fins a les\ndeu.\\nMy\nmother knows how to make cakes.\\tLa meva\nmare sap com fer\npastissos.\\nTell me\nyour plans for the future.\\tExplica\\'m els teus plans per al futur.\\nTell me\nyour plans for the future.\\tConta\\'m els\nteus plans de futur.\\nTell me your plans for the future.\\tExpliqueu-me els vostres plans per al futur.\\nThank you so much for inviting me.\\tMoltes gr\\xc3\\xa0cies\nper la invitaci\\xc3\\xb3.\\nThe plane took off exactly at six.\\tL\\'avi\\xc3\\xb3 s\\'enlair\\xc3\\xa0 a les sis\nclavades.\\nToday\\'s meeting has been canceled.\\tLa\nreuni\\xc3\\xb3 d\\'avui ha sigut cancelada.\\nWe\\'re sorry for the inconvenience.\\tEns sap greu la mol\\xc3\\xa8stia\ncausada.\\nWhere\\'s the nearest travel agency?\\tOn \\xc3\\xa9s l\\'ag\\xc3\\xa8ncia de viatges\nm\\xc3\\xa9s propera?\\nWhy is fishing not permitted here?\\tPer\nqu\\xc3\\xa8 est\\xc3\\xa0 prohibit pescar\nac\\xc3\\xad?\\nBangkok is Thailand\\'s capital city.\\tBangkok\n\\xc3\\xa9s\nla capital de Tail\\xc3\\xa0ndia.\\nDo\nyou want to play tennis with us?\\tVols jugar a tennis amb nosaltres?\\nHe helped poor people all his life.\\tEll va ajudar els pobres tota\nla seva vida.\\nHer husband is now living in Tokyo.\\tEl seu marit viu\na T\\xc3\\xb2kio ara.\\nI can\\'t remember where I bought it.\\tNo puc recordar on el vaig comprar.\\nI\ncan\\'t remember where I bought it.\\tNo recorde on el vaig comprar.\\nI\ncan\\'t remember where I bought it.\\tNo recorde on ho vaig comprar.\\nI\ncan\\'t remember where I bought it.\\tNo recorde on la vaig comprar.\\nI\ncan\\'t remember where I bought it.\\tNo me\\'n recorde d\\'on ho vaig comprar.\\nI\ncan\\'t remember where I bought it.\\tNo recordo on el vaig comprar.\\nI heard a beautiful song yesterday.\\tAhir vaig sentir una can\\xc3\\xa7\\xc3\\xb3 bonica.\\nI thanked him for what he had done.\\tLi vaig agrair\nel que va fer.\\nI\\'d like to meet your older sister.\\tVoldria trobar-me\namb la teva germana gran.\\nI\\'d like to meet your older sister.\\tM\\'agradaria con\\xc3\\xa8ixer la teva germana gran.\\nI\\'m the one who pays all the bills.\\tJo s\\xc3\\xb3c qui paga totes\nles factures.\\nI\\'m very slow at making up my mind.\\tS\\xc3\\xb3c molt\nlent a l\\'hora de prendre decisions.\\nI, too,\ndidn\\'t understand anything.\\tJo\ntampoc entenc res.\\nIs there a post office around here?\\tHi ha alguna oficina postal per aqu\\xc3\\xad?\\nIs there a post office around\nhere?\\tHi ha alguna oficina de correus prop d\\'ac\\xc3\\xad?\\nIs\nthere a post office around\nhere?\\tHi ha per ac\\xc3\\xad alguna oficina de correus?\\nOnly adults are 
allowed to do that.\\tNom\\xc3\\xa9s els adults tenen perm\\xc3\\xads per fer\nall\\xc3\\xb2.\\nThe door is locked at nine o\\'clock.\\tLa porta es\ntanca\namb clau\na les\nnou.\\nThe lion is the king of the jungle.\\tEl lle\\xc3\\xb3 \\xc3\\xa9s el rei de la selva.\\nThese questions are easy to answer.\\tAquestes preguntes\ns\\xc3\\xb3n f\\xc3\\xa0cils de respondre.\\nWe are sorry for the inconvenience.\\tEns sap greu la mol\\xc3\\xa8stia\ncausada.\\nWe\\'ll show you how to catch a fish.\\tT\\'ensenyarem com atrapar un peix.\\nWe\\'re not going to change anything.\\tNo canviarem res.\\nWhat little money I had was stolen.\\tEls\npocs diners que tenia me\\'ls van\nrobar.\\nA lot of jobs are done by computers.\\tMoltes feines les fan els ordinadors.\\nDo\nyou wonder why no one trusts him?\\tT\\'estranya que\nning\\xc3\\xba\nhi confi\\xc3\\xaf?\\nDon\\'t go to sleep with the light on.\\tNo\net durmis\namb el llum enc\\xc3\\xa8s.\\nI\ncan\\'t remember which is my racket.\\tNo recorde quina \\xc3\\xa9s la meua raqueta.\\nI\ndon\\'t think we can take that risk.\\tCrec que no podem c\\xc3\\xb3rrer aquest risc.\\nI\ndon\\'t think we can take that risk.\\tCrec que no podem\nc\\xc3\\xb3rrer eixe risc.\\nI have nothing to say to any of you.\\tNo tinc res a dir-vos a cap de vosaltres.\\nI was caught in a shower on the way.\\tM\\'ha\nenxampat un x\\xc3\\xa0fec pel cam\\xc3\\xad.\\nI\\'d like to reserve a table for two.\\tM\\'agradaria reservar una taula per a dos.\\nLook that word up in the dictionary.\\tCerca aquella paraula al diccionari.\\nMy apartment is on the fourth floor.\\tEl meu apartament est\\xc3\\xa0 al quart pis.\\nNight is when most people go to bed.\\tLa\nnit\n\\xc3\\xa9s quan la majoria\nde la gent se\\'n va al llit.\\nTake this medicine before each meal.\\tPreneu aquest medicament abans de cada \\xc3\\xa0pat.\\nThe teacher told me to study harder.\\tEl professor em\nva dir que estudi\\xc3\\xa9s molt.\\nTom may talk to Mary if he wants to.\\tEn Tom, si vol, pot parlar\namb la Mary.\\nTom may talk to Mary if he wants to.\\tTom pot parlar\namb Mary,\nsi vol.\\nWhen did you come back from Germany?\\tQuan vas tornar\nd\\'Alemanya?\\nFlowers die if they don\\'t have water.\\tSense aigua les flors\nes\npanseixen.\\nHis arrogance is no longer tolerable.\\tLa\nseva arrog\\xc3\\xa0ncia ja\nno\n\\xc3\\xa9s tolerable.\\nHis courage is worthy of high praise.\\tLa seva valentia mereix grans lloances.\\nI planted an apple tree in my garden.\\tHe plantat un pomer al meu jard\\xc3\\xad.\\nI really must have my watch repaired.\\tHe\nde dur el rellotge a arreglar.\\nI\\'m sick.\nWill you send for a doctor?\\tEstic malalt.\nOi que avisar\\xc3\\xa0s un metge?\\nI\\'m\nsure of winning the championship.\\tEstic segur\nde\nguanyar el campionat.\\nIt seems that he was a great athlete.\\tSembla que va ser un gran atleta.\\nIt\\'s easier to have fun than to work.\\t\\xc3\\x89s\nm\\xc3\\xa9s f\\xc3\\xa0cil divertir-se que\ntreballar.\\nPlease write to me from time to time.\\tEscriu-me de tant en tant, s\\xc3\\xad?\\nWhat are you going to eat for dinner?\\tQue sopar\\xc3\\xa0s\navui?\\nWhat do you want to talk to me\nabout?\\tDe\nqu\\xc3\\xa8\nvols parlar\namb\nmi?\\nWhat languages are spoken in America?\\tQuins idiomes\nes parlen a Am\\xc3\\xa8rica?\\nWhat\\'s your opinion of Japanese food?\\tQuina \\xc3\\xa9s\nla teva opini\\xc3\\xb3 sobre el menjar japon\\xc3\\xa8s?\\nEveryone was listening very carefully.\\tTots estaven escoltant\natentament.\\nHe is three years younger than Father.\\tEll \\xc3\\xa9s tres 
anys\nm\\xc3\\xa9s jove que el pare.\\nI\ndon\\'t know what has happened to him.\\tNo\ns\\xc3\\xa9\nqu\\xc3\\xa8\nli ha passat.\\nI was the one who knocked on the door.\\tVaig ser jo qui va trucar\na\nla porta.\\nI\\'ll make an exception\njust this once.\\tFar\\xc3\\xa9 una excepci\\xc3\\xb3 nom\\xc3\\xa9s per aquesta vegada.\\nI\\'m the one who takes out the garbage.\\tJo\ns\\xc3\\xb3c qui\ntreu les escombraries.\\nIn\ngeneral, men are taller than women.\\tEn general, els homes s\\xc3\\xb3n m\\xc3\\xa9s\nalts que les\ndones.\\nJapan imports a large quantity of oil.\\tEl Jap\\xc3\\xb3 importa una gran quantitat de petroli.\\nMary\\'s doctor advised her to exercise.\\tEl metge de la Mary li va aconsellar que fes exercici.\\nNo one knows why Tom hasn\\'t done that.\\tNing\\xc3\\xba sap perqu\\xc3\\xa8 el Tom\nno ho ha fet.\\nPlease correct me if I make a mistake.\\tSi us plau, corregeix-me si\nm\\'equivoco.\\nWhich is larger, the sun or the earth?\\tQuin \\xc3\\xa9s\nm\\xc3\\xa9s gran:\nel Sol\no la\nTerra?\\nWhy are we wasting our time with this?\\tPer\nqu\\xc3\\xa8 estem\nperdent el temps amb aix\\xc3\\xb2?\\nWill the work be finished by tomorrow?\\tEstar\\xc3\\xa0 enllestida la feina per a dem\\xc3\\xa0?\\nYou\nshould ask Tom to do that for you.\\tLi hauries de demanar\nal Tom que ho faci per tu.\\n\"Is\nshe reading a book?\"\n\"Yes, she is.\"\\t\"Est\\xc3\\xa0 llegint un llibre?\"\n\"S\\xc3\\xad.\"\\n\"Is she reading a book?\"\n\"Yes, she is.\"\\t\"Est\\xc3\\xa0 ella llegint un llibre?\"\n\"S\\xc3\\xad.\"\\nAll my friends like playing videogames.\\tA tots els meus amics els\nagraden els\nvideojocs.\\nAs long as there\\'s life, there is hope.\\tMentre hi\nha vida, hi ha esperan\\xc3\\xa7a.\\nBlue lines on the map\ndesignate rivers.\\tLes l\\xc3\\xadnies\nblaves al mapa designen\nrius.\\nHow much time\ndo you spend on Facebook?\\tQuant de temps passes a Facebook?\\nI\ndon\\'t know whether it is true or not.\\tNo\ns\\xc3\\xa9\nsi\n\\xc3\\xa9s veritat o no.\\nI don\\'t think Tom was talking about me.\\tNo crec que Tom estigu\\xc3\\xa9s parlant de mi.\\nI have cookies for breakfast every day.\\tCada\ndia menjo galetes per esmorzar.\\nI would like to visit New York someday.\\tUn dia m\\'agradaria visitar New York.\\nI\\'ve been waiting for this day to come.\\tHe estat\nesperant\nque arribi aquest dia.\\nIn Japan\nthere are four seasons a year.\\tAl Jap\\xc3\\xb3\nhi\nha\nquatre\nestacions cada any.\\nMathematics is important in daily life.\\tLes matem\\xc3\\xa0tiques\ns\\xc3\\xb3n importants a la vida\ndi\\xc3\\xa0ria.\\nThe Japanese economy developed rapidly.\\tL\\'economia japonesa es va desenvolupar depressa.\\nThe class was divided into four groups.\\tLa classe es va dividir en quatre grups.\\nThe earth is much larger than the moon.\\tLa Terra \\xc3\\xa9s molt\nm\\xc3\\xa9s\ngran que\nla\nLluna.\\nThey arrived late because of the storm.\\tElls van arribar tard a causa de la tempesta.\\nThey say golf is very popular in Japan.\\tDiuen\nque\nel golf \\xc3\\xa9s\nmolt popular\nal\nJap\\xc3\\xb3.\\nThis is the best book I have ever read.\\t\\xc3\\x89s\nel millor llibre que\nhe llegit mai.\\nTom is interested in French literature.\\tEn Tom est\\xc3\\xa0 interessat en la\nliteratura francesa.\\nTom is making great progress in French.\\tEn Tom est\\xc3\\xa0 progressant molt\namb\nel franc\\xc3\\xa8s.\\nTom is sick, so\nhe\ncan\\'t do that today.\\tEl\nTom est\\xc3\\xa0 malalt, pel que no podr\\xc3\\xa0 fer aix\\xc3\\xb2 avui.\\nHe fought against racial discrimination.\\tVa lluitar contra la 
discriminaci\\xc3\\xb3 racial.\\nI know that there was a big church here.\\tS\\xc3\\xa9\nque aqu\\xc3\\xad\nhi havia\nuna esgl\\xc3\\xa9sia gran.\\nI noticed that she sat in the front row.\\tVaig\nnotar que ella va seure a la fila del davant.\\nI was in the shower when the phone rang.\\tEstava en la dutxa quan ha sonat el tel\\xc3\\xa8fon.\\nPresident Clinton denied the accusation.\\tEl president Clinton va negar l\\'acusaci\\xc3\\xb3.\\nThe men are wearing short sleeve shirts.\\tEls homes\nporten camises de m\\xc3\\xa0niga\ncurta.\\nWhat do these dots represent on the map?\\tQu\\xc3\\xa8 signifiquen aquests\npunts al mapa?\\nWill\nyou please stop talking about food?\\tPodries deixar de parlar de menjar?\\nGerman\nis the best language in the world.\\tL\\'alemany\n\\xc3\\xa9s la millor llengua del\nm\\xc3\\xb3n.\\nHow many people are there in your family?\\tQuants\ns\\xc3\\xb3n a la seva fam\\xc3\\xadlia?\\nI asked him many questions about ecology.\\tLi vaig fer moltes preguntes sobre ecologia.\\nI\ndon\\'t have the strength to keep trying.\\tNo\ntinc la for\\xc3\\xa7a per continuar triant.\\nI started learning English six years\nago.\\tFa\nsis anys que vaig comen\\xc3\\xa7ar\na aprendre angl\\xc3\\xa8s.\\nI will ask him where he went last Sunday.\\tLi preguntar\\xc3\\xa9 on va\nanar el diumenge.\\nI\\'m surprised that he accepted the offer.\\tEm sorpr\\xc3\\xa8n que accept\\xc3\\xa9s l\\'oferiment.\\nIt\nis difficult to speak\nthree\nlanguages.\\t\\xc3\\x89s\ndif\\xc3\\xadcil parlar tres llengues.\\nThere are many beautiful parks in London.\\tA Londres\nhi\nhan molts\nparcs bonics.\\nTom does everything he can to save\nmoney.\\tEn Tom fa tot el que pot per estalviar.\\nTom goes to Boston every once in a while.\\tEn Tom va a Boston de tant en tant.\\nI am sure of his winning the tennis match.\\tEstic\nsegur\nde la seva vict\\xc3\\xb2ria al tennis.\\nI did basically the same thing as Tom did.\\tHe fet\nb\\xc3\\xa0sicament el mateix que ha fet el Tom.\\nI don\\'t know the reason why he went there.\\tNo\ns\\xc3\\xa9\nel motiu pel qual va anar-hi.\\nI\\'d like to know when you can send it\nout.\\tM\\'agadaria saber quan ho pot enviar.\\nIt\\'s strange that our friends aren\\'t\nhere.\\t\\xc3\\x89s\nextrany que els\nnostres amics\nno estiguin\naqui.\\nNothing happens unless you make it happen.\\tNo\npassa res\nsi tu\nno fas que\npassi.\\nThis is the best book that I\\'ve ever read.\\t\\xc3\\x89s\nel millor llibre que\nhe llegit\nmai.\\nAs\nwe go up higher, the air becomes cooler.\\tCom m\\xc3\\xa9s amunt anem,\nm\\xc3\\xa9s fresc\n\\xc3\\xa9s\nl\\'aire.\\nDo you support or oppose the death\npenalty?\\tEst\\xc3\\xa0s a favor o en contra de la pena de mort?\\nEnglish is not easy, but it is interesting.\\tL\\'angl\\xc3\\xa8s\nno \\xc3\\xa9s f\\xc3\\xa0cil, per\\xc3\\xb2 \\xc3\\xa9s interessant.\\nI\ndon\\'t have anything to say to any of you.\\tNo tinc res a dir-vos a cap de vosaltres.\\nI\ndon\\'t know for certain\nwhen he will come.\\tNo s\\xc3\\xa9\ndel cert quan vindr\\xc3\\xa0.\\nI eat a boiled egg for breakfast\nevery\nday.\\tCada dia\nem\nmenjo un ou dur per esmorzar.\\nI have been studying French\nfour years now.\\tFa quatre anys que estudio franc\\xc3\\xa8s.\\nI told you to be here on time\nthis morning.\\tEt vaig dir\nque havies de ser aqu\\xc3\\xad puntual aquest mat\\xc3\\xad.\\nI\\'m fed up with him always preaching to me.\\tEstic tip\nque em\nsermonegi\nconstantment.\\nI\\'m fed up with him\nalways preaching to me.\\tEstic tip dels seus sermons constants.\\nI\\'m getting off the train 
at the next stop.\\tEm baixo del tren a la pr\\xc3\\xb2xima\nestaci\\xc3\\xb3.\\nIt seems those two are made for each other.\\tSembla que aquell parell estan fets l\\'un per l\\'altre.\\nThis is the place where my father was born.\\tAquest \\xc3\\xa9s el lloc on va n\\xc3\\xa8ixer el meu pare.\\nTom is young, but he knows what he\\'s doing.\\tEl Tom \\xc3\\xa9s jove\n, per\\xc3\\xb2 sap el que est\\xc3\\xa0 fent.\\nWhen will it be convenient for you to come?\\tQuan\nli convendria venir?\\nCan\nyou remember the first time you saw Tom?\\tPodeu recordar el primer cop que heu vist en Tom?\\nGive him this message the moment\nhe arrives.\\tD\\xc3\\xb3na-li aquest missatge quan arribi.\\nI demanded that he pay the bill\nimmediately.\\tLi vaig demanar\nde pagar la factura immediatament.\\nI feel like telling him what I think of him.\\tTinc ganes de dir-li qu\\xc3\\xa8 penso d\\'ell.\\nI\nreally need to take care of some business.\\tHe de tenir cura d\\'alguns negocis.\\nI refused to eat until my parents came\nhome.\\tNo vaig voler menjar\nfins que els meus pares\nno tornessin\na casa.\\nJapan imports great quantities of crude oil.\\tEl\nJap\\xc3\\xb3 importa una gran quantitat de petroli.\\nShe\nmakes him do his homework before dinner.\\tElla l\\'obliga\na fer els\ndeures abans de sopar.\\nThey fell into the conversation immediately.\\tVan\npassar al tema\na l\\'instant.\\nYou should pay more attention to what I say.\\tDeuries prestar\nm\\xc3\\xa9s\natenci\\xc3\\xb3 a all\\xc3\\xb2 que dic.\\nBoth of them are unpredictable and impatient.\\tTots\ndos s\\xc3\\xb3n impredictibles\ni\nimpacients.\\nHer explanation of the problem made no sense.\\tLa seva explicaci\\xc3\\xb3 del problema no tenia ni cap ni peus.\\nI am going to do it whether you agree or not.\\tHo far\\xc3\\xa9,\nestigueu\no no d\\'acord\namb mi.\\nI didn\\'t\nknow you were that kind of a person.\\tNo sabia que eres aix\\xc3\\xad.\\nI will take you to the zoo\none of these days.\\tUn dia d\\'aquests et portar\\xc3\\xa9 al zoo.\\nMy son has gone to America to study medicine.\\tEl meu fill ha anat a Am\\xc3\\xa8rica\na estudiar medicina.\\nShe says she brushes her teeth\nevery morning.\\tElla diu que\nes raspatlla les dents tots els\ndematins.\\nWe need to invest in clean, renewable energy.\\tHem d\\'invertir en energia neta\ni\nrenovable.\\nHe is one of the candidates running for mayor.\\t\\xc3\\x89s un dels candidats\nque es presenta per alcalde.\\nI\nhaven\\'t got the nerve to ask you for a loan.\\tNo tinc valor per demanar-te un pr\\xc3\\xa9stec.\\nI\\'m getting off the train at the next station.\\tEm baixo del tren\na la pr\\xc3\\xb2xima\nestaci\\xc3\\xb3.\\nIt is said that golf is very popular in Japan.\\tEs diu\nque el golf \\xc3\\xa9s\nmolt popular\nal Jap\\xc3\\xb3.\\nIt seems I\\'m going to be up all night tonight.\\tSembla\nque avui\nestar\\xc3\\xa9\ndespert tota\nla nit.\\nPlease wash your hands properly before\neating.\\tSiusplau\nrenteu-vos les mans com\ncal abans de menjar.\\nThe urban population of America is increasing.\\tLa\npoblaci\\xc3\\xb3 urbana\na Am\\xc3\\xa8rica est\\xc3\\xa0 creixent.\\nI thought she was angry and would just go away.\\tVaig pensar\nque\ns\\'havia\nenfadat\ni que se n\\'aniria.\\nShe buys what she wants regardless of the cost.\\tCompra el que vol sense\nfixar-se en\nel que val.\\nShe\\'s curious to find out who sent the flowers.\\tElla t\\xc3\\xa9 curiositat per saber qui va enviar les\nflors.\\nUnfortunately, my birthday is only once a year.\\tMalauradament, el meu aniversario nom\\xc3\\xa9s 
succeeix una vegada a l\\'any.\\nWhat would it cost to have this chair repaired?\\tQuant costaria arreglar aquesta cadira?\\nDrink some coffee.\nIt tastes very good, I think.\\tPren una mica de caf\\xc3\\xa8.\nT\\xc3\\xa9\nmolt bon gust, crec.\\nHe and his sisters are currently living in Tokyo.\\tEn aquest moment,\nell\ni\nles seves germanes\nviuen a T\\xc3\\xb2quio.\\nHe never fails to write to his mother every week.\\tNo\npassa\nuna setmana que no li escrigui\na la seva mare.\\nI have a friend whose father is a famous pianist.\\tTinc un amic el pare del qual \\xc3\\xa9s un pianista fam\\xc3\\xb3s.\\nI\\'m not interested in going to the baseball game.\\tNo tinc cap inter\\xc3\\xa8s a anar al partit de beisbol.\\nI\\'m sorry, but I can\\'t find the book you lent me.\\tEm sap greu, per\\xc3\\xb2\nno\ntrobo\nel llibre\nque em\nvas\ndeixar.\\nIf\nonly I knew, I would tell you all\nthat\nI\nknew.\\tSi\nho\nsab\\xc3\\xa9s, et diria tot el que s\\xc3\\xa9.\\nShe tried to squeeze the juice out of the orange.\\tVa provar d\\'esc\\xc3\\xb3rrer la taronja.\\nThis story is far more interesting than that one.\\tAquesta\nhist\\xc3\\xb2ria \\xc3\\xa9s molt m\\xc3\\xa9s interessant que aquella.\\nI took it for granted that he would pass the exam.\\tDono per descomptat que aprovar\\xc3\\xa0 l\\'examen.\\nThey insisted on my making use of the opportunity.\\tEm varen insistir per a que aprofit\\xc3\\xa8s aquella oportunitat.\\nDo you know which deity this temple is dedicated to?\\tSabeu\na quina divinitat est\\xc3\\xa0 dedicat aquest temple?\\nWhy don\\'t\nwe see if Tom wants to play cards with us?\\tPerqu\\xc3\\xa8\nno mirem\nsi en\nTom vol jugar a les cartes\namb niosaltres?\\nI was glad to see that he finally came to his senses.\\tVaig estar content\nde veure\nque al final va posar-hi seny.\\nI want to live in a quiet city where the air is clean.\\tVull viure a una ciutat tranquila amb l\\'aire pur.\\nIf\nyou\ndon\\'t want to be alone, I can keep you company.\\tSi no vols estar sol, puc fer-te companyia.\\nHe will take over the business when his father retires.\\tEll continuar\\xc3\\xa0 el negoci quan son\npare es\njubili.\\nMy mother likes tulips very much and so does my sister.\\tA ma mare li\nagraden\nmolt les tulipes\ni a ma germana tamb\\xc3\\xa9.\\nThis cola has lost its fizz and doesn\\'t taste any good.\\tAquesta cola s\\'ha esbravat\ni\nno\nt\\xc3\\xa9\nbon gust.\\nTom is accustomed to calling up girls on the telephone.\\tEn Tom acostuma a trucar\nnoies.\\nTom should tell Mary not to forget to water the flowers.\\tEl Tom li hauria de dir\na\nla\nMary que no oblidi regar les\nflors.\\nCuzco is one of the most interesting places in the world.\\tCuzco \\xc3\\xa9s un dels indrets\nm\\xc3\\xa9s interessants del m\\xc3\\xb3n.\\nI stayed in bed one more day just to be on the safe side.\\tEm vaig\nquedar un dia\nm\\xc3\\xa9s al llit per si de cas.\\nTom will likely be discharged from the hospital tomorrow.\\tDem\\xc3\\xa0 donaran\nd\\'alta de l\\'hospital en\nTom.\\n\"How are you feeling this morning?\"\n\"Pretty good, thanks.\"\\t\"Com et sents aquest mat\\xc3\\xad?\"\n\"Bastant b\\xc3\\xa9,\ngr\\xc3\\xa0cies.\"\\nPeople of my generation all think the same way about this.\\tTota la gent de la meva generaci\\xc3\\xb3 pensen igual sobre aix\\xc3\\xb2.\\nThe only useful answers are those that raise new questions.\\tLes \\xc3\\xbaniques respostes\n\\xc3\\xbatils\ns\\xc3\\xb3n les que creen noves\npreguntes.\\nIt takes us thirty minutes to walk from here to the station.\\tD\\'aqu\\xc3\\xad\na 
l\\'estaci\\xc3\\xb3 triguem mitja hora\na peu.\\nThe secret of longevity is to choose your parents carefully.\\tEl secret de la longevitat\n\\xc3\\xa9s triar amb compte els\npares.\\nIt takes about 10 minutes to get to the train station by foot.\\tTens uns 10 minuts d\\'aqu\\xc3\\xad a l\\'estaci\\xc3\\xb3 a peu.\\nThis medicine must not be placed within the reach of children.\\tAquest medicament no s\\'ha de deixar a la ma dels\nnins.\\nYou told her that you had finished the work three days\nbefore.\\tLi\nvas dir\nque havies\nenllestit\nla feina\nfeia tres dies.\\nHis father died, and to make matters worse, his mother fell ill.\\tSon pare es va morir, i per acabar-ho d\\'adobar, sa mare es va posar malalta.\\nTry to understand it in Spanish, without translating to English.\\tTracta d\\'entendre-ho amb espanyol, sense tradu\\xc3\\xafr-lo amb\nangl\\xc3\\xa8s.\\nWe lost our way, and what was worse, we were caught in a shower.\\tEns vam perdre\ni, encara pitjor, ens va enxampar\nun\nx\\xc3\\xa0fec.\\nShe\\'s worried since she hasn\\'t heard from her son for many months.\\tEst\\xc3\\xa0 amo\\xc3\\xafnada perqu\\xc3\\xa8 fa mesos que\nno\nt\\xc3\\xa9\nnot\\xc3\\xadcia del seu fill.\\nI suspected that he was telling a lie, but that didn\\'t surprise me.\\tSospitava que m\\'estava dient una mentida, per\\xc3\\xb2 aix\\xc3\\xb2 no em va sorprendre.\\nMy daughter won\\'t find it easy to get accustomed to the new school.\\tLa meva\nfilla no trobar\\xc3\\xa0 f\\xc3\\xa0cil per acostumar-se\na la nova escola.\\nThe bullet penetrated his chest, leaving him in critical condition.\\tLa bala va penetrar\nal seu pit\ni el va deixar en estat cr\\xc3\\xadtic.\\nI wanted to buy the book, but I found I had no more than 200 yen with me.\\tVolia comprar el llibre,\nper\\xc3\\xb2 vaig adonar-\nme que no duia\nm\\xc3\\xa9s de 200\niens.\\nFor the first time in more than 6 years, the unemployment rate is below 6%.\\tPer primera vegada en m\\xc3\\xa9s de 6 anys, la taxa d\\'atur est\\xc3\\xa0 per davall del 6%.\\nWe would have bought the plane tickets if the price had been a little lower.\\tHaur\\xc3\\xadem comprat els bitllets d\\'avi\\xc3\\xb3 si el preu fos un p\\xc3\\xa8l\nm\\xc3\\xa9s baix.\\nMy friend has had three jobs in a year; he never sticks to anything for long.\\tEl meu amic\nha treballat a tres llocs diferents en un any; res no li dura gaire.\\nYou can\\'t park in a handicapped parking space unless you have a special permit.\\tNo pots aparcar a una pla\\xc3\\xa7a d\\'aparcament per discapacitats\nsi\nno\ntens un perm\\xc3\\xads especial.\\nDrinking lots of water is good for you, sure, but one can\\'t drink that much water at once.\\tBeure molta aigua \\xc3\\xa9s bo per tu, segur,\nper\\xc3\\xb2 no es pot beure tanta\naigua de cop.\\nWe\\'re gonna make sure that no one is taking advantage of the American people for their own short-term gain.\\tEns assegurarem que ning\\xc3\\xba\ns\\'estiga aprofitant del poble americ\\xc3\\xa0 per al seu propi inter\\xc3\\xa8s a curt termini.\\n'\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52c297ec642c630f4e3983e17c8df0bf58531d9
| 35,534 |
ipynb
|
Jupyter Notebook
|
notebooks/RSA-TFIDF-DUC2007.ipynb
|
stepgazaille/RSAsummarization
|
aad389d5a242c505d62b1f6e525fcf39d11b88ec
|
[
"Apache-2.0"
] | 1 |
2020-09-14T19:57:01.000Z
|
2020-09-14T19:57:01.000Z
|
notebooks/RSA-TFIDF-DUC2007.ipynb
|
tyagi-iiitv/RSAsummarization
|
aad389d5a242c505d62b1f6e525fcf39d11b88ec
|
[
"Apache-2.0"
] | null | null | null |
notebooks/RSA-TFIDF-DUC2007.ipynb
|
tyagi-iiitv/RSAsummarization
|
aad389d5a242c505d62b1f6e525fcf39d11b88ec
|
[
"Apache-2.0"
] | 1 |
2020-07-06T16:14:02.000Z
|
2020-07-06T16:14:02.000Z
| 58.636964 | 240 | 0.600861 |
[
[
[
"from subprocess import call\nfrom glob import glob\nfrom nltk.corpus import stopwords\nimport os, struct\nfrom tensorflow.core.example import example_pb2\nimport pyrouge\nimport shutil\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom nltk.stem.porter import *",
"_____no_output_____"
],
[
"ratio = 1\nduc_num = 7\nmax_len = 250\n\n#cmd = '/root/miniconda2/bin/python ../pointer-generator-master/run_summarization.py --mode=decode --single_pass=1 --coverage=True --vocab_path=finished_files/vocab --log_root=log --exp_name=myexperiment --data_path=test/temp_file'\n#cmd = '/root/miniconda2/bin/python run_summarization.py --mode=decode --single_pass=1 --coverage=True --vocab_path=finished_files/vocab --log_root=log --exp_name=myexperiment --data_path=test/temp_file --max_enc_steps=4000'\n#cmd = cmd.split()\n#generated_path = '/gttp/pointer-generator-master/log/myexperiment/decode_test_4000maxenc_4beam_35mindec_120maxdec_ckpt-238410/'\n#generated_path = '/gttp/pointer-generator-tal/log/myexperiment/decode_test_4000maxenc_4beam_35mindec_100maxdec_ckpt-238410/'\n\n\nvocab_path = '../data/DMQA/finished_files/vocab'\nlog_root = 'log'\nexp_name = 'myexperiment'\ndata_path= 'test/temp_file'\nmax_enc_steps = 4000\n\ncmd = ['python',\n 'run_summarization.py',\n '--mode=decode',\n '--single_pass=1',\n '--coverage=True',\n '--vocab_path=' + vocab_path,\n '--log_root=' + log_root,\n '--exp_name=' + exp_name,\n '--data_path=' + data_path,\n '--max_enc_steps=' + str(max_enc_steps)]\n\ngenerated_path = 'log/myexperiment/decode_test_4000maxenc_4beam_35mindec_100maxdec_ckpt-238410/'\n\n\nstopwords = set(stopwords.words('english'))\nstemmer = PorterStemmer()",
"_____no_output_____"
],
[
"def pp(string):\n return ' '.join([stemmer.stem(word.decode('utf8')) for word in string.lower().split() if not word in stopwords])\n \ndef write_to_file(article, abstract, rel, writer):\n abstract = '<s> '+' '.join(abstract)+' </s>'\n #abstract = abstract.encode('utf8', 'ignore')\n #rel = rel.encode('utf8', 'ignore')\n #article = article.encode('utf8', 'ignore')\n tf_example = example_pb2.Example()\n tf_example.features.feature['abstract'].bytes_list.value.extend([bytes(abstract)])\n tf_example.features.feature['relevancy'].bytes_list.value.extend([bytes(rel)])\n tf_example.features.feature['article'].bytes_list.value.extend([bytes(article)])\n tf_example_str = tf_example.SerializeToString()\n str_len = len(tf_example_str)\n writer.write(struct.pack('q', str_len))\n writer.write(struct.pack('%ds' % str_len, tf_example_str))\n\n\ndef duck_iterator(i):\n duc_folder = 'duc0' + str(i) + 'tokenized/'\n for topic in os.listdir(duc_folder + 'testdata/docs/'):\n topic_folder = duc_folder + 'testdata/docs/' + topic\n if not os.path.isdir(topic_folder):\n continue\n query = ' '.join(open(duc_folder + 'queries/' + topic).readlines())\n model_files = glob(duc_folder + 'models/' + topic[:-1].upper() + '.*')\n\n topic_texts = [' '.join(open(topic_folder + '/' + file).readlines()).replace('\\n', '') for file in\n os.listdir(topic_folder)]\n\n abstracts = [' '.join(open(f).readlines()) for f in model_files]\n yield topic_texts, abstracts, query\n\ndef ones(sent, ref): return 1.\n\ndef count_score(sent, ref):\n ref = pp(ref).split()\n sent = ' '.join(pp(w) for w in sent.lower().split() if not w in stopwords)\n return sum([1. if w in ref else 0. for w in sent.split()])\n\n\ndef get_w2v_score_func(magic = 10):\n import gensim\n google = gensim.models.KeyedVectors.load_word2vec_format(\n 'GoogleNews-vectors-negative300.bin', binary=True)\n def w2v_score(sent, ref):\n ref = ref.lower()\n sent = sent.lower()\n sent = [w for w in sent.split() if w in google]\n ref = [w for w in ref.split() if w in google]\n try:\n score = google.n_similarity(sent, ref)\n except:\n score = 0.\n return score * magic\n return w2v_score\n\ndef get_tfidf_score_func_glob(magic = 1):\n corpus = []\n for i in range(5, 8):\n for topic_texts, _, _ in duck_iterator(i):\n corpus += [pp(t) for t in topic_texts]\n\n vectorizer = TfidfVectorizer()\n vectorizer.fit_transform(corpus)\n\n def tfidf_score_func(sent, ref):\n #ref = [pp(s) for s in ref.split(' . ')]\n sent = pp(sent)\n v1 = vectorizer.transform([sent])\n #v2s = [vectorizer.transform([r]) for r in ref]\n #return max([cosine_similarity(v1, v2)[0][0] for v2 in v2s])\n v2 = vectorizer.transform([ref])\n return cosine_similarity(v1, v2)[0][0]\n\n return tfidf_score_func",
"_____no_output_____"
],
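`write_to_file` above stores each example as an 8-byte length header followed by a serialized `tf.Example`. For reference, here is a minimal sketch of the matching read side; the field names mirror the writer, and the `example_pb2` import path is an assumption based on the usual pointer-generator data pipeline.

```python
# Sketch (assumption): read back the length-prefixed tf.Example records
# produced by write_to_file above.
import struct
from tensorflow.core.example import example_pb2

def read_examples(path):
    with open(path, 'rb') as f:
        while True:
            len_bytes = f.read(8)                 # matches struct.pack('q', str_len)
            if not len_bytes:
                break                             # end of file
            str_len = struct.unpack('q', len_bytes)[0]
            example_str = struct.unpack('%ds' % str_len, f.read(str_len))[0]
            ex = example_pb2.Example.FromString(example_str)
            yield (ex.features.feature['article'].bytes_list.value[0],
                   ex.features.feature['abstract'].bytes_list.value[0],
                   ex.features.feature['relevancy'].bytes_list.value[0])
```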
[
"tfidf_score = get_tfidf_score_func_glob()",
"_____no_output_____"
],
[
"def get_tfidf_score_func(magic = 10):\n corpus = []\n for i in range(5, 8):\n for topic_texts, _, _ in duck_iterator(i):\n corpus += [t.lower() for t in topic_texts]\n\n vectorizer = TfidfVectorizer()\n vectorizer.fit_transform(corpus)\n\n def tfidf_score_func(sent, ref):\n ref = ref.lower()\n sent = sent.lower()\n v1 = vectorizer.transform([sent])\n v2 = vectorizer.transform([ref])\n return cosine_similarity(v1, v2)[0][0]*magic\n return tfidf_score_func\n\n\ndef just_relevant(text, query):\n text = text.split(' . ')\n score_per_sent = [count_score(sent, query) for sent in text]\n sents_gold = list(zip(*sorted(zip(score_per_sent, text), reverse=True)))[1]\n sents_gold = sents_gold[:int(len(sents_gold)*ratio)]\n\n filtered_sents = []\n for s in text:\n if not s: continue\n if s in sents_gold: filtered_sents.append(s)\n return ' . '.join(filtered_sents)\n\nclass Summary:\n def __init__(self, texts, abstracts, query):\n #texts = sorted([(tfidf_score(query, text), text) for text in texts], reverse=True)\n #texts = sorted([(tfidf_score(text, ' '.join(abstracts)), text) for text in texts], reverse=True)\n\n #texts = [text[1] for text in texts]\n self.texts = texts\n self.abstracts = abstracts\n self.query = query\n self.summary = []\n self.words = set()\n self.length = 0\n\n def add_sum(self, summ):\n for sent in summ:\n self.summary.append(sent)\n\n def get(self):\n text = max([(len(t.split()), t) for t in self.texts])[1]\n #text = texts[0]\n if ratio < 1: text = just_relevant(text, self.query)\n\n sents = text.split(' . ')\n score_per_sent = [(score_func(sent, self.query), sent) for sent in sents]\n #score_per_sent = [(count_score(sent, ' '.join(self.abstracts)), sent) for sent in sents]\n\n scores = []\n for score, sent in score_per_sent:\n scores += [score] * (len(sent.split()) + 1)\n scores = str(scores[:-1])\n return text, 'a', scores\n\ndef get_summaries(path):\n path = path+'decoded/'\n out = {}\n for file_name in os.listdir(path):\n index = int(file_name.split('_')[0])\n out[index] = open(path+file_name).readlines()\n return out\n\ndef rouge_eval(ref_dir, dec_dir):\n \"\"\"Evaluate the files in ref_dir and dec_dir with pyrouge, returning results_dict\"\"\"\n r = pyrouge.Rouge155()\n r.model_filename_pattern = '#ID#_reference_(\\d+).txt'\n r.system_filename_pattern = '(\\d+)_decoded.txt'\n r.model_dir = ref_dir\n r.system_dir = dec_dir\n return r.convert_and_evaluate()\n\ndef evaluate(summaries):\n for path in ['eval/ref', 'eval/dec']:\n if os.path.exists(path): shutil.rmtree(path, True)\n os.mkdir(path)\n for i, summ in enumerate(summaries):\n for j,abs in enumerate(summ.abstracts):\n with open('eval/ref/'+str(i)+'_reference_'+str(j)+'.txt', 'w') as f:\n f.write(abs)\n with open('eval/dec/'+str(i)+'_decoded.txt', 'w') as f:\n f.write(' '.join(summ.summary))\n print rouge_eval('eval/ref/', 'eval/dec/') ",
"_____no_output_____"
],
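`Summary.get` emits one relevancy score per whitespace token of the article: each sentence contributes one score per word plus one for the ` . ` separator, and the final separator score is trimmed. A tiny self-contained check of that alignment (the toy strings are illustrative only):

```python
# Toy check of the score/token alignment used by Summary.get above.
text = 'first sent . second one here'
score_per_sent = [(0.2, 'first sent'), (0.9, 'second one here')]

scores = []
for score, sent in score_per_sent:
    scores += [score] * (len(sent.split()) + 1)  # +1 covers the ' . ' separator
scores = scores[:-1]                             # no separator after the last sentence

assert len(scores) == len(text.split())
print(scores)  # [0.2, 0.2, 0.2, 0.9, 0.9, 0.9]
```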
[
"#count_score\n#score_func = ones#get_w2v_score_func()#get_tfidf_score_func()#count_score\nscore_func = get_tfidf_score_func()\n\nsummaries = [Summary(texts, abstracts, query) for texts, abstracts, query in duck_iterator(duc_num)]\n\nwith open('test/temp_file', 'wb') as writer:\n for summ in summaries:\n article, abstract, scores = summ.get()\n write_to_file(article, abstracts, scores, writer)\ncall(['rm', '-r', generated_path])\ncall(cmd)\ngenerated_summaries = get_summaries(generated_path)\n\nfor i in range(len(summaries)):\n summaries[i].add_sum(generated_summaries[i])",
"_____no_output_____"
],
[
"evaluate(summaries)\nprint duc_num\nprint score_func ",
"2019-02-14 10:46:31,554 [MainThread ] [INFO ] Writing summaries.\n2019-02-14 10:46:31,557 [MainThread ] [INFO ] Processing summaries. Saving system files to /tmp/tmpQzPTX0/system and model files to /tmp/tmpQzPTX0/model.\n2019-02-14 10:46:31,558 [MainThread ] [INFO ] Processing files in eval/dec/.\n2019-02-14 10:46:31,559 [MainThread ] [INFO ] Processing 0_decoded.txt.\n2019-02-14 10:46:31,562 [MainThread ] [INFO ] Processing 1_decoded.txt.\n2019-02-14 10:46:31,564 [MainThread ] [INFO ] Processing 2_decoded.txt.\n2019-02-14 10:46:31,566 [MainThread ] [INFO ] Processing 3_decoded.txt.\n2019-02-14 10:46:31,568 [MainThread ] [INFO ] Processing 4_decoded.txt.\n2019-02-14 10:46:31,571 [MainThread ] [INFO ] Processing 5_decoded.txt.\n2019-02-14 10:46:31,573 [MainThread ] [INFO ] Processing 6_decoded.txt.\n2019-02-14 10:46:31,575 [MainThread ] [INFO ] Processing 7_decoded.txt.\n2019-02-14 10:46:31,577 [MainThread ] [INFO ] Processing 8_decoded.txt.\n2019-02-14 10:46:31,579 [MainThread ] [INFO ] Processing 9_decoded.txt.\n2019-02-14 10:46:31,581 [MainThread ] [INFO ] Processing 10_decoded.txt.\n2019-02-14 10:46:31,583 [MainThread ] [INFO ] Processing 11_decoded.txt.\n2019-02-14 10:46:31,585 [MainThread ] [INFO ] Processing 12_decoded.txt.\n2019-02-14 10:46:31,587 [MainThread ] [INFO ] Processing 13_decoded.txt.\n2019-02-14 10:46:31,589 [MainThread ] [INFO ] Processing 14_decoded.txt.\n2019-02-14 10:46:31,591 [MainThread ] [INFO ] Processing 15_decoded.txt.\n2019-02-14 10:46:31,593 [MainThread ] [INFO ] Processing 16_decoded.txt.\n2019-02-14 10:46:31,595 [MainThread ] [INFO ] Processing 17_decoded.txt.\n2019-02-14 10:46:31,597 [MainThread ] [INFO ] Processing 18_decoded.txt.\n2019-02-14 10:46:31,599 [MainThread ] [INFO ] Processing 19_decoded.txt.\n2019-02-14 10:46:31,601 [MainThread ] [INFO ] Processing 20_decoded.txt.\n2019-02-14 10:46:31,604 [MainThread ] [INFO ] Processing 21_decoded.txt.\n2019-02-14 10:46:31,606 [MainThread ] [INFO ] Processing 22_decoded.txt.\n2019-02-14 10:46:31,608 [MainThread ] [INFO ] Processing 23_decoded.txt.\n2019-02-14 10:46:31,610 [MainThread ] [INFO ] Processing 24_decoded.txt.\n2019-02-14 10:46:31,612 [MainThread ] [INFO ] Processing 25_decoded.txt.\n2019-02-14 10:46:31,614 [MainThread ] [INFO ] Processing 26_decoded.txt.\n2019-02-14 10:46:31,616 [MainThread ] [INFO ] Processing 27_decoded.txt.\n2019-02-14 10:46:31,618 [MainThread ] [INFO ] Processing 28_decoded.txt.\n2019-02-14 10:46:31,620 [MainThread ] [INFO ] Processing 29_decoded.txt.\n2019-02-14 10:46:31,622 [MainThread ] [INFO ] Processing 30_decoded.txt.\n2019-02-14 10:46:31,624 [MainThread ] [INFO ] Processing 31_decoded.txt.\n2019-02-14 10:46:31,626 [MainThread ] [INFO ] Processing 32_decoded.txt.\n2019-02-14 10:46:31,628 [MainThread ] [INFO ] Processing 33_decoded.txt.\n2019-02-14 10:46:31,630 [MainThread ] [INFO ] Processing 34_decoded.txt.\n2019-02-14 10:46:31,632 [MainThread ] [INFO ] Processing 35_decoded.txt.\n2019-02-14 10:46:31,634 [MainThread ] [INFO ] Processing 36_decoded.txt.\n2019-02-14 10:46:31,636 [MainThread ] [INFO ] Processing 37_decoded.txt.\n2019-02-14 10:46:31,638 [MainThread ] [INFO ] Processing 38_decoded.txt.\n2019-02-14 10:46:31,640 [MainThread ] [INFO ] Processing 39_decoded.txt.\n2019-02-14 10:46:31,642 [MainThread ] [INFO ] Processing 40_decoded.txt.\n2019-02-14 10:46:31,644 [MainThread ] [INFO ] Processing 41_decoded.txt.\n2019-02-14 10:46:31,646 [MainThread ] [INFO ] Processing 42_decoded.txt.\n2019-02-14 10:46:31,648 [MainThread ] [INFO ] Processing 
43_decoded.txt.\n2019-02-14 10:46:31,650 [MainThread ] [INFO ] Processing 44_decoded.txt.\n2019-02-14 10:46:31,652 [MainThread ] [INFO ] Saved processed files to /tmp/tmpQzPTX0/system.\n2019-02-14 10:46:31,653 [MainThread ] [INFO ] Processing files in eval/ref/.\n2019-02-14 10:46:31,655 [MainThread ] [INFO ] Processing 0_reference_0.txt.\n2019-02-14 10:46:31,657 [MainThread ] [INFO ] Processing 0_reference_1.txt.\n2019-02-14 10:46:31,659 [MainThread ] [INFO ] Processing 0_reference_2.txt.\n2019-02-14 10:46:31,661 [MainThread ] [INFO ] Processing 0_reference_3.txt.\n2019-02-14 10:46:31,663 [MainThread ] [INFO ] Processing 1_reference_0.txt.\n2019-02-14 10:46:31,665 [MainThread ] [INFO ] Processing 1_reference_1.txt.\n2019-02-14 10:46:31,667 [MainThread ] [INFO ] Processing 1_reference_2.txt.\n2019-02-14 10:46:31,670 [MainThread ] [INFO ] Processing 1_reference_3.txt.\n2019-02-14 10:46:31,672 [MainThread ] [INFO ] Processing 2_reference_0.txt.\n2019-02-14 10:46:31,674 [MainThread ] [INFO ] Processing 2_reference_1.txt.\n2019-02-14 10:46:31,676 [MainThread ] [INFO ] Processing 2_reference_2.txt.\n2019-02-14 10:46:31,679 [MainThread ] [INFO ] Processing 2_reference_3.txt.\n2019-02-14 10:46:31,681 [MainThread ] [INFO ] Processing 3_reference_0.txt.\n2019-02-14 10:46:31,683 [MainThread ] [INFO ] Processing 3_reference_1.txt.\n2019-02-14 10:46:31,685 [MainThread ] [INFO ] Processing 3_reference_2.txt.\n2019-02-14 10:46:31,687 [MainThread ] [INFO ] Processing 3_reference_3.txt.\n2019-02-14 10:46:31,690 [MainThread ] [INFO ] Processing 4_reference_0.txt.\n2019-02-14 10:46:31,692 [MainThread ] [INFO ] Processing 4_reference_1.txt.\n2019-02-14 10:46:31,694 [MainThread ] [INFO ] Processing 4_reference_2.txt.\n2019-02-14 10:46:31,696 [MainThread ] [INFO ] Processing 4_reference_3.txt.\n2019-02-14 10:46:31,698 [MainThread ] [INFO ] Processing 5_reference_0.txt.\n2019-02-14 10:46:31,700 [MainThread ] [INFO ] Processing 5_reference_1.txt.\n2019-02-14 10:46:31,702 [MainThread ] [INFO ] Processing 5_reference_2.txt.\n2019-02-14 10:46:31,705 [MainThread ] [INFO ] Processing 5_reference_3.txt.\n2019-02-14 10:46:31,707 [MainThread ] [INFO ] Processing 6_reference_0.txt.\n2019-02-14 10:46:31,709 [MainThread ] [INFO ] Processing 6_reference_1.txt.\n2019-02-14 10:46:31,711 [MainThread ] [INFO ] Processing 6_reference_2.txt.\n2019-02-14 10:46:31,713 [MainThread ] [INFO ] Processing 6_reference_3.txt.\n2019-02-14 10:46:31,716 [MainThread ] [INFO ] Processing 7_reference_0.txt.\n2019-02-14 10:46:31,718 [MainThread ] [INFO ] Processing 7_reference_1.txt.\n2019-02-14 10:46:31,720 [MainThread ] [INFO ] Processing 7_reference_2.txt.\n2019-02-14 10:46:31,723 [MainThread ] [INFO ] Processing 7_reference_3.txt.\n2019-02-14 10:46:31,725 [MainThread ] [INFO ] Processing 8_reference_0.txt.\n2019-02-14 10:46:31,727 [MainThread ] [INFO ] Processing 8_reference_1.txt.\n2019-02-14 10:46:31,729 [MainThread ] [INFO ] Processing 8_reference_2.txt.\n2019-02-14 10:46:31,731 [MainThread ] [INFO ] Processing 8_reference_3.txt.\n2019-02-14 10:46:31,733 [MainThread ] [INFO ] Processing 9_reference_0.txt.\n2019-02-14 10:46:31,735 [MainThread ] [INFO ] Processing 9_reference_1.txt.\n2019-02-14 10:46:31,738 [MainThread ] [INFO ] Processing 9_reference_2.txt.\n2019-02-14 10:46:31,740 [MainThread ] [INFO ] Processing 9_reference_3.txt.\n2019-02-14 10:46:31,742 [MainThread ] [INFO ] Processing 10_reference_0.txt.\n2019-02-14 10:46:31,744 [MainThread ] [INFO ] Processing 10_reference_1.txt.\n2019-02-14 10:46:31,746 [MainThread ] [INFO ] 
Processing 10_reference_2.txt.\n2019-02-14 10:46:31,748 [MainThread ] [INFO ] Processing 10_reference_3.txt.\n2019-02-14 10:46:31,751 [MainThread ] [INFO ] Processing 11_reference_0.txt.\n2019-02-14 10:46:31,753 [MainThread ] [INFO ] Processing 11_reference_1.txt.\n2019-02-14 10:46:31,755 [MainThread ] [INFO ] Processing 11_reference_2.txt.\n2019-02-14 10:46:31,757 [MainThread ] [INFO ] Processing 11_reference_3.txt.\n2019-02-14 10:46:31,759 [MainThread ] [INFO ] Processing 12_reference_0.txt.\n2019-02-14 10:46:31,762 [MainThread ] [INFO ] Processing 12_reference_1.txt.\n2019-02-14 10:46:31,764 [MainThread ] [INFO ] Processing 12_reference_2.txt.\n2019-02-14 10:46:31,766 [MainThread ] [INFO ] Processing 12_reference_3.txt.\n2019-02-14 10:46:31,768 [MainThread ] [INFO ] Processing 13_reference_0.txt.\n2019-02-14 10:46:31,771 [MainThread ] [INFO ] Processing 13_reference_1.txt.\n2019-02-14 10:46:31,773 [MainThread ] [INFO ] Processing 13_reference_2.txt.\n2019-02-14 10:46:31,775 [MainThread ] [INFO ] Processing 13_reference_3.txt.\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52c2fcd4aa6f710864741bf346295845193cc1d
| 4,217 |
ipynb
|
Jupyter Notebook
|
Semester/3.ipynb
|
ShruKin/Data-Science-Data-Analytics-Lab
|
ecfea586a78915551d420576558572539108fb1f
|
[
"MIT"
] | null | null | null |
Semester/3.ipynb
|
ShruKin/Data-Science-Data-Analytics-Lab
|
ecfea586a78915551d420576558572539108fb1f
|
[
"MIT"
] | null | null | null |
Semester/3.ipynb
|
ShruKin/Data-Science-Data-Analytics-Lab
|
ecfea586a78915551d420576558572539108fb1f
|
[
"MIT"
] | null | null | null | 34.284553 | 1,284 | 0.428741 |
[
[
[
"Section: H Roll No.: 29",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\niris = load_iris()\n\nimport pandas as pd\ndata = pd.DataFrame(iris.data, columns=iris.feature_names)\ndata[\"species\"] = pd.DataFrame(iris.target)\ndata.head()",
"_____no_output_____"
],
[
"from sklearn.neural_network import MLPClassifier\n\nmodel = MLPClassifier(\n hidden_layer_sizes=(12,),\n max_iter=5000,\n activation='logistic',\n solver='sgd',\n learning_rate_init=0.001)",
"_____no_output_____"
],
[
"X = data.iloc[:, :-1]\nY = data.iloc[:, -1]\n\nmodel_fit = model.fit(X, Y)",
"_____no_output_____"
],
[
"X_test = pd.DataFrame([\n [5, 2, 1, 1],\n [2, 7, 4, 1]\n])\nY_test = model.predict(X_test)\n\nfor y in Y_test:\n print(iris.target_names[y])",
"setosa\nsetosa\n"
]
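The spot-check above predicts on two hand-picked points; a held-out split gives a more meaningful read on the classifier. A minimal sketch reusing the same `X`, `Y`, and `model` from the cells above (the split parameters are illustrative):

```python
# Sketch: evaluate the same MLP on a stratified held-out split.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X_tr, X_te, y_tr, y_te = train_test_split(
    X, Y, test_size=0.2, random_state=0, stratify=Y)
model.fit(X_tr, y_tr)
print("test accuracy:", accuracy_score(y_te, model.predict(X_te)))
```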
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
c52c3b8471eb6bcbde2bd3d07177a8e18f9afeab
| 387,014 |
ipynb
|
Jupyter Notebook
|
scratch/028_500_lines.ipynb
|
ANaka/genpen
|
08f811dde40596a774ab4343af45a0ac0896840e
|
[
"MIT"
] | null | null | null |
scratch/028_500_lines.ipynb
|
ANaka/genpen
|
08f811dde40596a774ab4343af45a0ac0896840e
|
[
"MIT"
] | null | null | null |
scratch/028_500_lines.ipynb
|
ANaka/genpen
|
08f811dde40596a774ab4343af45a0ac0896840e
|
[
"MIT"
] | null | null | null | 325.221849 | 41,337 | 0.750234 |
[
[
[
"import itertools\nimport numpy as np\nimport os\nimport seaborn as sns\nfrom tqdm import tqdm\nfrom dataclasses import asdict, dataclass, field\nimport vsketch\nimport shapely.geometry as sg\nfrom shapely.geometry import box, MultiLineString, Point, MultiPoint, Polygon, MultiPolygon, LineString\nimport shapely.affinity as sa\nimport shapely.ops as so\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport vpype_cli\nfrom typing import List, Generic\nfrom genpen import genpen as gp, utils as utils\nfrom scipy import stats as ss\nimport geopandas\nfrom shapely.errors import TopologicalError\nimport functools\n%load_ext autoreload\n%autoreload 2\nimport vpype\nfrom skimage import io\nfrom pathlib import Path\n\nimport bezier\n\nfrom sklearn.preprocessing import minmax_scale\nfrom skimage import feature\nfrom genpen.utils import Paper\n\nfrom scipy import spatial, stats\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.integrate import odeint\nimport fn",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"# make page\npaper_size = '14x11 inches'\nborder:float=10\npaper = Paper(paper_size)\n\ndrawbox = paper.get_drawbox(border)",
"_____no_output_____"
],
[
"lines = []\n\nnode_sets = []\nn_lines = 500\nn_nodes_per_line = 40\ny_start = 0\ny_end = 14\nx_start = 0\nx_end = 10\nnode_x_centers = np.linspace(x_start, x_end, n_lines)\nstd_scale = 0.09\nn_eval_points = 80\n\n### initialize vals\nnode_ys = np.linspace(y_start, y_end, n_nodes_per_line)\ncentered_node_xs = np.zeros(node_ys.shape)\nbez_eval_end_center = 1\nbez_eval_end_noise = 0\nbez_eval_end_limit = 1.\nbez_eval_end_std_scale = 0.01\n\nfor i, node_x_center in enumerate(node_x_centers):\n new_x_noise = np.random.randn(n_nodes_per_line) * std_scale\n centered_node_xs = centered_node_xs + new_x_noise\n node_xs = node_x_center + centered_node_xs\n node_xs[:3] = node_x_center\n node_xs[-3:] = node_x_center\n nodes = np.asfortranarray([\n node_xs,\n node_ys,\n ])\n curve = bezier.Curve(nodes, degree=(nodes.shape[1]-1))\n eval_start = np.random.uniform(0, 0.03)\n eval_end = np.random.uniform(0.97, 1.)\n eval_points = np.linspace(eval_start, eval_end, n_eval_points)\n x, y = curve.evaluate_multi(eval_points)\n if i % 2:\n x = np.flipud(x)\n y = np.flipud(y)\n lines.append(np.stack([x, y]).T)\n node_sets.append(np.stack([node_xs, node_ys]).T)",
"_____no_output_____"
],
[
"ls = [LineString(l) for l in lines]\n\nmls = gp.make_like(gp.merge_LineStrings(ls), drawbox)\n\nmask = drawbox\n\nin_mask = mls.intersection(mask)\n\nin_mask = sa.rotate(in_mask, -90)\n\nsplit_point = 500\n\nlayer1 = in_mask[:split_point]\nlayer2 = in_mask[split_point:]\n\nlayers = []\nlayers.append(LineString(np.concatenate([np.array(l) for l in layer1])))\nlayers.append(LineString(np.concatenate([np.array(l) for l in layer2])))\n\n# layers = [in_mask]",
"_____no_output_____"
],
[
"sk = vsketch.Vsketch()\nsk.size(paper.page_format_mm)\nsk.scale('1mm')\nsk.penWidth('0.3mm')\nfor i, layer in enumerate(layers):\n sk.stroke(i+1)\n sk.geometry(layer)\n\nsk.penWidth('0.3')\nsk.vpype('linesort')\n\nsk.display(color_mode='none')",
"_____no_output_____"
],
[
"sk.save('/mnt/c/code/side/plotter_images/oned_outputs/246_500_lines.svg')",
"_____no_output_____"
],
[
"# make page\npaper_size = '17x23.5 inches'\nborder:float=55\npaper = Paper(paper_size)\n\ndrawbox = paper.get_drawbox(border)",
"_____no_output_____"
],
[
"def oscillator(y, t, a, b, c, d):\n v, u = y\n dvdt = np.sin(v) + (a * v) + (b * u)\n dudt = np.cos(u) + (c * v) + (d * u)\n dydt = [dvdt, dudt]\n return dydt\n\ndef oscillator2(y, t, a, b, c, d):\n v, u = y\n dvdt = np.sin(v) + np.sin(u) + (a * v) + (b * u)\n dudt = np.cos(u) + np.cos(u) ** 2 + (c * v) + (d * u)\n dydt = [dvdt, dudt]\n return dydt",
"_____no_output_____"
],
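`oscillator` and `oscillator2` are defined here but only `ode`/`ode2` are integrated below; driving one of them follows the same `odeint` pattern. A minimal sketch with an arbitrary seed point and the coefficients used later:

```python
# Sketch: integrate the `oscillator` system from one seed point.
import numpy as np
from scipy.integrate import odeint

t = np.linspace(0, 3.7, 1801)
sol = odeint(oscillator, [0.5, -0.5], t, args=(-0.4, 0.3, 0.75, -0.2))
print(sol.shape)  # (1801, 2): one (v, u) sample per time step
```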
[
"def ode(y, t, a, b, c, d):\n v, u = y\n dvdt = np.sin(v) + np.cos(u * v) + (a * v) + (b * u)\n dudt = np.cos(u) + np.sin(u * v) + (c * v) + (d * u)\n dydt = [dvdt, dudt]\n return dydt",
"_____no_output_____"
],
[
"center = drawbox.centroid",
"_____no_output_____"
],
[
"n_lines = 500\nthetas = np.linspace(0, np.pi*10, n_lines)\nradii = np.linspace(.5, 4.5, n_lines)",
"_____no_output_____"
],
[
"pts = []\nfor theta, radius in zip(thetas, radii):\n x = np.cos(theta) * radius\n y = np.sin(theta) * radius\n pts.append(Point(x, y))",
"_____no_output_____"
],
[
"lfs = []\n\n\nt_max = 3.7\nt = np.linspace(0, t_max, 1801)\na = -0.4\nb = 0.3\nc = 0.75\nd = -0.2\n\n\n\nfor pt in tqdm(pts):\n sol = odeint(ode, [pt.x, pt.y], t, args=(a, b, c, d))\n lfs.append(LineString(sol))\n \n\nlines = gp.make_like(MultiLineString(lfs), drawbox)",
"_____no_output_____"
],
[
"sk = vsketch.Vsketch()\nsk.size(paper.page_format_mm)\nsk.scale('1mm')\nsk.penWidth('0.3mm')\n\nsk.geometry(lines)\n\nsk.penWidth('0.3')\nsk.vpype('linesimplify linesort ')\n\nsk.display(color_mode='none')",
"_____no_output_____"
],
[
"sk.save('/mnt/c/code/side/plotter_images/oned_outputs/247_500_lines.svg')",
"_____no_output_____"
]
],
[
[
"# Try 2",
"_____no_output_____"
]
],
[
[
"# make page\npaper_size = '17x23.5 inches'\nborder:float=55\npaper = Paper(paper_size)\n\ndrawbox = paper.get_drawbox(border)",
"_____no_output_____"
],
[
"center = drawbox.centroid",
"_____no_output_____"
],
[
"n_lines = 500\nthetas = np.linspace(0, np.pi*10, n_lines)\nradii = np.linspace(.75, 3.45, n_lines)",
"_____no_output_____"
],
[
"pts = []\nfor theta, radius in zip(thetas, radii):\n x = np.cos(theta) * radius - 3.3\n y = np.sin(theta) * radius + 0.5\n pts.append(Point(x, y))",
"_____no_output_____"
],
[
"def ode2(y, t, a, b, c, d):\n v, u = y\n dvdt = np.sin(u * v + (a * v) + (b * u))\n dudt = np.cos(u) + np.sin(u * v) + (c * v) + (d * u)\n dydt = [dvdt, dudt]\n return dydt",
"_____no_output_____"
],
[
"lfs = []\n\n\nt_max = 2.7\nt = np.linspace(0, t_max, 1801)\na = -0.2\nb = -0.2\nc = 0.04\nd = -0.25\n\n\n\nfor pt in tqdm(pts):\n sol = odeint(ode2, [pt.x, pt.y], t, args=(a, b, c, d))\n lfs.append(LineString(sol))\n \n\nlines = gp.make_like(MultiLineString(lfs), drawbox)",
"_____no_output_____"
],
[
"layers = []\nlayers.append(lines[:250])\nlayers.append(lines[250:])",
"_____no_output_____"
],
[
"sk = vsketch.Vsketch()\nsk.size(paper.page_format_mm)\nsk.scale('1mm')\nsk.penWidth('0.3mm')\n\nfor i, layer in enumerate(layers):\n sk.stroke(i+1)\n sk.geometry(layer)\n\nsk.penWidth('0.3')\nsk.vpype('linesimplify')\n\nsk.display(color_mode='layer')",
"_____no_output_____"
],
[
"sk.save('/mnt/c/code/side/plotter_images/oned_outputs/249_500_lines.svg')",
"_____no_output_____"
]
],
[
[
"# Try 3",
"_____no_output_____"
]
],
[
[
"# make page\npaper_size = '17x23.5 inches'\nborder:float=35\npaper = Paper(paper_size)\n\ndrawbox = paper.get_drawbox(border)",
"_____no_output_____"
],
[
"center = drawbox.centroid",
"_____no_output_____"
],
[
"n_lines = 3500\nthetas = np.linspace(0, np.pi*14, n_lines)\nradii = np.linspace(.5, 5.45, n_lines)",
"_____no_output_____"
],
[
"pts = []\nfor theta, radius in zip(thetas, radii):\n x = np.cos(theta) * radius - 3.3\n y = np.sin(theta) * radius + 0.5\n pts.append(Point(x, y))",
"_____no_output_____"
],
[
"def ode2(y, t, a, b, c, d):\n v, u = y\n dvdt = np.sin(u * v + (a * v) + (b * u))\n dudt = np.cos(u) + np.sin(u * v) + np.cos(c * v) + (d * u)\n dydt = [dvdt, dudt]\n return dydt",
"_____no_output_____"
],
[
"lfs = []\n\n\nt_max = 2.7\nt = np.linspace(0, t_max, 701)\na = -0.2\nb = -0.25\nc = 0.04\nd = -0.25\n\n\n\nfor pt in tqdm(pts):\n sol = odeint(ode2, [pt.x, pt.y], t, args=(a, b, c, d))\n lfs.append(LineString(sol))\n \n\nlines = gp.make_like(MultiLineString(lfs), drawbox)",
"_____no_output_____"
],
[
"lbs = lines.buffer(0.07, cap_style=2, join_style=2).boundary",
"_____no_output_____"
],
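`buffer(...).boundary` converts each 1-D stroke into the closed outline of a thin band, which is what survives the length filter below. What that looks like on a single segment (values illustrative):

```python
# Sketch: buffer -> boundary on one segment yields the closed outline
# of a 0.14-wide band (flat caps, mitred joins).
from shapely.geometry import LineString

seg = LineString([(0, 0), (1, 0)])
outline = seg.buffer(0.07, cap_style=2, join_style=2).boundary
print(outline.geom_type, round(outline.length, 2))  # closed ring, perimeter ~2.28
```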
[
"lbs = gp.merge_LineStrings([l for l in lbs if l.length > 0.9])",
"_____no_output_____"
],
[
"n_layers = 1\nlayer_inds = np.split(np.arange(len(lbs)), n_layers)",
"_____no_output_____"
],
[
"layers = []\nfor ind_set in layer_inds:\n layer = [lbs[i] for i in ind_set]\n layers.append(gp.merge_LineStrings(layer))\n ",
"_____no_output_____"
],
[
"sk = vsketch.Vsketch()\nsk.size(paper.page_format_mm)\nsk.scale('1mm')\nsk.penWidth('0.3mm')\n\nfor i, layer in enumerate(layers):\n sk.stroke(i+1)\n sk.geometry(layer)\n\nsk.penWidth('0.3')\nsk.vpype('linesimplify')\n\nsk.display(color_mode='layer')",
"_____no_output_____"
],
[
"sk.save('/mnt/c/code/side/plotter_images/oned_outputs/251_3500_lines.svg')",
"_____no_output_____"
]
],
[
[
"# Try 4",
"_____no_output_____"
]
],
[
[
"# make page\npaper_size = '11x14 inches'\nborder:float=25\npaper = Paper(paper_size)\n\ndrawbox = paper.get_drawbox(border)",
"_____no_output_____"
],
[
"center = drawbox.centroid",
"_____no_output_____"
],
[
"n_lines = 500\nthetas = np.linspace(0, np.pi*14, n_lines)\nradii = np.linspace(.5, 5.45, n_lines)",
"_____no_output_____"
],
[
"pts = []\nfor theta, radius in zip(thetas, radii):\n x = np.cos(theta) * radius - 3.3\n y = np.sin(theta) * radius + 0.5\n pts.append(Point(x, y))",
"_____no_output_____"
],
[
"def ode2(y, t, a, b, c, d):\n v, u = y\n dvdt = np.sin(u * v + (a * v) + (b * u))\n dudt = np.cos(u) + np.sin(u * v) + np.cos(c * v) + (d * u)\n dydt = [dvdt, dudt]\n return dydt",
"_____no_output_____"
],
[
"lfs = []\n\n\nt_max = 2.7\nt = np.linspace(0, t_max, 701)\na = -0.2\nb = -0.25\nc = 0.04\nd = -0.25\n\n\n\nfor pt in tqdm(pts):\n sol = odeint(ode2, [pt.x, pt.y], t, args=(a, b, c, d))\n lfs.append(LineString(sol))\n \n\nlines = gp.make_like(MultiLineString(lfs), drawbox)",
"100%|██████████| 500/500 [00:00<00:00, 784.67it/s]\n"
],
[
"lbs = lines.buffer(0.07, cap_style=2, join_style=2).boundary",
"_____no_output_____"
],
[
"lbs = gp.merge_LineStrings([l for l in lbs if l.length > 0.9])",
"_____no_output_____"
],
[
"n_layers = 1\nlayer_inds = np.split(np.arange(len(lbs)), n_layers)",
"_____no_output_____"
],
[
"layers = []\nfor ind_set in layer_inds:\n layer = [lbs[i] for i in ind_set]\n layers.append(gp.merge_LineStrings(layer))\n ",
"_____no_output_____"
],
[
"sk = vsketch.Vsketch()\nsk.size(paper.page_format_mm)\nsk.scale('1mm')\nsk.penWidth('0.3mm')\n\nfor i, layer in enumerate(layers):\n sk.stroke(i+1)\n sk.geometry(layer)\n\nsk.penWidth('0.3')\nsk.vpype('linesimplify')\n\nsk.display(color_mode='layer')",
"_____no_output_____"
],
[
"plot_id = fn.new_plot_id()",
"saved 20210922-224529_191352-4e22b-c23cd2 to s3://algorithmic-ink/current_plot_id\n"
],
[
"savedir='/home/naka/art/plotter_svgs'",
"_____no_output_____"
],
[
"savepath = Path(savedir).joinpath(f'{plot_id}.svg').as_posix()\nsk.save(savepath)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
c52c451731c9cc6daa8debdf56cb6e18cbf1c2b6
| 2,375 |
ipynb
|
Jupyter Notebook
|
Phyton_Exercise_3.ipynb
|
seannebue/Linear-Algebra-58019
|
0d4641737427a0a5cd533f7f4c075ff9614ec01d
|
[
"Apache-2.0"
] | null | null | null |
Phyton_Exercise_3.ipynb
|
seannebue/Linear-Algebra-58019
|
0d4641737427a0a5cd533f7f4c075ff9614ec01d
|
[
"Apache-2.0"
] | null | null | null |
Phyton_Exercise_3.ipynb
|
seannebue/Linear-Algebra-58019
|
0d4641737427a0a5cd533f7f4c075ff9614ec01d
|
[
"Apache-2.0"
] | null | null | null | 26.388889 | 244 | 0.412211 |
[
[
[
"<a href=\"https://colab.research.google.com/github/seannebue/Linear-Algebra-58019/blob/main/Coding_Activity_3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nA = np.array([[4,10,8],[10,26,26],[8,26,61]]) #creation of matrix A\nprint(A)\nprint()\ninv_A = np.linalg.inv(A) #creation of inverse of matrix A\nprint(inv_A)\nprint()\n\nB = np.array([[44],[128],[214]]) #creation of matrix B\nprint(B)\nprint()\nX = np.dot(inv_A, B)\nprint(X)\nprint()\n\n#Checking of the correct answer\nZ = np.dot(A,X) \nprint(Z)",
"[[ 4 10 8]\n [10 26 26]\n [ 8 26 61]]\n\n[[ 25.27777778 -11.16666667 1.44444444]\n [-11.16666667 5. -0.66666667]\n [ 1.44444444 -0.66666667 0.11111111]]\n\n[[ 44]\n [128]\n [214]]\n\n[[-8.]\n [ 6.]\n [ 2.]]\n\n[[ 44.]\n [128.]\n [214.]]\n"
]
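Explicitly inverting `A` works here, but for solving `AX = B` the direct solver is the standard, more numerically stable route. The same system via `np.linalg.solve`:

```python
# Sketch: solve A X = B without forming the inverse.
import numpy as np

A = np.array([[4, 10, 8], [10, 26, 26], [8, 26, 61]])
B = np.array([[44], [128], [214]])
X = np.linalg.solve(A, B)
print(X)  # [[-8.] [ 6.] [ 2.]] -- matches the inverse-based result above
```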
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |