hexsha (stringlengths 40–40) | size (int64 6–14.9M) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 6–260) | max_stars_repo_name (stringlengths 6–119) | max_stars_repo_head_hexsha (stringlengths 40–41) | max_stars_repo_licenses (list) | max_stars_count (int64 1–191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24, ⌀) | max_issues_repo_path (stringlengths 6–260) | max_issues_repo_name (stringlengths 6–119) | max_issues_repo_head_hexsha (stringlengths 40–41) | max_issues_repo_licenses (list) | max_issues_count (int64 1–67k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24, ⌀) | max_forks_repo_path (stringlengths 6–260) | max_forks_repo_name (stringlengths 6–119) | max_forks_repo_head_hexsha (stringlengths 40–41) | max_forks_repo_licenses (list) | max_forks_count (int64 1–105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24, ⌀) | avg_line_length (float64 2–1.04M) | max_line_length (int64 2–11.2M) | alphanum_fraction (float64 0–1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbb95e280cafcef6233b4bc90edcce91af01c358
| 729,981 |
ipynb
|
Jupyter Notebook
|
Dam Data.ipynb
|
utah-geological-survey/Groundwater_Processing_Notebooks
|
d80c7f77512fbe9587ba43287dc8cb200f0d3450
|
[
"MIT"
] | null | null | null |
Dam Data.ipynb
|
utah-geological-survey/Groundwater_Processing_Notebooks
|
d80c7f77512fbe9587ba43287dc8cb200f0d3450
|
[
"MIT"
] | null | null | null |
Dam Data.ipynb
|
utah-geological-survey/Groundwater_Processing_Notebooks
|
d80c7f77512fbe9587ba43287dc8cb200f0d3450
|
[
"MIT"
] | null | null | null | 470.348582 | 130,412 | 0.93235 |
[
[
[
"# Import Libraries",
"_____no_output_____"
]
],
[
[
"#import urllib2\nfrom io import StringIO\nimport os\nimport xmltodict\nimport pandas as pd\nfrom datetime import datetime \nimport statsmodels.api as sm\n\nfrom scipy.stats import linregress\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport requests\nimport pymannkendall as mk\nimport glob\n\nimport matplotlib\n\nimport scipy\n\nimport geopandas as gpd\n\nfrom bs4 import BeautifulSoup\n\nimport platform\nimport os\n\nimport matplotlib as mpl\n\nfrom IPython.display import Math,display\n\nfrom scipy.optimize import curve_fit\nimport pymannkendall as mk\nfrom pylab import rcParams\n%matplotlib inline\nmatplotlib.rc_file_defaults()\nrcParams['figure.figsize'] = 15, 10",
"_____no_output_____"
],
[
"print(\"Operating System \" + platform.system() + \" \" + platform.release())\nprint(\"Python Version \" + str(sys.version))\nprint(\"Pandas Version \" + str(pd.__version__))\nprint(\"Numpy Version \" + str(np.__version__))\nprint(\"Matplotlib Version \" + str(mpl.__version__))\n#print(\"Well Application Version \" + str(wa.__version__))\nprint(\"Scipy Version \" +str(scipy.__version__))\nprint (os.environ['CONDA_DEFAULT_ENV'])",
"Operating System Windows 10\nPython Version 3.9.4 | packaged by conda-forge | (default, May 10 2021, 22:10:34) [MSC v.1916 64 bit (AMD64)]\nPandas Version 1.3.1\nNumpy Version 1.21.1\nMatplotlib Version 3.4.2\nScipy Version 1.6.3\npygis39\n"
]
],
[
[
"# Read in Hypsometric Data",
"_____no_output_____"
],
[
"Hypsometric Curves are provided with all modern dam designs. Dam plans can be downloaded from the [Water Rights Dam Safty Website](https://maps.waterrights.utah.gov/EsriMap/map.asp?layersToAdd=Dams). You have to be on a state network to access them.",
"_____no_output_____"
],
[
"I digitized the curves in from the plans for the dams in the study area using ArcGIS Pro and a Planar projection that uses feet.",
"_____no_output_____"
]
],
[
[
"hypsometric_data = gpd.read_file(\"G:/Shared drives/UGS_Groundwater/Projects/Bryce/GIS/Bryce/Bryce.gdb\", \n driver='FileGDB', layer='damcurves_points')\nhypsometric_data",
"_____no_output_____"
]
],
[
[
"## Tropic Reservoir Curves",
"_____no_output_____"
],
[
"### Volume in Lake vs Lake Elevation Relationship - Tropic",
"_____no_output_____"
]
],
[
[
"hyps_data = hypsometric_data[(hypsometric_data['dam']==\"tropic new\")&(hypsometric_data['linetype']==\"capacity\")][['x','y']]\nhyps_old = hypsometric_data[(hypsometric_data['dam']==\"tropic old\")&(hypsometric_data['linetype']==\"capacity\")][['x','y']]\n\n# define the true objective function\ndef objective(x, a, b, c, d, e, f):\n return (a * x) + (b * x**2) + (c * x**3) + (d * x**4) + (e * x**5) + f\n\nx = hyps_data['y']\nyn = hyps_data['x']*100\n\nvpopt, pcov = curve_fit(objective, x, yn, maxfev = 40000)\nsterr = np.sqrt(np.diag(pcov))\na, b, c, d, e, f = vpopt\n\nxb = np.arange(np.min(x),np.max(x),0.01)\n\n# calculate the output for the range\nyb = objective(xb, a, b, c, d, e, f)\n\nplt.plot(xb, yb, label= \"modeled polynomial\",color='green')\nplt.plot(hyps_data['y'], hyps_data['x']*100, label='Tropic Dam Capacity')\nplt.xlabel('Elevation (ft)')\nplt.ylabel('Volume (ac-ft)')\nplt.grid()\n#plt.ylim(0,3500)\nplt.legend()\n\n\n",
"C:\\Users\\PAULIN~1\\AppData\\Local\\Temp/ipykernel_13004/3262229987.py:12: RuntimeWarning: invalid value encountered in sqrt\n sterr = np.sqrt(np.diag(pcov))\n"
]
],
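A small usage sketch (not part of the original notebook): once `curve_fit` has returned `vpopt`, the same `objective` function converts any lake elevation into a storage volume. The evaluation point below is just the midpoint of the digitized elevation range, chosen so the polynomial stays inside the range it was fit on; it is not a real gage reading.

```python
# Evaluate the fitted Tropic capacity curve at an elevation inside the digitized range.
# `x`, `objective`, and `vpopt` are assumed to still be in scope from the cell above.
mid_elev_ft = (np.min(x) + np.max(x)) / 2
est_volume_acft = objective(mid_elev_ft, *vpopt)
print(f"Estimated storage at {mid_elev_ft:.1f} ft: {est_volume_acft:.1f} ac-ft")
```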
[
[
"### Area of Open Water vs Lake Elevation Relationship - Tropic",
"_____no_output_____"
]
],
[
[
"hyps_data = hypsometric_data[(hypsometric_data['dam']==\"tropic new\")&(hypsometric_data['linetype']==\"area\")][['x','y']]\n\n# define the true objective function\ndef objective(x, a, b, c, d, e, f):\n return (a * x) + (b * x**2) + (c * x**3) + (d * x**4) + (e * x**5) + f\n\nx = hyps_data['y']\nyn = hyps_data['x']*10\n\nvpopt, pcov = curve_fit(objective, x, yn, maxfev = 40000)\nsterr = np.sqrt(np.diag(pcov))\na, b, c, d, e, f = vpopt\n\nxb = np.arange(np.min(x),np.max(x),0.01)\n\n# calculate the output for the range\nyb = objective(xb, a, b, c, d, e, f)\n\nplt.plot(xb, yb, label= \"modeled polynomial\",color='green')\nplt.plot(hyps_data['y'], hyps_data['x']*10, label='Tropic area')\nplt.xlabel('Elevation (ft)')\nplt.ylabel('Area (acres)')\nplt.grid()\n#plt.ylim(0,3500)\nplt.legend()\nprint(f\"({a:0.3f} * x) + ({b:0.3f} * x^2) + ({c:0.3e} * x^3) + ({d:0.3e} * x^4) + ({e:0.3e} * x^5) + {f:0.3e}\")",
"(1.715 * x) + (0.564 * x^2) + (-1.164e-02 * x^3) + (2.217e-06 * x^4) + (-1.131e-10 * x^5) + 5.471e+08\n"
]
],
[
[
"### Discharge Outflow vs. Reservoir Level Elevation",
"_____no_output_____"
]
],
[
[
"hyps_data = hypsometric_data[(hypsometric_data['dam']=='tropic new')&(hypsometric_data['linetype']==\"outlet\")][['x','y']]\n\n# define the true objective function\ndef objective(x, a, b, c, d, e, f):\n return (a * x) + (b * x**2) + (c * x**3) + (d * x**4) + (e * x**5) + f\n\nx = hyps_data['y']\nyn = hyps_data['x']*2.5\n\nvpopt, pcov = curve_fit(objective, x, yn, maxfev = 40000)\nsterr = np.sqrt(np.diag(pcov))\na, b, c, d, e, f = vpopt\n\nxb = np.arange(np.min(x),np.max(x),0.01)\n\n# calculate the output for the range\nyb = objective(xb, a, b, c, d, e, f)\n\nplt.plot(xb, yb, label= \"modeled polynomial\",color='green')\nplt.plot(hyps_data['y'], hyps_data['x']*2.5, label='Tropic Dam Outflow')\nplt.xlabel('Elevation (ft)')\nplt.ylabel('Area (acres)')\nplt.grid()\n#plt.ylim(0,3500)\nplt.legend()\nprint(f\"({a:0.3f} * x) + ({b:0.3f} * x^2) + ({c:0.3e} * x^3) + ({d:0.3e} * x^4) + ({e:0.3e} * x^5) + {f:0.3e}\")",
"(0.961 * x) + (1.690 * x^2) + (4.169e-01 * x^3) + (-7.995e-05 * x^4) + (4.086e-09 * x^5) + -2.002e+10\n"
],
[
"10/4",
"_____no_output_____"
]
],
[
[
"## Pine Lake Reservoir Curves",
"_____no_output_____"
],
[
"### Volume in Lake vs Lake Elevation Relationship",
"_____no_output_____"
]
],
[
[
"hyps_data = hypsometric_data[(hypsometric_data['dam']==\"pine\")&(hypsometric_data['linetype']==\"capacity\")][['x','y']]\n\n# define the true objective function\ndef objective(x, a, b, c, d, e, f):\n return (a * x) + (b * x**2) + (c * x**3) + (d * x**4) + (e * x**5) + f\n\nx = hyps_data['y']\nyn = hyps_data['x']*100\n\nvpopt, pcov = curve_fit(objective, x, yn, maxfev = 40000)\nsterr = np.sqrt(np.diag(pcov))\na, b, c, d, e, f = vpopt\n\nxb = np.arange(np.min(x),np.max(x),0.01)\n\n# calculate the output for the range\nyb = objective(xb, a, b, c, d, e, f)\n\nplt.plot(xb, yb, label= \"modeled polynomial\",color='green')\nplt.plot(hyps_data['y'], hyps_data['x']*100, label='Pine Dam Capacity')\nplt.xlabel('Elevation (ft)')\nplt.ylabel('Volume (ac-ft)')\nplt.grid()\n#plt.ylim(0,3500)\nplt.legend()\nprint(f\"({a:0.3f} * x) + ({b:0.3f} * x^2) + ({c:0.3e} * x^3) + ({d:0.3e} * x^4) + ({e:0.3e} * x^5) + {f:0.3e}\")",
"(17.173 * x) + (56.737 * x^2) + (-5.420e-03 * x^3) + (-3.290e-07 * x^4) + (3.939e-11 * x^5) + -7.995e+08\n"
]
],
[
[
"### Area of Open Water vs Lake Elevation Relationship",
"_____no_output_____"
]
],
[
[
"hyps_data = hypsometric_data[(hypsometric_data['dam']==\"pine\")&(hypsometric_data['linetype']==\"area\")][['x','y']]\n\n# define the true objective function\ndef objective(x, a, b, c, d, e, f):\n return (a * x) + (b * x**2) + (c * x**3) + (d * x**4) + (e * x**5) + f\n\nx = hyps_data['y']\nyn = hyps_data['x']*7.5\n\nvpopt, pcov = curve_fit(objective, x, yn, maxfev = 40000)\nsterr = np.sqrt(np.diag(pcov))\na, b, c, d, e, f = vpopt\n\nxb = np.arange(np.min(x),np.max(x),0.01)\n\n# calculate the output for the range\nyb = objective(xb, a, b, c, d, e, f)\n\nplt.plot(xb, yb, label= \"modeled polynomial\",color='green')\nplt.plot(hyps_data['y'], hyps_data['x']*7.5, label='Pine Dam Capacity')\nplt.xlabel('Elevation (ft)')\nplt.ylabel('Area (acres)')\nplt.grid()\n#plt.ylim(0,3500)\nplt.legend()\nprint(f\"({a:0.3f} * x) + ({b:0.3f} * x^2) + ({c:0.3e} * x^3) + ({d:0.3e} * x^4) + ({e:0.3e} * x^5) + {f:0.3e}\")",
"(-0.111 * x) + (-7.352 * x^2) + (5.350e-04 * x^3) + (7.232e-08 * x^4) + (-6.503e-12 * x^5) + 1.135e+08\n"
]
],
[
[
"### Discharge Outflow vs. Reservoir Level Elevation",
"_____no_output_____"
]
],
[
[
"hyps_data = hypsometric_data[pd.isna(hypsometric_data['dam'])&(hypsometric_data['linetype']==\"outlet\")][['x','y']]\n\n# define the true objective function\ndef objective(x, a, b, c, d, e, f):\n return (a * x) + (b * x**2) + (c * x**3) + (d * x**4) + (e * x**5) + f\n\nx = hyps_data['y']\nyn = hyps_data['x']\n\nvpopt, pcov = curve_fit(objective, x, yn, maxfev = 40000)\nsterr = np.sqrt(np.diag(pcov))\na, b, c, d, e, f = vpopt\n\nxb = np.arange(np.min(x),np.max(x),0.01)\n\n# calculate the output for the range\nyb = objective(xb, a, b, c, d, e, f)\n\nplt.plot(xb, yb, label= \"modeled polynomial\",color='green')\nplt.plot(hyps_data['y'], hyps_data['x'], label='Pine Dam Outflow')\nplt.xlabel('Elevation (ft)')\nplt.ylabel('Area (acres)')\nplt.grid()\n#plt.ylim(0,3500)\nplt.legend()\nprint(f\"({a:0.3f} * x) + ({b:0.3f} * x^2) + ({c:0.3e} * x^3) + ({d:0.3e} * x^4) + ({e:0.3e} * x^5) + {f:0.3e}\")",
"(1.021 * x) + (2.695 * x^2) + (-2.394e-03 * x^3) + (3.874e-07 * x^4) + (-1.840e-11 * x^5) + 6.952e+07\n"
]
],
[
[
"# Import Data From Dam Safty Website and Parse It",
"_____no_output_____"
],
[
"Dam operators regularly report data from dam piezometers, and also report dam flow and dam levels.",
"_____no_output_____"
],
[
"These data are pulled from an Excel Workbook that can pull data from the website using a programmed query. Make sure to update the data in the workbook periodically to ensure we capture all of the data.",
"_____no_output_____"
],
[
"## Pine Lake",
"_____no_output_____"
],
[
"Links to Water Rights Data:<br>\n[Pine Dam Information](https://www.waterrights.utah.gov/cgi-bin/damview.exe?Modinfo=Viewdam&DAM_NUMBER=UT00246)<br>\n[Pine data site](https://www.waterrights.utah.gov/cgi-bin/damview.exe?Modinfo=ViewMonitorData&DAM_NUMBER=UT00246)\n",
"_____no_output_____"
]
],
[
[
"pine = pd.read_excel(\"G:/Shared drives/UGS_Groundwater/Projects/Bryce/Dams/Live_Dam_Data.xlsx\",\"Pine Lake\",\n skiprows=[1],parse_dates=True,index_col=0)",
"_____no_output_____"
],
[
"pine.head()",
"_____no_output_____"
],
[
"pine['Res. Elev'].plot(marker='o')\nplt.ylim(7980,8000)\nplt.ylabel('Reservoir Elevation (ft)')\n",
"_____no_output_____"
]
],
[
[
"## Tropic Reservoir",
"_____no_output_____"
],
[
"[Tropic Dam Information](https://www.waterrights.utah.gov/cgi-bin/damview.exe?Modinfo=Viewdam&DAM_NUMBER=UT00302)<br>\n[Tropic Dam Data](https://www.waterrights.utah.gov/cgi-bin/damview.exe?Modinfo=ViewMonitorData&DAM_NUMBER=UT00302) \n",
"_____no_output_____"
]
],
[
[
"tropic = pd.read_excel(\"G:/Shared drives/UGS_Groundwater/Projects/Bryce/Dams/Live_Dam_Data.xlsx\",\n \"Tropic\",skiprows=[1],index_col=0,parse_dates=True)",
"_____no_output_____"
],
[
"tropic.head()",
"_____no_output_____"
],
[
"tropic['Reservoir Level'].plot(marker=\"o\")\nplt.ylabel('Reservoir Elevation (ft)')\nax2 = plt.twinx()\ntropic['2-inch drain gpm'].plot(color='green',ax=ax2)\nax2.set_ylabel('Outflow (gpm)')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbb975d3d50dabd782d378924573da182323bb87
| 161,171 |
ipynb
|
Jupyter Notebook
|
Ex3/My Document Retrieval.ipynb
|
chcorophyll/UW-ml-foundations-notes
|
99b3d38c97c77f7ee0647dbfd77a6033c85cd850
|
[
"MIT"
] | 11 |
2018-03-30T06:29:18.000Z
|
2021-10-01T03:56:03.000Z
|
Ex3/My Document Retrieval.ipynb
|
chcorophyll/UW-ml-foundations-notes
|
99b3d38c97c77f7ee0647dbfd77a6033c85cd850
|
[
"MIT"
] | null | null | null |
Ex3/My Document Retrieval.ipynb
|
chcorophyll/UW-ml-foundations-notes
|
99b3d38c97c77f7ee0647dbfd77a6033c85cd850
|
[
"MIT"
] | 5 |
2018-03-27T11:35:04.000Z
|
2019-09-26T13:27:20.000Z
| 64.805388 | 53,351 | 0.5799 |
[
[
[
"import graphlab",
"_____no_output_____"
]
],
[
[
"# Load some text data - from wikipedia, page on people",
"_____no_output_____"
]
],
[
[
"people = graphlab.SFrame('people_wiki.gl/')",
"This non-commercial license of GraphLab Create for academic use is assigned to [email protected] and will expire on October 28, 2018.\n"
],
[
"people.head()",
"_____no_output_____"
],
[
"len(people)",
"_____no_output_____"
]
],
[
[
"# Explore the dataset and checkout the text it contains",
"_____no_output_____"
]
],
[
[
"obama = people[people['name'] == 'Barack Obama']",
"_____no_output_____"
],
[
"obama",
"_____no_output_____"
],
[
"obama['text']",
"_____no_output_____"
],
[
"clooney = people[people['name'] == 'George Clonney']",
"_____no_output_____"
]
],
[
[
"# Get the word counts for Obama article",
"_____no_output_____"
]
],
[
[
"obama['word_count'] = graphlab.text_analytics.count_words(obama['text'])",
"_____no_output_____"
],
[
"print obama['word_count']",
"[{'operations': 1, 'represent': 1, 'office': 2, 'unemployment': 1, 'is': 2, 'doddfrank': 1, 'over': 1, 'unconstitutional': 1, 'domestic': 2, 'named': 1, 'ending': 1, 'ended': 1, 'proposition': 1, 'seats': 1, 'graduate': 1, 'worked': 1, 'before': 1, 'death': 1, '20': 2, 'taxpayer': 1, 'inaugurated': 1, 'obamacare': 1, 'civil': 1, 'mccain': 1, 'to': 14, '4': 1, 'policy': 2, '8': 1, 'has': 4, '2011': 3, '2010': 2, '2013': 1, '2012': 1, 'bin': 1, 'then': 1, 'his': 11, 'march': 1, 'gains': 1, 'cuba': 1, 'californias': 1, '1992': 1, 'new': 1, 'not': 1, 'during': 2, 'years': 1, 'continued': 1, 'presidential': 2, 'husen': 1, 'osama': 1, 'term': 3, 'equality': 1, 'prize': 1, 'lost': 1, 'stimulus': 1, 'january': 3, 'university': 2, 'rights': 1, 'gun': 1, 'republican': 2, 'rodham': 1, 'troop': 1, 'withdrawal': 1, 'involvement': 3, 'response': 3, 'where': 1, 'referred': 1, 'affordable': 1, 'attorney': 1, 'school': 3, 'senate': 3, 'house': 2, 'national': 2, 'creation': 1, 'related': 1, 'hawaii': 1, 'born': 2, 'second': 2, 'street': 1, 'election': 3, 'close': 1, 'operation': 1, 'insurance': 1, 'sandy': 1, 'afghanistan': 2, 'initiatives': 1, 'for': 4, 'reform': 1, 'federal': 1, 'review': 1, 'representatives': 2, 'debate': 1, 'current': 1, 'state': 1, 'won': 1, 'marriage': 1, 'victory': 1, 'unsuccessfully': 1, 'reauthorization': 1, 'keynote': 1, 'full': 1, 'patient': 1, 'august': 1, 'degree': 1, '44th': 1, 'bm': 1, 'mitt': 1, 'attention': 1, 'delegates': 1, 'lgbt': 1, 'job': 1, 'protection': 2, 'address': 1, 'ask': 1, 'november': 2, 'debt': 1, 'by': 1, 'care': 1, 'on': 2, 'great': 1, 'defense': 1, 'signed': 3, 'libya': 1, 'receive': 1, 'of': 18, 'months': 1, 'against': 1, 'foreign': 2, 'spending': 1, 'american': 3, 'harvard': 2, 'act': 8, 'military': 4, 'hussein': 1, 'or': 1, 'first': 3, 'and': 21, 'major': 1, 'clinton': 1, '1997': 1, 'campaign': 3, 'russia': 1, 'wall': 1, 'legislation': 1, 'into': 1, 'primary': 2, 'community': 1, 'three': 1, 'down': 1, 'hook': 1, 'ii': 1, '63': 1, 'americans': 1, 'elementary': 1, 'total': 1, 'earning': 1, 'often': 1, 'barack': 1, 'law': 6, 'from': 3, 'raise': 1, 'district': 1, 'representing': 1, 'nine': 1, 'reinvestment': 1, 'arms': 1, 'relations': 1, 'nobel': 1, 'start': 1, 'dont': 2, 'tell': 1, 'iraq': 4, 'convention': 1, 'strike': 1, 'served': 2, 'john': 1, 'was': 5, 'war': 1, 'form': 1, 'that': 1, 'tax': 1, 'sufficient': 1, 'republicans': 1, 'resulted': 1, 'hillary': 1, 'taught': 1, 'honolulu': 1, 'filed': 1, 'regained': 1, 'july': 1, 'hold': 1, 'with': 3, 'he': 7, '13th': 1, 'made': 1, 'brk': 1, '1996': 1, 'whether': 1, 'reelected': 1, 'budget': 1, 'us': 6, 'nations': 1, 'recession': 1, 'while': 1, 'economic': 1, 'limit': 1, 'policies': 1, 'promoted': 1, 'called': 1, 'at': 2, 'control': 4, 'supreme': 1, 'ordered': 3, 'nominee': 2, 'process': 1, '2000in': 1, '2012obama': 1, 'received': 1, 'romney': 1, 'briefs': 1, 'defeated': 1, 'general': 1, 'states': 3, 'as': 6, 'urged': 1, 'in': 30, 'sought': 1, 'organizer': 1, 'shooting': 1, 'increased': 1, 'normalize': 1, 'lengthy': 1, 'united': 3, 'court': 1, 'recovery': 1, 'laden': 1, 'laureateduring': 1, 'peace': 1, 'administration': 1, '1961': 1, 'illinois': 2, 'other': 1, 'which': 1, 'party': 3, 'primaries': 1, 'sworn': 1, '2007': 1, 'obama': 9, 'columbia': 1, 'combat': 1, 'after': 4, 'islamic': 1, 'running': 1, 'levels': 1, 'two': 1, 'included': 1, 'president': 4, 'repeal': 1, 'nomination': 1, 'the': 40, 'a': 7, '2009': 3, 'chicago': 2, 'constitutional': 1, 'defeating': 1, 'treaty': 1, 'relief': 2, '2004': 3, 'african': 
1, '2008': 1, 'democratic': 4, 'consumer': 1, 'began': 1, 'terms': 1}]\n"
]
],
[
[
"## Sort the word count for the Obama article",
"_____no_output_____"
]
],
[
[
"obama_word_count_table = obama[['word_count']].stack('word_count', new_column_name = ['word', 'count'])",
"_____no_output_____"
],
[
"obama_word_count_table",
"_____no_output_____"
],
[
"obama_word_count_table.sort('count', ascending=False)",
"_____no_output_____"
]
],
[
[
"# Compute TF-IDF for the corpus",
"_____no_output_____"
]
],
[
[
"people['word_count'] = graphlab.text_analytics.count_words(people['text'])",
"_____no_output_____"
],
[
"people.head()",
"_____no_output_____"
],
[
"tfidf = graphlab.text_analytics.tf_idf(people['word_count'])",
"_____no_output_____"
],
[
"tfidf.head()",
"_____no_output_____"
],
[
"people['tfidf'] = tfidf",
"_____no_output_____"
]
],
[
[
"## Examine the TF-IDF for the Obama article",
"_____no_output_____"
]
],
[
[
"obama = people[people['name'] == 'Barack Obama']",
"_____no_output_____"
],
[
"obama[['tfidf']].stack('tfidf', new_column_name=['word', 'tfidf']).sort('tfidf', ascending=False)",
"_____no_output_____"
]
],
[
[
"# Manually compute distances between a few people",
"_____no_output_____"
]
],
[
[
"clinton = people[people['name'] == 'Bill Clinton']",
"_____no_output_____"
],
[
"backham = people[people['name'] == 'David Beckham']",
"_____no_output_____"
]
],
[
[
"## Is Obama closer to Clinton than to Beckham?",
"_____no_output_____"
]
],
[
[
"graphlab.distances.cosine(obama['tfidf'][0], clinton['tfidf'][0])",
"_____no_output_____"
],
[
"graphlab.distances.cosine(obama['tfidf'][0], backham['tfidf'][0])",
"_____no_output_____"
]
],
[
[
"# Build a nearest neighbor model for document retrieval",
"_____no_output_____"
]
],
[
[
"knn_model = graphlab.nearest_neighbors.create(people, features=['tfidf'],label='name')",
"_____no_output_____"
]
],
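GraphLab Create is no longer maintained, so as a hedged aside (not the course's method) the same TF-IDF plus cosine nearest-neighbor retrieval can be reproduced with scikit-learn. The `people_wiki.csv` path and the `name`/`text` column names below are assumptions mirroring the SFrame used above.

```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors

# Assumed CSV export of the people_wiki data with 'name' and 'text' columns.
people_df = pd.read_csv('people_wiki.csv')
tfidf_matrix = TfidfVectorizer().fit_transform(people_df['text'])

# Cosine-distance nearest neighbors over the TF-IDF vectors.
nn = NearestNeighbors(metric='cosine').fit(tfidf_matrix)
obama_row = people_df.index[people_df['name'] == 'Barack Obama'][0]
_, neighbor_idx = nn.kneighbors(tfidf_matrix[obama_row], n_neighbors=5)
print(people_df['name'].iloc[neighbor_idx[0]].tolist())
```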
[
[
"# Applying the nearest-neighbors model for retrieval",
"_____no_output_____"
],
[
"## Who is closest to Obama?",
"_____no_output_____"
]
],
[
[
"knn_model.query(obama)",
"_____no_output_____"
]
],
[
[
"## Other examples of document retrieval",
"_____no_output_____"
]
],
[
[
"swift = people[people['name'] == 'Taylor Swift']",
"_____no_output_____"
],
[
"knn_model.query(swift)",
"_____no_output_____"
],
[
"arnold = people[people['name'] == 'Arnold Schwarzenegger']",
"_____no_output_____"
],
[
"knn_model.query(arnold)",
"_____no_output_____"
]
],
[
[
"# Ex4",
"_____no_output_____"
],
[
"## Compare top words according to word counts to TF-IDF",
"_____no_output_____"
]
],
[
[
"john = people[people['name'] == 'Elton John']",
"_____no_output_____"
],
[
"john_word_count_table = john[['word_count']].stack('word_count', new_column_name = ['word', 'count']).sort('count', ascending=False)",
"_____no_output_____"
],
[
"john_word_count_table.head()",
"_____no_output_____"
],
[
"john[['tfidf']].stack('tfidf', new_column_name=['word', 'tfidf']).sort('tfidf', ascending=False)",
"_____no_output_____"
]
],
[
[
"## Measuring distance",
"_____no_output_____"
]
],
[
[
"victoria = people[people['name'] == 'Victoria Beckham']",
"_____no_output_____"
],
[
"paul = people[people['name'] == 'Paul McCartney']",
"_____no_output_____"
],
[
"graphlab.distances.cosine(john['tfidf'][0], victoria['tfidf'][0])",
"_____no_output_____"
],
[
"graphlab.distances.cosine(john['tfidf'][0], paul['tfidf'][0])",
"_____no_output_____"
]
],
[
[
"## Building nearest neighbors models with different input features and setting the distance metric",
"_____no_output_____"
]
],
[
[
"word_count_model = graphlab.nearest_neighbors.create(people, features=['word_count'],label='name',distance='cosine')",
"_____no_output_____"
],
[
"tfidf_model = graphlab.nearest_neighbors.create(people, features=['tfidf'],label='name',distance='cosine')",
"_____no_output_____"
],
[
"word_count_model.query(john)",
"_____no_output_____"
],
[
"tfidf_model.query(john)",
"_____no_output_____"
],
[
"word_count_model.query(victoria)",
"_____no_output_____"
],
[
"tfidf_model.query(victoria)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbb97b744b950dfa0c0d69169e149108f1447943
| 10,655 |
ipynb
|
Jupyter Notebook
|
Coursera/Intro to TensorFlow/Week-2/Example/c_dataset.ipynb
|
manipiradi/Online-Courses-Learning
|
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
|
[
"MIT"
] | 331 |
2019-10-22T09:06:28.000Z
|
2022-03-27T13:36:03.000Z
|
Coursera/Intro to TensorFlow/Week-2/Example/c_dataset.ipynb
|
manipiradi/Online-Courses-Learning
|
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
|
[
"MIT"
] | 8 |
2020-04-10T07:59:06.000Z
|
2022-02-06T11:36:47.000Z
|
Coursera/Intro to TensorFlow/Week-2/Example/c_dataset.ipynb
|
manipiradi/Online-Courses-Learning
|
2a4ce7590d1f6d1dfa5cfde632660b562fcff596
|
[
"MIT"
] | 572 |
2019-07-28T23:43:35.000Z
|
2022-03-27T22:40:08.000Z
| 39.906367 | 628 | 0.632942 |
[
[
[
"<h1> 2c. Loading large datasets progressively with the tf.data.Dataset </h1>\n\nIn this notebook, we continue reading the same small dataset, but refactor our ML pipeline in two small, but significant, ways:\n<ol>\n<li> Refactor the input to read data from disk progressively.\n<li> Refactor the feature creation so that it is not one-to-one with inputs.\n</ol>\n<br/>\nThe Pandas function in the previous notebook first read the whole data into memory -- on a large dataset, this won't be an option.",
"_____no_output_____"
]
],
[
[
"import datalab.bigquery as bq\nimport tensorflow as tf\nimport numpy as np\nimport shutil\nprint(tf.__version__)",
"/usr/local/envs/py3env/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n"
]
],
[
[
"<h2> 1. Refactor the input </h2>\n\nRead data created in Lab1a, but this time make it more general, so that we can later handle large datasets. We use the Dataset API for this. It ensures that, as data gets delivered to the model in mini-batches, it is loaded from disk only when needed.",
"_____no_output_____"
]
],
[
[
"CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']\nDEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]\n\ndef read_dataset(filename, mode, batch_size = 512):\n def decode_csv(row):\n columns = tf.decode_csv(row, record_defaults = DEFAULTS)\n features = dict(zip(CSV_COLUMNS, columns))\n features.pop('key') # discard, not a real feature\n label = features.pop('fare_amount') # remove label from features and store\n return features, label\n\n # Create list of file names that match \"glob\" pattern (i.e. data_file_*.csv)\n filenames_dataset = tf.data.Dataset.list_files(filename)\n # Read lines from text files\n textlines_dataset = filenames_dataset.flat_map(tf.data.TextLineDataset)\n # Parse text lines as comma-separated values (CSV)\n dataset = textlines_dataset.map(decode_csv)\n\n # Note:\n # use tf.data.Dataset.flat_map to apply one to many transformations (here: filename -> text lines)\n # use tf.data.Dataset.map to apply one to one transformations (here: text line -> feature list)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None # loop indefinitely\n dataset = dataset.shuffle(buffer_size = 10 * batch_size)\n else:\n num_epochs = 1 # end-of-input after this\n\n dataset = dataset.repeat(num_epochs).batch(batch_size)\n\n return dataset\n\ndef get_train_input_fn():\n return read_dataset('./taxi-train.csv', mode = tf.estimator.ModeKeys.TRAIN)\n\ndef get_valid_input_fn():\n return read_dataset('./taxi-valid.csv', mode = tf.estimator.ModeKeys.EVAL)",
"_____no_output_____"
]
],
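As a quick sanity check (not part of the lab), the refactored input function can be exercised directly to confirm that mini-batches really are decoded from disk on demand. A minimal sketch, assuming the TF 1.x graph-mode API this notebook already uses and that `taxi-train.csv` is present:

```python
# Pull a single batch from the training input function and inspect it.
ds = get_train_input_fn()
features, label = ds.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    feat_vals, label_vals = sess.run([features, label])
    print({name: vals[:3] for name, vals in feat_vals.items()})
    print(label_vals[:3])
```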
[
[
"<h2> 2. Refactor the way features are created. </h2>\n\nFor now, pass these through (same as previous lab). However, refactoring this way will enable us to break the one-to-one relationship between inputs and features.",
"_____no_output_____"
]
],
[
[
"INPUT_COLUMNS = [\n tf.feature_column.numeric_column('pickuplon'),\n tf.feature_column.numeric_column('pickuplat'),\n tf.feature_column.numeric_column('dropofflat'),\n tf.feature_column.numeric_column('dropofflon'),\n tf.feature_column.numeric_column('passengers'),\n]\n\ndef add_more_features(feats):\n # Nothing to add (yet!)\n return feats\n\nfeature_cols = add_more_features(INPUT_COLUMNS)",
"_____no_output_____"
]
],
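To make the "not one-to-one with inputs" point concrete, here is an illustrative (not part of this lab) version of `add_more_features` that derives an engineered column from one of the raw inputs instead of passing everything through unchanged:

```python
# Illustrative only: bucketize pickup latitude so a single raw input contributes
# an extra, engineered feature column. The bucket boundaries are arbitrary.
def add_more_features(feats):
    lat_buckets = tf.feature_column.bucketized_column(
        tf.feature_column.numeric_column('pickuplat'),
        boundaries=np.arange(40.5, 41.0, 0.05).tolist())
    return feats + [lat_buckets]

feature_cols = add_more_features(INPUT_COLUMNS)
```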
[
[
"<h2> Create and train the model </h2>\n\nNote that we train for num_steps * batch_size examples.",
"_____no_output_____"
]
],
[
[
"tf.logging.set_verbosity(tf.logging.INFO)\nOUTDIR = 'taxi_trained'\nshutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time\nmodel = tf.estimator.LinearRegressor(\n feature_columns = feature_cols, model_dir = OUTDIR)\nmodel.train(input_fn = get_train_input_fn, steps = 200)",
"INFO:tensorflow:Using default config.\nINFO:tensorflow:Using config: {'_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f6fbcc45240>, '_session_config': None, '_model_dir': 'taxi_trained', '_task_type': 'worker', '_keep_checkpoint_every_n_hours': 10000, '_keep_checkpoint_max': 5, '_tf_random_seed': None, '_num_ps_replicas': 0, '_is_chief': True, '_log_step_count_steps': 100, '_save_checkpoints_steps': None, '_service': None, '_master': '', '_save_checkpoints_secs': 600, '_train_distribute': None, '_num_worker_replicas': 1, '_global_id_in_cluster': 0, '_task_id': 0, '_save_summary_steps': 100, '_evaluation_master': ''}\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 1 into taxi_trained/model.ckpt.\nINFO:tensorflow:step = 1, loss = 108723.11\nINFO:tensorflow:global_step/sec: 41.5945\nINFO:tensorflow:step = 101, loss = 54748.547 (2.409 sec)\nINFO:tensorflow:Saving checkpoints for 200 into taxi_trained/model.ckpt.\nINFO:tensorflow:Loss for final step: 46206.555.\n"
]
],
[
[
"<h3> Evaluate model </h3>\n\nAs before, evaluate on the validation data. We'll do the third refactoring (to move the evaluation into the training loop) in the next lab.",
"_____no_output_____"
]
],
[
[
"metrics = model.evaluate(input_fn = get_valid_input_fn, steps = None)\nprint('RMSE on dataset = {}'.format(np.sqrt(metrics['average_loss'])))",
"INFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Starting evaluation at 2018-11-21-05:47:06\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from taxi_trained/model.ckpt-200\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Finished evaluation at 2018-11-21-05:47:06\nINFO:tensorflow:Saving dict for global step 200: average_loss = 109.31021, global_step = 200, loss = 45500.375\nRMSE on dataset = 10.45515251159668\n"
]
],
[
[
"## Challenge Exercise\n\nCreate a neural network that is capable of finding the volume of a cylinder given the radius of its base (r) and its height (h). Assume that the radius and height of the cylinder are both in the range 0.5 to 2.0. Unlike in the challenge exercise for b_estimator.ipynb, assume that your measurements of r, h and V are all rounded off to the nearest 0.1. Simulate the necessary training dataset. This time, you will need a lot more data to get a good predictor.\n\nHint (highlight to see):\n<p style='color:white'>\nCreate random values for r and h and compute V. Then, round off r, h and V (i.e., the volume is computed from the true value of r and h; it's only your measurement that is rounded off). Your dataset will consist of the round values of r, h and V. Do this for both the training and evaluation datasets.\n</p>\n\nNow modify the \"noise\" so that instead of just rounding off the value, there is up to a 10% error (uniformly distributed) in the measurement followed by rounding off.",
"_____no_output_____"
],
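A minimal sketch of the simulation step the challenge describes, assuming NumPy/pandas; the column names, sample sizes, and CSV file names are illustrative, not part of the course materials. True r and h are drawn uniformly from [0.5, 2.0], V = pi * r^2 * h is computed from the true values, and only the recorded measurements get the up-to-10% uniform error followed by rounding to the nearest 0.1.

```python
import numpy as np
import pandas as pd

def make_cylinder_dataset(n, seed):
    rng = np.random.RandomState(seed)
    r_true = rng.uniform(0.5, 2.0, n)
    h_true = rng.uniform(0.5, 2.0, n)
    v_true = np.pi * r_true ** 2 * h_true
    # Up to 10% uniformly distributed measurement error, then round to nearest 0.1.
    measure = lambda vals: np.round(vals * rng.uniform(0.9, 1.1, n), 1)
    return pd.DataFrame({'r': measure(r_true), 'h': measure(h_true), 'v': measure(v_true)})

make_cylinder_dataset(100000, seed=1).to_csv('cylinder-train.csv', index=False)
make_cylinder_dataset(20000, seed=2).to_csv('cylinder-valid.csv', index=False)
```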
[
"Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
cbb97cd38a23a1845e620ab3e1c3da797a52d60b
| 152,566 |
ipynb
|
Jupyter Notebook
|
code/evo_tune_201023/generate_embedding_seq2seq.ipynb
|
steveyu323/motor_embedding
|
65b05e024ca5a0aa339330eff6b63927af5ce4aa
|
[
"MIT"
] | null | null | null |
code/evo_tune_201023/generate_embedding_seq2seq.ipynb
|
steveyu323/motor_embedding
|
65b05e024ca5a0aa339330eff6b63927af5ce4aa
|
[
"MIT"
] | null | null | null |
code/evo_tune_201023/generate_embedding_seq2seq.ipynb
|
steveyu323/motor_embedding
|
65b05e024ca5a0aa339330eff6b63927af5ce4aa
|
[
"MIT"
] | null | null | null | 115.405446 | 2,029 | 0.871302 |
[
[
[
"# Documentation\n> 201025: This notebook generate embedding vectors for pfam_motors, df_dev, and motor_toolkit from the models that currently finished training:\n - lstm5 \n - evotune_lstm_5_balanced.pt \n - evotune_lstm_5_balanced_target.pt \n - mini_lstm_5_balanced.pt \n - mini_lstm_5_balanced_target.pt \n - transformer_encoder \n - evotune_seq2seq_encoder_balanced.pt \n - evotune_seq2seq_encoder_balanced_target.pt \n - mini_seq2seq_encoder_balanced.pt \n - mini_seq2seq_encoder_balanced_target.pt \n - seq2seq_attention_mini \n - transformer_encoder_201025.pt \n - evotune_transformerencoder_balanced.pt \n - evotune_transformerencoder_balanced_target.pt \n - mini_evotune_transformerencoder_balanced.pt \n - mini_evotune_transformerencoder_balanced_target.pt \n \n\n- output for motor_toolkit,pfamA_random, and pfamA_motors\n",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn \nimport torch.optim as optim \n\nimport torchvision \nimport torchvision.transforms as transforms \nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset, IterableDataset, DataLoader\n# import tqdm\nimport numpy as np\nimport pandas as pd\n\nimport math\nseed = 7\ntorch.manual_seed(seed)\nnp.random.seed(seed)",
"_____no_output_____"
],
[
"pfamA_motors = pd.read_csv(\"../../data/pfamA_motors.csv\")\npfamA_random = pd.read_csv(\"../../data/pfamA_random_201027.csv\")\nmotor_toolkit = pd.read_csv(\"../../data/motor_tookits.csv\")\n\npfamA_motors_balanced = pfamA_motors.groupby('clan').apply(lambda _df: _df.sample(4500,random_state=1))\npfamA_motors_balanced = pfamA_motors_balanced.apply(lambda x: x.reset_index(drop = True))\n\npfamA_target_name = [\"PF00349\",\"PF00022\",\"PF03727\",\"PF06723\",\\\n \"PF14450\",\"PF03953\",\"PF12327\",\"PF00091\",\"PF10644\",\\\n \"PF13809\",\"PF14881\",\"PF00063\",\"PF00225\",\"PF03028\"]\n\npfamA_target = pfamA_motors.loc[pfamA_motors[\"pfamA_acc\"].isin(pfamA_target_name),:]\n\n\naminoacid_list = [\n 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'\n]\nclan_list = [\"actin_like\",\"tubulin_c\",\"tubulin_binding\",\"p_loop_gtpase\"]\n \naa_to_ix = dict(zip(aminoacid_list, np.arange(1, 21)))\nclan_to_ix = dict(zip(clan_list, np.arange(0, 4)))\n\ndef word_to_index(seq,to_ix):\n \"Returns a list of indices (integers) from a list of words.\"\n return [to_ix.get(word, 0) for word in seq]\n\nix_to_aa = dict(zip(np.arange(1, 21), aminoacid_list))\nix_to_clan = dict(zip(np.arange(0, 4), clan_list))\n\ndef index_to_word(ixs,ix_to): \n \"Returns a list of words, given a list of their corresponding indices.\"\n return [ix_to.get(ix, 'X') for ix in ixs]\n\ndef prepare_sequence(seq):\n idxs = word_to_index(seq[0:-1],aa_to_ix)\n return torch.tensor(idxs, dtype=torch.long)\n\ndef prepare_labels(seq):\n idxs = word_to_index(seq[1:],aa_to_ix)\n return torch.tensor(idxs, dtype=torch.long)\n\ndef prepare_eval(seq):\n idxs = word_to_index(seq[:],aa_to_ix)\n return torch.tensor(idxs, dtype=torch.long)\n\nprepare_labels('YCHXXXXX')\n\n# set device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice",
"_____no_output_____"
],
[
"# Hyperparameters\ninput_size = len(aminoacid_list) + 1\nnum_layers = 1\nhidden_size = 64\noutput_size = len(aminoacid_list) + 1\nembedding_size= 10\nlearning_rate = 0.001\n\nclass s2s_Encoder(nn.Module):\n def __init__(self, input_size, embedding_size, hidden_size, num_layers, output_size, batch_first=False, bidirectional=True):\n super(s2s_Encoder, self).__init__()\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.num_layers = num_layers\n self.log_softmax = nn.LogSoftmax(dim= 1)\n self.batch_first = batch_first\n self.aa_embedding = nn.Embedding(input_size, embedding_size)\n self.rnn = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers,\n batch_first=batch_first, bidirectional=bidirectional)\n\n def forward(self, seq):\n # embed each aa to the embedded space\n embedding_tensor = self.aa_embedding(seq)\n #output of shape (seq_len, batch, num_directions * hidden_size):\n outputs, hidden = self.rnn(embedding_tensor.view(len(seq), 1, -1))\n # Return output and final hidden state\n return outputs, hidden",
"_____no_output_____"
],
[
"def generate_embedding_lstm(dict_file,dat,dat_name,out_path,out_dir,seq_col):\n # initialize network\n model = s2s_Encoder(input_size = input_size, \\\n embedding_size = embedding_size, \\\n hidden_size = hidden_size, \\\n num_layers = num_layers, \\\n output_size = output_size).to(device)\n model.load_state_dict(torch.load(dict_file))\n print(\"loaded dict file for weights \" + dict_file)\n print(\"output embedding for \" + dat_name)\n model.eval()\n hn_vector = []\n print_every = 1000\n for epoch in np.arange(0, dat.shape[0]): \n with torch.no_grad():\n seq = dat.iloc[epoch, seq_col]\n sentence_in = prepare_eval(seq)\n sentence_in = sentence_in.to(device = device)\n _, (hn,_) = model(sentence_in)\n hn_vector.append(hn.cpu().detach().numpy().reshape(1,-1))\n if epoch % print_every == 0:\n print(f\"At Epoch: %.2f\"% epoch)\n print(seq)\n hn_vector = np.array(hn_vector)\n hn_vector = np.squeeze(hn_vector, axis=1)\n print(hn_vector.shape)\n print(out_dir+dat_name+\"_\"+out_path)\n np.save(out_dir+dat_name+\"_\"+out_path, hn_vector)\n return \n\n",
"_____no_output_____"
],
[
"dict_files = [\"evotune_seq2seq_encoder_balanced.pt\",\"evotune_seq2seq_encoder_balanced_target.pt\",\"mini_seq2seq_encoder_balanced.pt\",\"mini_seq2seq_encoder_balanced_target.pt\"]\ndict_files = [\"../../data/201025/\"+dict_file for dict_file in dict_files]\ndict_files.append(\"../../data/first_try/seq2seq_encoder_df_dev_201012_230k.pt\")\ndict_files\n# \"../data/hn_lstm5_motortoolkit.npy\"\n",
"_____no_output_____"
],
[
"out_paths = [\"evotune_balanced.npy\",\"evotune_balanced_target.npy\",\"mini_balanced.npy\",\"mini_balanced_target.npy\",\"raw.npy\"]",
"_____no_output_____"
],
[
"out_dir = \"../../out/201027/embedding/seq2seq/\"\nout_paths",
"_____no_output_____"
],
[
"len(dict_files)==len(out_paths)",
"_____no_output_____"
],
[
"pfamA_target.iloc[1,3]",
"_____no_output_____"
],
[
"data = [pfamA_motors_balanced,pfamA_target,pfamA_random,motor_toolkit]\ndata_names = [\"pfamA_motors_balanced\", \"pfamA_target\" , \"pfamA_random\", \"motor_toolkit\"]\nseq_cols = [3,3,2,7]",
"_____no_output_____"
],
[
"for i in range(len(dict_files)):\n dict_file = dict_files[i]\n out_path = out_paths[i]\n for i in range(len(data)):\n dat = data[i]\n dat_name = data_names[i]\n seq_col = seq_cols[i]\n generate_embedding_lstm(dict_file,dat,dat_name,out_path,out_dir,seq_col)",
"loaded dict file for weights ../../data/201025/evotune_seq2seq_encoder_balanced.pt\noutput embedding for pfamA_motors_balanced\nAt Epoch: 0.00\nHQDNVHARSLMGLVRNVFEQAGLEKTALDAVAVSSGPGSYTGLRIGVSVAKGLAYALDKPVIGVGTLEALAFRAIPFSDSTDTIIPMLDARRMEVYALVMDGLGDTLISPQPFILEDNPFMEYLEKGKVFFLGDGVPKSKEILSHPNSRFVPLFNSSQSIGELAYKKFLKADFESLAYFEPNYIKEFRI\nAt Epoch: 1000.00\nLAAEARGDRAEAARILGAGAANLVGLLDIDRVVLGGRTVAADEDAYVRGVRAVIADRAARGAGGAHVTVTVADGGDRPVAEGAAQLVLA\nAt Epoch: 2000.00\nARKIGIDLGTTNLLICVDNKGILVDEPSIITVDATTKKCIAAGLDARDMLGRTPKNMICIRPLKDGVVADFEATDMMLNYFLKKCDLKGMFKKNVILICHPTKITSVEKNAIRDCAYRAGAKKVYLEEEPKIAALGAGLDIGKASGNMVLDIGGGTSDIAVLSLGDIVCSTSIKTAGNKITQDILENVRIQKKMYIGEQTADEIKRRIANALVVKEPETITISGRDVETGLPHSIDINSNEVESYIRSSLQEIVHATKTILEVTPPELAADIVQHGLVLTGGGALLKNLDQLMRNELQIPVYVAENALKCVVDGCTIMLQNL\nAt Epoch: 3000.00\nNSLPSGDQHKAQQLTADYLGALKRHLIDSLKNQLGEHHAKATPLQFILTVPAVWSDAAKEKTLQAAETAGLGQHAPILMISEPEAAATYVLFRKELGGLSTGDTFVVCDAGGGTVDLISYTIEQLEPALQVKEAAPGSGGLCGSTYLNRRFQEFLVTKLGQEEGFDNETVGDAMKKFDEEIKREYSPNVPNPNYWVPVPGLATNPRLGIRRNKMTLPPDDVREILKPVIDEVVQLVRKQIQSTEREVKAVLLVGGFGGSQYLLERLKETVTKATVILQ\nAt Epoch: 4000.00\nHIAVDIGGSLAKLVYFSRDPTSKELGGRLNFLKFETARIDECIDFLRKLKLKYEIINGSRPSDLCVMATGGGAFKYYDEIKGALEVEVVREDEMECLIIGLDFFITEIPHEVFTYSQEEPMRFIAARPNIYPYLLVNIGSGVSMVKVSGPRQYERVGGTSLGGGTLWGLLSLLTGARTFEDMLSLAERGDNTAVDMLVGDIYGSGYGKIGLKSTTIASSFGKVYKMKRQAEQEAEDTGNLKEDSSQEHGRSFKSEDISKSLLYAVSNNIGQIAYLHAEKHNLEHIYFGGSFIGGHPQTMHTLSYAIKFWSKGEKQAYFLRHEGYLGSVGAFLK\nAt Epoch: 5000.00\nQPLGSFLFLGPTGVGKTELAKALAYELFDDEKHMVRIDMSEFMEQHSVARLIGAPPGYVGYDEGGQLSEAVRRKPYSVVLFDEVEKAHPQVWNVLLQVLDDGRLTDGKGKTVDFSNVVIIMTSNLGSQYLLAEAQLETISQHVKDSVMGEVRKHFRPEFLNRLDDM\nAt Epoch: 6000.00\nVNGVSFSVEAGETLAIVGESGCGKSVTSLSIMGLIASPGTITGGEITFQGRDLVKLSRKELRKLRGNEMSMIFQEPMTSLNPVFTIGNQLAEVFRVHQGTSKAEAKQKSIDMLQRVGIANASKLVRQFPHQLSGGMRQRVMIAMALACEPKLLIADEPTT\nAt Epoch: 7000.00\nYQHEGLDWLAKLYANQTNGILADEMGLGKTIQTIALLAHLAEEHHIWGPHLIVVPTSVILNWEMEFKKFLPGFKVLSYYGSVEERAQKRKGWSNPDIWNVVITSYQLILKDLPAIRVPEWHYMILDEAHNIKNFNSQRYQAMIRLKTHARLLLTGTPLQNSIIELWSLLTFLTAGQDGQGMGDLEEFTEWFRRPVDEIFVDGKSKLGNEAQDIVNKLHHSLRPYLLRRLKSSVEKQLPGKYEHTVICRLSKRQRQLYDAFMGLSDTKAKLTSGNMISVSQALMSLRKVCNHPDLF\nAt Epoch: 8000.00\nKQKNFRQFCFPKKYEFQIPQKFLAEFINPKTPYTGILVYHRIGAGKTCTAINIAENFKNKKRIMVVLPASLKGNFRSELRSLCADNNYLSANDRQKLKELEPSSQEYREIIQKSDKLIEKYYTIYSYNKFVDLIKNNLLNLTNTLLIIDEVHNMISETGTYYESLYKIIHSSPDDLRLVIMTATPIF\nAt Epoch: 9000.00\nFVIGIGKNGVDCVLRCMHLTEKRFGKDPKKVRFLCIGEETPLGERSYEGSAPGDGFTLPIDPEEAIYKYLNNPAKLPESAQIWFDSGLKNYSPAAPTYGLTKRQCGRIALFHYLKQIMKLTGEAMADFSGSDRSLEIVITGNLGDVFCGGMFIDLPYILAKLFSDAPYPVKFTGYFFAADTASLVETDQRDVGCYQANTIVAKAELDKFQLHRKRFTQKYSRTFEVDSDKPPYSACFLIPAADSYGLTMSRTAEKILNRMEIIFSKDDDAERIISYNMLRPEAAHDFRYLAFNVMACEIPTGKIMSYLAIKLFERLNR\nAt Epoch: 10000.00\nIIKVIGVGGGGSNAVTHMYKQGIVGVDFAICNTDAQAMEMSPVPTRIHLGPDLTEGRGAGSKPNIGKLACEESIDEVRKYLENNCRMLFITAGMGGGTGTGAAPIIAKAAKEMDVLTVGIVTLPFTFEGRRRTNQGMEGLLELKKHVDTLIVISNDKLRQIHG\nAt Epoch: 11000.00\nCQAGQTGNVFWELYCLEHGIQPNGPMPSDKTIGGDDSFNTFFSVMAVDKHVPVFVDPAPMIIDGVCTGTYCQHSHPEQSNTGKEDAADNDQRHYTLDKRIINLILKPVHKSSGFLVFHSFGGGIGSRFTSLLIEWKSKLEFSINVPPPRFPQLYLSPKNIFTTQTILEHPDFAFMLNYEAS\nAt Epoch: 12000.00\nLVIGMGSTGTEILEALADRIDWEVGGLQRAPWLEFLAVETDVAKPNRFNGTDDFKTLGIPATAWRDILHRPEIHEASIALNTWADAETLAQLPAQSIDSGAGHIRMVGRLALLYPPNYSEIKNAITQRVARLRNLTDAQAKAALNVNNAGLEMDVQFAVNASTGQTGVRVIVVGTLCGGTCSGTASDIGILLRTVLEDEEKTLAMFTLPHPNLSISQKSDAEIWKTNAYHALAELNQYHLHTDTERYKTIKFPDKPEGSPVLPHDAMPYDLVYLLRPNSTENVDLMRLTQAIADRMFLNVFVPETDPMAYMVNAGPVTVQQGRAFAFSTFGLSTIEYPMRRILEALKYRTLVHAVD\nAt Epoch: 
13000.00\nEVISIHVGQCGVQVGNAVWELYCAEHAVKTDGSLYEHPHDQEWVETFFNLSEKGRYVPRCLFIDLEPSVIDEIRVGPWRSLFHPDKLITGYEDAANNFARGYFTVGKVLLSPILNEVRRTIEQCDGLQGLLFFRSLGGGTGAGLTAAILDVLGDYRKYTKVEIPIYPAPSLSPAVVEPYNCIFGEHFAMEDFNMGLLMDNEALYDVCS\nAt Epoch: 14000.00\nGVAIMSTGYGEGENRVKHAIDEALHSPLLNNDDIFNSKKVLLSITFCAKDQDQLTMEEMNEINDFMTKFGEDVETKWGVATDDTLEKKVKITVLATGFG\nAt Epoch: 15000.00\nPRIHFPLATYSPLFSADKAHHEQNSVMEMTFACFENGNQMVKCDPKEGKYMACCLLYRGDVAPKETSGAVAAIKTKRTIQFVDWCPTGFKLGVCNEPAACVPGGDLAKVTRSLCMLSNTTSIASAWNRLDH\nAt Epoch: 16000.00\nGMAMMGSGFAQGIDRARLATEQAISSPFLDDVTLDGARGILVNITTAPGCLKMSEYREIMKAVNANAHPDAECKVGTAEDDSMSEDAIRVTIIATGLK\nAt Epoch: 17000.00\nGVAHMGIGVGKGENAAQDAVRAAIESPLLETSIEGAENVLLNITGGSEFSLVDMGEVSSIVRDLVSEEANIIVGTAMDDNLKDEIKVTLIATGLD\n(18000, 128)\n../../out/201027/embedding/seq2seq/pfamA_motors_balanced_evotune_balanced.npy\nloaded dict file for weights ../../data/201025/evotune_seq2seq_encoder_balanced.pt\noutput embedding for pfamA_target\nAt Epoch: 0.00\nPDSAPIVIDNGASTFRIGWAGEAEPRVSFRNIVQRPRHRSSGETVTVVGDTDPALMKYFDCTRTSIRSAFDDDVVYQFEYMEYILDYAFDRLGATSEVGHPILMTECECNPSFSRARMSELLFETYGVPSVAFGIDDVFSYKYNQKLGNCGEDGLAISCEFGTCHVVPFLKGQPVLGACCRTNVGGSHITDFLRQLLSLKYPYHVANFSWEKAEELKKEHCYIAADYMSELQIFKNNKEEAEEKTRYWQLPWVPPPRDEPPSEEELARKAAYKEKAGQRLRDMAAAKKSQKIADLEEKLSGLEDLMDHLDGADEQEATSILGRSGYLSQQEIKSDILKATQSLRKAKGESNGNEENADASGADKYPLVSVPDDMLTPEQLKEKKKQILLKTTTEGKLRAKQKRAEE\nAt Epoch: 1000.00\nFRPVIIDNGSGRIKAGFASDERPRFICPNVVGEVKHKKFFSFSESQQCYVGNDALAHRAILKLSYPIKHGVISDWNGMEKVWSSVITGLGVSLKHHPVLLTEAPLNPKAKREEVCERFFEGFDCPAFYIGIQAVMSLYSTGKITGVVVESGQGVSCSVPIYQGYAIWHAIKRLNLAGHELTEYLSKLLRERGYCFKSSAEYEIVRDMKEKHCFVALDYEEALNKAAMSDELHVSYEMPDGQIVLIGSERFRCLEALFRPSLLGLEDVGIHWMVYNSIMKSDLDIRKDLYANIVLSGGTTMHEGFQERLQAEVVALAPRTVKVRVIAKPEVWTFGSVL\nAt Epoch: 2000.00\nMKEKHCYVALDFEQESNHNIKHSYELPDGQIIEIGAEIFRAPEVLFQPMMIGLEQSGIHEMAFNSIFKSDLEIRRDLYGNVVLSGGTSMLPGIADRLQKELMHLIPPNMMAMVVAPSERKNSTWTGGSMLASLSTFQERWIPKEAYDETGPGIVHRYCF\nAt Epoch: 3000.00\nLGIQRRSVLEHGLVADWDVMEEYWCHLFNRRVCVEPQDVGVLLTEPAVTPYEQRERTAEILFESFGVPKLFIGSQALFLLHSVGDRCDTAVVVESGAGVTQVVPIVAGYAVAAAARRFPVAGLDVTQYVLNNLREHEQGIEMEQALEVAEQVKVRYGCMAKDFARECAEAESKLPSYAIRNTELHTRAGAPYSIDVDYEQLLVPETLFQPDSLAAPSTVTASLFGGLPAVIDAVVWSCPMDCRRSLYANVIVSGGNTRLPYFAKRLHGALRHALDERATGVIAASGGPLGRQVEYEVNVRDYSQAMHAVWRGASAFAASPEYETSAVTRAAYMECGAAVMHQHH\nAt Epoch: 4000.00\nARLAPLVIDNGTGYSSQETQILRSSFRTAIATRGTSGGGSASGPSITGRPSIPSKPGALSASSNIATKRGIDDLDFFIGDEAIANSKTYNVSYPIRHGQIEDWDLMERYWQQTIFKYLRAEPEDHHVLLTEPPMNAPENREQTAEIMFEGLNIQGLYIAVQAVLALAASWSSNKVTDRTLTGTVIDSGDGVTHVIPVAEGYVIGSSIKHIPIAGRDITYFVQQLLRDRNESLNIPVDESLRIAEKIKEDYGYVCGDMVKEFRKYDSEPEKYIIKHEGFDTRTNKAYTIDVGYERFLAAEVFFNPEIYSSDFLTPLPEVVDNVIQTSPIDVRRGLYKNIVLSGGSTMYDHFGRRLQRDLKTIVDDRLYASEVASGGLIKSSGMDVNVITHKRQRYAVWFGGSLMASTPEFYSHCHSKADYMEYGPSICRRYQ\nAt Epoch: 5000.00\nSQDRKVVVCDNGTGFVKCGCAGPNFPEHIFPALVRRTVIRSTTKDLMVGDEASELRLMLEVNYPVGNGIVRNWNDMKHLWDYTFGAEKLIPKFVYVAIQTVL\nAt Epoch: 6000.00\nMMKLVQNKAAYVALNIQQELELAKKIPSPVNEEYELPCGHFMNFRSQKFRNPEALFQPSSARFVKDRENVGVRKMIFNSIMKCDIGIRNYLFKNIMLTVGSTLFPGFVEGITKEILELGSSTLAFKFSDLIAREINHKFANKMFRNVAPPNRMYNAGVGGPALALLNTFEQASPFKTQFY\nAt Epoch: 7000.00\nMTEQHNIHLNTSYQLPDGHVIRIGSERFRCPEALFQPLLLRCLWSHVEMFVVSIMKCDLDMRRKLYENIILSGGSTMFPGMGQRMTKELRVLVVWSRPVPLYSSWL\nAt Epoch: 8000.00\nVGLDIGTTKICAIVGRKNEFGKLEVLGMGKAESEGVVKGIVFNIDKTVYAIEKAIKDAGDQAGIDIGVVNVGIAGQHIRSFIQHGGITRTSKEDEITIADVERLTQDMYRMVVPPGSQIIHVMPQDYMVDYEEGIKEPVGMSGVRLEADFHIITAQTNAINNINKCVRRTGLEIDDLILEPLASSLAV\nAt Epoch: 
9000.00\nALIDVGAGTSDICVTRDGSIIAYGMIPMAGDELTEVLVHEFLVDFATAEQIKRASTEGGNITYEDIMGISHTIKSEDVYKLTDPVMKKISGEVASKIKELNGDKSVSAAFVVGGGGKIHGFTEALSKDLDIVSERVALRGEEVMKNIVFEQNDIQKDSLLVTPIG\nAt Epoch: 10000.00\nTVIDLGYNSSRTIIFKDGIPKLFYSFPYGIKYILKDISNVLKVSEKEAHRLLTEEGACLRDTRTIKKVEFQPITGTGYSYTSLGLLNKIIYARVREIISRLNGELSRISYEKTYEIGALQGGIVLTGGGSKIRNIDQTIRELMGENYRKSSLVSLDYFRDVPEEIKKDSTYLSVFG\nAt Epoch: 11000.00\nACIDMGGGVTGVSLFLKKHMLFADAVRMGGDLVTRDISQGLRVPLPVAEWLKTRHGGLEATGRDDREMIDVTGEPGEDWDGERRFVSRADLIGIMRPRVEEILDGVREILEAAGFDQMPSRQVVLTGGASQIPGLDTLAMRILGYNVRIGRPLRIQGLAQQHTASCHAATVGLA\nAt Epoch: 12000.00\nEAKRVAAQFAFSSDDVRRATKEFINQMEEGLQKDHTDLSQIPTYVTAVPNGTEKGLYMAVDLGGTNFRVCSIMLHGNSTFTLTQTKVAIPRELMVAKTSKELFSFLAKQIELFLKAHHNEHYQGHIRRRKTTSLEEGYRDEEIFNLGFTFSFPVHQIGINKGVLMRWTKGFDIPDAVGKDVCALLQAEIDELHLPVRVAALVNDTVGTLMARSY\nAt Epoch: 13000.00\nKVENMLSGIHLSEEVVSRVKSVFLSEIELGINEEPSSLQMENTYVPELPDGTEEGLFLALDVGGTNFRVLLLELMEGRLVREEVKHYHITDELRLGPGIDLFDFLATCIADFVKEFNIADQTLPLGFTFSFPMHQRSMDCGCLVTWTKSFKCAGVQGEDVVEMLREAIRRRGDIKVDVVAVLNDTTGTLMQGAL\nAt Epoch: 14000.00\nAGLLKKFEAPLADVPGIARAFEVIYHSLALTASNQFLPTPIRALPTGEEKGRFLALDLGGTNLRVAVVRLYGGDGLKVCTQRSWSIPEHFKSGAAEVLFRWVADRIGDVVGEYLGDVGSEERERILSEGMELGITFSFPMEQTTHDSALLMPMGKGFTFTTTNDLSSLLKMAYDDLLSTTTPAHPLPKLDIVSITNDSISTLLSAAY\nAt Epoch: 15000.00\nVMGLLLGAGCNATVPMLIDDLHESKVRHIRLADPKAVETLVTTEWTLRAASEPLSNLNLITSWDSQLNASGDRPGFQPLEYMIAGRYLGELVRIIVHDYFHRILAISKEDLPDKLMKPYALTTEFLSLVVAPSQSGEELLADLERELPSPPLSGWKWTPSLADIVRATTTKIQRRAASLIAAASVGLLACTREIKLADLKEGKSVAETPVVCPSAVPTADPIALPHRSPGSNSPPKVKGQNNPEELVIAFSGGLIQHWPGFRESIQWHIDRLVLRGGPQELGKSIFLREVSDGGLVGVGVLAGT\nAt Epoch: 16000.00\nEIGLIVGTGTNACYMEELKNVELLDGDEGQMCVNMEWGAFGDNNCLEDITTSFDHDVDTFSINPGKQRYEKMISGMYLGEIVRQILIVLTRRGILFGGKISERLLTRDLFPTRFLSLIESDTLGLVQVRSILTELGLRSTCDDTMLVKEVCTTVSRRAAQLCAAGVAAVVEKMRANRGLDQLKVTVGVDGTLYKLHPHFAGVVQETVKILAPKCDVTFLQSDDGSGRGAALITAV\nAt Epoch: 17000.00\nRLGVIVGTGTNACYMEKLENCELWDGDDQEPRQVIVNTEWGAFGDNGVIDFVRSHYDWEVDEESLNPSHQKFEKMISGMYMGELARRVILRLAREHLIFNGRLSQKMKTAYAFKTKYISEIESDPKGCFDETRKVLAKLDQVGSDDDCQCLKLVVSRVSSRAAHLVSAAIATVLNKMKRPHTTVGVDGSVYRYHPKFHQLMEAKIAELTNPDYKFDLMLSEDGSGRGAALVAAV\nAt Epoch: 18000.00\nQVDIGIDLGTANTLVYLRGHGIVMDEPSVVAVTRGSHTVLNDGAAVGLEAKKMLGKTSYSVDVIRPLREGVIANFPITEAMLRYFISRVKARRMFSQTRVVIAIPFGITHAEMKAVYNSTMRAGADKVHLIEETLAAGLGSGLRIDDPTANLVVDIGGGTTGISVISVADIAFGATVRCAGDHMTDAVSDFIRERYKLQIGQQTAEQLKIELGSALPQNEHAAMQIRGQGENGRPATIEVSADDVREALRAPLHKILRGIDWVLENTPPELSADLVDRGILVTGGGALLPRIDDLISDHTGLNVTVADDPLTCVARGAGAYLDTINWQRS\nAt Epoch: 19000.00\nTKDMGIDLGTANTLVYSKGKGIVLREPSVVAINNLTKKPLAVGTEAKQMIGRTPGNIVAIRPLKDGVIADFDITQTMLKKFIEKITNKSAFTSPRIIVCFPSGVTEVERRAIEEATKQAGAREVVLMEEPMAAAIGAGLPVDEPTGSMIVDIGGGTTEVAIISLGGIVTSKSLRIAGDELDQAIIGYIKREYNLMIGERTSEQIKMEIGSAFKADEFEEEASMEIKGRDLISGLPKTVVVTESQIREALKEPVAAIIEAIKTTLEKTPPELAADIMDKGIMLAGGGALLKGLDALINHETHMPVHIAESPLDCVALGAGKALDKFDLIRQ\nAt Epoch: 20000.00\nKSDIGIDLGTASVLVYIKGKGVVIQEPSVVAIDRDTNKLLAVGEDARRMLGRTPGNIIAIRPLKDGVISDYEVTQRMLKYFIEKAIGKNNLFLRPRIVVCVPSGITEVEKRAVIQASNQAGARKTYLIEEPIAAAIGADLDITEPRGKMIIDIGGGTTDVAVISLGGIVVNSSIKVGGNTFDTYITRYIRKKHNLMIGERSAEELKVVIGTAYKREKEVSMDIRGRYLLTGLPEIVQVTSSELLEALSEPLEAIVDAVKSVLEKTPPELASDIGEKGIMMTGGSSLLHGIDKLIKERTGIKVNIAEDPVSCVATGTGRSLESIDVLEN\nAt Epoch: 21000.00\nGTDIGIDLGTASVLVYIKGKGVVLKEPSVVATDNTKRKVLAVGEEARQMIGRTPGNIIATRPLRDGVISDYDVTERMLRHFIKKARGNSVSLLRPRVIICIPCEATEVEKRAVKDAALSAGAGKVYLIEEPVAAAIGAGLDISKASGSMIVDIGGGTTDVAVLSLGGMVVRSSIKIAGDKFDEAIIRYIRKKHNIMIGERTAEELKINIGTAYPRSEEVTMDIRGRDLVTGLPKNITVSSEEMREALEETTSAIVDCVHSVLEHTPPELSADIINKGIIMTGGGSLLYGLDLLIQSRTHVTTTVAKDSICCVAYGTGEALENLDKFAE\nAt Epoch: 
22000.00 … [remaining per-epoch example-sequence printouts omitted; this stretch of the progress log is condensed below to the information it actually carries]

Embedding runs recorded in this portion of the log (encoder checkpoint -> dataset -> saved embedding array):

loaded dict file for weights ../../data/201025/evotune_seq2seq_encoder_balanced.pt
    pfamA_target          -> ../../out/201027/embedding/seq2seq/pfamA_target_evotune_balanced.npy          (59149, 128)
    pfamA_random          -> ../../out/201027/embedding/seq2seq/pfamA_random_evotune_balanced.npy          (1600, 128)
    motor_toolkit         -> ../../out/201027/embedding/seq2seq/motor_toolkit_evotune_balanced.npy         (3255, 128)

loaded dict file for weights ../../data/201025/evotune_seq2seq_encoder_balanced_target.pt
    pfamA_motors_balanced -> ../../out/201027/embedding/seq2seq/pfamA_motors_balanced_evotune_balanced_target.npy (18000, 128)
    pfamA_target          -> ../../out/201027/embedding/seq2seq/pfamA_target_evotune_balanced_target.npy          (59149, 128)
    pfamA_random          -> ../../out/201027/embedding/seq2seq/pfamA_random_evotune_balanced_target.npy          (1600, 128)
    motor_toolkit         -> ../../out/201027/embedding/seq2seq/motor_toolkit_evotune_balanced_target.npy         (3255, 128)

loaded dict file for weights ../../data/201025/mini_seq2seq_encoder_balanced.pt
    pfamA_motors_balanced -> ../../out/201027/embedding/seq2seq/pfamA_motors_balanced_mini_balanced.npy (18000, 128)
    pfamA_target          -> ../../out/201027/embedding/seq2seq/pfamA_target_mini_balanced.npy          (59149, 128)
    pfamA_random          -> ../../out/201027/embedding/seq2seq/pfamA_random_mini_balanced.npy          (1600, 128)
    motor_toolkit         -> ../../out/201027/embedding/seq2seq/motor_toolkit_mini_balanced.npy         (3255, 128)

loaded dict file for weights ../../data/201025/mini_seq2seq_encoder_balanced_target.pt
    pfamA_motors_balanced -> ../../out/201027/embedding/seq2seq/pfamA_motors_balanced_mini_balanced_target.npy (18000, 128)
    pfamA_target          -> still being embedded when this portion of the log ends; its raw printout resumes, mid-run, below.

Every run follows the same pattern: the encoder weights are loaded ("loaded dict file for weights ..."), the target dataset is announced ("output embedding for <dataset>"), a progress line ("At Epoch: N") echoing one example sequence appears roughly every 1,000 sequences, and the finished embedding matrix (one 128-dimensional vector per input sequence) is written to the .npy path shown. A quick way to sanity-check one of these saved arrays is sketched below.
17000.00\nRLGVIVGTGTNACYMEKLENCELWDGDDQEPRQVIVNTEWGAFGDNGVIDFVRSHYDWEVDEESLNPSHQKFEKMISGMYMGELARRVILRLAREHLIFNGRLSQKMKTAYAFKTKYISEIESDPKGCFDETRKVLAKLDQVGSDDDCQCLKLVVSRVSSRAAHLVSAAIATVLNKMKRPHTTVGVDGSVYRYHPKFHQLMEAKIAELTNPDYKFDLMLSEDGSGRGAALVAAV\nAt Epoch: 18000.00\nQVDIGIDLGTANTLVYLRGHGIVMDEPSVVAVTRGSHTVLNDGAAVGLEAKKMLGKTSYSVDVIRPLREGVIANFPITEAMLRYFISRVKARRMFSQTRVVIAIPFGITHAEMKAVYNSTMRAGADKVHLIEETLAAGLGSGLRIDDPTANLVVDIGGGTTGISVISVADIAFGATVRCAGDHMTDAVSDFIRERYKLQIGQQTAEQLKIELGSALPQNEHAAMQIRGQGENGRPATIEVSADDVREALRAPLHKILRGIDWVLENTPPELSADLVDRGILVTGGGALLPRIDDLISDHTGLNVTVADDPLTCVARGAGAYLDTINWQRS\nAt Epoch: 19000.00\nTKDMGIDLGTANTLVYSKGKGIVLREPSVVAINNLTKKPLAVGTEAKQMIGRTPGNIVAIRPLKDGVIADFDITQTMLKKFIEKITNKSAFTSPRIIVCFPSGVTEVERRAIEEATKQAGAREVVLMEEPMAAAIGAGLPVDEPTGSMIVDIGGGTTEVAIISLGGIVTSKSLRIAGDELDQAIIGYIKREYNLMIGERTSEQIKMEIGSAFKADEFEEEASMEIKGRDLISGLPKTVVVTESQIREALKEPVAAIIEAIKTTLEKTPPELAADIMDKGIMLAGGGALLKGLDALINHETHMPVHIAESPLDCVALGAGKALDKFDLIRQ\nAt Epoch: 20000.00\nKSDIGIDLGTASVLVYIKGKGVVIQEPSVVAIDRDTNKLLAVGEDARRMLGRTPGNIIAIRPLKDGVISDYEVTQRMLKYFIEKAIGKNNLFLRPRIVVCVPSGITEVEKRAVIQASNQAGARKTYLIEEPIAAAIGADLDITEPRGKMIIDIGGGTTDVAVISLGGIVVNSSIKVGGNTFDTYITRYIRKKHNLMIGERSAEELKVVIGTAYKREKEVSMDIRGRYLLTGLPEIVQVTSSELLEALSEPLEAIVDAVKSVLEKTPPELASDIGEKGIMMTGGSSLLHGIDKLIKERTGIKVNIAEDPVSCVATGTGRSLESIDVLEN\nAt Epoch: 21000.00\nGTDIGIDLGTASVLVYIKGKGVVLKEPSVVATDNTKRKVLAVGEEARQMIGRTPGNIIATRPLRDGVISDYDVTERMLRHFIKKARGNSVSLLRPRVIICIPCEATEVEKRAVKDAALSAGAGKVYLIEEPVAAAIGAGLDISKASGSMIVDIGGGTTDVAVLSLGGMVVRSSIKIAGDKFDEAIIRYIRKKHNIMIGERTAEELKINIGTAYPRSEEVTMDIRGRDLVTGLPKNITVSSEEMREALEETTSAIVDCVHSVLEHTPPELSADIINKGIIMTGGGSLLYGLDLLIQSRTHVTTTVAKDSICCVAYGTGEALENLDKFAE\nAt Epoch: 22000.00\nKIKVIGVGGGGGNAVNRMVAMEVKNVEFIAINTDEHVLRLSKASQKIQIGEKLTKGKGAGSMPAIGQSAAEESKDEISGVLKDTDMVFVTAGMGGGTGTGAAPVVAKIAKDMGILTVGVVTKPFAFEGKRRMTQAEQGIAELSACVDSLIIVPNERLKYVSD\nAt Epoch: 23000.00\nEIISISVGQCGNQIGQQFWRTISQEHGLSMDGHSTNTASPLEKENLGVYFSESSDRYVPRAVLVDLESGVLDSVKSSSQGQLFRPDNFINAASGAGNNWAKGFYTDGTELIDEIIDTIRKESESCDSLQGFQLTHSLGGGTGSGLGTLLVSKIKEEFPDRMLATFSVFPSAKVSDTVVEPYNATLSIHQLIENADQVFTIDNEALFDICT\nAt Epoch: 24000.00\nMIAEDHGIGPDGIYSGSSELQRGRMEVFFRETEENKHFPRAVVVDLESDSLNAVLQSTHRALFQGDNFVSGRGGTGNIWAKGFYGEGRRYIDEVLEVIRKEADICEGLQGFNVAHSLCGGTGSGFGALIIEKIHEQYPNRLISTFSTVSSNRLLGVMKQPYNTILSLQHLAENANITYCIDSDGLHDISR\nAt Epoch: 25000.00\nLIKVVGVGGGGGNAVNRMIQSGLRGVEFIAINTDAQALLMSDADVRLDIGRQLTRGLGAGSDPEVGRQAAEEHREEIEEALKGADMVFITAGEGGGTGTGGAPVVAEIARGLGALTIGVVTRPFGFEGRRRAQQAEDGISRLREYVDTLIVIPNDRLLTIAN\nAt Epoch: 26000.00\nSIVTVQLGQCGNQIGFEVFDALFRDSRCSQGLCSKSENEAYQASCSERFFREEENGVPVARAVLVDMEPKAINQTLSKAAPSGGWKYGQHACFCQKQGSGNNWAYGYSVHGPKHEESIMNLIQKEVEKCDSLSGFFIIMSMAGGTGSGLGAFVTQKLQDQYSNALLIHENDAVHKIC\nAt Epoch: 27000.00\nLRACFWEIISDEHGIDPSGVYRGTADIQLERISVYYNEATGGRYVPRSVLVDLEPGTMDAANNSLKGGSSPSHGRAATPSLSPESLKARVKSERRRKQLGQRHYTEGAELVDSVLDGIRKECESCDCLQVSAAP\nAt Epoch: 28000.00\nLIIGLGGTGGRIIRALRKIIYQEFRTIHPPDVNIAYLYIDSDDEMMALDDPRWKILGHSVQLGIDSQLLIQGADLEERLNNIHNYPGIKEWIGNRGDWKDILRSFAGGRVYGGQKRRLGRFLLACNIDAFINQLTLQVNHLQRISNQAEVTFHICCGLAGGTGSGSVIDTIVQLRKHYPYSNQGLTYPLLVYAYLPEKNPNPKWDTGNYQANGYAALMELNALSAGRFDPTDLMGGKPVACGVAFNGLYLFTNQNEKNVIVGVENEIPQIVADFLYQKIIAVSKVAWTSLAFLEDAQNGDSTPETAAIPNSRLPERAKRLLTFGINRIAIPEEEIKEYLTYHFARQAAL\nAt Epoch: 29000.00\nNSEISGHELAADLVTNALANPLYTDDRNHAERCISFLHAGTDLTLGEVETVREEITSQIDSGVGLELFTADTTKMMGNKRRLTL\nAt Epoch: 30000.00\nGTALMGIGTGSGKTSAEDAAVAAISSPLLDAPVDEATGVVFNIIGGESLSLQEVDRAAKVIYNNVHEDANVIFGALVDDEITDGTVSITVLATGFY\nAt Epoch: 31000.00\nGTALMGIGSASGENRTAEATKKAISSPLLEVSIDGAEQILLNVTGGPDLSLFEAQDASEIIASASSDDVNIIFGTSINESLGDEVVVTVIATGID\nAt Epoch: 
32000.00\nPRIHFPLATLAPIISAAKAQHEQNSVAEMTFSCFETGNQMVKCEPREGKYMACCLLFRGDVIPKDANGAVATIKTKRTIQFVDWCPTGFKLGICNEPPAAVPGADLAPVSRSLCMLSNTTAISSAWSRLNK\nAt Epoch: 33000.00\nPRIHFPLVSFAPVLSKSKSSHESSNVQEITNACFEPSNQLVKCDPKAGKYMATCLLYRGDVVNRDVQNAVSMLKNKKTIQLVDWCPTGFKIGLCYKPPHYVPDGDLAPATRSVCALSNTTAIAEAWQRIDE\nAt Epoch: 34000.00\nTASSPIVFILSPGSDPASDLMKLAESSGFGGSKFKFLAMGQGQDKVAASRGQWLMLQNCHLLVKWLKELEKALERITKPNPNFRLWITTNPIEDFPIGILQNSLKVV\nAt Epoch: 35000.00\nEPRTPMVGLLSMGSDPTTSIELLAKKHKKECKAISMGQGQEIHARRLMSNSLQNGGWVLLQNCHLSLDYLMEVMDQLVEAETVHEDFSLWVTCEVHPKFPISFLQQSIKFT\nAt Epoch: 36000.00\nRVRPPLDCERDKMLCNLSYLDEATMEIASFEPTAKGKSIAHTFTFDQVFDHSSEQESIFEMVSPLIQSALDGYNICIFAYGQTGSGKTYTMDGIPSNPGVIPRTVDLLFDSIKNYRHLGWEYEIKVTFLEIYNEVLYDLLSNEQKDMEIRMVKNSKNDIYVSNITHETVGSAGRLRELMQIAKMNRATAATVGNERSSRSHAVTKIELIGTHAKKQELSIGSINLVDLAGSESPKTSTRMNETKNINRSLSELTNVILALLQKQDHIPYRNSKLTHLLMPSLGGNSKTLMFINIAPLQDCFVESLKSLRFAATVNQC\nAt Epoch: 37000.00\nRCRPFNGRETARNAQCIVKMKGDQTILSPPSEVKGKAAKAASEGVKTFAFDKSYWSFDRNAPNYAGQDNLHEDLGKPLLDNAFQGYNNCIFAYGQTGSGKSYSMMGYGADPGIIPKICQDMFERIKVVQQDKNVGCTVEVSYLEIYNERVRDLLNPSNKGNLRVREHPSTGPYVEDLAKLVVQSFQEIENLMDEGNKARTVAATNMNETSSRSHAVFTLTLTQKRHDTDAGMTGERVAKISLVDLAGSERAQSTGATGARLKEGAEINRSLSTLGRVIAALADMSQGKKKTQVPYRDSVLTWLLKDSLGGNSMTAMIAAISPADINFEETLSTLRYADSAKRI\nAt Epoch: 38000.00\nRIRPLSTMERDSQGYGRCLRQESAKTLVWLGHPETRFTFDHIACEKISQENLFKVAGQPMVENCLSGYNSCMFAYGQTGSGKTYTMMGGIYELEGKLNEDCGLTLRIFEHLFTRIGMEEKSKRDVKLKYSCKCSFLEIYNEQITDLLEPSSTNLQLREDSKKGVYVENLTEHSVSTINDVVKLLLQGAANRKMAATYMNSESSRSHSVFTCIIESHWEKDSRTHLRFARLNLVDLAGSERQKSSGAEGDRLKEAANINKSLSTLGLVIMSLVDLAHGKHRHIPYRDSRLTFLLQDSLGGNSKTTVIANVSPSFCSANETLSTLKFAQRAKQI\nAt Epoch: 39000.00\nRVRPQNEHELQGNCRTLIKVVDDKMLIFDPKTEENPFFYHGVAQKGRDLLKKQNKELQFIFDKIFNMQSDNTDVFEGSTKELICNLLDGYNCSVFAYGATGAGKTHTMLGNNEDPGITYRTVAELFSEIEKQGEHREFNLGVTYLEIYNENVQDLLHRSGPLHLRDDGRCGVIVAGLKIIAIHSAEELLTLLAKGNRNRTQHPTDANEESSRSHAVFQVYINITNKLDGQVRQVKLSMIDLAGSERASATGCKGPRFKEGANINKSLLALGNCINNLADGAKHITYRDSKLTRLLKDSLGGNCQTVMIANIAPSSFSYEDTYNTLRYADRAKKI\nAt Epoch: 40000.00\nRVRPFTVVESGNGESQECVTIEAPDTVVLKAPRSCQSNRQSEKSLPQTAQRFSFTQVFGPDASQRKVFEGSVRGLVRDVLEGGNCLVFTYGVTNAGKTFTFLGPDHDSGLLPRSLSVIFNSIEGRLYSRSDLKPQRCRDFSRLTPDQQAAESSSKKNLLRLLKEVTHIHTHTHTHTHTHTHT\nAt Epoch: 41000.00\nIIKIMQEEDKAVSSPEHPLQTNSLCIFGEACTNRDVYMKTTHPLIQHIFNGGNATCFAYGQTGAGKTYTMIGTHQNPGLYALAARDIFRQLEVSQPRRHLFVWISFYEIYCGQLYDLLNRRKRLFAREDSKHVVQIVGLQELQVDSVELLLEVILKGSKERSTGATGVNADSSRSHAIIQIQIKDSAKRTFGRISFIDLAGSERAADARDSDRQTKMEGAEINQSLLALKECIRALDQEHTHTPFRQSKLTQVLRDSFIGDAKTCMIANISPSHVATEHTLNTLRYADRVKEL\nAt Epoch: 42000.00\nRCRPFSDEELRSNAPQVVTCNDYSREVAVSQSIAGKHIDRVFTFDKVFGPSARQKDLYEQAVTPIVNEVLEGFNCTIFAYGQTGTGKTYTMEGECKRAKSGPNGELPPEAGVIPRAVKQIFDTLEGQNAEYSVKVTFLELYNEEITDLLAPEEISKVSLEEKQKKQLPLMEDGFDKRGVKSTDSCSEEMFDTMMNRARDGRSRPIVAEKRGSRR\nAt Epoch: 43000.00\nKTFVFSKNMNSKFLRRTKSIEKIQEIVKNEEKKNNTNQPSLNLELIQQNKPVIFVEPQNKCNQNIQNLKKYDQESKNYLRMRFKNRPERIKIGQTFIIYDETLKAKGKIIKDKQDSKPLNIHESKIDGIYVEGLSEYQCTHYYDAIQLMKRGEKNRKIRQTQMNNKSSRSHTILQFSIESTNNNNKNIMKRSKVNLCDLAGSEKINKNEIIQNDHFNELKNINQSLSTLGKIIYNLSCNQKLPMPFRESKLTRILQDSLTGNCKTIVIGNISPSLINIEETISTLKFVDRAKNI\nAt Epoch: 44000.00\nRFRPQNRREIESGGEPIVTFDSDDTCKLESQEATGSFTFDRVFDMASKQSDIFDFSIRPTVDDILNGYNGTVFAYGQTGAGKSYTMMGTDMEDEQGRGVIPRIVEQIFASIVASPSNIEYTVRVSYMEIYMERIRDLLVPQNDNLPIHEEKNRGVYVKGLLEIYVSSVQEVYEVMRKGGNSRAVAATNMNQESSRSHSIFVITITQKNVETGSAKSGQLFLVDLAGSEKVGKTGASGQTLEEAKKINKSLSALGMVINSLTDGKSSHIPYRDSKLTRILQESLGGNSRTTLIINASPSSYNDAETLSTMRFGMRAKAI\nAt Epoch: 
45000.00\nRVRPPSKRETAEGSRIILNVDEKVARIKNIRLDHKPDGCEDTRERLIEFGFDSCYWSVDPEDPKYASQEMVFQDLGTLVLSEAISGYNVCLFAYGQTGSGKTYTMMGTPASIGLTPRICEGLFSYDEGSPETPNSFRVEVSFLEIYNERVRDLLHKSEEKKPYTLRVREHPERGPYVQGLSQHVVTSYEQVVALLEEGMENRITAATHIHDASSRSHAIFTIQYTQAMLEDNLPTEITSKINLVDLAGSERASPEYCKDRLTEGSNINRSLVTLGIVISTLAQNSQMTSSCQSINSIASDGDSGSPSGGSTNGSKRQPYVPYRDSILTWLLKDSLGGNSKTIMIATVSPASSSYNETMSTLRYASHAKNI\nAt Epoch: 46000.00\nRVRPTSGHSAWNSPQGSNSIQLDPAHARNPNLMSSNPSSLSTAPPTTYHFDSILTGIPNKPIYTTVARSHVHAAMEGFNAVIFAYGQTASGKTYTLSGDENEPGIIPRAMRDVFGFIKRTPDREYLLRCSYLEIYNETIYDLLAPPMGGSGSQVQIQGGTGMEVILTPLREEVVTSLKGVNEVLRRGERHRRTACTDWNERSSRSHSVFRLVIESRERGSGPGPLDDADMRAPSRSGRATPGNGRATPGPGNAGSRLQARGGKSVQTSILSLIDLAGSEKATSDKDRTREGKYINTSLLTLGSVIGTLSENAAKNKSDYVPYRNSKLTRMLQPSLAGNARISVICTINPDPSAVGETSSTLGFAKRVKGV\nAt Epoch: 47000.00\nVKVAVHVRPLIGDERLQGCKECVSVTPGKPQVQIGTHSFTFDHVYGSGGAPSTAMFEECIAPLVEGLFQGYNGTVLAYGQTGSGKTYTMGTGSKDGSQTGLIPQVMNALFSKIETLKNQTEFQLHVSFIEILKEEVRDLLDSVSLNKVENGNGHAGRVTVSGRQPIQIRETSNGAITLAGSTEIFVRTLQEMSTCLEQGSLSRATGSTNMNNQSSRSHAIFTITLEQMRKIHSVFPGNDTPDEDMGEEYFCAKLHLVDLAGSERAKRTGSDGVRLKEGIHINKGLLALGNVISALGDEKKRKEGVHVPYRDSKLTRLLQDSLGGNSKTVMIACISPADINAEETLNTLKYANRARNI\nAt Epoch: 48000.00\nRLRPLNEKEISRNDALDWECINDTTIIFKNHLPIPERSMYPSAYTFDRVFRSDSTTREVYEAGAKEVALSVVSGINSSIFAYGQTSSGKTFTMSGITEYTMADIYDHIERHKEREFLLKFSAMEIYNESVRDLLSSDTAPLRLLDDPERGTIVEKLTEETLRDWNHLIELLSLCEAQRQIGETALNETSSRSHQILRLTVESSAREFLGNDNSSVLTSTVNFVDLAGSERASQSLSAGTRLKEGCHINRSLLTLGTVIRKLSKGRSGHIPYRDSKLTRILQSSLGGNAKTAIICTMSPARSHVEQSRNTLLFASCAKEV\nAt Epoch: 49000.00\nPSPRPSISQTPIRTKLQLVDLAGSESVGMSGVSGAALWENSCINRSLSALSDVLGALAEQRPHVPYRNSKLTHLLQDSIGGDAKLLVMLCVSPTQRFLTESLQSLGFGSRARQI\nAt Epoch: 50000.00\nKEYTFDGVFDQESNQKEVYEDVGKPVLKDVLQGYNGSILAYGQTGAGKTHSLLNSGMGVDGKPDPKQAGLLPRLVAALFVHVGADVKHVYTVEASMLQIYNEQVDCLLGDDREKAQGLQVTGKSEVKGLVWHKCKTPNELLQCFQKGRMNLVYAETKMNKSSSRSHAVFQIKVSKRPRALDKTGTKGGKVEMKATFGKLTVVDLAGSERIKKSGVTGTQLKEATNINSSLLSFGNIVQALAEKKKFIPYRDSKLTRILEDSVGGNCKTSLLVCCSPSAESSDETVSTLEFASRAARI\nAt Epoch: 51000.00\nVEDMATLAQLHEGSIMHNLHIRYKKDNIYTYIGSILVSVNPYKSISGLYDITSMEQYSSYHLGERPPHIFAIANECYHCLWKRNDNQCVLISGESGAGKTESTKLILKYLSAMSQHSLDVTAKENVSYVEQAILESSPIMEVFGNAKTIHNNNSSRFGKFIKLNFCQKGNIQGGRIIDYLLEKNRVVRQNPGERNYHIFYALLAGTDEAQKEMFYLSEKENYYYLRQFGCIVDNAIDDQRTFQEVMTAMRVMKFSSEEILEVLKLLAGVLHLGNIEFVIAGGAQVSSKNALGRAAELLGLDSMKFTEVLTHRSMILRGEEISTPLTVEQGIDSRDSMAMALYSQCFSWIIKKINNRIKGKEDYRSVGVLDIFGFENFEVNRFEQFNINYANEKLQEYFNKHIFSLEQLEYNRDGLIWEDIDWMDNGECLDLIEKKMGILALINEESHFPKGTDDTLLAKLHSHHSKNPFYVKPRVLDHYFGVKHYAGEVLYHVKGILEKNRDTFRCDVLNLLCESRLDFIYDLFEHASSKINEDTFKSGTKHQKPTVSSQFKNSLHSLMATLSTSNPFFVRCIKPNDQKMPDQFDQTIVLNQLRYSGMLETVKIRRAGFPIRRQFEDFCARYKILMRNLSLPDDLKAKCAALLYCYDNTNTDWQLGRTKVFIR\nAt Epoch: 52000.00\nMDTKLPSKLFIGVLDIAGFEIFQLNSFEQLCINFTNEKLQQFFNHHMFVLEQEEYKMQGLEWTFVDFGLDLQGCIDLIEKPLGILSILEEECMFPKTTDITFNAKLLNNHLGKSPNFAKSKPDKKRKYESHFEILHYAGVVPYNLNGWLDKNKDPLNETAVELFQQSSNELVAMLYQDYVRAY\nAt Epoch: 53000.00\nVSDLTLISKISNEAINDNLKIRFQNGEIYTYIGHVLVSVNPFRDLGIYTDAVLHSYQGKNRLEAPPHVFAIAEASYYNMKAYKENQCVIISGESGAGKTEAAKRIMQYIASVSGGSNSSIQEIKDMVLATNPLLESFGNAKTLRNNNSSRFGKYLEIQFNDQGEPVGANINNYLLEKSRVVGQVKEERDFHIFYQFTKAASETYRSTYGVQQPNTYAYLSKSKCYDVNGIDDKADFKDTLNAMKVIGMSQQEIDEVFRMLAAVLWIGNVSFRENDEGNAEIVDQSVVDFVAYLLEVDSSHVNKAMSTRTIETARGGRRGSTYDVPMNIAQASSVRDALSMAIYTNMFDWIVQRINASLKARSAISHSIGILDIYGFEIFEKNSFEQLCINYVNEKLQQIFIQLTLKTEQEEYAREQIQWTPIKYFDNKVVCELIEEKRPPGVFAALNDACATAHADPAAADGTFVQRLNALSSNPNFAPRQGQFVIKHYAGDVNYEVAGMTDKNKDQLLKDLLNLVGESGNAFVQTIFPDRIDQDNKRRPPTAGDKIKASANDLVATLMKCTPSYIRTIKPNENKSPTEYNDGNVMHQIKYLGLQENVRIRRAGFAYRQTFEKFVERFYLLSPKCSYAGEYTWTGDAKSGVKQILKDTSIPAEEWQMGMSKAFIK\nAt Epoch: 
54000.00\nVEDVCQLPHLNESSVLHVLRQRFANNLIHTRAGPVLLIVNPMAPLALYSEKVASMFRGCKAEDDMPPHIFAQAQTAYRAMLETRRDQSLIFLGRSGAGKTTSFKHALYYLTLASRQELQPSVRALTVEKVSAIGTIMDAFGHERTSLNGNATKFTQIFALDFDHSGQIVSGSIQIMPIDRMRPSGGSNRGRSGVPRWSFLAGVDGGALRKELLLEPAAGESSPGGSATVEQESIDYQRLCQAFRVLNIDQAAVRGIWYVLAAIHHLSQSGAVIVAGRVQFVNPRSAQKAAMLLGIPMEDLLSYVFPENGSGGATKATLNTAVVVECLTAFTEALYTELFYTIVGLINKSIAAVTPHQTIGSVLLVDVPGFQNPASVGGGTAASTLADLRFNYLHERLQLLFHNAMLVQPRARYAQEMVTVEDSL\nAt Epoch: 55000.00\nEDKLKLERDFSRYNYLSLDSAKVNGVDDAANFRTVRNAMQIVGFMDHEAEAVLEVVAAVLKLGNIEFKPESRVNGLDESKIKDKNGSFWLDVK\nAt Epoch: 56000.00\nVDDLMQLSYLNEPSVLYNLQYRYDRDMIYTKAGPVLVAINPFKEVQLYGNVYIEAYKSKSIDSPHVYAIADTAIHEMIRDEVNQSIIISGESGAGKTETAKIAMQYLAALGGGTGMEYEILQTNPILEAFGNAKTARNDNSSRFGKLIEIHFSPNGKISGAKIQTFLLEKSRVVQCAAGERSYHIFYQLCAGASKSLRDKLNLRSVEEYKYLKQSSCFVINGVNDAERFQSVMAAMKVVHIRQQDRDNVYAMLAAILWLGNISFNVIDNENHVEVVADEAAQTVSKLLGCDIQDLKLALCTRKMRVRSDTIIQKLTLTQAIDTRDALAKSLYASLFEWLVEQINMSLEVGKRRTGRSISILDIYGFESFEKNSFEQFCINYANERLQQHFNRHLFKLEQEEYIQDGIDWARVDFEDNQNCLKLFEKKPLGLLSLLDEESTFPNGTDLTFANKLKQHLHSNSCFKGERGKAFTVSHYAGEVVYDTTGFLEKNRDLLHIDSIQLLASCSCHLPQIFASKMLTQSDAQEGSPYRSSGVDSQRLSVATKFKGQLFQLMQRLGNTTPHFIRCIKPNKLQLPSTYEQSLILQQLKCCGVLEVVRISRSGYPTRMSHQKFARRYGFLLLENVASQDPLSVSVAILHQFNILPEMYQVGYTKLFFR\nAt Epoch: 57000.00\nIDDLTSLSHLNEPAVLHNLQVRYGMHNIYTYSGIVLVALNPFARVGVYSQDTLEAYAGRMRGELEPHLFAISEDAFQGMVRDRKNQTIIVSGESGAGKTVSAKYIMRYFASAHEAQRDVEHQEQTAMSGVEEQILATNPVLEAFGNAKTTRNDNSSRFGKFLEIRFSERHAIEAAFIRTYLLERSRLVYQPPTERNYHVFYQLLASDRALDEAQREALGLQGATWETFHYTRQGGSGEIVNVDDAREFEKTSAALGVVGVDATTQQQVFALLAALLHMGNIEITGSNSAAVADDDAAFAQATGLLQVDAAQFRKWLTRRQIVTRSEKIVSNMTRAQALVVRDSVAKYVYAHVFEWIVRTINGVLTGGGAGPAASFIGVLDIYGFEHFEHNSFEQFCINYANEKLQQNFNRHVFKLEQEEYQREQLANWTFVDFQDNQPCIDLIEGRLGVLALLDEECRLQQGSDAKFAEKLARQFAEQPVRQLPADSPAAFFRKPRFGADSFTIRHYAHDVAYEAAGFLEKNRDSVPDEIQNVLRASSAPLLAEVLADTSAAAADSGTATAVTASQTPARLSVRAPRRPTLGAVFKHSLAGLMETIEATESHYIRCIKPNDAKHAWVFDAPMVLSQLRACGVLETIRISCAGYPSRLPIPDFIHRYRVLLSDPGAPLRAASLDAFREFATQTLAEAFGARDCWQVGLTKVFFR\nAt Epoch: 58000.00\nERLNDTSELISYVDNQECLNLIASRSGGVFSTIDAISRLPGPSDRKLNERLHTLFKRHPCFPTPHPKEAHEMFCIVHYAGMVKYHIESFIDKNNNIISAQFEELMAISKSSVLQAQPLLSSASANSSPPTSQKGGSVTHMFSVQMRGLASELEGTRCNFIRCIKPNAEMEVGVFDRASVVDQLRCSGTVQACSVLRVGLPTRILYAEVVDTYLPLVGQETYEKFNCNERLFTQAICAALAFPSDAYRLGDTRLFFR\nAt Epoch: 59000.00\nINDLALSPSTSDDVLVSVLRERFLSDTVYTAIGSSALVVVNPYKYVSSNADNVLLDYAAEYRDTDAHDDRHVKPPHIFQLANNAYFHMRRTNMDQCILLSGESGSGKSETRRLAIKSILELSVTSPGKKGGKMATQIPSGEYILESFGNARTLQNPNASRFGKYTELQFSERGRLCGIKTLDYYLEKSRVAGAPGGERNFHVFYYLCSGASQEERQHLKLADKSTFRYLGQRPTGGREAVTEDSQRFDRLKMAMKSVGLSKRHIAQTFQLLAAILHLGNIDFTMDKSRNEDAAVVKNVDQLEIVADFLGVQPHALEAVMQYKTKLVHKELCTIFLDPEGAGGNRDDLAKTLYSLLFALLNETMNQNLCRDDFLTFIGLFDLPGFQNISSSASRTNSLDQFCVNFANERLQNWVQKRIFERNNKEYEAEEIASLIPTIPFFDNSECIRLMSHQPGGLIHIMDDQARRQPKKNDHTMVEAFSKRWGNHSSFRAGQMDRSGFPTFTVNHYVGPVTYSAESWLERDTDALNPDFVSLLRGATLNAD\n(59149, 128)\n../../out/201027/embedding/seq2seq/pfamA_target_mini_balanced_target.npy\nloaded dict file for weights ../../data/201025/mini_seq2seq_encoder_balanced_target.pt\noutput embedding for pfamA_random\nAt Epoch: 0.00\nNVVYVGNKEVMSYVLAVTTQFNEGSDEVVIKARGRAISTAVDTAEVVRNRFLEDVEVEDIKIST\nAt Epoch: 1000.00\nFEATYLVVSYKLDGIIRASGQVDDRGYIRGTKMKLMMDGNAIVDYMMVGTKFDGGENSVDNASGLFYSPYQEADEAGTFLVTSEPGSIQPVVGVITRYALSCFPDYADISQGAKPNG\n(1600, 128)\n../../out/201027/embedding/seq2seq/pfamA_random_mini_balanced_target.npy\nloaded dict file for weights ../../data/201025/mini_seq2seq_encoder_balanced_target.pt\noutput embedding for motor_toolkit\nAt Epoch: 
0.00\nMASQPNSSAKKKEEKGKNIQVVVRCRPFNLAERKASAHSIVECDPVRKEVSVRTGGLADKSSRKTYTFDMVFGASTKQIDVYRSVVCPILDEVIMGYNCTIFAYGQTGTGKTFTMEGERSPNEEYTWEEDPLAGIIPRTLHQIFEKLTDNGTEFSVKVSLLEIYNEELFDLLNPSSDVSERLQMFDDPRNKRGVIIKGLEEITVHNKDEVYQILEKGAAKRTTAATLMNAYSSRSHSVFSVTIHMKETTIDGEELVKIGKLNLVDLAGSENIGRSGAVDKRAREAGNINQSLLTLGRVITALVERTPHVPYRESKLTRILQDSLGGRTRTSIIATISPASLNLEETLSTLEYAHRAKNILNKPEVNQKLTKKALIKEYTEEIERLKRDLAAAREKNGVYISEENFRVMSGKLTVQEEQIVELIEKIGAVEEELNRVTELFMDNKNELDQCKSDLQNKTQELETTQKHLQETKLQLVKEEYITSALESTEEKLHDAASKLLNTVEETTKDVSGLHSKLDRKKAVDQHNAEAQDIFGKNLNSLFNNMEELIKDGSSKQKAMLEVHKTLFGNLLSSSVSALDTITTVALGSLTSIPENVSTHVSQIFNMILKEQSLAAESKTVLQELINVLKTDLLSSLEMILSPTVVSILKINSQLKHIFKTSLTVADKIEDQKKELDGFLSILCNNLHELQENTICSLVESQKQCGNLTEDLKTIKQTHSQELCKLMNLWTERFCALEEKCENIQKPLSSVQENIQQKSKDIVNKMTFHSQKFCADSDGFSQELRNFNQEGTKLVEESVKHSDKLNGNLEKISQETEQRCESLNTRTVYFSEQWVSSLNEREQELHNLLEVVSQCCEASSSDITEKSDGRKAAHEKQHNIFLDQMTIDEDKLIAQNLELNETIKIGLTKLNCFLEQDLKLDIPTGTTPQRKSYLYPSTLVRTEPREHLLDQLKRKQPELLMMLNCSENNKEETIPDVDVEEAVLGQYTEEPLSQEPSVDAGVDCSSIGGVPFFQHKKSHGKDKENRGINTLERSKVEETTEHLVTKSRLPLRAQINL\nAt Epoch: 1000.00\nMEDQEMHLKVRRVADKFTESMYFLANEPSVALYRLQEHVRRSLPELVQHKTDMQSWEEQSQGAIYTVEYACSAVKSMTNSSIYFKNIDSLLRQTISMKEQISNSQGRSPHVSAPSASS\nAt Epoch: 2000.00\nMVIGTPVTTPLSKIVRTPSRVPGSRRTTPSKIREEKILVTIRVRPLSPKEQAAYDLIAWDFPDEQTIVSKNLNHERHTGPYSFDYVFDPTCSTSKVYEQGARDVALSALNGINATIFAYGQTSSGKTFTMRGITESAVNDIYGRIKLTTERDFVLKFSALEIYNETVVDLLNRESVSLRLLDDPEKGVIVEKQVEEIVKDEEHLKTLIGTVEAHRQVGETALNDKSSRSHQIIRLTIESSIRENSGCVKSFLATLNLVDLAGSERASQTSADGTRLKEGSHINRSLLTVTNVIRKLSCSGGKRSGHIPYRDSKLTRILQASLGGNSRTAIICTLSPALSHLEQSRNTLCFATSAKEVTTTAQVNMVVAEKQLLKHLQKEVSRLEAELRSPDPAASPCLRSLLIEKERKIQKMEEEMNELKRQRDLAQSQLELERRSKKELKGSDHHGPSRQVVKCLSFTPEDEEVSGASLSTNLGRKSLLERQAAIRRSTNSTNPSMLVHEIRKLEMRQRQLGDEANHALQLLHKEFASHRIGSQGATETIAKLFSEIKELQKISCIPEQIEIKDKASLKEEIARLRSQESNIASLEQKLENVQRSIDELVMHLPSCHESADSRTAPSKKKRVLPFNLSNTSNIPNIIRSPCSPMSPSSCNIVEGEIENRAPPECNNVGSAGDSFCSQLSTPVKSKDDNCTPGSRQSNSVNMKKMQTMFKKAAEDNIRSIKAYVTELKERVAKLQYQKQLLVCQVLELEANEAASDEADISDQSPLSWHLVFEDQRQQIIMLWHLCHVSLVHRTQFYMLFKGDPSDQIYLEVELRRLTWLDEHLAGLGNASPALLGDDAAGYVSSSIKALKQEREYLAKRVSSKLNAEEREMLYVKWDIPPDGKQRRRLQLVNKLWSDPLNMQNVRDSAEVVAKLVGFCETGEHVSKEMFQLNFVSPSDKKTWIGWNLISNLLHL\nAt Epoch: 3000.00\nMADEEDPWGFDDGGEEEKAASTQAGTPAPPSKAPSVASDHKADSVVAGTPANEEAAPEEVEEIKAPPPPPEDDGYRKPVQLYRHWVRPKFLQYKYMYNYRTNYYDDVIDYIDKKQTGVAREIPRPQTWAERVLRTRNISGSDIDSYAPAKRDKQLIQTLAASIRTYNYHTKAYINQRYASVL\n(3255, 128)\n../../out/201027/embedding/seq2seq/motor_toolkit_mini_balanced_target.npy\n"
],
[
"i = 4\ndict_file = dict_files[i]\nout_path = out_paths[i]\nfor i in range(len(data)):\n dat = data[i]\n dat_name = data_names[i]\n seq_col = seq_cols[i]\n generate_embedding_lstm(dict_file,dat,dat_name,out_path,out_dir,seq_col)",
"loaded dict file for weights ../../data/first_try/seq2seq_encoder_df_dev_201012_230k.pt\noutput embedding for pfamA_motors_balanced\nAt Epoch: 0.00\nHQDNVHARSLMGLVRNVFEQAGLEKTALDAVAVSSGPGSYTGLRIGVSVAKGLAYALDKPVIGVGTLEALAFRAIPFSDSTDTIIPMLDARRMEVYALVMDGLGDTLISPQPFILEDNPFMEYLEKGKVFFLGDGVPKSKEILSHPNSRFVPLFNSSQSIGELAYKKFLKADFESLAYFEPNYIKEFRI\nAt Epoch: 1000.00\nLAAEARGDRAEAARILGAGAANLVGLLDIDRVVLGGRTVAADEDAYVRGVRAVIADRAARGAGGAHVTVTVADGGDRPVAEGAAQLVLA\nAt Epoch: 2000.00\nARKIGIDLGTTNLLICVDNKGILVDEPSIITVDATTKKCIAAGLDARDMLGRTPKNMICIRPLKDGVVADFEATDMMLNYFLKKCDLKGMFKKNVILICHPTKITSVEKNAIRDCAYRAGAKKVYLEEEPKIAALGAGLDIGKASGNMVLDIGGGTSDIAVLSLGDIVCSTSIKTAGNKITQDILENVRIQKKMYIGEQTADEIKRRIANALVVKEPETITISGRDVETGLPHSIDINSNEVESYIRSSLQEIVHATKTILEVTPPELAADIVQHGLVLTGGGALLKNLDQLMRNELQIPVYVAENALKCVVDGCTIMLQNL\nAt Epoch: 3000.00\nNSLPSGDQHKAQQLTADYLGALKRHLIDSLKNQLGEHHAKATPLQFILTVPAVWSDAAKEKTLQAAETAGLGQHAPILMISEPEAAATYVLFRKELGGLSTGDTFVVCDAGGGTVDLISYTIEQLEPALQVKEAAPGSGGLCGSTYLNRRFQEFLVTKLGQEEGFDNETVGDAMKKFDEEIKREYSPNVPNPNYWVPVPGLATNPRLGIRRNKMTLPPDDVREILKPVIDEVVQLVRKQIQSTEREVKAVLLVGGFGGSQYLLERLKETVTKATVILQ\nAt Epoch: 4000.00\nHIAVDIGGSLAKLVYFSRDPTSKELGGRLNFLKFETARIDECIDFLRKLKLKYEIINGSRPSDLCVMATGGGAFKYYDEIKGALEVEVVREDEMECLIIGLDFFITEIPHEVFTYSQEEPMRFIAARPNIYPYLLVNIGSGVSMVKVSGPRQYERVGGTSLGGGTLWGLLSLLTGARTFEDMLSLAERGDNTAVDMLVGDIYGSGYGKIGLKSTTIASSFGKVYKMKRQAEQEAEDTGNLKEDSSQEHGRSFKSEDISKSLLYAVSNNIGQIAYLHAEKHNLEHIYFGGSFIGGHPQTMHTLSYAIKFWSKGEKQAYFLRHEGYLGSVGAFLK\nAt Epoch: 5000.00\nQPLGSFLFLGPTGVGKTELAKALAYELFDDEKHMVRIDMSEFMEQHSVARLIGAPPGYVGYDEGGQLSEAVRRKPYSVVLFDEVEKAHPQVWNVLLQVLDDGRLTDGKGKTVDFSNVVIIMTSNLGSQYLLAEAQLETISQHVKDSVMGEVRKHFRPEFLNRLDDM\nAt Epoch: 6000.00\nVNGVSFSVEAGETLAIVGESGCGKSVTSLSIMGLIASPGTITGGEITFQGRDLVKLSRKELRKLRGNEMSMIFQEPMTSLNPVFTIGNQLAEVFRVHQGTSKAEAKQKSIDMLQRVGIANASKLVRQFPHQLSGGMRQRVMIAMALACEPKLLIADEPTT\nAt Epoch: 7000.00\nYQHEGLDWLAKLYANQTNGILADEMGLGKTIQTIALLAHLAEEHHIWGPHLIVVPTSVILNWEMEFKKFLPGFKVLSYYGSVEERAQKRKGWSNPDIWNVVITSYQLILKDLPAIRVPEWHYMILDEAHNIKNFNSQRYQAMIRLKTHARLLLTGTPLQNSIIELWSLLTFLTAGQDGQGMGDLEEFTEWFRRPVDEIFVDGKSKLGNEAQDIVNKLHHSLRPYLLRRLKSSVEKQLPGKYEHTVICRLSKRQRQLYDAFMGLSDTKAKLTSGNMISVSQALMSLRKVCNHPDLF\nAt Epoch: 8000.00\nKQKNFRQFCFPKKYEFQIPQKFLAEFINPKTPYTGILVYHRIGAGKTCTAINIAENFKNKKRIMVVLPASLKGNFRSELRSLCADNNYLSANDRQKLKELEPSSQEYREIIQKSDKLIEKYYTIYSYNKFVDLIKNNLLNLTNTLLIIDEVHNMISETGTYYESLYKIIHSSPDDLRLVIMTATPIF\nAt Epoch: 9000.00\nFVIGIGKNGVDCVLRCMHLTEKRFGKDPKKVRFLCIGEETPLGERSYEGSAPGDGFTLPIDPEEAIYKYLNNPAKLPESAQIWFDSGLKNYSPAAPTYGLTKRQCGRIALFHYLKQIMKLTGEAMADFSGSDRSLEIVITGNLGDVFCGGMFIDLPYILAKLFSDAPYPVKFTGYFFAADTASLVETDQRDVGCYQANTIVAKAELDKFQLHRKRFTQKYSRTFEVDSDKPPYSACFLIPAADSYGLTMSRTAEKILNRMEIIFSKDDDAERIISYNMLRPEAAHDFRYLAFNVMACEIPTGKIMSYLAIKLFERLNR\nAt Epoch: 10000.00\nIIKVIGVGGGGSNAVTHMYKQGIVGVDFAICNTDAQAMEMSPVPTRIHLGPDLTEGRGAGSKPNIGKLACEESIDEVRKYLENNCRMLFITAGMGGGTGTGAAPIIAKAAKEMDVLTVGIVTLPFTFEGRRRTNQGMEGLLELKKHVDTLIVISNDKLRQIHG\nAt Epoch: 11000.00\nCQAGQTGNVFWELYCLEHGIQPNGPMPSDKTIGGDDSFNTFFSVMAVDKHVPVFVDPAPMIIDGVCTGTYCQHSHPEQSNTGKEDAADNDQRHYTLDKRIINLILKPVHKSSGFLVFHSFGGGIGSRFTSLLIEWKSKLEFSINVPPPRFPQLYLSPKNIFTTQTILEHPDFAFMLNYEAS\nAt Epoch: 12000.00\nLVIGMGSTGTEILEALADRIDWEVGGLQRAPWLEFLAVETDVAKPNRFNGTDDFKTLGIPATAWRDILHRPEIHEASIALNTWADAETLAQLPAQSIDSGAGHIRMVGRLALLYPPNYSEIKNAITQRVARLRNLTDAQAKAALNVNNAGLEMDVQFAVNASTGQTGVRVIVVGTLCGGTCSGTASDIGILLRTVLEDEEKTLAMFTLPHPNLSISQKSDAEIWKTNAYHALAELNQYHLHTDTERYKTIKFPDKPEGSPVLPHDAMPYDLVYLLRPNSTENVDLMRLTQAIADRMFLNVFVPETDPMAYMVNAGPVTVQQGRAFAFSTFGLSTIEYPMRRILEALKYRTLVHAVD\nAt Epoch: 
13000.00\nEVISIHVGQCGVQVGNAVWELYCAEHAVKTDGSLYEHPHDQEWVETFFNLSEKGRYVPRCLFIDLEPSVIDEIRVGPWRSLFHPDKLITGYEDAANNFARGYFTVGKVLLSPILNEVRRTIEQCDGLQGLLFFRSLGGGTGAGLTAAILDVLGDYRKYTKVEIPIYPAPSLSPAVVEPYNCIFGEHFAMEDFNMGLLMDNEALYDVCS\nAt Epoch: 14000.00\nGVAIMSTGYGEGENRVKHAIDEALHSPLLNNDDIFNSKKVLLSITFCAKDQDQLTMEEMNEINDFMTKFGEDVETKWGVATDDTLEKKVKITVLATGFG\nAt Epoch: 15000.00\nPRIHFPLATYSPLFSADKAHHEQNSVMEMTFACFENGNQMVKCDPKEGKYMACCLLYRGDVAPKETSGAVAAIKTKRTIQFVDWCPTGFKLGVCNEPAACVPGGDLAKVTRSLCMLSNTTSIASAWNRLDH\nAt Epoch: 16000.00\nGMAMMGSGFAQGIDRARLATEQAISSPFLDDVTLDGARGILVNITTAPGCLKMSEYREIMKAVNANAHPDAECKVGTAEDDSMSEDAIRVTIIATGLK\nAt Epoch: 17000.00\nGVAHMGIGVGKGENAAQDAVRAAIESPLLETSIEGAENVLLNITGGSEFSLVDMGEVSSIVRDLVSEEANIIVGTAMDDNLKDEIKVTLIATGLD\n(18000, 128)\n../../out/201027/embedding/seq2seq/pfamA_motors_balanced_raw.npy\nloaded dict file for weights ../../data/first_try/seq2seq_encoder_df_dev_201012_230k.pt\noutput embedding for pfamA_target\nAt Epoch: 0.00\nPDSAPIVIDNGASTFRIGWAGEAEPRVSFRNIVQRPRHRSSGETVTVVGDTDPALMKYFDCTRTSIRSAFDDDVVYQFEYMEYILDYAFDRLGATSEVGHPILMTECECNPSFSRARMSELLFETYGVPSVAFGIDDVFSYKYNQKLGNCGEDGLAISCEFGTCHVVPFLKGQPVLGACCRTNVGGSHITDFLRQLLSLKYPYHVANFSWEKAEELKKEHCYIAADYMSELQIFKNNKEEAEEKTRYWQLPWVPPPRDEPPSEEELARKAAYKEKAGQRLRDMAAAKKSQKIADLEEKLSGLEDLMDHLDGADEQEATSILGRSGYLSQQEIKSDILKATQSLRKAKGESNGNEENADASGADKYPLVSVPDDMLTPEQLKEKKKQILLKTTTEGKLRAKQKRAEE\nAt Epoch: 1000.00\nFRPVIIDNGSGRIKAGFASDERPRFICPNVVGEVKHKKFFSFSESQQCYVGNDALAHRAILKLSYPIKHGVISDWNGMEKVWSSVITGLGVSLKHHPVLLTEAPLNPKAKREEVCERFFEGFDCPAFYIGIQAVMSLYSTGKITGVVVESGQGVSCSVPIYQGYAIWHAIKRLNLAGHELTEYLSKLLRERGYCFKSSAEYEIVRDMKEKHCFVALDYEEALNKAAMSDELHVSYEMPDGQIVLIGSERFRCLEALFRPSLLGLEDVGIHWMVYNSIMKSDLDIRKDLYANIVLSGGTTMHEGFQERLQAEVVALAPRTVKVRVIAKPEVWTFGSVL\nAt Epoch: 2000.00\nMKEKHCYVALDFEQESNHNIKHSYELPDGQIIEIGAEIFRAPEVLFQPMMIGLEQSGIHEMAFNSIFKSDLEIRRDLYGNVVLSGGTSMLPGIADRLQKELMHLIPPNMMAMVVAPSERKNSTWTGGSMLASLSTFQERWIPKEAYDETGPGIVHRYCF\nAt Epoch: 3000.00\nLGIQRRSVLEHGLVADWDVMEEYWCHLFNRRVCVEPQDVGVLLTEPAVTPYEQRERTAEILFESFGVPKLFIGSQALFLLHSVGDRCDTAVVVESGAGVTQVVPIVAGYAVAAAARRFPVAGLDVTQYVLNNLREHEQGIEMEQALEVAEQVKVRYGCMAKDFARECAEAESKLPSYAIRNTELHTRAGAPYSIDVDYEQLLVPETLFQPDSLAAPSTVTASLFGGLPAVIDAVVWSCPMDCRRSLYANVIVSGGNTRLPYFAKRLHGALRHALDERATGVIAASGGPLGRQVEYEVNVRDYSQAMHAVWRGASAFAASPEYETSAVTRAAYMECGAAVMHQHH\nAt Epoch: 4000.00\nARLAPLVIDNGTGYSSQETQILRSSFRTAIATRGTSGGGSASGPSITGRPSIPSKPGALSASSNIATKRGIDDLDFFIGDEAIANSKTYNVSYPIRHGQIEDWDLMERYWQQTIFKYLRAEPEDHHVLLTEPPMNAPENREQTAEIMFEGLNIQGLYIAVQAVLALAASWSSNKVTDRTLTGTVIDSGDGVTHVIPVAEGYVIGSSIKHIPIAGRDITYFVQQLLRDRNESLNIPVDESLRIAEKIKEDYGYVCGDMVKEFRKYDSEPEKYIIKHEGFDTRTNKAYTIDVGYERFLAAEVFFNPEIYSSDFLTPLPEVVDNVIQTSPIDVRRGLYKNIVLSGGSTMYDHFGRRLQRDLKTIVDDRLYASEVASGGLIKSSGMDVNVITHKRQRYAVWFGGSLMASTPEFYSHCHSKADYMEYGPSICRRYQ\nAt Epoch: 5000.00\nSQDRKVVVCDNGTGFVKCGCAGPNFPEHIFPALVRRTVIRSTTKDLMVGDEASELRLMLEVNYPVGNGIVRNWNDMKHLWDYTFGAEKLIPKFVYVAIQTVL\nAt Epoch: 6000.00\nMMKLVQNKAAYVALNIQQELELAKKIPSPVNEEYELPCGHFMNFRSQKFRNPEALFQPSSARFVKDRENVGVRKMIFNSIMKCDIGIRNYLFKNIMLTVGSTLFPGFVEGITKEILELGSSTLAFKFSDLIAREINHKFANKMFRNVAPPNRMYNAGVGGPALALLNTFEQASPFKTQFY\nAt Epoch: 7000.00\nMTEQHNIHLNTSYQLPDGHVIRIGSERFRCPEALFQPLLLRCLWSHVEMFVVSIMKCDLDMRRKLYENIILSGGSTMFPGMGQRMTKELRVLVVWSRPVPLYSSWL\nAt Epoch: 8000.00\nVGLDIGTTKICAIVGRKNEFGKLEVLGMGKAESEGVVKGIVFNIDKTVYAIEKAIKDAGDQAGIDIGVVNVGIAGQHIRSFIQHGGITRTSKEDEITIADVERLTQDMYRMVVPPGSQIIHVMPQDYMVDYEEGIKEPVGMSGVRLEADFHIITAQTNAINNINKCVRRTGLEIDDLILEPLASSLAV\nAt Epoch: 
9000.00\nALIDVGAGTSDICVTRDGSIIAYGMIPMAGDELTEVLVHEFLVDFATAEQIKRASTEGGNITYEDIMGISHTIKSEDVYKLTDPVMKKISGEVASKIKELNGDKSVSAAFVVGGGGKIHGFTEALSKDLDIVSERVALRGEEVMKNIVFEQNDIQKDSLLVTPIG\nAt Epoch: 10000.00\nTVIDLGYNSSRTIIFKDGIPKLFYSFPYGIKYILKDISNVLKVSEKEAHRLLTEEGACLRDTRTIKKVEFQPITGTGYSYTSLGLLNKIIYARVREIISRLNGELSRISYEKTYEIGALQGGIVLTGGGSKIRNIDQTIRELMGENYRKSSLVSLDYFRDVPEEIKKDSTYLSVFG\nAt Epoch: 11000.00\nACIDMGGGVTGVSLFLKKHMLFADAVRMGGDLVTRDISQGLRVPLPVAEWLKTRHGGLEATGRDDREMIDVTGEPGEDWDGERRFVSRADLIGIMRPRVEEILDGVREILEAAGFDQMPSRQVVLTGGASQIPGLDTLAMRILGYNVRIGRPLRIQGLAQQHTASCHAATVGLA\nAt Epoch: 12000.00\nEAKRVAAQFAFSSDDVRRATKEFINQMEEGLQKDHTDLSQIPTYVTAVPNGTEKGLYMAVDLGGTNFRVCSIMLHGNSTFTLTQTKVAIPRELMVAKTSKELFSFLAKQIELFLKAHHNEHYQGHIRRRKTTSLEEGYRDEEIFNLGFTFSFPVHQIGINKGVLMRWTKGFDIPDAVGKDVCALLQAEIDELHLPVRVAALVNDTVGTLMARSY\nAt Epoch: 13000.00\nKVENMLSGIHLSEEVVSRVKSVFLSEIELGINEEPSSLQMENTYVPELPDGTEEGLFLALDVGGTNFRVLLLELMEGRLVREEVKHYHITDELRLGPGIDLFDFLATCIADFVKEFNIADQTLPLGFTFSFPMHQRSMDCGCLVTWTKSFKCAGVQGEDVVEMLREAIRRRGDIKVDVVAVLNDTTGTLMQGAL\nAt Epoch: 14000.00\nAGLLKKFEAPLADVPGIARAFEVIYHSLALTASNQFLPTPIRALPTGEEKGRFLALDLGGTNLRVAVVRLYGGDGLKVCTQRSWSIPEHFKSGAAEVLFRWVADRIGDVVGEYLGDVGSEERERILSEGMELGITFSFPMEQTTHDSALLMPMGKGFTFTTTNDLSSLLKMAYDDLLSTTTPAHPLPKLDIVSITNDSISTLLSAAY\nAt Epoch: 15000.00\nVMGLLLGAGCNATVPMLIDDLHESKVRHIRLADPKAVETLVTTEWTLRAASEPLSNLNLITSWDSQLNASGDRPGFQPLEYMIAGRYLGELVRIIVHDYFHRILAISKEDLPDKLMKPYALTTEFLSLVVAPSQSGEELLADLERELPSPPLSGWKWTPSLADIVRATTTKIQRRAASLIAAASVGLLACTREIKLADLKEGKSVAETPVVCPSAVPTADPIALPHRSPGSNSPPKVKGQNNPEELVIAFSGGLIQHWPGFRESIQWHIDRLVLRGGPQELGKSIFLREVSDGGLVGVGVLAGT\nAt Epoch: 16000.00\nEIGLIVGTGTNACYMEELKNVELLDGDEGQMCVNMEWGAFGDNNCLEDITTSFDHDVDTFSINPGKQRYEKMISGMYLGEIVRQILIVLTRRGILFGGKISERLLTRDLFPTRFLSLIESDTLGLVQVRSILTELGLRSTCDDTMLVKEVCTTVSRRAAQLCAAGVAAVVEKMRANRGLDQLKVTVGVDGTLYKLHPHFAGVVQETVKILAPKCDVTFLQSDDGSGRGAALITAV\nAt Epoch: 17000.00\nRLGVIVGTGTNACYMEKLENCELWDGDDQEPRQVIVNTEWGAFGDNGVIDFVRSHYDWEVDEESLNPSHQKFEKMISGMYMGELARRVILRLAREHLIFNGRLSQKMKTAYAFKTKYISEIESDPKGCFDETRKVLAKLDQVGSDDDCQCLKLVVSRVSSRAAHLVSAAIATVLNKMKRPHTTVGVDGSVYRYHPKFHQLMEAKIAELTNPDYKFDLMLSEDGSGRGAALVAAV\nAt Epoch: 18000.00\nQVDIGIDLGTANTLVYLRGHGIVMDEPSVVAVTRGSHTVLNDGAAVGLEAKKMLGKTSYSVDVIRPLREGVIANFPITEAMLRYFISRVKARRMFSQTRVVIAIPFGITHAEMKAVYNSTMRAGADKVHLIEETLAAGLGSGLRIDDPTANLVVDIGGGTTGISVISVADIAFGATVRCAGDHMTDAVSDFIRERYKLQIGQQTAEQLKIELGSALPQNEHAAMQIRGQGENGRPATIEVSADDVREALRAPLHKILRGIDWVLENTPPELSADLVDRGILVTGGGALLPRIDDLISDHTGLNVTVADDPLTCVARGAGAYLDTINWQRS\nAt Epoch: 19000.00\nTKDMGIDLGTANTLVYSKGKGIVLREPSVVAINNLTKKPLAVGTEAKQMIGRTPGNIVAIRPLKDGVIADFDITQTMLKKFIEKITNKSAFTSPRIIVCFPSGVTEVERRAIEEATKQAGAREVVLMEEPMAAAIGAGLPVDEPTGSMIVDIGGGTTEVAIISLGGIVTSKSLRIAGDELDQAIIGYIKREYNLMIGERTSEQIKMEIGSAFKADEFEEEASMEIKGRDLISGLPKTVVVTESQIREALKEPVAAIIEAIKTTLEKTPPELAADIMDKGIMLAGGGALLKGLDALINHETHMPVHIAESPLDCVALGAGKALDKFDLIRQ\nAt Epoch: 20000.00\nKSDIGIDLGTASVLVYIKGKGVVIQEPSVVAIDRDTNKLLAVGEDARRMLGRTPGNIIAIRPLKDGVISDYEVTQRMLKYFIEKAIGKNNLFLRPRIVVCVPSGITEVEKRAVIQASNQAGARKTYLIEEPIAAAIGADLDITEPRGKMIIDIGGGTTDVAVISLGGIVVNSSIKVGGNTFDTYITRYIRKKHNLMIGERSAEELKVVIGTAYKREKEVSMDIRGRYLLTGLPEIVQVTSSELLEALSEPLEAIVDAVKSVLEKTPPELASDIGEKGIMMTGGSSLLHGIDKLIKERTGIKVNIAEDPVSCVATGTGRSLESIDVLEN\nAt Epoch: 21000.00\nGTDIGIDLGTASVLVYIKGKGVVLKEPSVVATDNTKRKVLAVGEEARQMIGRTPGNIIATRPLRDGVISDYDVTERMLRHFIKKARGNSVSLLRPRVIICIPCEATEVEKRAVKDAALSAGAGKVYLIEEPVAAAIGAGLDISKASGSMIVDIGGGTTDVAVLSLGGMVVRSSIKIAGDKFDEAIIRYIRKKHNIMIGERTAEELKINIGTAYPRSEEVTMDIRGRDLVTGLPKNITVSSEEMREALEETTSAIVDCVHSVLEHTPPELSADIINKGIIMTGGGSLLYGLDLLIQSRTHVTTTVAKDSICCVAYGTGEALENLDKFAE\nAt Epoch: 
22000.00\nKIKVIGVGGGGGNAVNRMVAMEVKNVEFIAINTDEHVLRLSKASQKIQIGEKLTKGKGAGSMPAIGQSAAEESKDEISGVLKDTDMVFVTAGMGGGTGTGAAPVVAKIAKDMGILTVGVVTKPFAFEGKRRMTQAEQGIAELSACVDSLIIVPNERLKYVSD\nAt Epoch: 23000.00\nEIISISVGQCGNQIGQQFWRTISQEHGLSMDGHSTNTASPLEKENLGVYFSESSDRYVPRAVLVDLESGVLDSVKSSSQGQLFRPDNFINAASGAGNNWAKGFYTDGTELIDEIIDTIRKESESCDSLQGFQLTHSLGGGTGSGLGTLLVSKIKEEFPDRMLATFSVFPSAKVSDTVVEPYNATLSIHQLIENADQVFTIDNEALFDICT\nAt Epoch: 24000.00\nMIAEDHGIGPDGIYSGSSELQRGRMEVFFRETEENKHFPRAVVVDLESDSLNAVLQSTHRALFQGDNFVSGRGGTGNIWAKGFYGEGRRYIDEVLEVIRKEADICEGLQGFNVAHSLCGGTGSGFGALIIEKIHEQYPNRLISTFSTVSSNRLLGVMKQPYNTILSLQHLAENANITYCIDSDGLHDISR\nAt Epoch: 25000.00\nLIKVVGVGGGGGNAVNRMIQSGLRGVEFIAINTDAQALLMSDADVRLDIGRQLTRGLGAGSDPEVGRQAAEEHREEIEEALKGADMVFITAGEGGGTGTGGAPVVAEIARGLGALTIGVVTRPFGFEGRRRAQQAEDGISRLREYVDTLIVIPNDRLLTIAN\nAt Epoch: 26000.00\nSIVTVQLGQCGNQIGFEVFDALFRDSRCSQGLCSKSENEAYQASCSERFFREEENGVPVARAVLVDMEPKAINQTLSKAAPSGGWKYGQHACFCQKQGSGNNWAYGYSVHGPKHEESIMNLIQKEVEKCDSLSGFFIIMSMAGGTGSGLGAFVTQKLQDQYSNALLIHENDAVHKIC\nAt Epoch: 27000.00\nLRACFWEIISDEHGIDPSGVYRGTADIQLERISVYYNEATGGRYVPRSVLVDLEPGTMDAANNSLKGGSSPSHGRAATPSLSPESLKARVKSERRRKQLGQRHYTEGAELVDSVLDGIRKECESCDCLQVSAAP\nAt Epoch: 28000.00\nLIIGLGGTGGRIIRALRKIIYQEFRTIHPPDVNIAYLYIDSDDEMMALDDPRWKILGHSVQLGIDSQLLIQGADLEERLNNIHNYPGIKEWIGNRGDWKDILRSFAGGRVYGGQKRRLGRFLLACNIDAFINQLTLQVNHLQRISNQAEVTFHICCGLAGGTGSGSVIDTIVQLRKHYPYSNQGLTYPLLVYAYLPEKNPNPKWDTGNYQANGYAALMELNALSAGRFDPTDLMGGKPVACGVAFNGLYLFTNQNEKNVIVGVENEIPQIVADFLYQKIIAVSKVAWTSLAFLEDAQNGDSTPETAAIPNSRLPERAKRLLTFGINRIAIPEEEIKEYLTYHFARQAAL\nAt Epoch: 29000.00\nNSEISGHELAADLVTNALANPLYTDDRNHAERCISFLHAGTDLTLGEVETVREEITSQIDSGVGLELFTADTTKMMGNKRRLTL\nAt Epoch: 30000.00\nGTALMGIGTGSGKTSAEDAAVAAISSPLLDAPVDEATGVVFNIIGGESLSLQEVDRAAKVIYNNVHEDANVIFGALVDDEITDGTVSITVLATGFY\nAt Epoch: 31000.00\nGTALMGIGSASGENRTAEATKKAISSPLLEVSIDGAEQILLNVTGGPDLSLFEAQDASEIIASASSDDVNIIFGTSINESLGDEVVVTVIATGID\nAt Epoch: 32000.00\nPRIHFPLATLAPIISAAKAQHEQNSVAEMTFSCFETGNQMVKCEPREGKYMACCLLFRGDVIPKDANGAVATIKTKRTIQFVDWCPTGFKLGICNEPPAAVPGADLAPVSRSLCMLSNTTAISSAWSRLNK\nAt Epoch: 33000.00\nPRIHFPLVSFAPVLSKSKSSHESSNVQEITNACFEPSNQLVKCDPKAGKYMATCLLYRGDVVNRDVQNAVSMLKNKKTIQLVDWCPTGFKIGLCYKPPHYVPDGDLAPATRSVCALSNTTAIAEAWQRIDE\nAt Epoch: 34000.00\nTASSPIVFILSPGSDPASDLMKLAESSGFGGSKFKFLAMGQGQDKVAASRGQWLMLQNCHLLVKWLKELEKALERITKPNPNFRLWITTNPIEDFPIGILQNSLKVV\nAt Epoch: 35000.00\nEPRTPMVGLLSMGSDPTTSIELLAKKHKKECKAISMGQGQEIHARRLMSNSLQNGGWVLLQNCHLSLDYLMEVMDQLVEAETVHEDFSLWVTCEVHPKFPISFLQQSIKFT\nAt Epoch: 36000.00\nRVRPPLDCERDKMLCNLSYLDEATMEIASFEPTAKGKSIAHTFTFDQVFDHSSEQESIFEMVSPLIQSALDGYNICIFAYGQTGSGKTYTMDGIPSNPGVIPRTVDLLFDSIKNYRHLGWEYEIKVTFLEIYNEVLYDLLSNEQKDMEIRMVKNSKNDIYVSNITHETVGSAGRLRELMQIAKMNRATAATVGNERSSRSHAVTKIELIGTHAKKQELSIGSINLVDLAGSESPKTSTRMNETKNINRSLSELTNVILALLQKQDHIPYRNSKLTHLLMPSLGGNSKTLMFINIAPLQDCFVESLKSLRFAATVNQC\nAt Epoch: 37000.00\nRCRPFNGRETARNAQCIVKMKGDQTILSPPSEVKGKAAKAASEGVKTFAFDKSYWSFDRNAPNYAGQDNLHEDLGKPLLDNAFQGYNNCIFAYGQTGSGKSYSMMGYGADPGIIPKICQDMFERIKVVQQDKNVGCTVEVSYLEIYNERVRDLLNPSNKGNLRVREHPSTGPYVEDLAKLVVQSFQEIENLMDEGNKARTVAATNMNETSSRSHAVFTLTLTQKRHDTDAGMTGERVAKISLVDLAGSERAQSTGATGARLKEGAEINRSLSTLGRVIAALADMSQGKKKTQVPYRDSVLTWLLKDSLGGNSMTAMIAAISPADINFEETLSTLRYADSAKRI\nAt Epoch: 38000.00\nRIRPLSTMERDSQGYGRCLRQESAKTLVWLGHPETRFTFDHIACEKISQENLFKVAGQPMVENCLSGYNSCMFAYGQTGSGKTYTMMGGIYELEGKLNEDCGLTLRIFEHLFTRIGMEEKSKRDVKLKYSCKCSFLEIYNEQITDLLEPSSTNLQLREDSKKGVYVENLTEHSVSTINDVVKLLLQGAANRKMAATYMNSESSRSHSVFTCIIESHWEKDSRTHLRFARLNLVDLAGSERQKSSGAEGDRLKEAANINKSLSTLGLVIMSLVDLAHGKHRHIPYRDSRLTFLLQDSLGGNSKTTVIANVSPSFCSANETLSTLKFAQRAKQI\nAt Epoch: 
39000.00\nRVRPQNEHELQGNCRTLIKVVDDKMLIFDPKTEENPFFYHGVAQKGRDLLKKQNKELQFIFDKIFNMQSDNTDVFEGSTKELICNLLDGYNCSVFAYGATGAGKTHTMLGNNEDPGITYRTVAELFSEIEKQGEHREFNLGVTYLEIYNENVQDLLHRSGPLHLRDDGRCGVIVAGLKIIAIHSAEELLTLLAKGNRNRTQHPTDANEESSRSHAVFQVYINITNKLDGQVRQVKLSMIDLAGSERASATGCKGPRFKEGANINKSLLALGNCINNLADGAKHITYRDSKLTRLLKDSLGGNCQTVMIANIAPSSFSYEDTYNTLRYADRAKKI\nAt Epoch: 40000.00\nRVRPFTVVESGNGESQECVTIEAPDTVVLKAPRSCQSNRQSEKSLPQTAQRFSFTQVFGPDASQRKVFEGSVRGLVRDVLEGGNCLVFTYGVTNAGKTFTFLGPDHDSGLLPRSLSVIFNSIEGRLYSRSDLKPQRCRDFSRLTPDQQAAESSSKKNLLRLLKEVTHIHTHTHTHTHTHTHT\nAt Epoch: 41000.00\nIIKIMQEEDKAVSSPEHPLQTNSLCIFGEACTNRDVYMKTTHPLIQHIFNGGNATCFAYGQTGAGKTYTMIGTHQNPGLYALAARDIFRQLEVSQPRRHLFVWISFYEIYCGQLYDLLNRRKRLFAREDSKHVVQIVGLQELQVDSVELLLEVILKGSKERSTGATGVNADSSRSHAIIQIQIKDSAKRTFGRISFIDLAGSERAADARDSDRQTKMEGAEINQSLLALKECIRALDQEHTHTPFRQSKLTQVLRDSFIGDAKTCMIANISPSHVATEHTLNTLRYADRVKEL\nAt Epoch: 42000.00\nRCRPFSDEELRSNAPQVVTCNDYSREVAVSQSIAGKHIDRVFTFDKVFGPSARQKDLYEQAVTPIVNEVLEGFNCTIFAYGQTGTGKTYTMEGECKRAKSGPNGELPPEAGVIPRAVKQIFDTLEGQNAEYSVKVTFLELYNEEITDLLAPEEISKVSLEEKQKKQLPLMEDGFDKRGVKSTDSCSEEMFDTMMNRARDGRSRPIVAEKRGSRR\nAt Epoch: 43000.00\nKTFVFSKNMNSKFLRRTKSIEKIQEIVKNEEKKNNTNQPSLNLELIQQNKPVIFVEPQNKCNQNIQNLKKYDQESKNYLRMRFKNRPERIKIGQTFIIYDETLKAKGKIIKDKQDSKPLNIHESKIDGIYVEGLSEYQCTHYYDAIQLMKRGEKNRKIRQTQMNNKSSRSHTILQFSIESTNNNNKNIMKRSKVNLCDLAGSEKINKNEIIQNDHFNELKNINQSLSTLGKIIYNLSCNQKLPMPFRESKLTRILQDSLTGNCKTIVIGNISPSLINIEETISTLKFVDRAKNI\nAt Epoch: 44000.00\nRFRPQNRREIESGGEPIVTFDSDDTCKLESQEATGSFTFDRVFDMASKQSDIFDFSIRPTVDDILNGYNGTVFAYGQTGAGKSYTMMGTDMEDEQGRGVIPRIVEQIFASIVASPSNIEYTVRVSYMEIYMERIRDLLVPQNDNLPIHEEKNRGVYVKGLLEIYVSSVQEVYEVMRKGGNSRAVAATNMNQESSRSHSIFVITITQKNVETGSAKSGQLFLVDLAGSEKVGKTGASGQTLEEAKKINKSLSALGMVINSLTDGKSSHIPYRDSKLTRILQESLGGNSRTTLIINASPSSYNDAETLSTMRFGMRAKAI\nAt Epoch: 45000.00\nRVRPPSKRETAEGSRIILNVDEKVARIKNIRLDHKPDGCEDTRERLIEFGFDSCYWSVDPEDPKYASQEMVFQDLGTLVLSEAISGYNVCLFAYGQTGSGKTYTMMGTPASIGLTPRICEGLFSYDEGSPETPNSFRVEVSFLEIYNERVRDLLHKSEEKKPYTLRVREHPERGPYVQGLSQHVVTSYEQVVALLEEGMENRITAATHIHDASSRSHAIFTIQYTQAMLEDNLPTEITSKINLVDLAGSERASPEYCKDRLTEGSNINRSLVTLGIVISTLAQNSQMTSSCQSINSIASDGDSGSPSGGSTNGSKRQPYVPYRDSILTWLLKDSLGGNSKTIMIATVSPASSSYNETMSTLRYASHAKNI\nAt Epoch: 46000.00\nRVRPTSGHSAWNSPQGSNSIQLDPAHARNPNLMSSNPSSLSTAPPTTYHFDSILTGIPNKPIYTTVARSHVHAAMEGFNAVIFAYGQTASGKTYTLSGDENEPGIIPRAMRDVFGFIKRTPDREYLLRCSYLEIYNETIYDLLAPPMGGSGSQVQIQGGTGMEVILTPLREEVVTSLKGVNEVLRRGERHRRTACTDWNERSSRSHSVFRLVIESRERGSGPGPLDDADMRAPSRSGRATPGNGRATPGPGNAGSRLQARGGKSVQTSILSLIDLAGSEKATSDKDRTREGKYINTSLLTLGSVIGTLSENAAKNKSDYVPYRNSKLTRMLQPSLAGNARISVICTINPDPSAVGETSSTLGFAKRVKGV\nAt Epoch: 47000.00\nVKVAVHVRPLIGDERLQGCKECVSVTPGKPQVQIGTHSFTFDHVYGSGGAPSTAMFEECIAPLVEGLFQGYNGTVLAYGQTGSGKTYTMGTGSKDGSQTGLIPQVMNALFSKIETLKNQTEFQLHVSFIEILKEEVRDLLDSVSLNKVENGNGHAGRVTVSGRQPIQIRETSNGAITLAGSTEIFVRTLQEMSTCLEQGSLSRATGSTNMNNQSSRSHAIFTITLEQMRKIHSVFPGNDTPDEDMGEEYFCAKLHLVDLAGSERAKRTGSDGVRLKEGIHINKGLLALGNVISALGDEKKRKEGVHVPYRDSKLTRLLQDSLGGNSKTVMIACISPADINAEETLNTLKYANRARNI\nAt Epoch: 48000.00\nRLRPLNEKEISRNDALDWECINDTTIIFKNHLPIPERSMYPSAYTFDRVFRSDSTTREVYEAGAKEVALSVVSGINSSIFAYGQTSSGKTFTMSGITEYTMADIYDHIERHKEREFLLKFSAMEIYNESVRDLLSSDTAPLRLLDDPERGTIVEKLTEETLRDWNHLIELLSLCEAQRQIGETALNETSSRSHQILRLTVESSAREFLGNDNSSVLTSTVNFVDLAGSERASQSLSAGTRLKEGCHINRSLLTLGTVIRKLSKGRSGHIPYRDSKLTRILQSSLGGNAKTAIICTMSPARSHVEQSRNTLLFASCAKEV\nAt Epoch: 49000.00\nPSPRPSISQTPIRTKLQLVDLAGSESVGMSGVSGAALWENSCINRSLSALSDVLGALAEQRPHVPYRNSKLTHLLQDSIGGDAKLLVMLCVSPTQRFLTESLQSLGFGSRARQI\nAt Epoch: 
50000.00\nKEYTFDGVFDQESNQKEVYEDVGKPVLKDVLQGYNGSILAYGQTGAGKTHSLLNSGMGVDGKPDPKQAGLLPRLVAALFVHVGADVKHVYTVEASMLQIYNEQVDCLLGDDREKAQGLQVTGKSEVKGLVWHKCKTPNELLQCFQKGRMNLVYAETKMNKSSSRSHAVFQIKVSKRPRALDKTGTKGGKVEMKATFGKLTVVDLAGSERIKKSGVTGTQLKEATNINSSLLSFGNIVQALAEKKKFIPYRDSKLTRILEDSVGGNCKTSLLVCCSPSAESSDETVSTLEFASRAARI\nAt Epoch: 51000.00\nVEDMATLAQLHEGSIMHNLHIRYKKDNIYTYIGSILVSVNPYKSISGLYDITSMEQYSSYHLGERPPHIFAIANECYHCLWKRNDNQCVLISGESGAGKTESTKLILKYLSAMSQHSLDVTAKENVSYVEQAILESSPIMEVFGNAKTIHNNNSSRFGKFIKLNFCQKGNIQGGRIIDYLLEKNRVVRQNPGERNYHIFYALLAGTDEAQKEMFYLSEKENYYYLRQFGCIVDNAIDDQRTFQEVMTAMRVMKFSSEEILEVLKLLAGVLHLGNIEFVIAGGAQVSSKNALGRAAELLGLDSMKFTEVLTHRSMILRGEEISTPLTVEQGIDSRDSMAMALYSQCFSWIIKKINNRIKGKEDYRSVGVLDIFGFENFEVNRFEQFNINYANEKLQEYFNKHIFSLEQLEYNRDGLIWEDIDWMDNGECLDLIEKKMGILALINEESHFPKGTDDTLLAKLHSHHSKNPFYVKPRVLDHYFGVKHYAGEVLYHVKGILEKNRDTFRCDVLNLLCESRLDFIYDLFEHASSKINEDTFKSGTKHQKPTVSSQFKNSLHSLMATLSTSNPFFVRCIKPNDQKMPDQFDQTIVLNQLRYSGMLETVKIRRAGFPIRRQFEDFCARYKILMRNLSLPDDLKAKCAALLYCYDNTNTDWQLGRTKVFIR\nAt Epoch: 52000.00\nMDTKLPSKLFIGVLDIAGFEIFQLNSFEQLCINFTNEKLQQFFNHHMFVLEQEEYKMQGLEWTFVDFGLDLQGCIDLIEKPLGILSILEEECMFPKTTDITFNAKLLNNHLGKSPNFAKSKPDKKRKYESHFEILHYAGVVPYNLNGWLDKNKDPLNETAVELFQQSSNELVAMLYQDYVRAY\nAt Epoch: 53000.00\nVSDLTLISKISNEAINDNLKIRFQNGEIYTYIGHVLVSVNPFRDLGIYTDAVLHSYQGKNRLEAPPHVFAIAEASYYNMKAYKENQCVIISGESGAGKTEAAKRIMQYIASVSGGSNSSIQEIKDMVLATNPLLESFGNAKTLRNNNSSRFGKYLEIQFNDQGEPVGANINNYLLEKSRVVGQVKEERDFHIFYQFTKAASETYRSTYGVQQPNTYAYLSKSKCYDVNGIDDKADFKDTLNAMKVIGMSQQEIDEVFRMLAAVLWIGNVSFRENDEGNAEIVDQSVVDFVAYLLEVDSSHVNKAMSTRTIETARGGRRGSTYDVPMNIAQASSVRDALSMAIYTNMFDWIVQRINASLKARSAISHSIGILDIYGFEIFEKNSFEQLCINYVNEKLQQIFIQLTLKTEQEEYAREQIQWTPIKYFDNKVVCELIEEKRPPGVFAALNDACATAHADPAAADGTFVQRLNALSSNPNFAPRQGQFVIKHYAGDVNYEVAGMTDKNKDQLLKDLLNLVGESGNAFVQTIFPDRIDQDNKRRPPTAGDKIKASANDLVATLMKCTPSYIRTIKPNENKSPTEYNDGNVMHQIKYLGLQENVRIRRAGFAYRQTFEKFVERFYLLSPKCSYAGEYTWTGDAKSGVKQILKDTSIPAEEWQMGMSKAFIK\nAt Epoch: 54000.00\nVEDVCQLPHLNESSVLHVLRQRFANNLIHTRAGPVLLIVNPMAPLALYSEKVASMFRGCKAEDDMPPHIFAQAQTAYRAMLETRRDQSLIFLGRSGAGKTTSFKHALYYLTLASRQELQPSVRALTVEKVSAIGTIMDAFGHERTSLNGNATKFTQIFALDFDHSGQIVSGSIQIMPIDRMRPSGGSNRGRSGVPRWSFLAGVDGGALRKELLLEPAAGESSPGGSATVEQESIDYQRLCQAFRVLNIDQAAVRGIWYVLAAIHHLSQSGAVIVAGRVQFVNPRSAQKAAMLLGIPMEDLLSYVFPENGSGGATKATLNTAVVVECLTAFTEALYTELFYTIVGLINKSIAAVTPHQTIGSVLLVDVPGFQNPASVGGGTAASTLADLRFNYLHERLQLLFHNAMLVQPRARYAQEMVTVEDSL\nAt Epoch: 55000.00\nEDKLKLERDFSRYNYLSLDSAKVNGVDDAANFRTVRNAMQIVGFMDHEAEAVLEVVAAVLKLGNIEFKPESRVNGLDESKIKDKNGSFWLDVK\nAt Epoch: 56000.00\nVDDLMQLSYLNEPSVLYNLQYRYDRDMIYTKAGPVLVAINPFKEVQLYGNVYIEAYKSKSIDSPHVYAIADTAIHEMIRDEVNQSIIISGESGAGKTETAKIAMQYLAALGGGTGMEYEILQTNPILEAFGNAKTARNDNSSRFGKLIEIHFSPNGKISGAKIQTFLLEKSRVVQCAAGERSYHIFYQLCAGASKSLRDKLNLRSVEEYKYLKQSSCFVINGVNDAERFQSVMAAMKVVHIRQQDRDNVYAMLAAILWLGNISFNVIDNENHVEVVADEAAQTVSKLLGCDIQDLKLALCTRKMRVRSDTIIQKLTLTQAIDTRDALAKSLYASLFEWLVEQINMSLEVGKRRTGRSISILDIYGFESFEKNSFEQFCINYANERLQQHFNRHLFKLEQEEYIQDGIDWARVDFEDNQNCLKLFEKKPLGLLSLLDEESTFPNGTDLTFANKLKQHLHSNSCFKGERGKAFTVSHYAGEVVYDTTGFLEKNRDLLHIDSIQLLASCSCHLPQIFASKMLTQSDAQEGSPYRSSGVDSQRLSVATKFKGQLFQLMQRLGNTTPHFIRCIKPNKLQLPSTYEQSLILQQLKCCGVLEVVRISRSGYPTRMSHQKFARRYGFLLLENVASQDPLSVSVAILHQFNILPEMYQVGYTKLFFR\nAt Epoch: 
57000.00\nIDDLTSLSHLNEPAVLHNLQVRYGMHNIYTYSGIVLVALNPFARVGVYSQDTLEAYAGRMRGELEPHLFAISEDAFQGMVRDRKNQTIIVSGESGAGKTVSAKYIMRYFASAHEAQRDVEHQEQTAMSGVEEQILATNPVLEAFGNAKTTRNDNSSRFGKFLEIRFSERHAIEAAFIRTYLLERSRLVYQPPTERNYHVFYQLLASDRALDEAQREALGLQGATWETFHYTRQGGSGEIVNVDDAREFEKTSAALGVVGVDATTQQQVFALLAALLHMGNIEITGSNSAAVADDDAAFAQATGLLQVDAAQFRKWLTRRQIVTRSEKIVSNMTRAQALVVRDSVAKYVYAHVFEWIVRTINGVLTGGGAGPAASFIGVLDIYGFEHFEHNSFEQFCINYANEKLQQNFNRHVFKLEQEEYQREQLANWTFVDFQDNQPCIDLIEGRLGVLALLDEECRLQQGSDAKFAEKLARQFAEQPVRQLPADSPAAFFRKPRFGADSFTIRHYAHDVAYEAAGFLEKNRDSVPDEIQNVLRASSAPLLAEVLADTSAAAADSGTATAVTASQTPARLSVRAPRRPTLGAVFKHSLAGLMETIEATESHYIRCIKPNDAKHAWVFDAPMVLSQLRACGVLETIRISCAGYPSRLPIPDFIHRYRVLLSDPGAPLRAASLDAFREFATQTLAEAFGARDCWQVGLTKVFFR\nAt Epoch: 58000.00\nERLNDTSELISYVDNQECLNLIASRSGGVFSTIDAISRLPGPSDRKLNERLHTLFKRHPCFPTPHPKEAHEMFCIVHYAGMVKYHIESFIDKNNNIISAQFEELMAISKSSVLQAQPLLSSASANSSPPTSQKGGSVTHMFSVQMRGLASELEGTRCNFIRCIKPNAEMEVGVFDRASVVDQLRCSGTVQACSVLRVGLPTRILYAEVVDTYLPLVGQETYEKFNCNERLFTQAICAALAFPSDAYRLGDTRLFFR\nAt Epoch: 59000.00\nINDLALSPSTSDDVLVSVLRERFLSDTVYTAIGSSALVVVNPYKYVSSNADNVLLDYAAEYRDTDAHDDRHVKPPHIFQLANNAYFHMRRTNMDQCILLSGESGSGKSETRRLAIKSILELSVTSPGKKGGKMATQIPSGEYILESFGNARTLQNPNASRFGKYTELQFSERGRLCGIKTLDYYLEKSRVAGAPGGERNFHVFYYLCSGASQEERQHLKLADKSTFRYLGQRPTGGREAVTEDSQRFDRLKMAMKSVGLSKRHIAQTFQLLAAILHLGNIDFTMDKSRNEDAAVVKNVDQLEIVADFLGVQPHALEAVMQYKTKLVHKELCTIFLDPEGAGGNRDDLAKTLYSLLFALLNETMNQNLCRDDFLTFIGLFDLPGFQNISSSASRTNSLDQFCVNFANERLQNWVQKRIFERNNKEYEAEEIASLIPTIPFFDNSECIRLMSHQPGGLIHIMDDQARRQPKKNDHTMVEAFSKRWGNHSSFRAGQMDRSGFPTFTVNHYVGPVTYSAESWLERDTDALNPDFVSLLRGATLNAD\n(59149, 128)\n../../out/201027/embedding/seq2seq/pfamA_target_raw.npy\nloaded dict file for weights ../../data/first_try/seq2seq_encoder_df_dev_201012_230k.pt\noutput embedding for pfamA_random\nAt Epoch: 0.00\nNVVYVGNKEVMSYVLAVTTQFNEGSDEVVIKARGRAISTAVDTAEVVRNRFLEDVEVEDIKIST\nAt Epoch: 1000.00\nFEATYLVVSYKLDGIIRASGQVDDRGYIRGTKMKLMMDGNAIVDYMMVGTKFDGGENSVDNASGLFYSPYQEADEAGTFLVTSEPGSIQPVVGVITRYALSCFPDYADISQGAKPNG\n(1600, 128)\n../../out/201027/embedding/seq2seq/pfamA_random_raw.npy\nloaded dict file for weights ../../data/first_try/seq2seq_encoder_df_dev_201012_230k.pt\noutput embedding for motor_toolkit\nAt Epoch: 0.00\nMASQPNSSAKKKEEKGKNIQVVVRCRPFNLAERKASAHSIVECDPVRKEVSVRTGGLADKSSRKTYTFDMVFGASTKQIDVYRSVVCPILDEVIMGYNCTIFAYGQTGTGKTFTMEGERSPNEEYTWEEDPLAGIIPRTLHQIFEKLTDNGTEFSVKVSLLEIYNEELFDLLNPSSDVSERLQMFDDPRNKRGVIIKGLEEITVHNKDEVYQILEKGAAKRTTAATLMNAYSSRSHSVFSVTIHMKETTIDGEELVKIGKLNLVDLAGSENIGRSGAVDKRAREAGNINQSLLTLGRVITALVERTPHVPYRESKLTRILQDSLGGRTRTSIIATISPASLNLEETLSTLEYAHRAKNILNKPEVNQKLTKKALIKEYTEEIERLKRDLAAAREKNGVYISEENFRVMSGKLTVQEEQIVELIEKIGAVEEELNRVTELFMDNKNELDQCKSDLQNKTQELETTQKHLQETKLQLVKEEYITSALESTEEKLHDAASKLLNTVEETTKDVSGLHSKLDRKKAVDQHNAEAQDIFGKNLNSLFNNMEELIKDGSSKQKAMLEVHKTLFGNLLSSSVSALDTITTVALGSLTSIPENVSTHVSQIFNMILKEQSLAAESKTVLQELINVLKTDLLSSLEMILSPTVVSILKINSQLKHIFKTSLTVADKIEDQKKELDGFLSILCNNLHELQENTICSLVESQKQCGNLTEDLKTIKQTHSQELCKLMNLWTERFCALEEKCENIQKPLSSVQENIQQKSKDIVNKMTFHSQKFCADSDGFSQELRNFNQEGTKLVEESVKHSDKLNGNLEKISQETEQRCESLNTRTVYFSEQWVSSLNEREQELHNLLEVVSQCCEASSSDITEKSDGRKAAHEKQHNIFLDQMTIDEDKLIAQNLELNETIKIGLTKLNCFLEQDLKLDIPTGTTPQRKSYLYPSTLVRTEPREHLLDQLKRKQPELLMMLNCSENNKEETIPDVDVEEAVLGQYTEEPLSQEPSVDAGVDCSSIGGVPFFQHKKSHGKDKENRGINTLERSKVEETTEHLVTKSRLPLRAQINL\nAt Epoch: 1000.00\nMEDQEMHLKVRRVADKFTESMYFLANEPSVALYRLQEHVRRSLPELVQHKTDMQSWEEQSQGAIYTVEYACSAVKSMTNSSIYFKNIDSLLRQTISMKEQISNSQGRSPHVSAPSASS\nAt Epoch: 
2000.00\nMVIGTPVTTPLSKIVRTPSRVPGSRRTTPSKIREEKILVTIRVRPLSPKEQAAYDLIAWDFPDEQTIVSKNLNHERHTGPYSFDYVFDPTCSTSKVYEQGARDVALSALNGINATIFAYGQTSSGKTFTMRGITESAVNDIYGRIKLTTERDFVLKFSALEIYNETVVDLLNRESVSLRLLDDPEKGVIVEKQVEEIVKDEEHLKTLIGTVEAHRQVGETALNDKSSRSHQIIRLTIESSIRENSGCVKSFLATLNLVDLAGSERASQTSADGTRLKEGSHINRSLLTVTNVIRKLSCSGGKRSGHIPYRDSKLTRILQASLGGNSRTAIICTLSPALSHLEQSRNTLCFATSAKEVTTTAQVNMVVAEKQLLKHLQKEVSRLEAELRSPDPAASPCLRSLLIEKERKIQKMEEEMNELKRQRDLAQSQLELERRSKKELKGSDHHGPSRQVVKCLSFTPEDEEVSGASLSTNLGRKSLLERQAAIRRSTNSTNPSMLVHEIRKLEMRQRQLGDEANHALQLLHKEFASHRIGSQGATETIAKLFSEIKELQKISCIPEQIEIKDKASLKEEIARLRSQESNIASLEQKLENVQRSIDELVMHLPSCHESADSRTAPSKKKRVLPFNLSNTSNIPNIIRSPCSPMSPSSCNIVEGEIENRAPPECNNVGSAGDSFCSQLSTPVKSKDDNCTPGSRQSNSVNMKKMQTMFKKAAEDNIRSIKAYVTELKERVAKLQYQKQLLVCQVLELEANEAASDEADISDQSPLSWHLVFEDQRQQIIMLWHLCHVSLVHRTQFYMLFKGDPSDQIYLEVELRRLTWLDEHLAGLGNASPALLGDDAAGYVSSSIKALKQEREYLAKRVSSKLNAEEREMLYVKWDIPPDGKQRRRLQLVNKLWSDPLNMQNVRDSAEVVAKLVGFCETGEHVSKEMFQLNFVSPSDKKTWIGWNLISNLLHL\nAt Epoch: 3000.00\nMADEEDPWGFDDGGEEEKAASTQAGTPAPPSKAPSVASDHKADSVVAGTPANEEAAPEEVEEIKAPPPPPEDDGYRKPVQLYRHWVRPKFLQYKYMYNYRTNYYDDVIDYIDKKQTGVAREIPRPQTWAERVLRTRNISGSDIDSYAPAKRDKQLIQTLAASIRTYNYHTKAYINQRYASVL\n(3255, 128)\n../../out/201027/embedding/seq2seq/motor_toolkit_raw.npy\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbb97eeb9ee8936a89981c3f90f7ea749c2698df
| 32,842 |
ipynb
|
Jupyter Notebook
|
lead_lag_compensator.ipynb
|
sentry5588/misc
|
201c327fa08f41d2afec24c493444bb0749c9c10
|
[
"MIT"
] | null | null | null |
lead_lag_compensator.ipynb
|
sentry5588/misc
|
201c327fa08f41d2afec24c493444bb0749c9c10
|
[
"MIT"
] | null | null | null |
lead_lag_compensator.ipynb
|
sentry5588/misc
|
201c327fa08f41d2afec24c493444bb0749c9c10
|
[
"MIT"
] | null | null | null | 443.810811 | 31,144 | 0.943426 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport control\n\n# parameter for a lag compensator\na_lag = 30.0\nb_lag = 60.0\na_lead = 10.0\nb_lead = 5.0\n\n# Simulate continous transfer function response\nsys_lag = control.TransferFunction([a_lag, 1], [b_lag, 1])\nsys_lead = control.TransferFunction([a_lead, 1], [b_lead, 1])\nsys_lead_lag = control.TransferFunction([a_lag * a_lead, a_lag + a_lead, 1], \n [b_lag * b_lead, b_lag + b_lead, 1])\n\nplt.figure(0)\nmag, phase, omega = control.bode_plot(sys_lag)\nmag, phase, omega = control.bode_plot(sys_lead)\nmag, phase, omega = control.bode_plot(sys_lead_lag)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
cbb98b58de57e327b57459a5adcfcd324711f152
| 214,044 |
ipynb
|
Jupyter Notebook
|
lab/09-apps/scikit/plot_cluster_iris.ipynb
|
iten-engineering/python
|
97a79973c7727cd881974462db99a99d612b55f9
|
[
"MIT"
] | null | null | null |
lab/09-apps/scikit/plot_cluster_iris.ipynb
|
iten-engineering/python
|
97a79973c7727cd881974462db99a99d612b55f9
|
[
"MIT"
] | null | null | null |
lab/09-apps/scikit/plot_cluster_iris.ipynb
|
iten-engineering/python
|
97a79973c7727cd881974462db99a99d612b55f9
|
[
"MIT"
] | 1 |
2021-08-12T10:50:42.000Z
|
2021-08-12T10:50:42.000Z
| 1,963.706422 | 54,171 | 0.958457 |
[
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# K-means Clustering\n\n\nThe plots display firstly what a K-means algorithm would yield\nusing three clusters. It is then shown what the effect of a bad\ninitialization is on the classification process:\nBy setting n_init to only 1 (default is 10), the amount of\ntimes that the algorithm will be run with different centroid\nseeds is reduced.\nThe next plot displays what using eight clusters would deliver\nand finally the ground truth.\n",
"_____no_output_____"
]
],
[
[
"print(__doc__)\n\n\n# Code source: Gaël Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Though the following import is not directly being used, it is required\n# for 3D projection to work\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom sklearn.cluster import KMeans\nfrom sklearn import datasets\n\nnp.random.seed(5)\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nestimators = [('k_means_iris_8', KMeans(n_clusters=8)),\n ('k_means_iris_3', KMeans(n_clusters=3)),\n ('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,\n init='random'))]\n\nfignum = 1\ntitles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']\nfor name, est in estimators:\n fig = plt.figure(fignum, figsize=(4, 3))\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n est.fit(X)\n labels = est.labels_\n\n ax.scatter(X[:, 3], X[:, 0], X[:, 2],\n c=labels.astype(np.float), edgecolor='k')\n\n ax.w_xaxis.set_ticklabels([])\n ax.w_yaxis.set_ticklabels([])\n ax.w_zaxis.set_ticklabels([])\n ax.set_xlabel('Petal width')\n ax.set_ylabel('Sepal length')\n ax.set_zlabel('Petal length')\n ax.set_title(titles[fignum - 1])\n ax.dist = 12\n fignum = fignum + 1\n\n# Plot the ground truth\nfig = plt.figure(fignum, figsize=(4, 3))\nax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\nfor name, label in [('Setosa', 0),\n ('Versicolour', 1),\n ('Virginica', 2)]:\n ax.text3D(X[y == label, 3].mean(),\n X[y == label, 0].mean(),\n X[y == label, 2].mean() + 2, name,\n horizontalalignment='center',\n bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))\n# Reorder the labels to have colors matching the cluster results\ny = np.choose(y, [1, 2, 0]).astype(np.float)\nax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k')\n\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\nax.set_xlabel('Petal width')\nax.set_ylabel('Sepal length')\nax.set_zlabel('Petal length')\nax.set_title('Ground Truth')\nax.dist = 12\n\nfig.show()",
"Automatically created module for IPython interactive environment\n"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbb99245c993f5a024a903ae467871814e2e6dbc
| 63,176 |
ipynb
|
Jupyter Notebook
|
assignments/assignment1/Linear classifier.ipynb
|
mike-chesnokov/dlcourse_ai
|
481e1b6c8fda2b7a06661ca670e9ebd1ffbec858
|
[
"MIT"
] | null | null | null |
assignments/assignment1/Linear classifier.ipynb
|
mike-chesnokov/dlcourse_ai
|
481e1b6c8fda2b7a06661ca670e9ebd1ffbec858
|
[
"MIT"
] | null | null | null |
assignments/assignment1/Linear classifier.ipynb
|
mike-chesnokov/dlcourse_ai
|
481e1b6c8fda2b7a06661ca670e9ebd1ffbec858
|
[
"MIT"
] | null | null | null | 35.432417 | 13,092 | 0.621122 |
[
[
[
"# Задание 1.2 - Линейный классификатор (Linear classifier)\n\nВ этом задании мы реализуем другую модель машинного обучения - линейный классификатор. Линейный классификатор подбирает для каждого класса веса, на которые нужно умножить значение каждого признака и потом сложить вместе.\nТот класс, у которого эта сумма больше, и является предсказанием модели.\n\nВ этом задании вы:\n- потренируетесь считать градиенты различных многомерных функций\n- реализуете подсчет градиентов через линейную модель и функцию потерь softmax\n- реализуете процесс тренировки линейного классификатора\n- подберете параметры тренировки на практике\n\nНа всякий случай, еще раз ссылка на туториал по numpy: \nhttp://cs231n.github.io/python-numpy-tutorial/",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"from dataset import load_svhn, random_split_train_val\nfrom gradient_check import check_gradient\nfrom metrics import multiclass_accuracy \nimport linear_classifer",
"_____no_output_____"
]
],
[
[
"# Как всегда, первым делом загружаем данные\n\nМы будем использовать все тот же SVHN.",
"_____no_output_____"
]
],
[
[
"def prepare_for_linear_classifier(train_X, test_X):\n train_flat = train_X.reshape(train_X.shape[0], -1).astype(np.float) / 255.0\n test_flat = test_X.reshape(test_X.shape[0], -1).astype(np.float) / 255.0\n \n # Subtract mean\n mean_image = np.mean(train_flat, axis = 0)\n train_flat -= mean_image\n test_flat -= mean_image\n \n # Add another channel with ones as a bias term\n train_flat_with_ones = np.hstack([train_flat, np.ones((train_X.shape[0], 1))])\n test_flat_with_ones = np.hstack([test_flat, np.ones((test_X.shape[0], 1))]) \n return train_flat_with_ones, test_flat_with_ones\n \ntrain_X, train_y, test_X, test_y = load_svhn(\"data\", max_train=10000, max_test=1000) \ntrain_X, test_X = prepare_for_linear_classifier(train_X, test_X)\n# Split train into train and val\ntrain_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000)",
"_____no_output_____"
]
],
[
[
"# Играемся с градиентами!\n\nВ этом курсе мы будем писать много функций, которые вычисляют градиенты аналитическим методом.\n\nВсе функции, в которых мы будем вычислять градиенты будут написаны по одной и той же схеме. \nОни будут получать на вход точку, где нужно вычислить значение и градиент функции, а на выходе будут выдавать кортеж (tuple) из двух значений - собственно значения функции в этой точке (всегда одно число) и аналитического значения градиента в той же точке (той же размерности, что и вход).\n```\ndef f(x):\n \"\"\"\n Computes function and analytic gradient at x\n \n x: np array of float, input to the function\n \n Returns:\n value: float, value of the function \n grad: np array of float, same shape as x\n \"\"\"\n ...\n \n return value, grad\n```\n\nНеобходимым инструментом во время реализации кода, вычисляющего градиенты, является функция его проверки. Эта функция вычисляет градиент численным методом и сверяет результат с градиентом, вычисленным аналитическим методом.\n\nМы начнем с того, чтобы реализовать вычисление численного градиента (numeric gradient) в функции `check_gradient` в `gradient_check.py`. Эта функция будет принимать на вход функции формата, заданного выше, использовать значение `value` для вычисления численного градиента и сравнит его с аналитическим - они должны сходиться.\n\nНапишите часть функции, которая вычисляет градиент с помощью численной производной для каждой координаты. Для вычисления производной используйте так называемую two-point formula (https://en.wikipedia.org/wiki/Numerical_differentiation):\n\n\n\nВсе функции приведенные в следующей клетке должны проходить gradient check.",
"_____no_output_____"
]
],
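As a reference while implementing `check_gradient`, here is a minimal sketch of the numeric part, assuming an illustrative step `delta` and tolerance `tol` (the actual function in `gradient_check.py` may differ in signature and reporting):

```python
import numpy as np

def check_gradient_sketch(f, x, delta=1e-5, tol=1e-4):
    """Compare the analytic gradient returned by f(x) with a two-point numeric estimate."""
    _, analytic_grad = f(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        step = np.zeros_like(x)
        step[ix] = delta
        # two-point formula: (f(x + h) - f(x - h)) / (2h)
        numeric = (f(x + step)[0] - f(x - step)[0]) / (2 * delta)
        if abs(numeric - analytic_grad[ix]) > tol:
            print(f"Gradient check failed at {ix}: numeric {numeric}, analytic {analytic_grad[ix]}")
            return False
        it.iternext()
    print("Gradient check passed!")
    return True
```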
[
[
"# TODO: Implement check_gradient function in gradient_check.py\n# All the functions below should pass the gradient check\n\ndef square(x):\n return float(x*x), 2*x\n\ncheck_gradient(square, np.array([3.0]))\n\ndef array_sum(x):\n assert x.shape == (2,), x.shape\n return np.sum(x), np.ones_like(x)\n\ncheck_gradient(array_sum, np.array([3.0, 2.0]))\n\ndef array_2d_sum(x):\n assert x.shape == (2,2)\n return np.sum(x), np.ones_like(x)\n\ncheck_gradient(array_2d_sum, np.array([[3.0, 2.0], [1.0, 0.0]]))",
"Gradient check passed!\nGradient check passed!\nGradient check passed!\n"
]
],
[
[
"## Начинаем писать свои функции, считающие аналитический градиент\n\nТеперь реализуем функцию softmax, которая получает на вход оценки для каждого класса и преобразует их в вероятности от 0 до 1:\n\n\n**Важно:** Практический аспект вычисления этой функции заключается в том, что в ней учавствует вычисление экспоненты от потенциально очень больших чисел - это может привести к очень большим значениям в числителе и знаменателе за пределами диапазона float.\n\nК счастью, у этой проблемы есть простое решение -- перед вычислением softmax вычесть из всех оценок максимальное значение среди всех оценок:\n```\npredictions -= np.max(predictions)\n```\n(подробнее здесь - http://cs231n.github.io/linear-classify/#softmax, секция `Practical issues: Numeric stability`)",
"_____no_output_____"
]
],
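Before writing the real thing in `linear_classifer.py`, a minimal numerically stable softmax for a single vector of scores could look like this (just a sketch):

```python
import numpy as np

def softmax_sketch(predictions):
    # subtracting the max score keeps exp() in range and does not change the result
    shifted = predictions - np.max(predictions)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
```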
[
[
"# TODO Implement softmax and cross-entropy for single sample\nprobs = linear_classifer.softmax(np.array([-10, 0, 10]))\n\n# Make sure it works for big numbers too!\nprobs = linear_classifer.softmax(np.array([1000, 0, 0]))\nassert np.isclose(probs[0], 1.0)",
"_____no_output_____"
]
],
[
[
"Кроме этого, мы реализуем cross-entropy loss, которую мы будем использовать как функцию ошибки (error function).\nВ общем виде cross-entropy определена следующим образом:\n\n\nгде x - все классы, p(x) - истинная вероятность принадлежности сэмпла классу x, а q(x) - вероятность принадлежности классу x, предсказанная моделью. \nВ нашем случае сэмпл принадлежит только одному классу, индекс которого передается функции. Для него p(x) равна 1, а для остальных классов - 0. \n\nЭто позволяет реализовать функцию проще!",
"_____no_output_____"
]
],
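Since the true distribution puts all its mass on the single target class, the loss reduces to the negative log-probability of that class. A possible sketch of `cross_entropy_loss` for a single sample:

```python
import numpy as np

def cross_entropy_loss_sketch(probs, target_index):
    # -log of the probability assigned to the correct class
    return -np.log(probs[target_index])
```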
[
[
"probs = linear_classifer.softmax(np.array([-5, 0, 5]))\nlinear_classifer.cross_entropy_loss(probs, 1)",
"_____no_output_____"
]
],
[
[
"После того как мы реализовали сами функции, мы можем реализовать градиент.\n\nОказывается, что вычисление градиента становится гораздо проще, если объединить эти функции в одну, которая сначала вычисляет вероятности через softmax, а потом использует их для вычисления функции ошибки через cross-entropy loss.\n\nЭта функция `softmax_with_cross_entropy` будет возвращает и значение ошибки, и градиент по входным параметрам. Мы проверим корректность реализации с помощью `check_gradient`.",
"_____no_output_____"
]
],
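The simplification comes from the well-known identity that the gradient of cross-entropy over softmax with respect to the raw scores is simply the probabilities minus a one-hot vector of the target. A single-sample sketch, reusing the helper sketches above (the graded implementation lives in `linear_classifer.py`):

```python
def softmax_with_cross_entropy_sketch(predictions, target_index):
    probs = softmax_sketch(predictions)
    loss = cross_entropy_loss_sketch(probs, target_index)
    dprediction = probs.copy()
    dprediction[target_index] -= 1  # d(loss)/d(predictions) = probs - one_hot(target)
    return loss, dprediction
```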
[
[
"# TODO Implement combined function or softmax and cross entropy and produces gradient\nloss, grad = linear_classifer.softmax_with_cross_entropy(np.array([1, 0, 0]), 1)\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, 1), np.array([1, 0, 0], np.float))",
"Gradient check passed!\n"
],
[
"linear_classifer.softmax_with_cross_entropy(np.array([1, 0, 0]), 0)",
"_____no_output_____"
],
[
"linear_classifer.softmax(np.array([1, 0, 0]))",
"_____no_output_____"
],
[
"np.log(linear_classifer.softmax(np.array([1, 0, 0])))",
"_____no_output_____"
]
],
[
[
"В качестве метода тренировки мы будем использовать стохастический градиентный спуск (stochastic gradient descent или SGD), который работает с батчами сэмплов. \n\nПоэтому все наши фукнции будут получать не один пример, а батч, то есть входом будет не вектор из `num_classes` оценок, а матрица размерности `batch_size, num_classes`. Индекс примера в батче всегда будет первым измерением.\n\nСледующий шаг - переписать наши функции так, чтобы они поддерживали батчи.\n\nФинальное значение функции ошибки должно остаться числом, и оно равно среднему значению ошибки среди всех примеров в батче.",
"_____no_output_____"
]
],
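A sketch of the batched version, assuming `target_index` holds one class index per sample (possibly of shape `(batch_size, 1)` as in the test cell below); because the loss is the mean over the batch, the gradient is divided by `batch_size` as well:

```python
import numpy as np

def softmax_with_cross_entropy_batch_sketch(predictions, target_index):
    batch_size = predictions.shape[0]
    target = np.asarray(target_index).reshape(-1)
    # row-wise numerically stable softmax
    shifted = predictions - np.max(predictions, axis=1, keepdims=True)
    exps = np.exp(shifted)
    probs = exps / np.sum(exps, axis=1, keepdims=True)
    loss = -np.mean(np.log(probs[np.arange(batch_size), target]))
    dprediction = probs.copy()
    dprediction[np.arange(batch_size), target] -= 1
    dprediction /= batch_size  # mean over the batch
    return loss, dprediction
```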
[
[
"# TODO Extend combined function so it can receive a 2d array with batch of samples\nnp.random.seed(42)\n# Test batch_size = 1\nnum_classes = 3\nbatch_size = 2\npredictions = np.random.randint(-1, 3, size=(batch_size, num_classes)).astype(np.float)\ntarget_index = np.random.randint(0, num_classes, size=(batch_size, 1)).astype(np.int)\nloss, grads = linear_classifer.softmax_with_cross_entropy(predictions, target_index)\n\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions)",
"Gradient check passed!\n"
],
[
"delta=1e-5\ntol = 1e-4 \nz_ = predictions.copy()\nit = np.nditer(grads, flags=['multi_index'], op_flags=['readwrite'])\n\nwhile not it.finished:\n ix = it.multi_index\n print(grads[ix])\n f = lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index)\n \n delta_vec = np.zeros_like(z_)\n delta_vec[ix] += delta\n print('delta', delta_vec, '\\n', z_ + delta_vec)\n \n temp1 = f(z_ + delta_vec)[0]\n temp2 = f(z_ - delta_vec)[0]\n print('temp1', temp1, '\\ntemp2', temp2)\n print((temp1 - temp2)/(2 * delta), '\\n')\n \n it.iternext()",
"-0.37025176982879043\ndelta [[1.e-05 0.e+00 0.e+00]\n [0.e+00 0.e+00 0.e+00]] \n [[ 1.00001 2. -1. ]\n [ 1. 1. 2. ]]\ntemp1 1.4502247628372245 \ntemp2 1.4502321678726209\n-0.37025176982119484 \n\n0.3526922563491206\ndelta [[0.e+00 1.e-05 0.e+00]\n [0.e+00 0.e+00 0.e+00]] \n [[ 1. 2.00001 -1. ]\n [ 1. 1. 2. ]]\ntemp1 1.4502319922778777 \ntemp2 1.4502249384327506\n0.35269225635570217 \n\n0.017559513479669865\ndelta [[0.e+00 0.e+00 1.e-05]\n [0.e+00 0.e+00 0.e+00]] \n [[ 1. 2. -0.99999]\n [ 1. 1. 2. ]]\ntemp1 1.4502286409461007 \ntemp2 1.450228289755831\n0.01755951348769713 \n\n-0.3940292211914573\ndelta [[0.e+00 0.e+00 0.e+00]\n [1.e-05 0.e+00 0.e+00]] \n [[ 1. 2. -1. ]\n [ 1.00001 1. 2. ]]\ntemp1 1.4502245250620822 \ntemp2 1.4502324056465061\n-0.3940292211956908 \n\n0.10597077880854272\ndelta [[0.e+00 0.e+00 0.e+00]\n [0.e+00 1.e-05 0.e+00]] \n [[ 1. 2. -1. ]\n [ 1. 1.00001 2. ]]\ntemp1 1.4502295250620825 \ntemp2 1.450227405646506\n0.10597077881868698 \n\n0.28805844238291456\ndelta [[0.e+00 0.e+00 0.e+00]\n [0.e+00 0.e+00 1.e-05]] \n [[ 1. 2. -1. ]\n [ 1. 1. 2.00001]]\ntemp1 1.4502313459406477 \ntemp2 1.4502255847718\n0.2880584423881061 \n\n"
],
[
"# TODO Extend combined function so it can receive a 2d array with batch of samples\nnp.random.seed(42)\n# Test batch_size = 1\nnum_classes = 4\nbatch_size = 1\npredictions = np.random.randint(-1, 3, size=(batch_size, num_classes)).astype(np.float)\ntarget_index = np.random.randint(0, num_classes, size=(batch_size, 1)).astype(np.int)\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions)\n\n# Test batch_size = 3\nnum_classes = 4\nbatch_size = 3\npredictions = np.random.randint(-1, 3, size=(batch_size, num_classes)).astype(np.float)\ntarget_index = np.random.randint(0, num_classes, size=(batch_size, 1)).astype(np.int)\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions)",
"Gradient check passed!\nGradient check passed!\n"
]
],
[
[
"### Наконец, реализуем сам линейный классификатор!\n\nsoftmax и cross-entropy получают на вход оценки, которые выдает линейный классификатор.\n\nОн делает это очень просто: для каждого класса есть набор весов, на которые надо умножить пиксели картинки и сложить. Получившееся число и является оценкой класса, идущей на вход softmax.\n\nТаким образом, линейный классификатор можно представить как умножение вектора с пикселями на матрицу W размера `num_features, num_classes`. Такой подход легко расширяется на случай батча векторов с пикселями X размера `batch_size, num_features`:\n\n`predictions = X * W`, где `*` - матричное умножение.\n\nРеализуйте функцию подсчета линейного классификатора и градиентов по весам `linear_softmax` в файле `linear_classifer.py`",
"_____no_output_____"
]
],
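By the chain rule, the gradient with respect to the weights is the transposed input times the gradient with respect to the scores. A sketch of `linear_softmax`, reusing the batched helper above:

```python
import numpy as np

def linear_softmax_sketch(X, W, target_index):
    predictions = np.dot(X, W)
    loss, dpredictions = softmax_with_cross_entropy_batch_sketch(predictions, target_index)
    dW = np.dot(X.T, dpredictions)  # chain rule: d(loss)/dW = X^T @ d(loss)/d(predictions)
    return loss, dW
```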
[
[
"# TODO Implement linear_softmax function that uses softmax with cross-entropy for linear classifier\nbatch_size = 2\nnum_classes = 2\nnum_features = 3\nnp.random.seed(42)\nW = np.random.randint(-1, 3, size=(num_features, num_classes)).astype(np.float)\nX = np.random.randint(-1, 3, size=(batch_size, num_features)).astype(np.float)\ntarget_index = np.ones(batch_size, dtype=np.int)",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"W",
"_____no_output_____"
],
[
"predictions = np.dot(X, W)\npredictions",
"_____no_output_____"
],
[
"loss, grads = linear_classifer.linear_softmax(X, W, target_index)\ngrads",
"_____no_output_____"
],
[
"delta=1e-5\ntol = 1e-4 \nz_ = W.copy()\nit = np.nditer(grads, flags=['multi_index'], op_flags=['readwrite'])\n\nwhile not it.finished:\n ix = it.multi_index\n print(grads[ix])\n f = lambda w: linear_classifer.linear_softmax(X, w, target_index)\n \n delta_vec = np.zeros_like(z_)\n delta_vec[ix] += delta\n print('delta', delta_vec, '\\n', z_ + delta_vec)\n \n temp1 = f(z_ + delta_vec)[0]\n temp2 = f(z_ - delta_vec)[0]\n print('temp1', temp1, '\\ntemp2', temp2)\n print((temp1 - temp2)/(2 * delta), '\\n')\n \n it.iternext()",
"-0.44039853898894116\ndelta [[1.e-05 0.e+00]\n [0.e+00 0.e+00]\n [0.e+00 0.e+00]] \n [[ 1.00001 2. ]\n [-1. 1. ]\n [ 1. 2. ]]\ntemp1 1.0877532773255922 \ntemp2 1.087762085296372\n-0.4403985389922482 \n\n-0.4166856024001578\ndelta [[0.e+00 0.e+00]\n [1.e-05 0.e+00]\n [0.e+00 0.e+00]] \n [[ 1. 2. ]\n [-0.99999 1. ]\n [ 1. 2. ]]\ntemp1 1.0877535144560875 \ntemp2 1.0877618481681357\n-0.4166856024112597 \n\n0.46411147557772453\ndelta [[0.e+00 0.e+00]\n [0.e+00 0.e+00]\n [1.e-05 0.e+00]] \n [[ 1. 2. ]\n [-1. 1. ]\n [ 1.00001 2. ]]\ntemp1 1.0877623224268673 \ntemp2 1.0877530401973559\n0.4641114755732367 \n\n0.4403985389889412\ndelta [[0.e+00 1.e-05]\n [0.e+00 0.e+00]\n [0.e+00 0.e+00]] \n [[ 1. 2.00001]\n [-1. 1. ]\n [ 1. 2. ]]\ntemp1 1.087762085296372 \ntemp2 1.0877532773255922\n0.4403985389922482 \n\n0.4166856024001579\ndelta [[0.e+00 0.e+00]\n [0.e+00 1.e-05]\n [0.e+00 0.e+00]] \n [[ 1. 2. ]\n [-1. 1.00001]\n [ 1. 2. ]]\ntemp1 1.0877618481681357 \ntemp2 1.0877535144560875\n0.4166856024112597 \n\n-0.46411147557772453\ndelta [[0.e+00 0.e+00]\n [0.e+00 0.e+00]\n [0.e+00 1.e-05]] \n [[ 1. 2. ]\n [-1. 1. ]\n [ 1. 2.00001]]\ntemp1 1.0877530401973559 \ntemp2 1.0877623224268673\n-0.4641114755732367 \n\n"
],
[
"# TODO Implement linear_softmax function that uses softmax with cross-entropy for linear classifier\nbatch_size = 2\nnum_classes = 2\nnum_features = 3\nnp.random.seed(42)\nW = np.random.randint(-1, 3, size=(num_features, num_classes)).astype(np.float)\nX = np.random.randint(-1, 3, size=(batch_size, num_features)).astype(np.float)\ntarget_index = np.ones(batch_size, dtype=np.int)\n\nloss, dW = linear_classifer.linear_softmax(X, W, target_index)\ncheck_gradient(lambda w: linear_classifer.linear_softmax(X, w, target_index), W)",
"Gradient check passed!\n"
],
[
"dW",
"_____no_output_____"
]
],
[
[
"### И теперь регуляризация\n\nМы будем использовать L2 regularization для весов как часть общей функции ошибки.\n\nНапомним, L2 regularization определяется как\n\nl2_reg_loss = regularization_strength * sum<sub>ij</sub> W[i, j]<sup>2</sup>\n\nРеализуйте функцию для его вычисления и вычисления соотвествующих градиентов.",
"_____no_output_____"
]
],
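Both the L2 loss and its gradient follow directly from the formula above; a sketch of `l2_regularization`:

```python
import numpy as np

def l2_regularization_sketch(W, reg_strength):
    loss = reg_strength * np.sum(W ** 2)
    grad = 2 * reg_strength * W  # derivative of reg_strength * W_ij^2
    return loss, grad
```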
[
[
"# TODO Implement l2_regularization function that implements loss for L2 regularization\nlinear_classifer.l2_regularization(W, 0.01)\ncheck_gradient(lambda w: linear_classifer.l2_regularization(w, 0.01), W)",
"Gradient check passed!\n"
]
],
[
[
"# Тренировка!",
"_____no_output_____"
],
[
"Градиенты в порядке, реализуем процесс тренировки!",
"_____no_output_____"
]
],
[
[
"# TODO: Implement LinearSoftmaxClassifier.fit function\nclassifier = linear_classifer.LinearSoftmaxClassifier()\nloss_history = classifier.fit(train_X, train_y, epochs=10, learning_rate=1e-3, batch_size=300, reg=1e1)",
"Epoch 0, loss: 2.395944\nEpoch 1, loss: 2.329967\nEpoch 2, loss: 2.309746\nEpoch 3, loss: 2.303854\nEpoch 4, loss: 2.302685\nEpoch 5, loss: 2.303024\nEpoch 6, loss: 2.301804\nEpoch 7, loss: 2.302362\nEpoch 8, loss: 2.301462\nEpoch 9, loss: 2.301036\n"
],
[
"# let's look at the loss history!\nplt.plot(loss_history)\nplt.show()",
"_____no_output_____"
],
[
"# Let's check how it performs on validation set\npred = classifier.predict(val_X)\naccuracy = multiclass_accuracy(pred, val_y)\nprint(\"Accuracy: \", accuracy)\n\n# Now, let's train more and see if it performs better\nclassifier.fit(train_X, train_y, epochs=100, learning_rate=1e-3, batch_size=300, reg=1e1)\npred = classifier.predict(val_X)\naccuracy = multiclass_accuracy(pred, val_y)\nprint(\"Accuracy after training for 100 epochs: \", accuracy)",
"Accuracy: 0.119\nEpoch 0, loss: 2.301775\nEpoch 1, loss: 2.302470\nEpoch 2, loss: 2.301517\nEpoch 3, loss: 2.302304\nEpoch 4, loss: 2.301261\nEpoch 5, loss: 2.301431\nEpoch 6, loss: 2.302390\nEpoch 7, loss: 2.302629\nEpoch 8, loss: 2.301360\nEpoch 9, loss: 2.301406\nEpoch 10, loss: 2.301995\nEpoch 11, loss: 2.301792\nEpoch 12, loss: 2.302865\nEpoch 13, loss: 2.300573\nEpoch 14, loss: 2.301694\nEpoch 15, loss: 2.301212\nEpoch 16, loss: 2.302294\nEpoch 17, loss: 2.301142\nEpoch 18, loss: 2.301996\nEpoch 19, loss: 2.301716\nEpoch 20, loss: 2.301361\nEpoch 21, loss: 2.302726\nEpoch 22, loss: 2.301701\nEpoch 23, loss: 2.301551\nEpoch 24, loss: 2.302130\nEpoch 25, loss: 2.302418\nEpoch 26, loss: 2.301620\nEpoch 27, loss: 2.301936\nEpoch 28, loss: 2.301969\nEpoch 29, loss: 2.301542\nEpoch 30, loss: 2.301278\nEpoch 31, loss: 2.302039\nEpoch 32, loss: 2.302859\nEpoch 33, loss: 2.303249\nEpoch 34, loss: 2.301944\nEpoch 35, loss: 2.301846\nEpoch 36, loss: 2.302339\nEpoch 37, loss: 2.302446\nEpoch 38, loss: 2.301568\nEpoch 39, loss: 2.301545\nEpoch 40, loss: 2.301864\nEpoch 41, loss: 2.302947\nEpoch 42, loss: 2.302325\nEpoch 43, loss: 2.301754\nEpoch 44, loss: 2.301960\nEpoch 45, loss: 2.301942\nEpoch 46, loss: 2.301696\nEpoch 47, loss: 2.302248\nEpoch 48, loss: 2.301740\nEpoch 49, loss: 2.301784\nEpoch 50, loss: 2.301235\nEpoch 51, loss: 2.301655\nEpoch 52, loss: 2.302594\nEpoch 53, loss: 2.301782\nEpoch 54, loss: 2.302082\nEpoch 55, loss: 2.302244\nEpoch 56, loss: 2.302239\nEpoch 57, loss: 2.303319\nEpoch 58, loss: 2.301913\nEpoch 59, loss: 2.302222\nEpoch 60, loss: 2.301606\nEpoch 61, loss: 2.301208\nEpoch 62, loss: 2.301827\nEpoch 63, loss: 2.302937\nEpoch 64, loss: 2.302002\nEpoch 65, loss: 2.302777\nEpoch 66, loss: 2.302968\nEpoch 67, loss: 2.302866\nEpoch 68, loss: 2.302086\nEpoch 69, loss: 2.301090\nEpoch 70, loss: 2.301826\nEpoch 71, loss: 2.300684\nEpoch 72, loss: 2.302248\nEpoch 73, loss: 2.302451\nEpoch 74, loss: 2.301693\nEpoch 75, loss: 2.301706\nEpoch 76, loss: 2.301780\nEpoch 77, loss: 2.301653\nEpoch 78, loss: 2.301849\nEpoch 79, loss: 2.302842\nEpoch 80, loss: 2.302507\nEpoch 81, loss: 2.302186\nEpoch 82, loss: 2.301207\nEpoch 83, loss: 2.302617\nEpoch 84, loss: 2.302322\nEpoch 85, loss: 2.302050\nEpoch 86, loss: 2.301836\nEpoch 87, loss: 2.301914\nEpoch 88, loss: 2.302461\nEpoch 89, loss: 2.301592\nEpoch 90, loss: 2.302681\nEpoch 91, loss: 2.302394\nEpoch 92, loss: 2.302126\nEpoch 93, loss: 2.301533\nEpoch 94, loss: 2.302000\nEpoch 95, loss: 2.302418\nEpoch 96, loss: 2.302238\nEpoch 97, loss: 2.302749\nEpoch 98, loss: 2.301531\nEpoch 99, loss: 2.302757\nAccuracy after training for 100 epochs: 0.125\n"
]
],
[
[
"### Как и раньше, используем кросс-валидацию для подбора гиперпараметтов.\n\nВ этот раз, чтобы тренировка занимала разумное время, мы будем использовать только одно разделение на тренировочные (training) и проверочные (validation) данные.\n\nТеперь нам нужно подобрать не один, а два гиперпараметра! Не ограничивайте себя изначальными значениями в коде. \nДобейтесь точности более чем **20%** на проверочных данных (validation data).",
"_____no_output_____"
]
],
[
[
"from tqdm import tqdm\nfrom itertools import product",
"_____no_output_____"
],
[
"%%time\nnum_epochs = 200\nbatch_size = 300\n\nlearning_rates = [1e-1, 1e-2, 1e-3, 1e-4]\nreg_strengths = [1e-1, 1e-2, 1e-3, 1e-4]\n\nbest_classifier = None\nbest_val_accuracy = None\n\n# TODO use validation set to find the best hyperparameters\n# hint: for best results, you might need to try more values for learning rate and regularization strength \n# than provided initially\nfor lr, reg in tqdm(product(learning_rates, reg_strengths)):\n classifier = linear_classifer.LinearSoftmaxClassifier()\n classifier.fit(train_X, train_y, epochs=10, learning_rate=lr, \n batch_size=100, reg=reg, random_state=77)\n \n # make prediction on validation\n pred = classifier.predict(val_X)\n accuracy = multiclass_accuracy(pred, val_y)\n print('lr={lr}; reg={reg}'.format(lr=lr, reg=reg))\n print(\"Accuracy current: \", accuracy, '\\n')\n \n if (best_val_accuracy is None) or (best_val_accuracy < accuracy):\n best_val_accuracy = accuracy\n best_classifier = classifier\n \nprint('best validation accuracy achieved: %f' % best_val_accuracy)",
"\r0it [00:00, ?it/s]"
]
],
[
[
"# Какой же точности мы добились на тестовых данных?",
"_____no_output_____"
]
],
[
[
"test_pred = best_classifier.predict(test_X)\ntest_accuracy = multiclass_accuracy(test_pred, test_y)\nprint('Linear softmax classifier test set accuracy: %f' % (test_accuracy, ))",
"Linear softmax classifier test set accuracy: 0.204000\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbb9a017bf3191780f121a94a6f7f662286e4ce2
| 522,150 |
ipynb
|
Jupyter Notebook
|
docs/examples/DataSet/Performing-measurements-using-qcodes-parameters-and-dataset.ipynb
|
jakeogh/Qcodes
|
3042317038e89264d481b212c9640c4d6b356c88
|
[
"MIT"
] | 1 |
2018-12-21T17:49:30.000Z
|
2018-12-21T17:49:30.000Z
|
docs/examples/DataSet/Performing-measurements-using-qcodes-parameters-and-dataset.ipynb
|
jakeogh/Qcodes
|
3042317038e89264d481b212c9640c4d6b356c88
|
[
"MIT"
] | 645 |
2018-03-09T08:48:54.000Z
|
2022-03-29T23:15:42.000Z
|
docs/examples/DataSet/Performing-measurements-using-qcodes-parameters-and-dataset.ipynb
|
jakeogh/Qcodes
|
3042317038e89264d481b212c9640c4d6b356c88
|
[
"MIT"
] | 1 |
2020-10-02T11:08:53.000Z
|
2020-10-02T11:08:53.000Z
| 144.359967 | 83,400 | 0.845849 |
[
[
[
"# Performing measurements using QCoDeS parameters and DataSet",
"_____no_output_____"
],
[
"This notebook shows some ways of performing different measurements using \nQCoDeS parameters and the [DataSet](DataSet-class-walkthrough.ipynb) via a powerful ``Measurement`` context manager. Here, it is assumed that the reader has some degree of familiarity with fundamental objects and methods of QCoDeS.",
"_____no_output_____"
],
[
"## Implementing a measurement",
"_____no_output_____"
],
[
"Now, let us start with necessary imports:",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy.random as rd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom time import sleep, monotonic\n\nimport qcodes as qc\nfrom qcodes import Station, load_or_create_experiment, \\\n initialise_database, Measurement, load_by_run_spec, load_by_guid\nfrom qcodes.tests.instrument_mocks import DummyInstrument, DummyInstrumentWithMeasurement\nfrom qcodes.dataset.plotting import plot_dataset\nfrom qcodes.dataset.descriptions.detect_shapes import detect_shape_of_measurement\n\nqc.logger.start_all_logging()",
"Logging hadn't been started.\nActivating auto-logging. Current session state plus future input saved.\nFilename : C:\\Users\\Jens-work\\.qcodes\\logs\\command_history.log\nMode : append\nOutput logging : True\nRaw input log : False\nTimestamping : True\nState : active\nQcodes Logfile : C:\\Users\\Jens-work\\.qcodes\\logs\\210129-14216-qcodes.log\nActivating auto-logging. Current session state plus future input saved.\nFilename : C:\\Users\\Jens-work\\.qcodes\\logs\\command_history.log\nMode : append\nOutput logging : True\nRaw input log : False\nTimestamping : True\nState : active\nQcodes Logfile : C:\\Users\\Jens-work\\.qcodes\\logs\\210129-14216-qcodes.log\n"
]
],
[
[
"In what follows, we shall define some utility functions as well as declare our dummy instruments. We, then, add these instruments to a ``Station`` object. \n\nThe dummy dmm is setup to generate an output depending on the values set on the dummy dac simulating a real experiment.",
"_____no_output_____"
]
],
[
[
"# preparatory mocking of physical setup\n\ndac = DummyInstrument('dac', gates=['ch1', 'ch2'])\ndmm = DummyInstrumentWithMeasurement(name='dmm', setter_instr=dac)\n\nstation = qc.Station(dmm, dac)",
"_____no_output_____"
],
[
"# now make some silly set-up and tear-down actions\n\ndef veryfirst():\n print('Starting the measurement')\n\ndef numbertwo(inst1, inst2):\n print('Doing stuff with the following two instruments: {}, {}'.format(inst1, inst2))\n \ndef thelast():\n print('End of experiment')",
"_____no_output_____"
]
],
[
[
"**Note** that database and experiments may be missing.\n\nIf this is the first time you create a dataset, the underlying database file has\nmost likely not been created. The following cell creates the database file. Please\nrefer to documentation on [The Experiment Container](The-Experiment-Container.ipynb) for details.\n\nFurthermore, datasets are associated to an experiment. By default, a dataset (or \"run\")\nis appended to the latest existing experiments. If no experiment has been created,\nwe must create one. We do that by calling the `load_or_create_experiment` function.\n\nHere we explicitly pass the loaded or created experiment to the `Measurement` object to ensure that we are always\nusing the `performing_meas_using_parameters_and_dataset` `Experiment` created within this tutorial. Note that a keyword argument `name` can also be set as any string value for `Measurement` which later becomes the `name` of the dataset that running that `Measurement` produces.",
"_____no_output_____"
]
],
[
[
"initialise_database()\nexp = load_or_create_experiment(\n experiment_name='performing_meas_using_parameters_and_dataset',\n sample_name=\"no sample\"\n)",
"_____no_output_____"
]
],
[
[
"And then run an experiment:",
"_____no_output_____"
]
],
[
[
"meas = Measurement(exp=exp, name='exponential_decay')\nmeas.register_parameter(dac.ch1) # register the first independent parameter\nmeas.register_parameter(dmm.v1, setpoints=(dac.ch1,)) # now register the dependent oone\n\nmeas.add_before_run(veryfirst, ()) # add a set-up action\nmeas.add_before_run(numbertwo, (dmm, dac)) # add another set-up action\nmeas.add_after_run(thelast, ()) # add a tear-down action\n\nmeas.write_period = 0.5\n\nwith meas.run() as datasaver: \n for set_v in np.linspace(0, 25, 10):\n dac.ch1.set(set_v)\n get_v = dmm.v1.get()\n datasaver.add_result((dac.ch1, set_v),\n (dmm.v1, get_v))\n \n dataset1D = datasaver.dataset # convenient to have for data access and plotting",
"Starting the measurement\nDoing stuff with the following two instruments: <DummyInstrumentWithMeasurement: dmm>, <DummyInstrument: dac>\nStarting experimental run with id: 380. \nEnd of experiment\n"
],
[
"ax, cbax = plot_dataset(dataset1D)",
"_____no_output_____"
]
],
[
[
"And let's add an example of a 2D measurement. For the 2D, we'll need a new batch of parameters, notably one with two \nother parameters as setpoints. We therefore define a new Measurement with new parameters.",
"_____no_output_____"
]
],
[
[
"meas = Measurement(exp=exp, name='2D_measurement_example')\nmeas.register_parameter(dac.ch1) # register the first independent parameter\nmeas.register_parameter(dac.ch2) # register the second independent parameter\nmeas.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2)) # now register the dependent oone",
"_____no_output_____"
],
[
"# run a 2D sweep\n\nwith meas.run() as datasaver:\n\n for v1 in np.linspace(-1, 1, 200):\n for v2 in np.linspace(-1, 1, 200):\n dac.ch1(v1)\n dac.ch2(v2)\n val = dmm.v2.get()\n datasaver.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v2, val))\n \n dataset2D = datasaver.dataset",
"Starting experimental run with id: 381. \n"
],
[
"ax, cbax = plot_dataset(dataset2D)",
"_____no_output_____"
]
],
[
[
"## Accessing and exporting the measured data",
"_____no_output_____"
],
[
"QCoDeS ``DataSet`` implements a number of methods for accessing the data of a given dataset. Here we will concentrate on the two most user friendly methods. For a more detailed walkthrough of the `DataSet` class, refer to [DataSet class walkthrough](DataSet-class-walkthrough.ipynb) notebook.",
"_____no_output_____"
],
[
"The method `get_parameter_data` returns the data as a dictionary of ``numpy`` arrays. The dictionary is indexed by the measured (dependent) parameter in the outermost level and the names of the dependent and independent parameters in the innermost level. The first parameter in the innermost level is always the dependent parameter.",
"_____no_output_____"
]
],
[
[
"dataset1D.get_parameter_data()",
"_____no_output_____"
]
],
[
[
"By default `get_parameter_data` returns all data stored in the dataset. The data that is specific to one or more measured parameters can be returned by passing the parameter name(s) or by using `ParamSpec` object:",
"_____no_output_____"
]
],
[
[
"dataset1D.get_parameter_data('dmm_v1')",
"_____no_output_____"
]
],
[
[
"You can also simply fetch the data for one or more dependent parameter",
"_____no_output_____"
]
],
[
[
"dataset1D.get_parameter_data('dac_ch1')",
"_____no_output_____"
]
],
[
[
"For more details about accessing data of a given `DataSet`, see [Accessing data in DataSet notebook](Accessing-data-in-DataSet.ipynb).",
"_____no_output_____"
],
[
"The data can also be exported as one or more [Pandas](https://pandas.pydata.org/) DataFrames. \nThe DataFrames cane be returned either as a single dataframe or as a dictionary from measured parameters to DataFrames.\nIf you measure all parameters as a function of the same set of parameters you probably want to export to a single dataframe.",
"_____no_output_____"
]
],
[
[
"dataset1D.to_pandas_dataframe()",
"_____no_output_____"
]
],
[
[
"However, there may be cases where the data within a dataset cannot be put into a single dataframe. \nIn those cases you can use the other method to export the dataset to a dictionary from name of the measured parameter to Pandas dataframes.",
"_____no_output_____"
]
],
[
[
"dataset1D.to_pandas_dataframe_dict()",
"_____no_output_____"
]
],
[
[
"When exporting a two or higher dimensional datasets as a Pandas DataFrame a [MultiIndex](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) is used to index the measured parameter based on all the dependencies",
"_____no_output_____"
]
],
[
[
"dataset2D.to_pandas_dataframe()[0:10]",
"_____no_output_____"
]
],
[
[
"If your data is on a regular grid it may make sense to view the data as an [XArray](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) Dataset. The dataset can be directly exported to a XArray Dataset.",
"_____no_output_____"
]
],
[
[
"dataset2D.to_xarray_dataset()",
"_____no_output_____"
]
],
[
[
"Note, however, that XArray is only suited for data that is on a rectangular grid with few or no missing values. If the data does not lie on a grid, all the measured data points will have an unique combination of the two dependent parameters. When exporting to XArray, NaN's will therefore replace all the missing combinations of `dac_ch1` and `dac_ch2` and the data is unlikely to be useful in this format.",
"_____no_output_____"
],
[
"For more details about using Pandas and XArray see [Working With Pandas and XArray](./Working-With-Pandas-and-XArray.ipynb)",
"_____no_output_____"
],
[
"It is also possible to export the datasets directly to various file formats see [Exporting QCoDes Datasets](./Exporting-data-to-other-file-formats.ipynb)",
"_____no_output_____"
],
[
"## Reloading datasets",
"_____no_output_____"
],
[
"To load existing datasets QCoDeS provides several functions. The most useful and generic function is called `load_by_run_spec`. \nThis function takes one or more pieces of information about a dataset and will either, if the dataset is uniquely identifiable by the information, load the dataset or print information about all the datasets that match the supplied information allowing you to provide more information to uniquely identify the dataset.",
"_____no_output_____"
],
[
"Here, we will load a dataset based on the `captured_run_id` printed on the plot above.",
"_____no_output_____"
]
],
[
[
"dataset1D.captured_run_id",
"_____no_output_____"
],
[
"loaded_ds = load_by_run_spec(captured_run_id=dataset1D.captured_run_id)",
"_____no_output_____"
],
[
"loaded_ds.the_same_dataset_as(dataset1D)",
"_____no_output_____"
]
],
[
[
"As long as you are working within one database file the dataset should be uniquely identified by `captured_run_id`. However, once you mix several datasets from different database files this is likely not unique. See the following section and [Extracting runs from one DB file to another](Extracting-runs-from-one-DB-file-to-another.ipynb) for more information on how to handle this.",
"_____no_output_____"
],
[
"### DataSet GUID",
"_____no_output_____"
],
[
"Internally each dataset is refereed too by a Globally Unique Identifier (GUID) that ensures that the dataset uniquely identified even if datasets from several databases with potentially identical captured_run_id, experiment and sample names.\nA dataset can always be reloaded from the GUID if known. ",
"_____no_output_____"
]
],
[
[
"print(f\"Dataset GUID is: {dataset1D.guid}\")",
"Dataset GUID is: aaaaaaaa-0000-0000-0000-01774e8f852e\n"
],
[
"loaded_ds = load_by_guid(dataset1D.guid)",
"_____no_output_____"
],
[
"loaded_ds.the_same_dataset_as(dataset1D)",
"_____no_output_____"
]
],
[
[
"## Specifying shape of measurement\nAs the context manager allows you to store data of any shape (with the only restriction being that you supply values for both dependent and independent parameters together), it cannot know if the data is being measured on a grid. As a consequence, the Numpy array of data loaded from the dataset may not be of the shape that you expect. `plot_dataset`, `DataSet.to_pandas...` and `DataSet.to_xarray...` contain logic that can detect the shape of the data measured at load time. However, if you know the shape of the measurement that you are going to perform up front, you can choose to specify it before initializing the measurement using ``Measurement.set_shapes`` method.\n\n`dataset.get_parameter_data` and `dataset.cache.data` automatically makes use of this information to return shaped data when loaded from the database. Note that these two methods behave slightly different when loading data on a partially completed dataset. `dataset.get_parameter_data` will only reshape the data if the number of points measured matches the number of points expected according to the metadata. `dataset.cache.data` will however return a dataset with empty placeholders (either NaN, zeros or empty strings depending on the datatypes) for missing values in a partially filled dataset. \n\nNote that if you use the doNd functions demonstrated in [Using doNd functions in comparison to Measurement context manager for performing measurements](Using_doNd_functions_in_comparison_to_Measurement_context_manager_for_performing_measurements.ipynb) the shape information will be detected and stored automatically.\n\nIn the example below we show how the shape can be specified manually.\n",
"_____no_output_____"
]
],
[
[
"n_points_1 = 100\nn_points_2 = 200\n\nmeas_with_shape = Measurement(exp=exp, name='shape_specification_example_measurement')\nmeas_with_shape.register_parameter(dac.ch1) # register the first independent parameter\nmeas_with_shape.register_parameter(dac.ch2) # register the second independent parameter\nmeas_with_shape.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2)) # now register the dependent oone\n\nmeas_with_shape.set_shapes(detect_shape_of_measurement((dmm.v2,), (n_points_1, n_points_2)))\n\nwith meas_with_shape.run() as datasaver:\n\n for v1 in np.linspace(-1, 1, n_points_1):\n for v2 in np.linspace(-1, 1, n_points_2):\n dac.ch1(v1)\n dac.ch2(v2)\n val = dmm.v2.get()\n datasaver.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v2, val))\n \n dataset = datasaver.dataset # convenient to have for plotting",
"Starting experimental run with id: 382. \n"
],
[
"for name, data in dataset.get_parameter_data()['dmm_v2'].items():\n print(f\"{name}: data.shape={data.shape}, expected_shape=({n_points_1},{n_points_2})\")\n assert data.shape == (n_points_1, n_points_2)",
"dmm_v2: data.shape=(100, 200), expected_shape=(100,200)\ndac_ch1: data.shape=(100, 200), expected_shape=(100,200)\ndac_ch2: data.shape=(100, 200), expected_shape=(100,200)\n"
]
],
[
[
"## Performing several measuments concurrently",
"_____no_output_____"
],
[
"It is possible to perform two or more measurements at the same time. This may be convenient if you need to measure several parameters as a function of the same independent parameters.",
"_____no_output_____"
]
],
[
[
"# setup two measurements\nmeas1 = Measurement(exp=exp, name='multi_measurement_1')\nmeas1.register_parameter(dac.ch1) \nmeas1.register_parameter(dac.ch2) \nmeas1.register_parameter(dmm.v1, setpoints=(dac.ch1, dac.ch2))\n\nmeas2 = Measurement(exp=exp, name='multi_measurement_2')\nmeas2.register_parameter(dac.ch1) \nmeas2.register_parameter(dac.ch2)\nmeas2.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2))\n\nwith meas1.run() as datasaver1, meas2.run() as datasaver2:\n\n v1points = np.concatenate((np.linspace(-2, -0.5, 10),\n np.linspace(-0.51, 0.5, 200),\n np.linspace(0.51, 2, 10)))\n v2points = np.concatenate((np.linspace(-2, -0.25, 10),\n np.linspace(-0.26, 0.5, 200),\n np.linspace(0.51, 2, 10)))\n \n for v1 in v1points:\n for v2 in v2points:\n dac.ch1(v1)\n dac.ch2(v2)\n val1 = dmm.v1.get()\n datasaver1.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v1, val1))\n val2 = dmm.v2.get()\n datasaver2.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v2, val2))\n",
"Starting experimental run with id: 383. \nStarting experimental run with id: 384. \n"
],
[
"ax, cbax = plot_dataset(datasaver1.dataset)",
"_____no_output_____"
],
[
"ax, cbax = plot_dataset(datasaver2.dataset)",
"_____no_output_____"
]
],
[
[
"## Interrupting measurements early\n\nThere may be cases where you do not want to complete a measurement. Currently QCoDeS is designed to allow the user\nto interrupt the measurements with a standard KeyBoardInterrupt. KeyBoardInterrupts can be raised with either a Ctrl-C keyboard shortcut or using the interrupt button in Juypter / Spyder which is typically in the form of a Square stop button. QCoDeS is designed such that KeyboardInterrupts are delayed around critical parts of the code and the measurement is stopped when its safe to do so. ",
"_____no_output_____"
],
[
"## QCoDeS Array and MultiParameter",
"_____no_output_____"
],
[
"The ``Measurement`` object supports automatic handling of ``Array`` and ``MultiParameters``. When registering these parameters \nthe individual components are unpacked and added to the dataset as if they were separate parameters. Lets consider a ``MultiParamter`` with array components as the most general case.\n\nFirst lets use a dummy instrument that produces data as ``Array`` and ``MultiParameters``.",
"_____no_output_____"
]
],
[
[
"from qcodes.tests.instrument_mocks import DummyChannelInstrument",
"_____no_output_____"
],
[
"mydummy = DummyChannelInstrument('MyDummy')",
"_____no_output_____"
]
],
[
[
"This instrument produces two ``Array``s with the names, shapes and setpoints given below.",
"_____no_output_____"
]
],
[
[
"mydummy.A.dummy_2d_multi_parameter.names",
"_____no_output_____"
],
[
"mydummy.A.dummy_2d_multi_parameter.shapes",
"_____no_output_____"
],
[
"mydummy.A.dummy_2d_multi_parameter.setpoint_names",
"_____no_output_____"
],
[
"meas = Measurement(exp=exp)\n\nmeas.register_parameter(mydummy.A.dummy_2d_multi_parameter)\nmeas.parameters",
"_____no_output_____"
]
],
[
[
"When adding the MultiParameter to the measurement we can see that we add each of the individual components as a \nseparate parameter.",
"_____no_output_____"
]
],
[
[
"with meas.run() as datasaver:\n datasaver.add_result((mydummy.A.dummy_2d_multi_parameter, mydummy.A.dummy_2d_multi_parameter()))",
"Starting experimental run with id: 385. \n"
]
],
[
[
"And when adding the result of a ``MultiParameter`` it is automatically unpacked into its components.",
"_____no_output_____"
]
],
[
[
"plot_dataset(datasaver.dataset)",
"_____no_output_____"
],
[
"datasaver.dataset.get_parameter_data('MyDummy_ChanA_that')",
"_____no_output_____"
],
[
"datasaver.dataset.to_pandas_dataframe()",
"_____no_output_____"
],
[
"datasaver.dataset.to_xarray_dataset()",
"_____no_output_____"
]
],
[
[
"## Avoiding verbosity of the Measurement context manager for simple measurements\n\nFor simple 1D/2D grid-type of measurements, it may feel like an overkill to use the verbose and flexible Measurement context manager construct. For this case, so-called ``doNd`` functions come ti rescue - convenient one- or two-line calls, read more about them in [Using doNd functions](./Using_doNd_functions_in_comparison_to_Measurement_context_manager_for_performing_measurements.ipynb).",
"_____no_output_____"
],
[
"## Optimizing measurement time\n\nThere are measurements that are data-heavy or time consuming, or both. QCoDeS provides some features and tools that should help in optimizing the measurement time. Some of those are:\n\n* [Saving data in the background](./Saving_data_in_the_background.ipynb)\n* Setting more appropriate ``paramtype`` when registering parameters, see [Paramtypes explained](./Paramtypes%20explained.ipynb)\n* Adding result to datasaver by creating threads per instrument, see [Threaded data acquisition](./Threaded%20data%20acquisition.ipynb)\n",
"_____no_output_____"
],
[
"## The power of the Measurement context manager construct\n\nThis new form is so free that we may easily do thing impossible with the old Loop construct.",
"_____no_output_____"
],
[
"Say, that from the plot of the above 1D measurement, \nwe decide that a voltage below 1 V is uninteresting,\nso we stop the sweep at that point, thus,\nwe do not know in advance how many points we'll measure.",
"_____no_output_____"
]
],
[
[
"meas = Measurement(exp=exp)\nmeas.register_parameter(dac.ch1) # register the first independent parameter\nmeas.register_parameter(dmm.v1, setpoints=(dac.ch1,)) # now register the dependent oone\n\nwith meas.run() as datasaver:\n \n for set_v in np.linspace(0, 25, 100):\n dac.ch1.set(set_v)\n get_v = dmm.v1.get() \n datasaver.add_result((dac.ch1, set_v),\n (dmm.v1, get_v))\n\n if get_v < 1:\n break\n \n dataset = datasaver.dataset",
"Starting experimental run with id: 386. \n"
],
[
"ax, cbax = plot_dataset(dataset)",
"_____no_output_____"
]
],
[
[
"Or we might want to simply get as many points as possible in 10 s\nrandomly sampling the region between 0 V and 10 V (for the setpoint axis).",
"_____no_output_____"
]
],
[
[
"from time import monotonic, sleep\n\nwith meas.run() as datasaver:\n \n t_start = monotonic()\n \n while monotonic() - t_start < 3:\n set_v = 10/2*(np.random.rand() + 1)\n dac.ch1.set(set_v)\n \n # some sleep to not get too many points (or to let the system settle)\n sleep(0.04)\n \n get_v = dmm.v1.get() \n datasaver.add_result((dac.ch1, set_v),\n (dmm.v1, get_v))\n \n dataset = datasaver.dataset # convenient to have for plotting",
"Starting experimental run with id: 387. \n"
],
[
"axes, cbax = plot_dataset(dataset)\n# we slightly tweak the plot to better visualise the highly non-standard axis spacing\naxes[0].lines[0].set_marker('o')\naxes[0].lines[0].set_markerfacecolor((0.6, 0.6, 0.9))\naxes[0].lines[0].set_markeredgecolor((0.4, 0.6, 0.9))\naxes[0].lines[0].set_color((0.8, 0.8, 0.8))",
"_____no_output_____"
]
],
[
[
"### Finer sampling in 2D",
"_____no_output_____"
],
[
"Looking at the plot of the 2D measurement above, we may decide to sample more finely in the central region:",
"_____no_output_____"
]
],
[
[
"meas = Measurement(exp=exp)\nmeas.register_parameter(dac.ch1) # register the first independent parameter\nmeas.register_parameter(dac.ch2) # register the second independent parameter\nmeas.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2)) # now register the dependent oone",
"_____no_output_____"
],
[
"with meas.run() as datasaver:\n\n v1points = np.concatenate((np.linspace(-1, -0.5, 5),\n np.linspace(-0.51, 0.5, 200),\n np.linspace(0.51, 1, 5)))\n v2points = np.concatenate((np.linspace(-1, -0.25, 5),\n np.linspace(-0.26, 0.5, 200),\n np.linspace(0.51, 1, 5)))\n \n for v1 in v1points:\n for v2 in v2points:\n dac.ch1(v1)\n dac.ch2(v2)\n val = dmm.v2.get()\n datasaver.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v2, val))\n\n dataset = datasaver.dataset # convenient to have for plotting",
"Starting experimental run with id: 388. \n"
],
[
"ax, cbax = plot_dataset(dataset)",
"_____no_output_____"
]
],
[
[
"### Simple adaptive 2D sweep",
"_____no_output_____"
],
[
".. or even perform an adaptive sweep... ooohh...\n(the example below is a not-very-clever toy model example,\nbut it nicely shows a semi-realistic measurement that the old Loop\ncould not handle)",
"_____no_output_____"
]
],
[
[
"v1_points = np.linspace(-1, 1, 250)\nv2_points = np.linspace(1, -1, 250)\n\nthreshold = 0.25\n\nwith meas.run() as datasaver:\n # Do normal sweeping until the peak is detected\n \n for v2ind, v2 in enumerate(v2_points):\n for v1ind, v1 in enumerate(v1_points):\n dac.ch1(v1)\n dac.ch2(v2)\n val = dmm.v2.get()\n datasaver.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v2, val))\n if val > threshold:\n break\n else:\n continue\n break\n \n print(v1ind, v2ind, val)\n print('-'*10)\n \n # now be more clever, meandering back and forth over the peak\n doneyet = False\n rowdone = False\n v1_step = 1\n while not doneyet:\n v2 = v2_points[v2ind]\n v1 = v1_points[v1ind+v1_step-1]\n dac.ch1(v1)\n dac.ch2(v2)\n val = dmm.v2.get()\n datasaver.add_result((dac.ch1, v1),\n (dac.ch2, v2),\n (dmm.v2, val))\n if val < threshold:\n if rowdone:\n doneyet = True\n v2ind += 1\n v1_step *= -1\n rowdone = True\n else:\n v1ind += v1_step\n rowdone = False\n \ndataset = datasaver.dataset # convenient to have for plotting",
"Starting experimental run with id: 389. \n130 46 0.2505535776516436\n----------\n"
],
[
"ax, cbax = plot_dataset(dataset)",
"2021-01-29 15:33:09,196 ¦ py.warnings ¦ WARNING ¦ warnings ¦ _showwarnmsg ¦ 110 ¦ c:\\users\\jens-work\\source\\repos\\qcodes\\qcodes\\dataset\\data_export.py:172: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray\n return np.array(rows)\n\n"
]
],
[
[
"### Random sampling ",
"_____no_output_____"
],
[
"We may also chose to sample completely randomly across the phase space",
"_____no_output_____"
]
],
[
[
"meas2 = Measurement(exp=exp, name='random_sampling_measurement')\nmeas2.register_parameter(dac.ch1) \nmeas2.register_parameter(dac.ch2)\nmeas2.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2))\n\nthreshold = 0.25\n\nnpoints = 5000\n\nwith meas2.run() as datasaver:\n for i in range(npoints):\n x = 2*(np.random.rand()-.5)\n y = 2*(np.random.rand()-.5)\n dac.ch1(x)\n dac.ch2(y)\n z = dmm.v2()\n datasaver.add_result((dac.ch1, x),\n (dac.ch2, y),\n (dmm.v2, z))\ndataset = datasaver.dataset # convenient to have for plotting",
"Starting experimental run with id: 390. \n"
],
[
"ax, cbax = plot_dataset(dataset)",
"_____no_output_____"
],
[
"datasaver.dataset.to_pandas_dataframe()[0:10]",
"_____no_output_____"
]
],
[
[
"Unlike the data measured above, which lies on a grid, here, all the measured data points have an unique combination of the two dependent parameters. When exporting to XArray NaN's will therefore replace all the missing combinations of `dac_ch1` and `dac_ch2` and the data is unlikely to be useful in this format. ",
"_____no_output_____"
]
],
[
[
"datasaver.dataset.to_xarray_dataset()",
"_____no_output_____"
]
],
[
[
"### Optimiser",
"_____no_output_____"
],
[
"An example to show that the algorithm is flexible enough to be used with completely unstructured data such as the output of an downhill simplex optimization. The downhill simplex is somewhat more sensitive to noise and it is important that 'fatol' is set to match the expected noise.",
"_____no_output_____"
]
],
[
[
"from scipy.optimize import minimize",
"_____no_output_____"
],
[
"def set_and_measure(*xk):\n dac.ch1(xk[0])\n dac.ch2(xk[1])\n return dmm.v2.get()\n\nnoise = 0.0005\nx0 = [np.random.rand(), np.random.rand()]\n\nwith meas.run() as datasaver:\n def mycallback(xk):\n dac.ch1(xk[0])\n dac.ch2(xk[1])\n datasaver.add_result((dac.ch1, xk[0]),\n (dac.ch2, xk[1]),\n (dmm.v2, dmm.v2.cache.get()))\n \n res = minimize(lambda x: -set_and_measure(*x),\n x0,\n method='Nelder-Mead',\n tol=1e-10, \n callback=mycallback,\n options={'fatol': noise})\n \n dataset = datasaver.dataset # convenient to have for plotting",
"Starting experimental run with id: 391. \n"
],
[
"res",
"_____no_output_____"
],
[
"ax, cbax = plot_dataset(dataset)",
"2021-01-29 15:33:10,709 ¦ py.warnings ¦ WARNING ¦ warnings ¦ _showwarnmsg ¦ 110 ¦ c:\\users\\jens-work\\source\\repos\\qcodes\\qcodes\\dataset\\data_export.py:172: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray\n return np.array(rows)\n\n"
]
],
[
[
"## Subscriptions\n\nThe ``Measurement`` object can also handle subscriptions to the dataset. Subscriptions are, under the hood, triggers in the underlying SQLite database. Therefore, the subscribers are only called when data is written to the database (which happens every `write_period`).\n\nWhen making a subscription, two things must be supplied: a function and a mutable state object. The function **MUST** have a call signature of `f(result_list, length, state, **kwargs)`, where ``result_list`` is a list of tuples of parameter values inserted in the dataset, ``length`` is an integer (the step number of the run), and ``state`` is the mutable state object. The function does not need to actually use these arguments, but the call signature must match this.\n\nLet us consider two generic examples:",
"_____no_output_____"
],
[
"### Subscription example 1: simple printing",
"_____no_output_____"
]
],
[
[
"def print_which_step(results_list, length, state):\n \"\"\"\n This subscriber does not use results_list nor state; it simply\n prints how many results we have added to the database\n \"\"\"\n print(f'The run now holds {length} rows')\n \n \nmeas = Measurement(exp=exp)\nmeas.register_parameter(dac.ch1)\nmeas.register_parameter(dmm.v1, setpoints=(dac.ch1,))\n\nmeas.write_period = 0.2 # We write to the database every 0.2s\n\nmeas.add_subscriber(print_which_step, state=[])\n\nwith meas.run() as datasaver:\n for n in range(7):\n datasaver.add_result((dac.ch1, n), (dmm.v1, n**2))\n print(f'Added points to measurement, step {n}.')\n sleep(0.2)",
"Starting experimental run with id: 392. \nAdded points to measurement, step 0.\nThe run now holds 2 rows\nAdded points to measurement, step 1.\nThe run now holds 3 rows\nAdded points to measurement, step 2.\nThe run now holds 4 rows\nAdded points to measurement, step 3.\nThe run now holds 5 rows\nAdded points to measurement, step 4.\nThe run now holds 6 rows\nAdded points to measurement, step 5.\nThe run now holds 7 rows\nAdded points to measurement, step 6.\nThe run now holds 7 rows\nThe run now holds 7 rows\n"
]
],
[
[
"### Subscription example 2: using the state\n\nWe add two subscribers now.",
"_____no_output_____"
]
],
[
[
"def get_list_of_first_param(results_list, length, state):\n \"\"\"\n Modify the state (a list) to hold all the values for\n the first parameter\n \"\"\"\n param_vals = [parvals[0] for parvals in results_list]\n state += param_vals\n\n\nmeas = Measurement(exp=exp)\nmeas.register_parameter(dac.ch1)\nmeas.register_parameter(dmm.v1, setpoints=(dac.ch1,))\n\nmeas.write_period = 0.2 # We write to the database every 0.2s\n\nfirst_param_list = []\n\nmeas.add_subscriber(print_which_step, state=[])\nmeas.add_subscriber(get_list_of_first_param, state=first_param_list)\n\nwith meas.run() as datasaver:\n for n in range(10):\n datasaver.add_result((dac.ch1, n), (dmm.v1, n**2))\n print(f'Added points to measurement, step {n}.')\n print(f'First parameter value list: {first_param_list}')\n sleep(0.1)",
"Starting experimental run with id: 393. \nAdded points to measurement, step 0.\nFirst parameter value list: []\nAdded points to measurement, step 1.\nFirst parameter value list: []\nThe run now holds 2 rows\nAdded points to measurement, step 2.\nFirst parameter value list: [0, 1, 2]\nAdded points to measurement, step 3.\nFirst parameter value list: [0, 1, 2]\nThe run now holds 5 rows\nAdded points to measurement, step 4.\nFirst parameter value list: [0, 1, 2, 3, 4]\nAdded points to measurement, step 5.\nFirst parameter value list: [0, 1, 2, 3, 4]\nThe run now holds 7 rows\nAdded points to measurement, step 6.\nFirst parameter value list: [0, 1, 2, 3, 4, 5, 6]\nAdded points to measurement, step 7.\nFirst parameter value list: [0, 1, 2, 3, 4, 5, 6]\nThe run now holds 9 rows\nAdded points to measurement, step 8.\nFirst parameter value list: [0, 1, 2, 3, 4, 5, 6, 7, 8]\nAdded points to measurement, step 9.\nFirst parameter value list: [0, 1, 2, 3, 4, 5, 6, 7, 8]\nThe run now holds 10 rows\nThe run now holds 10 rows\nThe run now holds 10 rows\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbb9a8241ab206f62ad24705afae1f9566a08e1e
| 2,650 |
ipynb
|
Jupyter Notebook
|
docs/notebooks/domain_thesaurus.ipynb
|
DunZhang/DomainSpecificThesaurus
|
539dcdbe618ade1864e56423667f28afb800e1e1
|
[
"MIT"
] | 13 |
2019-02-21T23:16:55.000Z
|
2022-03-03T16:27:24.000Z
|
docs/notebooks/domain_thesaurus.ipynb
|
DunZhang/DomainSpecificThesaurus
|
539dcdbe618ade1864e56423667f28afb800e1e1
|
[
"MIT"
] | 3 |
2021-04-28T11:49:20.000Z
|
2022-02-25T17:31:06.000Z
|
docs/notebooks/domain_thesaurus.ipynb
|
DunZhang/DomainSpecificThesaurus
|
539dcdbe618ade1864e56423667f28afb800e1e1
|
[
"MIT"
] | 6 |
2019-02-23T16:01:36.000Z
|
2021-09-05T10:27:54.000Z
| 28.804348 | 128 | 0.593585 |
[
[
[
"This a simple tutorial to extract a domain thesaurus.",
"_____no_output_____"
]
],
[
[
"import os\nimport codecs\nimport DST\n# you can set your own output directory path\nDEFAULT_OUT_DIR = os.path.dirname(os.getcwd())\nfrom DST.domain_thesaurus.DomainThesaurus import DomainThesaurus\nfrom DST.datasets.DownloadData import DownloadData",
"_____no_output_____"
],
[
"# First, you should get clean domain corpus and general vocabulary. This may takes a long time.\n# We already provide clean domain corpus and general vocabulary, so you can download and use them.\n\ndownload_data = DownloadData()\n# download the domain corpus\ndownload_data.download_data(os.path.join(DEFAULT_OUT_DIR, \"eng_corpus.zip\"), download_file_name=\"eng_corpus\",\n overwrite=False)\n# download general vocab\ndownload_data.download_data(os.path.join(DEFAULT_OUT_DIR, \"general_vocab.zip\"), download_file_name=\"general_vocab\",\n overwrite=False)",
"_____no_output_____"
],
[
"\n# start to extract domain thesaurus\n# for different datasets, you should set different parameters\n# In this example, we use default parameters\n# The \"cleanEng.txt\" and \"general_vocab.json\" are the files you download\ndst = DomainThesaurus(domain_specific_corpus_path=os.path.join(DEFAULT_OUT_DIR, \"cleanEng.txt\"),\n general_vocab_path=os.path.join(DEFAULT_OUT_DIR, \"general_vocab.json\"),\n outputDir=DEFAULT_OUT_DIR)\neng_domain_thesaurus = dst.extract()\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbb9b43c8ebd78a8c4920f7aba0124b48460f406
| 8,886 |
ipynb
|
Jupyter Notebook
|
Nbs/01_activations.ipynb
|
ayasyrev/model_constructor
|
3759a02dd9f7aa1ca3e6a4a5aefe72380886207e
|
[
"Apache-2.0"
] | 3 |
2020-08-02T09:18:27.000Z
|
2021-12-22T07:43:37.000Z
|
Nbs/01_activations.ipynb
|
ayasyrev/model_constructor
|
3759a02dd9f7aa1ca3e6a4a5aefe72380886207e
|
[
"Apache-2.0"
] | 16 |
2020-11-09T11:35:13.000Z
|
2021-12-23T13:04:54.000Z
|
Nbs/01_activations.ipynb
|
ayasyrev/model_constructor
|
3759a02dd9f7aa1ca3e6a4a5aefe72380886207e
|
[
"Apache-2.0"
] | 2 |
2020-04-08T20:56:48.000Z
|
2021-01-20T13:37:52.000Z
| 28.572347 | 125 | 0.543214 |
[
[
[
"# Activations functions.\n\n> Activations functions. Set of act_fn.",
"_____no_output_____"
],
[
"Activation functions, forked from https://github.com/rwightman/pytorch-image-models/timm/models/layers/activations.py",
"_____no_output_____"
],
[
"Mish: Self Regularized \nNon-Monotonic Activation Function \nhttps://github.com/digantamisra98/Mish \nfastai forum discussion https://forums.fast.ai/t/meet-mish-new-activation-function-possible-successor-to-relu \n",
"_____no_output_____"
],
[
"Mish is in Pytorch from version 1.9. Use this version! ",
"_____no_output_____"
]
],
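A quick sanity check that the built-in module can replace the custom one defined below (assuming torch >= 1.9 is installed):

```python
import torch
import torch.nn as nn

act_fn = nn.Mish()  # built into PyTorch since 1.9
x = torch.linspace(-3, 3, 7)
print(act_fn(x))  # equal to x * tanh(softplus(x))
```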
[
[
"# hide\n# forked from https://github.com/rwightman/pytorch-image-models/timm/models/layers/activations.py\n\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F",
"_____no_output_____"
]
],
[
[
"## Mish",
"_____no_output_____"
]
],
[
[
"def mish(x, inplace: bool = False):\n \"\"\"Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681\n NOTE: I don't have a working inplace variant\n \"\"\"\n return x.mul(F.softplus(x).tanh())\n\n\nclass Mish(nn.Module):\n \"\"\"Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681\"\"\"\n def __init__(self, inplace: bool = False):\n \"\"\"NOTE: inplace variant not working \"\"\"\n super(Mish, self).__init__()\n\n def forward(self, x):\n return mish(x)",
"_____no_output_____"
]
],
[
[
"## MishJit",
"_____no_output_____"
]
],
[
[
"@torch.jit.script\ndef mish_jit(x, _inplace: bool = False):\n \"\"\"Jit version of Mish.\n Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681\n \"\"\"\n return x.mul(F.softplus(x).tanh())\n\n\nclass MishJit(nn.Module):\n def __init__(self, inplace: bool = False):\n \"\"\"Jit version of Mish.\n Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681\"\"\"\n super(MishJit, self).__init__()\n\n def forward(self, x):\n return mish_jit(x)",
"_____no_output_____"
]
],
[
[
"## MishJitMe - memory-efficient.",
"_____no_output_____"
]
],
[
[
"@torch.jit.script\ndef mish_jit_fwd(x):\n # return x.mul(torch.tanh(F.softplus(x)))\n return x.mul(F.softplus(x).tanh())\n\n\[email protected]\ndef mish_jit_bwd(x, grad_output):\n x_sigmoid = torch.sigmoid(x)\n x_tanh_sp = F.softplus(x).tanh()\n return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))\n\n\nclass MishJitAutoFn(torch.autograd.Function):\n \"\"\" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681\n A memory efficient, jit scripted variant of Mish\"\"\"\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return mish_jit_fwd(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n x = ctx.saved_tensors[0]\n return mish_jit_bwd(x, grad_output)\n\n\ndef mish_me(x, inplace=False):\n return MishJitAutoFn.apply(x)\n\n\nclass MishMe(nn.Module):\n \"\"\" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681\n A memory efficient, jit scripted variant of Mish\"\"\"\n def __init__(self, inplace: bool = False):\n super(MishMe, self).__init__()\n\n def forward(self, x):\n return MishJitAutoFn.apply(x)",
"_____no_output_____"
]
],
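[
[
"Added note (derived from the code above, not part of the original notebook): since $\\mathrm{mish}(x) = x\\,\\tanh(\\mathrm{softplus}(x))$ and $\\tfrac{d}{dx}\\,\\mathrm{softplus}(x) = \\sigma(x)$, the derivative is $\\mathrm{mish}'(x) = \\tanh(\\mathrm{softplus}(x)) + x\\,\\sigma(x)\\,(1 - \\tanh^{2}(\\mathrm{softplus}(x)))$, which is exactly the factor that `mish_jit_bwd` multiplies into `grad_output`.",
"_____no_output_____"
]
],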
[
[
"## HardMishJit",
"_____no_output_____"
]
],
[
[
"@torch.jit.script\ndef hard_mish_jit(x, inplace: bool = False):\n \"\"\" Hard Mish\n Experimental, based on notes by Mish author Diganta Misra at\n https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md\n \"\"\"\n return 0.5 * x * (x + 2).clamp(min=0, max=2)\n\n\nclass HardMishJit(nn.Module):\n \"\"\" Hard Mish\n Experimental, based on notes by Mish author Diganta Misra at\n https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md\n \"\"\"\n def __init__(self, inplace: bool = False):\n super(HardMishJit, self).__init__()\n\n def forward(self, x):\n return hard_mish_jit(x)",
"_____no_output_____"
]
],
[
[
"## HardMishJitMe - memory efficient.",
"_____no_output_____"
]
],
[
[
"@torch.jit.script\ndef hard_mish_jit_fwd(x):\n return 0.5 * x * (x + 2).clamp(min=0, max=2)\n\n\[email protected]\ndef hard_mish_jit_bwd(x, grad_output):\n m = torch.ones_like(x) * (x >= -2.)\n m = torch.where((x >= -2.) & (x <= 0.), x + 1., m)\n return grad_output * m\n\n\nclass HardMishJitAutoFn(torch.autograd.Function):\n \"\"\" A memory efficient, jit scripted variant of Hard Mish\n Experimental, based on notes by Mish author Diganta Misra at\n https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md\n \"\"\"\n @staticmethod\n def forward(ctx, x):\n ctx.save_for_backward(x)\n return hard_mish_jit_fwd(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n x = ctx.saved_tensors[0]\n return hard_mish_jit_bwd(x, grad_output)\n\n\ndef hard_mish_me(x, inplace: bool = False):\n return HardMishJitAutoFn.apply(x)\n\n\nclass HardMishMe(nn.Module):\n \"\"\" A memory efficient, jit scripted variant of Hard Mish\n Experimental, based on notes by Mish author Diganta Misra at\n https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md\n \"\"\"\n def __init__(self, inplace: bool = False):\n super(HardMishMe, self).__init__()\n\n def forward(self, x):\n return HardMishJitAutoFn.apply(x)",
"_____no_output_____"
],
[
"#hide\nact_fn = Mish(inplace=True)",
"_____no_output_____"
]
],
[
[
"# end\nmodel_constructor\nby ayasyrev",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
cbb9c6811f35b309de4d5ce182cef9fa72da1a5c
| 4,004 |
ipynb
|
Jupyter Notebook
|
Analysis/hypothesis+testing.ipynb
|
apoorvegupta/Data-Cleaning-and-Analysis
|
e92db3848efebd9bc883ea056961509856a9a3c6
|
[
"MIT"
] | null | null | null |
Analysis/hypothesis+testing.ipynb
|
apoorvegupta/Data-Cleaning-and-Analysis
|
e92db3848efebd9bc883ea056961509856a9a3c6
|
[
"MIT"
] | null | null | null |
Analysis/hypothesis+testing.ipynb
|
apoorvegupta/Data-Cleaning-and-Analysis
|
e92db3848efebd9bc883ea056961509856a9a3c6
|
[
"MIT"
] | null | null | null | 27.805556 | 84 | 0.536963 |
[
[
[
"from scipy.stats import norm\nfrom math import sqrt\nimport statistics\nimport pandas as pd\n\ndf = pd.read_csv(\"C:/Users/PRATYUSH/Desktop/IDS/Data/USvideos.csv\")\n\ndef two_sided_hypo(sample_mean, pop_mean, std_dev, sample_size, alpha):\n actual_z = abs(norm.ppf(alpha/2))\n hypo_z = (sample_mean - pop_mean) / (std_dev/sqrt(sample_size))\n print('actual z value :', actual_z)\n print('hypothesis z value :', hypo_z, '\\n')\n if hypo_z >= actual_z or hypo_z <= -(actual_z):\n return True\n else:\n return False\n \n\nalpha = 0.05\nsample_mean = 4883.16\npop_mean = df['comment_count'].mean()\nsample_size = 1000\nstd_dev = statistics.stdev(df.comment_count)\n\n\nprint('H0 : μ =', pop_mean)\nprint('H1 : μ !=', pop_mean)\nprint('alpha value is :', alpha, '\\n')\n\nreject = two_sided_hypo(sample_mean, pop_mean, std_dev, sample_size, alpha)\nif reject:\n print('Reject NULL hypothesis')\nelse:\n print('Failed to reject NULL hypothesis')\n#variation with different parameters can be shown here",
"5976.430471698113\nH0 : μ = 5976.430471698113\nH1 : μ != 5976.430471698113\nalpha value is : 0.05 \n\nactual z value : 1.95996398454\nhypothesis z value : -1.0228801695200331 \n\nFailed to reject NULL hypothesis\n"
],
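[
"# Added reference note (not part of the original analysis): the statistic computed above is\n#   z = (sample_mean - pop_mean) / (std_dev / sqrt(sample_size))\n# and the two-sided test rejects H0 when |z| >= z_(alpha/2), i.e. abs(norm.ppf(alpha/2)).",
"_____no_output_____"
],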
[
"#one sided hypothesis test(for smaller than in NULL hypothesis)\ndef one_sided_hypo(sample_mean, pop_mean, std_dev, sample_size, alpha):\n actual_z = abs(norm.ppf(alpha))\n hypo_z = (sample_mean - pop_mean) / (std_dev/sqrt(sample_size))\n print('actual z value :', actual_z)\n print('hypothesis z value :', hypo_z, '\\n')\n if hypo_z >= actual_z:\n return True\n else:\n return False\n \nalpha = 0.05\nsample_mean = 4883.16\npop_mean = df['comment_count'].mean()\nsample_size = 1000\nstd_dev = statistics.stdev(df.comment_count)\n\nprint('H0 : μ <=', pop_mean)\nprint('H1 : μ >', pop_mean)\nprint('alpha value is :', alpha, '\\n')\n\nreject = one_sided_hypo(sample_mean, pop_mean, std_dev, sample_size, alpha)\nif reject:\n print('Reject NULL hypothesis')\nelse:\n print('Failed to reject NULL hypothesis')\n#variation with different parameters can be shown here",
"H0 : μ <= 5976.430471698113\nH1 : μ > 5976.430471698113\nalpha value is : 0.05 \n\nactual z value : 1.64485362695\nhypothesis z value : -1.0228801695200331 \n\nFailed to reject NULL hypothesis\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
cbb9d311b581e49acbcededcbf0f551300ca8514
| 165,603 |
ipynb
|
Jupyter Notebook
|
PartA_Q5.ipynb
|
guptajay/CZ4042-Assignment-1
|
427cb85a2be152130281482ad778a4058c0a694d
|
[
"MIT"
] | null | null | null |
PartA_Q5.ipynb
|
guptajay/CZ4042-Assignment-1
|
427cb85a2be152130281482ad778a4058c0a694d
|
[
"MIT"
] | null | null | null |
PartA_Q5.ipynb
|
guptajay/CZ4042-Assignment-1
|
427cb85a2be152130281482ad778a4058c0a694d
|
[
"MIT"
] | null | null | null | 178.259419 | 66,418 | 0.851567 |
[
[
[
"# CZ4042 Neural Networks & Deep Learning\n## Assignment - 1: Part A, Question 5\n\n> Gupta Jay \n> U1822549K \n> School of Computer Science and Engineering \n> Nanyang Technological University, Singapore ",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"# Setting the seed here is sufficient. \n# If you don't plan to use these starter code, make sure you add this cell.\n\nSEED = 42\n\nimport os\nos.environ['TF_CUDNN_DETERMINISTIC'] = '1'\n\nimport random \nrandom.seed(SEED)\n\nimport numpy as np\nnp.random.seed(SEED)\n\nimport tensorflow as tf\ntf.random.set_seed(SEED)",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\n\n# tensorflow libraries\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Sequential\n\n# sklearn libraries are useful for preprocessing, performance measures, etc.\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\n\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"print(tf.__version__)",
"2.6.0\n"
],
[
"# Supress any warnings for clean presentation\nimport warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"df = pd.read_csv('./features_30_sec.csv')\ndf.head()",
"_____no_output_____"
],
[
"df['label'].value_counts()",
"_____no_output_____"
]
],
[
[
"Split and scale dataset",
"_____no_output_____"
]
],
[
[
"columns_to_drop = ['label','filename', 'length']\n\ndef prepare_dataset(df, columns_to_drop, test_size, random_state):\n\n # Encode the labels from 0 to n_classes-1 \n label_encoder = preprocessing.LabelEncoder()\n df['label'] = label_encoder.fit_transform(df['label'])\n \n # devide data to train and test\n df_train, df_test = train_test_split(df, test_size=test_size, random_state=random_state)\n \n # scale the training inputs\n x_train = df_train.drop(columns_to_drop,axis=1)\n y_train = df_train['label'].to_numpy()\n \n standard_scaler = preprocessing.StandardScaler()\n x_train_scaled = standard_scaler.fit_transform(x_train)\n\n #scale and prepare testing data\n x_test = df_test.drop(columns_to_drop,axis=1)\n x_test_scaled = standard_scaler.transform(x_test)\n y_test = df_test['label'].to_numpy() \n \n return x_train_scaled, y_train, x_test_scaled, y_test",
"_____no_output_____"
],
[
"X_train, y_train, X_test, y_test = prepare_dataset(df, columns_to_drop, test_size=0.3, random_state=0)\n\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)",
"(700, 57) (700,)\n(300, 57) (300,)\n"
]
],
[
[
"## Part A: Investigation on Dropouts",
"_____no_output_____"
]
],
[
[
"def getModel(first_layer_activation, second_layer_activation, add_dropout):\n # Inputs -> Dense -> Dropout -> Dense (Output)\n model = keras.Sequential()\n model.add(layers.Dense(16, activation=first_layer_activation, input_dim=X_train.shape[1]))\n # Parameterized Dropout if required\n if(add_dropout):\n model.add(layers.Dropout(0.3))\n model.add(layers.Dense(df['label'].nunique(), activation=second_layer_activation))\n return model\n\ndef getOptimizer():\n # Adam Optimizer (Default Parameters) -> Stochastic Gradient Descent\n optimizer = keras.optimizers.Adam()\n return optimizer",
"_____no_output_____"
],
[
"loss = \"sparse_categorical_crossentropy\"\nmetrics = [\"accuracy\"]\noptimizer = getOptimizer()\n\n# Model without the Dropout layer\nmodel_without_dropout = getModel(\"relu\", \"softmax\", add_dropout=False)\nmodel_without_dropout.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n\n# Model with the Dropout layer\nmodel_with_dropout = getModel(\"relu\", \"softmax\", add_dropout=True)\nmodel_with_dropout.compile(loss=loss, optimizer=optimizer, metrics=metrics)",
"_____no_output_____"
],
[
"epochs = 50\nverbose = 1\nbatch_size = 1\n\n# Fit both models\nhistory_without_dropout = model_without_dropout.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), verbose=verbose, batch_size=batch_size)\nhistory_with_dropout = model_with_dropout.fit(X_train, y_train, epochs=epochs, validation_data=(X_test, y_test), verbose=verbose, batch_size=batch_size)",
"2021-10-12 11:58:22.847355: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)\n"
]
],
[
[
"### Plotting Accuracy agaist Training Epochs",
"_____no_output_____"
]
],
[
[
"plt.style.use(\"seaborn\")\n\n# Plot Configuration\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 5))\nfig.suptitle('Model with Dropout vs. Model without Dropout')\n\n# Plot for model with Dropout Layer\n# ------\n# Extracting Train & Test Accuracy from model history\naccuracy = history_with_dropout.history['accuracy']\nval_accuracy = history_with_dropout.history['val_accuracy']\n\n# Plotting\nax1.plot(accuracy)\nax1.plot(val_accuracy)\n\n# Labelling\nax1.set_title('Model with Dropout Layer (Accuracy)')\nax1.set_ylabel('Accuracy')\nax1.set_xlabel('Epoch')\nax1.legend(['train', 'test'], loc='lower right')\nax1.set_yticks(np.arange(0.2, 0.9, 0.1))\n# ------\n\n# Plot for model without Dropout Layer\n# ------\n# Extracting Train & Test Accuracy from model history\naccuracy = history_without_dropout.history['accuracy']\nval_accuracy = history_without_dropout.history['val_accuracy']\n\n# Plotting\nax2.plot(accuracy)\nax2.plot(val_accuracy)\n\n# Labelling\nax2.set_title('Model without Dropout Layer (Accuracy)')\nax2.set_ylabel('Accuracy')\nax2.set_xlabel('Epoch')\nax2.legend(['train', 'test'], loc='lower right')\nax2.set_yticks(np.arange(0.2, 0.9, 0.1))\n# ------\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Plotting Loss agaist Training Epochs",
"_____no_output_____"
]
],
[
[
"plt.style.use(\"seaborn\")\n\n# Plot Configuration\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 5))\nfig.suptitle('Model with Dropout vs. Model without Dropout')\n\n# Plot for model with Dropout Layer\n# ------\n# Extracting Train & Test Loss from model history\nloss = history_with_dropout.history['loss']\nval_loss = history_with_dropout.history['val_loss']\n\n# Plotting\nax1.plot(loss)\nax1.plot(val_loss)\n\n# Labelling\nax1.set_title('Model with Dropout Layer (Loss)')\nax1.set_ylabel('Sparse Categorical Crossentropy Loss')\nax1.set_xlabel('Epoch')\nax1.legend(['train', 'test'], loc='upper right')\nax1.set_xticks(np.arange(0, 50+1, 5.0))\n# ------\n\n# Plot for model without Dropout Layer\n# ------\n# Extracting Train & Test Loss from model history\nloss = history_without_dropout.history['loss']\nval_loss = history_without_dropout.history['val_loss']\n\n# Plotting\nax2.plot(loss)\nax2.plot(val_loss)\n\n# Labelling\nax2.set_title('Model without Dropout Layer (Loss)')\nax2.set_ylabel('Sparse Categorical Crossentropy Loss')\nax2.set_xlabel('Epoch')\nax2.legend(['train', 'test'], loc='upper right')\nax2.set_xticks(np.arange(0, 50+1, 5.0))\n# ------\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Part B: Effect of removing Dropout \n\nFrom the graphs plotted above, we observe that the model without the Dropout layer has `lower test accuracies` and `higher test losses` as compared to the neural network model with the dropout layers. However, the `train accuracy` remains high and `train loss` remains low. This means that without the dropout layer, the model is overfitting on the data.\n\nOverfitting is one of the problems that occur during training of neural networks, which drives the training error of the network to a very small value at the expense of the test error. The network learns to respond correctly to the training inputs by remembering them too much but is unable to generalize to produce correct outputs to novel inputs. (Ref: CZ4042 Lecture 5 (NTU) - Model Selection & Overfitting, Jagath Chandana Rajapakse)",
"_____no_output_____"
],
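[
"As an illustrative aside (added here; not part of the original assignment code), one common way to limit the overfitting described above is to stop training once the validation loss stops improving. A minimal Keras sketch, assuming an example `patience` of 5 epochs:\n\n```python\nfrom tensorflow import keras\n\n# Stop training when val_loss has not improved for 5 epochs (assumed value)\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,\n                                           restore_best_weights=True)\n\n# history = model_without_dropout.fit(X_train, y_train, epochs=50, batch_size=1,\n#                                     validation_data=(X_test, y_test),\n#                                     callbacks=[early_stop])\n```",
"_____no_output_____"
],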
[
"## Part C: Addressing Overfitting\n\n**Use Regularization**\n\nUsing dropouts and early stopping are ways to prevent overfitting. Since we have already used both of them, another way to address overfitting is by using `L1 & L2 Regularization` techniques. During training, some weights attain large values to reduce training error, jeopardizing its ability to generalizing. In order to avoid this, a penalty term regularization term) is added to the cost function. (Ref: CZ4042 Lecture 5 (NTU) - Model Selection & Overfitting, Jagath Chandana Rajapakse)\n\n* **L1 Regualization (Lasso Regression)** - Add **\"absolute value of magnitude\"** of coefficient as penalty term to the loss function.\n* **L2 Regualization (Ridge Regression)** - Add **\"squared magnitude\"** of coefficient as penalty term to the loss function.",
"_____no_output_____"
],
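[
"A minimal sketch of the L2 regularization idea described above, applied to the two-layer model from this notebook (added for illustration; the `l2(0.01)` factor is an assumed example value, not a tuned one):\n\n```python\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, regularizers\n\n# Same architecture as getModel(..., add_dropout=True), plus an L2 penalty on the hidden-layer weights\nmodel_l2 = keras.Sequential([\n    layers.Dense(16, activation='relu', input_dim=X_train.shape[1],\n                 kernel_regularizer=regularizers.l2(0.01)),  # assumed regularization factor\n    layers.Dropout(0.3),\n    layers.Dense(df['label'].nunique(), activation='softmax')\n])\nmodel_l2.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n```",
"_____no_output_____"
],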
[
"## End of Part A, Question 5. ",
"_____no_output_____"
],
[
"## Conclusion\n\nFor questions in Part A, we examined building a neural network model that predicts the genre of a song by using many features as inputs, and tried to optimize the parameters of the model. Firstly, we built a simple two-layer feedforward neural network. After examining its accuracy and losses, we experimented with different batch sizes and different number of hidden neurons for model training. Afterwards, we investigated whether a three-layer neural network will be better, and the effects of having dropout layers. \n\nIn our final results, I conclude that a 2-layer neural network (with dropouts) with a batch size of `4` and hidden neuron size of `32` works optimally on the given dataset. \n\n### Limitations \nIn our current approach, we need to first extract the features from the audio clips, adding an extra step to our machine learning pipeline. Therefore, the quality of our model depends on the quality of feature extraction.\n\nWe see that in our optimized model, there are still some signs of overftting which should be addressed. \n\nI also think that the number of data points (training examples) are low, and our model will perform better if we have more data.\n\n### Most Impactful Optimization\nFinding the optimal batch size made our model better, however, increase the number of hidden neurons had the most impact on our model performance. As the number of neurons increased, the model was able to learn more complex feature representations of the input data. The functions learnt closely resemble the complexity of the data and the model is able to generalize well on unseen real world data. \n\n### Better Options\nAudio clips are waveform data a.k.a sequential data. There another type of Neural Networks called as `Recurrent Neural Networks` (RNNs), which are helpful in modelling sequential data. Like recurrent neural networks (RNNs), `transformers` are another type of netowrks designed to handle sequential input data. These options may be more suitable for audio datasets.\n\n### Extensions of current modelling approach\nOur current approach can be used for any set of features. Like audio, we can use some modelling techniques to extract features from image data, and then use our model to train on the data. Our pipeline will mostly remain the same, however, the image data preprocessing will be different. \n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cbb9d766f38995f211e8aaf691e83c76304458ef
| 97,098 |
ipynb
|
Jupyter Notebook
|
notebooks/t-distributed_stochastic_neighbor_embedding.ipynb
|
broadinstitute/2020-goodman-lee
|
2be629d73207fa349cfb048de9a8202aec1cc100
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/t-distributed_stochastic_neighbor_embedding.ipynb
|
broadinstitute/2020-goodman-lee
|
2be629d73207fa349cfb048de9a8202aec1cc100
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/t-distributed_stochastic_neighbor_embedding.ipynb
|
broadinstitute/2020-goodman-lee
|
2be629d73207fa349cfb048de9a8202aec1cc100
|
[
"BSD-3-Clause"
] | null | null | null | 924.742857 | 95,140 | 0.956178 |
[
[
[
"# $t$-distributed stochastic neighbor embedding ($t$-SNE)",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot\nimport numpy\nimport pandas\nimport seaborn\nimport sklearn.manifold",
"_____no_output_____"
],
[
"x = numpy.random.random((256, 128))",
"_____no_output_____"
],
[
"data = pandas.DataFrame(sklearn.manifold.TSNE(n_components=2).fit_transform(x))",
"_____no_output_____"
],
[
"matplotlib.pyplot.figure(figsize=(16, 8))\n\nseaborn.scatterplot(data=data, alpha=0.5)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cbb9de94cdffb820526fec97547e30543a0870f5
| 50,101 |
ipynb
|
Jupyter Notebook
|
ztffollow/nb/Forecasting.ipynb
|
divijsharma18/timedomain
|
73dbe66cd3c5a0d9cf5b2cb240c7c2fdd937c839
|
[
"MIT"
] | 4 |
2021-02-24T15:02:35.000Z
|
2022-01-18T19:24:27.000Z
|
ztffollow/nb/Forecasting.ipynb
|
divijsharma18/timedomain
|
73dbe66cd3c5a0d9cf5b2cb240c7c2fdd937c839
|
[
"MIT"
] | 35 |
2020-11-06T17:51:08.000Z
|
2021-10-14T01:47:16.000Z
|
ztffollow/nb/Forecasting.ipynb
|
divijsharma18/timedomain
|
73dbe66cd3c5a0d9cf5b2cb240c7c2fdd937c839
|
[
"MIT"
] | 10 |
2020-03-13T20:34:15.000Z
|
2021-09-23T13:35:27.000Z
| 54.339479 | 14,000 | 0.672601 |
[
[
[
"# Testing the ALeRCE forecasting tool (under construction)",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests",
"_____no_output_____"
]
],
[
[
"# Load the ALeRCE client \nhttps://alerce-new-python-client.readthedocs.io/en/main/tutorials/ztf_api.html#usage",
"_____no_output_____"
]
],
[
[
"my_config = {\n \"ZTF_API_URL\": \"https://dev.api.alerce.online\"\n}\nfrom alerce.core import Alerce\nalerce = Alerce()\nalerce.load_config_from_object(my_config)",
"_____no_output_____"
]
],
[
[
"# Select a SN",
"_____no_output_____"
]
],
[
[
"oid = \"ZTF21aaqftuq\" #\"ZTF21aaqftuq\"",
"_____no_output_____"
]
],
[
[
"# Check in ALeRCE explorer",
"_____no_output_____"
]
],
[
[
"from IPython.core.display import display, HTML\ndisplay(HTML(\"<a href=\\\"https://dev.alerce.online/object/%s\\\">%s</a>\" % (oid, oid)))",
"_____no_output_____"
]
],
[
[
"# Query object statistics",
"_____no_output_____"
]
],
[
[
"alerce.query_objects(oid=oid, format='pandas')",
"_____no_output_____"
]
],
[
[
"# Query object band dependent statistics",
"_____no_output_____"
]
],
[
[
"alerce.query_magstats(oid=oid, format='pandas')",
"_____no_output_____"
]
],
[
[
"# Get light curve",
"_____no_output_____"
]
],
[
[
"# Getting detections for an object\ndet = alerce.query_detections(oid, format=\"pandas\")\ndisplay(det)\n\n# Getting non detections for an object\nnon_det = alerce.query_non_detections(oid, format=\"pandas\")\ndisplay(non_det)\nnon_det",
"_____no_output_____"
]
],
[
[
"# Plot stamps",
"_____no_output_____"
]
],
[
[
"#alerce.get_stamps(oid, candid=None)#candid=det.loc[det.has_stamp].candid.min())\nalerce.plot_stamps(oid, candid=det.loc[det.has_stamp].candid.min())",
"_____no_output_____"
]
],
[
[
"# Plot light curve",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\ncolors = {1: 'g', 2: 'r'}\nfor fid in det.fid.unique():\n mask = det.fid == fid\n ax.errorbar(det.loc[mask].mjd, det.loc[mask].magpsf, yerr=det.loc[mask].sigmapsf, c=colors[fid], marker='o')\nax.set_ylim(ax.get_ylim()[::-1])\nax.set_xlabel(\"MJD\")\nax.set_ylabel(\"mag\")",
"_____no_output_____"
]
],
[
[
"# Forecasting",
"_____no_output_____"
]
],
[
[
"times = np.linspace(det.mjd.min() - 10, det.mjd.max() + 30, 10)",
"_____no_output_____"
],
[
"forecasts = []\nfor mjd in times:\n response = requests.get(\"http://3.238.105.175:8081/parametric/sn?oid=%s&mjd=%s\" % (oid, mjd))\n forecasts += response.json()[\"forecast\"]",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\ncolors = {1: 'g', 2: 'r'}\nfor fid in det.fid.unique():\n mask = det.fid == fid\n ax.errorbar(det.loc[mask].mjd, det.loc[mask].magpsf, yerr=det.loc[mask].sigmapsf, c=colors[fid], marker='o') \nfor i in forecasts:\n if not i[\"magpsf\"] is None:\n ax.scatter(i[\"mjd\"], i[\"magpsf\"], c=colors[fid], marker='*')\nax.set_ylim(ax.get_ylim()[::-1])\nax.set_xlabel(\"MJD\")\nax.set_ylabel(\"mag\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbb9e43116901432578ea437fbd0bf67cb613e2f
| 225,452 |
ipynb
|
Jupyter Notebook
|
Workshop/MLP_107.ipynb
|
ShepherdCode/ShepherdML
|
fd8d71c63f7bd788ea0052294d93e43246254a12
|
[
"MIT"
] | null | null | null |
Workshop/MLP_107.ipynb
|
ShepherdCode/ShepherdML
|
fd8d71c63f7bd788ea0052294d93e43246254a12
|
[
"MIT"
] | 4 |
2020-03-24T18:05:09.000Z
|
2020-12-22T17:42:54.000Z
|
Workshop/MLP_107.ipynb
|
ShepherdCode/ShepherdML
|
fd8d71c63f7bd788ea0052294d93e43246254a12
|
[
"MIT"
] | null | null | null | 104.521094 | 35,228 | 0.684935 |
[
[
[
"# MLP 107",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\nPATH='/content/drive/'\ndrive.mount(PATH)\nDATAPATH=PATH+'My Drive/data/'\nPC_FILENAME = DATAPATH+'pcRNA.fasta'\nNC_FILENAME = DATAPATH+'ncRNA.fasta'\n",
"Drive already mounted at /content/drive/; to attempt to forcibly remount, call drive.mount(\"/content/drive/\", force_remount=True).\n"
],
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.model_selection import StratifiedKFold\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.models import Sequential\nfrom keras.layers import Bidirectional\nfrom keras.layers import GRU\nfrom keras.layers import Dense\nfrom keras.layers import LayerNormalization\nimport time\n\ndt='float32'\ntf.keras.backend.set_floatx(dt)\n\nEPOCHS=200\nSPLITS=1\nK=4\nVOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN'\nEMBED_DIMEN=16\nFILENAME='MLP107'",
"_____no_output_____"
]
],
[
[
"## Load and partition sequences",
"_____no_output_____"
]
],
[
[
"# Assume file was preprocessed to contain one line per seq.\n# Prefer Pandas dataframe but df does not support append.\n# For conversion to tensor, must avoid python lists.\ndef load_fasta(filename,label):\n DEFLINE='>'\n labels=[]\n seqs=[]\n lens=[]\n nums=[]\n num=0\n with open (filename,'r') as infile:\n for line in infile:\n if line[0]!=DEFLINE:\n seq=line.rstrip()\n num += 1 # first seqnum is 1\n seqlen=len(seq)\n nums.append(num)\n labels.append(label)\n seqs.append(seq)\n lens.append(seqlen)\n df1=pd.DataFrame(nums,columns=['seqnum'])\n df2=pd.DataFrame(labels,columns=['class'])\n df3=pd.DataFrame(seqs,columns=['sequence'])\n df4=pd.DataFrame(lens,columns=['seqlen'])\n df=pd.concat((df1,df2,df3,df4),axis=1)\n return df\n\n# Split into train/test stratified by sequence length.\ndef sizebin(df):\n return pd.cut(df[\"seqlen\"],\n bins=[0,1000,2000,4000,8000,16000,np.inf],\n labels=[0,1,2,3,4,5])\ndef make_train_test(data):\n bin_labels= sizebin(data)\n from sklearn.model_selection import StratifiedShuffleSplit\n splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=37863)\n # split(x,y) expects that y is the labels. \n # Trick: Instead of y, give it it the bin labels that we generated.\n for train_index,test_index in splitter.split(data,bin_labels):\n train_set = data.iloc[train_index]\n test_set = data.iloc[test_index]\n return (train_set,test_set)\n\ndef separate_X_and_y(data):\n y= data[['class']].copy()\n X= data.drop(columns=['class','seqnum','seqlen'])\n return (X,y)\n\ndef make_slice(data_set,min_len,max_len):\n print(\"original \"+str(data_set.shape))\n too_short = data_set[ data_set['seqlen'] < min_len ].index\n no_short=data_set.drop(too_short)\n print(\"no short \"+str(no_short.shape))\n too_long = no_short[ no_short['seqlen'] >= max_len ].index\n no_long_no_short=no_short.drop(too_long)\n print(\"no long, no short \"+str(no_long_no_short.shape))\n return no_long_no_short\n",
"_____no_output_____"
]
],
[
[
"## Make K-mers",
"_____no_output_____"
]
],
[
[
"def make_kmer_table(K):\n npad='N'*K\n shorter_kmers=['']\n for i in range(K):\n longer_kmers=[]\n for mer in shorter_kmers:\n longer_kmers.append(mer+'A')\n longer_kmers.append(mer+'C')\n longer_kmers.append(mer+'G')\n longer_kmers.append(mer+'T')\n shorter_kmers = longer_kmers\n all_kmers = shorter_kmers\n kmer_dict = {}\n kmer_dict[npad]=0\n value=1\n for mer in all_kmers:\n kmer_dict[mer]=value\n value += 1\n return kmer_dict\n\nKMER_TABLE=make_kmer_table(K)\n\ndef strings_to_vectors(data,uniform_len):\n all_seqs=[]\n for seq in data['sequence']:\n i=0\n seqlen=len(seq)\n kmers=[]\n while i < seqlen-K+1 -1: # stop at minus one for spaced seed\n kmer=seq[i:i+2]+seq[i+3:i+5] # SPACED SEED 2/1/2 for K=4\n #kmer=seq[i:i+K] \n i += 1\n value=KMER_TABLE[kmer]\n kmers.append(value)\n pad_val=0\n while i < uniform_len:\n kmers.append(pad_val)\n i += 1\n all_seqs.append(kmers)\n pd2d=pd.DataFrame(all_seqs)\n return pd2d # return 2D dataframe, uniform dimensions",
"_____no_output_____"
],
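[
"# Added note on the spaced seed above (illustrative example, not part of the original code):\n# with K=4 and the 2/1/2 pattern, the window seq[i:i+5] = 'ACGTA' contributes the k-mer\n# 'AC' + 'TA' = 'ACTA', i.e. the middle base at position i+2 ('G') is skipped before the\n# KMER_TABLE lookup.",
"_____no_output_____"
],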
[
"def make_kmers(MAXLEN,train_set):\n (X_train_all,y_train_all)=separate_X_and_y(train_set)\n\n # The returned values are Pandas dataframes.\n # print(X_train_all.shape,y_train_all.shape)\n # (X_train_all,y_train_all)\n # y: Pandas dataframe to Python list.\n # y_train_all=y_train_all.values.tolist()\n # The sequences lengths are bounded but not uniform.\n X_train_all\n print(type(X_train_all))\n print(X_train_all.shape)\n print(X_train_all.iloc[0])\n print(len(X_train_all.iloc[0]['sequence']))\n\n # X: List of string to List of uniform-length ordered lists of K-mers.\n X_train_kmers=strings_to_vectors(X_train_all,MAXLEN)\n # X: true 2D array (no more lists)\n X_train_kmers.shape\n\n print(\"transform...\")\n # From pandas dataframe to numpy to list to numpy\n print(type(X_train_kmers))\n num_seqs=len(X_train_kmers)\n tmp_seqs=[]\n for i in range(num_seqs):\n kmer_sequence=X_train_kmers.iloc[i]\n tmp_seqs.append(kmer_sequence)\n X_train_kmers=np.array(tmp_seqs)\n tmp_seqs=None\n print(type(X_train_kmers))\n print(X_train_kmers)\n\n labels=y_train_all.to_numpy()\n return (X_train_kmers,labels)",
"_____no_output_____"
],
[
"def make_frequencies(Xin):\n # Input: numpy X(numseq,seqlen) list of vectors of kmerval where val0=NNN,val1=AAA,etc. \n # Output: numpy X(numseq,65) list of frequencies of 0,1,etc.\n Xout=[]\n VOCABULARY_SIZE= 4**K + 1 # plus one for 'NNN'\n for seq in Xin:\n freqs =[0] * VOCABULARY_SIZE\n total = 0\n for kmerval in seq:\n freqs[kmerval] += 1\n total += 1\n for c in range(VOCABULARY_SIZE):\n freqs[c] = freqs[c]/total\n Xout.append(freqs)\n Xnum = np.asarray(Xout)\n return (Xnum)",
"_____no_output_____"
]
],
[
[
"## Build model",
"_____no_output_____"
]
],
[
[
"def build_model(maxlen,dimen):\n act=\"sigmoid\"\n\n embed_layer = keras.layers.Embedding(\n VOCABULARY_SIZE,EMBED_DIMEN,input_length=maxlen);\n \n neurons=16\n dense1_layer = keras.layers.Dense(neurons, activation=act,dtype=dt,input_dim=VOCABULARY_SIZE)\n dense2_layer = keras.layers.Dense(neurons, activation=act,dtype=dt)\n dense3_layer = keras.layers.Dense(neurons, activation=act,dtype=dt)\n output_layer = keras.layers.Dense(1, activation=act,dtype=dt)\n\n mlp = keras.models.Sequential()\n #mlp.add(embed_layer)\n mlp.add(dense1_layer)\n mlp.add(dense2_layer)\n #mlp.add(dense3_layer)\n mlp.add(output_layer)\n \n bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)\n print(\"COMPILE...\")\n mlp.compile(loss=bc, optimizer=\"Adam\",metrics=[\"accuracy\"])\n print(\"...COMPILED\")\n return mlp",
"_____no_output_____"
]
],
[
[
"## Cross validation",
"_____no_output_____"
]
],
[
[
"def do_cross_validation(X,y,eps,maxlen,dimen):\n model = None\n cv_scores = []\n fold=0\n splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.2, random_state=37863)\n for train_index,valid_index in splitter.split(X):\n X_train=X[train_index] # use iloc[] for dataframe\n y_train=y[train_index]\n X_valid=X[valid_index]\n y_valid=y[valid_index]\n\n print(\"BUILD MODEL\")\n model=build_model(maxlen,dimen)\n\n print(\"FIT\")\n start_time=time.time()\n # this is complaining about string to float\n history=model.fit(X_train, y_train, # batch_size=10, default=32 works nicely\n epochs=eps, verbose=1, # verbose=1 for ascii art, verbose=0 for none\n validation_data=(X_valid,y_valid) )\n end_time=time.time()\n elapsed_time=(end_time-start_time)\n \n fold += 1\n print(\"Fold %d, %d epochs, %d sec\"%(fold,eps,elapsed_time))\n\n pd.DataFrame(history.history).plot(figsize=(8,5))\n plt.grid(True)\n plt.gca().set_ylim(0,1)\n plt.show()\n\n scores = model.evaluate(X_valid, y_valid, verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n # What are the other metrics_names?\n # Try this from Geron page 505:\n # np.mean(keras.losses.mean_squared_error(y_valid,y_pred))\n cv_scores.append(scores[1] * 100)\n print()\n print(\"Validation core mean %.2f%% (+/- %.2f%%)\" % (np.mean(cv_scores), np.std(cv_scores)))\n return model",
"_____no_output_____"
]
],
[
[
"## Load",
"_____no_output_____"
]
],
[
[
"print(\"Load data from files.\")\nnc_seq=load_fasta(NC_FILENAME,0)\npc_seq=load_fasta(PC_FILENAME,1)\nall_seq=pd.concat((nc_seq,pc_seq),axis=0)\n\nprint(\"Put aside the test portion.\")\n(train_set,test_set)=make_train_test(all_seq)\n# Do this later when using the test data:\n# (X_test,y_test)=separate_X_and_y(test_set)\n\nnc_seq=None\npc_seq=None\nall_seq=None\n\nprint(\"Ready: train_set\")\ntrain_set",
"Load data from files.\nPut aside the test portion.\nReady: train_set\n"
]
],
[
[
"## Len 200-1Kb",
"_____no_output_____"
]
],
[
[
"MINLEN=200\nMAXLEN=1000\n\nprint (\"Compile the model\")\nmodel=build_model(MAXLEN,EMBED_DIMEN)\nprint (\"Summarize the model\")\nprint(model.summary()) # Print this only once\n\nprint(\"Working on full training set, slice by sequence length.\")\nprint(\"Slice size range [%d - %d)\"%(MINLEN,MAXLEN))\nsubset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y\n\nprint (\"Sequence to Kmer\")\n(X_train,y_train)=make_kmers(MAXLEN,subset)\nX_train\nX_train=make_frequencies(X_train)\nX_train\nprint (\"Cross valiation\")\nmodel1 = do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN)\nmodel1.save(FILENAME+'.short.model')",
"Compile the model\nCOMPILE...\n...COMPILED\nSummarize the model\nModel: \"sequential_36\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_144 (Dense) (None, 16) 4128 \n_________________________________________________________________\ndense_145 (Dense) (None, 16) 272 \n_________________________________________________________________\ndense_147 (Dense) (None, 1) 17 \n=================================================================\nTotal params: 4,417\nTrainable params: 4,417\nNon-trainable params: 0\n_________________________________________________________________\nNone\nWorking on full training set, slice by sequence length.\nSlice size range [200 - 1000)\noriginal (30290, 4)\nno short (30290, 4)\nno long, no short (8879, 4)\nSequence to Kmer\n<class 'pandas.core.frame.DataFrame'>\n(8879, 1)\nsequence AGTCCCTCCCCAGCCCAGCAGTCCCTCCAGGCTACATCCAGGAGAC...\nName: 1280, dtype: object\n348\ntransform...\n<class 'pandas.core.frame.DataFrame'>\n<class 'numpy.ndarray'>\n[[ 38 182 216 ... 0 0 0]\n [ 46 136 61 ... 0 0 0]\n [140 30 104 ... 0 0 0]\n ...\n [153 68 48 ... 0 0 0]\n [239 137 49 ... 0 0 0]\n [140 15 41 ... 0 0 0]]\nCross valiation\nBUILD MODEL\nCOMPILE...\n...COMPILED\nFIT\nEpoch 1/200\n222/222 [==============================] - 0s 2ms/step - loss: 0.6945 - accuracy: 0.5088 - val_loss: 0.6914 - val_accuracy: 0.5242\nEpoch 2/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.6903 - accuracy: 0.5547 - val_loss: 0.6882 - val_accuracy: 0.6723\nEpoch 3/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.6850 - accuracy: 0.6102 - val_loss: 0.6815 - val_accuracy: 0.6070\nEpoch 4/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.6736 - accuracy: 0.6611 - val_loss: 0.6666 - val_accuracy: 0.6774\nEpoch 5/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.6525 - accuracy: 0.6791 - val_loss: 0.6430 - val_accuracy: 0.6903\nEpoch 6/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.6242 - accuracy: 0.6891 - val_loss: 0.6184 - val_accuracy: 0.6909\nEpoch 7/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5993 - accuracy: 0.6932 - val_loss: 0.5994 - val_accuracy: 0.6914\nEpoch 8/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5835 - accuracy: 0.6979 - val_loss: 0.5892 - val_accuracy: 0.6976\nEpoch 9/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5740 - accuracy: 0.7025 - val_loss: 0.5834 - val_accuracy: 0.6943\nEpoch 10/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5680 - accuracy: 0.7042 - val_loss: 0.5820 - val_accuracy: 0.6881\nEpoch 11/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5631 - accuracy: 0.7053 - val_loss: 0.5757 - val_accuracy: 0.6976\nEpoch 12/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5589 - accuracy: 0.7111 - val_loss: 0.5722 - val_accuracy: 0.6988\nEpoch 13/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5558 - accuracy: 0.7124 - val_loss: 0.5717 - val_accuracy: 0.7010\nEpoch 14/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5527 - accuracy: 0.7173 - val_loss: 0.5673 - val_accuracy: 0.7050\nEpoch 15/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5497 - accuracy: 0.7181 - val_loss: 0.5648 - val_accuracy: 0.7044\nEpoch 16/200\n222/222 
[==============================] - 0s 1ms/step - loss: 0.5471 - accuracy: 0.7205 - val_loss: 0.5621 - val_accuracy: 0.7100\nEpoch 17/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5447 - accuracy: 0.7210 - val_loss: 0.5599 - val_accuracy: 0.7100\nEpoch 18/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5423 - accuracy: 0.7241 - val_loss: 0.5596 - val_accuracy: 0.7111\nEpoch 19/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5401 - accuracy: 0.7241 - val_loss: 0.5557 - val_accuracy: 0.7111\nEpoch 20/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5369 - accuracy: 0.7301 - val_loss: 0.5544 - val_accuracy: 0.7134\nEpoch 21/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5348 - accuracy: 0.7288 - val_loss: 0.5549 - val_accuracy: 0.7145\nEpoch 22/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5322 - accuracy: 0.7310 - val_loss: 0.5520 - val_accuracy: 0.7173\nEpoch 23/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5299 - accuracy: 0.7324 - val_loss: 0.5526 - val_accuracy: 0.7145\nEpoch 24/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5278 - accuracy: 0.7349 - val_loss: 0.5469 - val_accuracy: 0.7207\nEpoch 25/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5251 - accuracy: 0.7370 - val_loss: 0.5432 - val_accuracy: 0.7218\nEpoch 26/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5223 - accuracy: 0.7364 - val_loss: 0.5401 - val_accuracy: 0.7247\nEpoch 27/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5197 - accuracy: 0.7401 - val_loss: 0.5383 - val_accuracy: 0.7292\nEpoch 28/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5172 - accuracy: 0.7431 - val_loss: 0.5360 - val_accuracy: 0.7297\nEpoch 29/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5138 - accuracy: 0.7439 - val_loss: 0.5341 - val_accuracy: 0.7309\nEpoch 30/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5121 - accuracy: 0.7450 - val_loss: 0.5297 - val_accuracy: 0.7331\nEpoch 31/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5087 - accuracy: 0.7487 - val_loss: 0.5281 - val_accuracy: 0.7348\nEpoch 32/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5055 - accuracy: 0.7491 - val_loss: 0.5241 - val_accuracy: 0.7354\nEpoch 33/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.5027 - accuracy: 0.7505 - val_loss: 0.5211 - val_accuracy: 0.7370\nEpoch 34/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4991 - accuracy: 0.7531 - val_loss: 0.5183 - val_accuracy: 0.7404\nEpoch 35/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4959 - accuracy: 0.7569 - val_loss: 0.5165 - val_accuracy: 0.7427\nEpoch 36/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4933 - accuracy: 0.7605 - val_loss: 0.5121 - val_accuracy: 0.7461\nEpoch 37/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4904 - accuracy: 0.7607 - val_loss: 0.5098 - val_accuracy: 0.7461\nEpoch 38/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4856 - accuracy: 0.7642 - val_loss: 0.5054 - val_accuracy: 0.7511\nEpoch 39/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4822 - accuracy: 0.7659 - val_loss: 0.5017 - val_accuracy: 0.7556\nEpoch 40/200\n222/222 [==============================] - 0s 1ms/step - 
loss: 0.4780 - accuracy: 0.7705 - val_loss: 0.4986 - val_accuracy: 0.7579\nEpoch 41/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4751 - accuracy: 0.7704 - val_loss: 0.4945 - val_accuracy: 0.7539\nEpoch 42/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4706 - accuracy: 0.7740 - val_loss: 0.4910 - val_accuracy: 0.7584\nEpoch 43/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4672 - accuracy: 0.7756 - val_loss: 0.4878 - val_accuracy: 0.7607\nEpoch 44/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4639 - accuracy: 0.7794 - val_loss: 0.4848 - val_accuracy: 0.7686\nEpoch 45/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4597 - accuracy: 0.7814 - val_loss: 0.4836 - val_accuracy: 0.7675\nEpoch 46/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4570 - accuracy: 0.7835 - val_loss: 0.4769 - val_accuracy: 0.7753\nEpoch 47/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4525 - accuracy: 0.7864 - val_loss: 0.4736 - val_accuracy: 0.7765\nEpoch 48/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4491 - accuracy: 0.7891 - val_loss: 0.4703 - val_accuracy: 0.7804\nEpoch 49/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4454 - accuracy: 0.7923 - val_loss: 0.4688 - val_accuracy: 0.7776\nEpoch 50/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4427 - accuracy: 0.7925 - val_loss: 0.4639 - val_accuracy: 0.7810\nEpoch 51/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4407 - accuracy: 0.7949 - val_loss: 0.4613 - val_accuracy: 0.7798\nEpoch 52/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4361 - accuracy: 0.7981 - val_loss: 0.4583 - val_accuracy: 0.7849\nEpoch 53/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4341 - accuracy: 0.8002 - val_loss: 0.4619 - val_accuracy: 0.7827\nEpoch 54/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4310 - accuracy: 0.8012 - val_loss: 0.4553 - val_accuracy: 0.7872\nEpoch 55/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4289 - accuracy: 0.8018 - val_loss: 0.4516 - val_accuracy: 0.7872\nEpoch 56/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4262 - accuracy: 0.8037 - val_loss: 0.4508 - val_accuracy: 0.7838\nEpoch 57/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4238 - accuracy: 0.8053 - val_loss: 0.4474 - val_accuracy: 0.7877\nEpoch 58/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4214 - accuracy: 0.8101 - val_loss: 0.4452 - val_accuracy: 0.7889\nEpoch 59/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4197 - accuracy: 0.8102 - val_loss: 0.4436 - val_accuracy: 0.7894\nEpoch 60/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4179 - accuracy: 0.8075 - val_loss: 0.4419 - val_accuracy: 0.7928\nEpoch 61/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4164 - accuracy: 0.8113 - val_loss: 0.4406 - val_accuracy: 0.7922\nEpoch 62/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4141 - accuracy: 0.8133 - val_loss: 0.4474 - val_accuracy: 0.7815\nEpoch 63/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4131 - accuracy: 0.8139 - val_loss: 0.4380 - val_accuracy: 0.7894\nEpoch 64/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4117 - accuracy: 0.8106 - val_loss: 0.4375 
- val_accuracy: 0.7911\nEpoch 65/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4105 - accuracy: 0.8130 - val_loss: 0.4359 - val_accuracy: 0.7956\nEpoch 66/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4098 - accuracy: 0.8126 - val_loss: 0.4349 - val_accuracy: 0.7911\nEpoch 67/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4088 - accuracy: 0.8119 - val_loss: 0.4335 - val_accuracy: 0.7934\nEpoch 68/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4070 - accuracy: 0.8153 - val_loss: 0.4326 - val_accuracy: 0.7939\nEpoch 69/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4057 - accuracy: 0.8161 - val_loss: 0.4324 - val_accuracy: 0.7934\nEpoch 70/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4057 - accuracy: 0.8157 - val_loss: 0.4310 - val_accuracy: 0.7939\nEpoch 71/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4037 - accuracy: 0.8167 - val_loss: 0.4365 - val_accuracy: 0.7928\nEpoch 72/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4033 - accuracy: 0.8164 - val_loss: 0.4353 - val_accuracy: 0.7945\nEpoch 73/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4021 - accuracy: 0.8178 - val_loss: 0.4290 - val_accuracy: 0.7928\nEpoch 74/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4015 - accuracy: 0.8188 - val_loss: 0.4287 - val_accuracy: 0.7956\nEpoch 75/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4007 - accuracy: 0.8191 - val_loss: 0.4281 - val_accuracy: 0.7962\nEpoch 76/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.4013 - accuracy: 0.8166 - val_loss: 0.4280 - val_accuracy: 0.7928\nEpoch 77/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3998 - accuracy: 0.8198 - val_loss: 0.4301 - val_accuracy: 0.7973\nEpoch 78/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3982 - accuracy: 0.8194 - val_loss: 0.4271 - val_accuracy: 0.7911\nEpoch 79/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3980 - accuracy: 0.8198 - val_loss: 0.4263 - val_accuracy: 0.7967\nEpoch 80/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3977 - accuracy: 0.8177 - val_loss: 0.4257 - val_accuracy: 0.7917\nEpoch 81/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3977 - accuracy: 0.8197 - val_loss: 0.4341 - val_accuracy: 0.7973\nEpoch 82/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3974 - accuracy: 0.8218 - val_loss: 0.4256 - val_accuracy: 0.7973\nEpoch 83/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3960 - accuracy: 0.8202 - val_loss: 0.4247 - val_accuracy: 0.7956\nEpoch 84/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3954 - accuracy: 0.8209 - val_loss: 0.4241 - val_accuracy: 0.7967\nEpoch 85/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3959 - accuracy: 0.8229 - val_loss: 0.4257 - val_accuracy: 0.7928\nEpoch 86/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3952 - accuracy: 0.8205 - val_loss: 0.4242 - val_accuracy: 0.7934\nEpoch 87/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3946 - accuracy: 0.8205 - val_loss: 0.4242 - val_accuracy: 0.7962\nEpoch 88/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3942 - accuracy: 0.8204 - val_loss: 0.4230 - val_accuracy: 0.7973\nEpoch 89/200\n222/222 
[==============================] - 0s 1ms/step - loss: 0.3937 - accuracy: 0.8208 - val_loss: 0.4230 - val_accuracy: 0.7967\nEpoch 90/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3931 - accuracy: 0.8233 - val_loss: 0.4226 - val_accuracy: 0.7973\nEpoch 91/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3929 - accuracy: 0.8206 - val_loss: 0.4223 - val_accuracy: 0.7973\nEpoch 92/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3925 - accuracy: 0.8228 - val_loss: 0.4225 - val_accuracy: 0.7973\nEpoch 93/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3925 - accuracy: 0.8223 - val_loss: 0.4233 - val_accuracy: 0.7990\nEpoch 94/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3917 - accuracy: 0.8249 - val_loss: 0.4216 - val_accuracy: 0.7990\nEpoch 95/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3906 - accuracy: 0.8232 - val_loss: 0.4225 - val_accuracy: 0.7984\nEpoch 96/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3896 - accuracy: 0.8249 - val_loss: 0.4229 - val_accuracy: 0.7928\nEpoch 97/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3905 - accuracy: 0.8240 - val_loss: 0.4214 - val_accuracy: 0.7984\nEpoch 98/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3908 - accuracy: 0.8256 - val_loss: 0.4215 - val_accuracy: 0.7962\nEpoch 99/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3895 - accuracy: 0.8220 - val_loss: 0.4218 - val_accuracy: 0.8001\nEpoch 100/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3897 - accuracy: 0.8236 - val_loss: 0.4216 - val_accuracy: 0.8007\nEpoch 101/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3890 - accuracy: 0.8244 - val_loss: 0.4218 - val_accuracy: 0.7928\nEpoch 102/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3901 - accuracy: 0.8237 - val_loss: 0.4208 - val_accuracy: 0.7967\nEpoch 103/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3896 - accuracy: 0.8235 - val_loss: 0.4208 - val_accuracy: 0.7984\nEpoch 104/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3883 - accuracy: 0.8222 - val_loss: 0.4209 - val_accuracy: 0.8018\nEpoch 105/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3880 - accuracy: 0.8246 - val_loss: 0.4212 - val_accuracy: 0.8012\nEpoch 106/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3884 - accuracy: 0.8240 - val_loss: 0.4206 - val_accuracy: 0.7962\nEpoch 107/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3884 - accuracy: 0.8250 - val_loss: 0.4199 - val_accuracy: 0.8001\nEpoch 108/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3873 - accuracy: 0.8250 - val_loss: 0.4228 - val_accuracy: 0.7934\nEpoch 109/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3881 - accuracy: 0.8230 - val_loss: 0.4201 - val_accuracy: 0.7995\nEpoch 110/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3874 - accuracy: 0.8243 - val_loss: 0.4199 - val_accuracy: 0.7979\nEpoch 111/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3870 - accuracy: 0.8243 - val_loss: 0.4193 - val_accuracy: 0.8007\nEpoch 112/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3863 - accuracy: 0.8261 - val_loss: 0.4197 - val_accuracy: 0.8024\nEpoch 113/200\n222/222 [==============================] - 
0s 1ms/step - loss: 0.3865 - accuracy: 0.8253 - val_loss: 0.4192 - val_accuracy: 0.8007\nEpoch 114/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3864 - accuracy: 0.8247 - val_loss: 0.4189 - val_accuracy: 0.8012\nEpoch 115/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3864 - accuracy: 0.8258 - val_loss: 0.4189 - val_accuracy: 0.8018\nEpoch 116/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3870 - accuracy: 0.8267 - val_loss: 0.4218 - val_accuracy: 0.8001\nEpoch 117/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3859 - accuracy: 0.8242 - val_loss: 0.4207 - val_accuracy: 0.7956\nEpoch 118/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3868 - accuracy: 0.8258 - val_loss: 0.4198 - val_accuracy: 0.8024\nEpoch 119/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3852 - accuracy: 0.8264 - val_loss: 0.4187 - val_accuracy: 0.8018\nEpoch 120/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3862 - accuracy: 0.8253 - val_loss: 0.4214 - val_accuracy: 0.7956\nEpoch 121/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3848 - accuracy: 0.8253 - val_loss: 0.4184 - val_accuracy: 0.8018\nEpoch 122/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3847 - accuracy: 0.8250 - val_loss: 0.4184 - val_accuracy: 0.8001\nEpoch 123/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3849 - accuracy: 0.8244 - val_loss: 0.4184 - val_accuracy: 0.8007\nEpoch 124/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3838 - accuracy: 0.8273 - val_loss: 0.4197 - val_accuracy: 0.7962\nEpoch 125/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3844 - accuracy: 0.8267 - val_loss: 0.4183 - val_accuracy: 0.8012\nEpoch 126/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3845 - accuracy: 0.8264 - val_loss: 0.4182 - val_accuracy: 0.8018\nEpoch 127/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3847 - accuracy: 0.8242 - val_loss: 0.4192 - val_accuracy: 0.8007\nEpoch 128/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3855 - accuracy: 0.8204 - val_loss: 0.4259 - val_accuracy: 0.8041\nEpoch 129/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3836 - accuracy: 0.8249 - val_loss: 0.4182 - val_accuracy: 0.8018\nEpoch 130/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3832 - accuracy: 0.8284 - val_loss: 0.4195 - val_accuracy: 0.7995\nEpoch 131/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3832 - accuracy: 0.8264 - val_loss: 0.4180 - val_accuracy: 0.8029\nEpoch 132/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3834 - accuracy: 0.8253 - val_loss: 0.4180 - val_accuracy: 0.8018\nEpoch 133/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3826 - accuracy: 0.8273 - val_loss: 0.4181 - val_accuracy: 0.7984\nEpoch 134/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3842 - accuracy: 0.8270 - val_loss: 0.4179 - val_accuracy: 0.8035\nEpoch 135/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3823 - accuracy: 0.8258 - val_loss: 0.4179 - val_accuracy: 0.8024\nEpoch 136/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3826 - accuracy: 0.8270 - val_loss: 0.4235 - val_accuracy: 0.7967\nEpoch 137/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3828 
- accuracy: 0.8254 - val_loss: 0.4178 - val_accuracy: 0.8029\nEpoch 138/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3825 - accuracy: 0.8263 - val_loss: 0.4192 - val_accuracy: 0.7967\nEpoch 139/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3830 - accuracy: 0.8291 - val_loss: 0.4178 - val_accuracy: 0.8024\nEpoch 140/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3824 - accuracy: 0.8253 - val_loss: 0.4178 - val_accuracy: 0.8024\nEpoch 141/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3816 - accuracy: 0.8275 - val_loss: 0.4216 - val_accuracy: 0.8012\nEpoch 142/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3821 - accuracy: 0.8267 - val_loss: 0.4179 - val_accuracy: 0.8007\nEpoch 143/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3821 - accuracy: 0.8275 - val_loss: 0.4178 - val_accuracy: 0.7990\nEpoch 144/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3822 - accuracy: 0.8254 - val_loss: 0.4177 - val_accuracy: 0.8024\nEpoch 145/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3828 - accuracy: 0.8263 - val_loss: 0.4194 - val_accuracy: 0.7973\nEpoch 146/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3814 - accuracy: 0.8275 - val_loss: 0.4206 - val_accuracy: 0.7995\nEpoch 147/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3820 - accuracy: 0.8280 - val_loss: 0.4181 - val_accuracy: 0.8001\nEpoch 148/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3814 - accuracy: 0.8268 - val_loss: 0.4182 - val_accuracy: 0.8001\nEpoch 149/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3814 - accuracy: 0.8257 - val_loss: 0.4182 - val_accuracy: 0.7990\nEpoch 150/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3807 - accuracy: 0.8280 - val_loss: 0.4189 - val_accuracy: 0.8001\nEpoch 151/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3813 - accuracy: 0.8263 - val_loss: 0.4177 - val_accuracy: 0.7995\nEpoch 152/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3809 - accuracy: 0.8266 - val_loss: 0.4177 - val_accuracy: 0.8007\nEpoch 153/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3816 - accuracy: 0.8278 - val_loss: 0.4179 - val_accuracy: 0.7995\nEpoch 154/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3801 - accuracy: 0.8295 - val_loss: 0.4185 - val_accuracy: 0.7995\nEpoch 155/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3798 - accuracy: 0.8268 - val_loss: 0.4187 - val_accuracy: 0.8018\nEpoch 156/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3807 - accuracy: 0.8274 - val_loss: 0.4188 - val_accuracy: 0.8024\nEpoch 157/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3803 - accuracy: 0.8267 - val_loss: 0.4184 - val_accuracy: 0.8012\nEpoch 158/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3799 - accuracy: 0.8274 - val_loss: 0.4187 - val_accuracy: 0.8001\nEpoch 159/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3813 - accuracy: 0.8246 - val_loss: 0.4179 - val_accuracy: 0.7995\nEpoch 160/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3800 - accuracy: 0.8254 - val_loss: 0.4178 - val_accuracy: 0.7979\nEpoch 161/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3797 - accuracy: 0.8274 - 
val_loss: 0.4202 - val_accuracy: 0.8018\nEpoch 162/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3793 - accuracy: 0.8288 - val_loss: 0.4201 - val_accuracy: 0.7995\nEpoch 163/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3818 - accuracy: 0.8230 - val_loss: 0.4182 - val_accuracy: 0.7995\nEpoch 164/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3797 - accuracy: 0.8285 - val_loss: 0.4235 - val_accuracy: 0.8012\nEpoch 165/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3797 - accuracy: 0.8270 - val_loss: 0.4286 - val_accuracy: 0.8069\nEpoch 166/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3800 - accuracy: 0.8287 - val_loss: 0.4196 - val_accuracy: 0.8041\nEpoch 167/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3798 - accuracy: 0.8260 - val_loss: 0.4188 - val_accuracy: 0.8029\nEpoch 168/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3792 - accuracy: 0.8247 - val_loss: 0.4180 - val_accuracy: 0.7990\nEpoch 169/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3788 - accuracy: 0.8284 - val_loss: 0.4180 - val_accuracy: 0.8012\nEpoch 170/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3791 - accuracy: 0.8270 - val_loss: 0.4184 - val_accuracy: 0.8024\nEpoch 171/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3785 - accuracy: 0.8291 - val_loss: 0.4185 - val_accuracy: 0.7984\nEpoch 172/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3790 - accuracy: 0.8285 - val_loss: 0.4195 - val_accuracy: 0.8007\nEpoch 173/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3793 - accuracy: 0.8267 - val_loss: 0.4190 - val_accuracy: 0.7995\nEpoch 174/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3796 - accuracy: 0.8246 - val_loss: 0.4180 - val_accuracy: 0.8007\nEpoch 175/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3784 - accuracy: 0.8296 - val_loss: 0.4205 - val_accuracy: 0.8041\nEpoch 176/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3790 - accuracy: 0.8267 - val_loss: 0.4190 - val_accuracy: 0.8029\nEpoch 177/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3786 - accuracy: 0.8299 - val_loss: 0.4180 - val_accuracy: 0.7995\nEpoch 178/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3791 - accuracy: 0.8287 - val_loss: 0.4184 - val_accuracy: 0.7973\nEpoch 179/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3783 - accuracy: 0.8270 - val_loss: 0.4186 - val_accuracy: 0.8018\nEpoch 180/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3796 - accuracy: 0.8264 - val_loss: 0.4181 - val_accuracy: 0.8007\nEpoch 181/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3783 - accuracy: 0.8284 - val_loss: 0.4185 - val_accuracy: 0.8018\nEpoch 182/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3786 - accuracy: 0.8285 - val_loss: 0.4185 - val_accuracy: 0.8018\nEpoch 183/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3777 - accuracy: 0.8282 - val_loss: 0.4183 - val_accuracy: 0.8024\nEpoch 184/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3789 - accuracy: 0.8285 - val_loss: 0.4184 - val_accuracy: 0.8001\nEpoch 185/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3779 - accuracy: 0.8285 - val_loss: 0.4183 - 
val_accuracy: 0.8001\nEpoch 186/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3783 - accuracy: 0.8280 - val_loss: 0.4191 - val_accuracy: 0.7984\nEpoch 187/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3777 - accuracy: 0.8263 - val_loss: 0.4183 - val_accuracy: 0.8007\nEpoch 188/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3782 - accuracy: 0.8271 - val_loss: 0.4204 - val_accuracy: 0.8007\nEpoch 189/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3781 - accuracy: 0.8277 - val_loss: 0.4236 - val_accuracy: 0.7973\nEpoch 190/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3779 - accuracy: 0.8277 - val_loss: 0.4185 - val_accuracy: 0.8012\nEpoch 191/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3781 - accuracy: 0.8296 - val_loss: 0.4200 - val_accuracy: 0.8024\nEpoch 192/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3779 - accuracy: 0.8261 - val_loss: 0.4185 - val_accuracy: 0.8001\nEpoch 193/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3775 - accuracy: 0.8294 - val_loss: 0.4197 - val_accuracy: 0.8012\nEpoch 194/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3783 - accuracy: 0.8274 - val_loss: 0.4185 - val_accuracy: 0.8007\nEpoch 195/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3775 - accuracy: 0.8275 - val_loss: 0.4217 - val_accuracy: 0.8024\nEpoch 196/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3790 - accuracy: 0.8278 - val_loss: 0.4321 - val_accuracy: 0.8001\nEpoch 197/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3780 - accuracy: 0.8309 - val_loss: 0.4189 - val_accuracy: 0.8018\nEpoch 198/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3778 - accuracy: 0.8278 - val_loss: 0.4186 - val_accuracy: 0.8024\nEpoch 199/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3785 - accuracy: 0.8268 - val_loss: 0.4191 - val_accuracy: 0.7984\nEpoch 200/200\n222/222 [==============================] - 0s 1ms/step - loss: 0.3774 - accuracy: 0.8294 - val_loss: 0.4186 - val_accuracy: 0.8007\nFold 1, 200 epochs, 57 sec\n"
]
],
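[
[
"Added illustrative note: the epoch-by-epoch log above is the keras model.fit progress printed inside do_cross_validation. If that History object were also returned alongside the trained model, the loss curves could be plotted with standard Matplotlib calls; the helper below is a minimal sketch under that assumption (plot_history and its argument are hypothetical and not part of the original pipeline).",
"_____no_output_____"
]
],
[
[
"# Hypothetical helper -- a sketch only, not called anywhere in this notebook.\n# Assumes access to the keras History object returned by model.fit\n# (do_cross_validation would need to return it in addition to the model).\nimport matplotlib.pyplot as plt\n\ndef plot_history(history):\n    # history.history maps metric names to per-epoch value lists.\n    for metric in ('loss', 'val_loss'):\n        if metric in history.history:\n            plt.plot(history.history[metric], label=metric)\n    plt.xlabel('epoch')\n    plt.ylabel('loss')\n    plt.legend()\n    plt.title('Training vs. validation loss')\n    plt.show()",
"_____no_output_____"
]
],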
[
[
"## Len 1Kb-2Kb",
"_____no_output_____"
]
],
[
[
"MINLEN=1000\nMAXLEN=2000\n\nprint (\"Compile the model\")\nmodel=build_model(MAXLEN,EMBED_DIMEN)\nprint (\"Summarize the model\")\nprint(model.summary()) # Print this only once\n\nprint(\"Working on full training set, slice by sequence length.\")\nprint(\"Slice size range [%d - %d)\"%(MINLEN,MAXLEN))\nsubset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y\n\nprint (\"Sequence to Kmer\")\n(X_train,y_train)=make_kmers(MAXLEN,subset)\nX_train\nX_train=make_frequencies(X_train)\nX_train\nprint (\"Cross valiation\")\nmodel2 = do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN)\nmodel2.save(FILENAME+'.medium.model')",
"Compile the model\nCOMPILE...\n...COMPILED\nSummarize the model\nModel: \"sequential_38\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_152 (Dense) (None, 16) 4128 \n_________________________________________________________________\ndense_153 (Dense) (None, 16) 272 \n_________________________________________________________________\ndense_155 (Dense) (None, 1) 17 \n=================================================================\nTotal params: 4,417\nTrainable params: 4,417\nNon-trainable params: 0\n_________________________________________________________________\nNone\nWorking on full training set, slice by sequence length.\nSlice size range [1000 - 2000)\noriginal (30290, 4)\nno short (9273, 4)\nno long, no short (3368, 4)\nSequence to Kmer\n<class 'pandas.core.frame.DataFrame'>\n(3368, 1)\nsequence GGCGGGGTCGACTGACGGTAACGGGGCAGAGAGGCTGTTCGCAGAG...\nName: 12641, dtype: object\n1338\ntransform...\n<class 'pandas.core.frame.DataFrame'>\n<class 'numpy.ndarray'>\n[[171 155 107 ... 0 0 0]\n [229 132 31 ... 0 0 0]\n [111 170 182 ... 0 0 0]\n ...\n [169 177 226 ... 0 0 0]\n [ 36 158 69 ... 0 0 0]\n [192 240 192 ... 0 0 0]]\nCross valiation\nBUILD MODEL\nCOMPILE...\n...COMPILED\nFIT\nEpoch 1/200\n85/85 [==============================] - 0s 3ms/step - loss: 0.6647 - accuracy: 0.6221 - val_loss: 0.6724 - val_accuracy: 0.6039\nEpoch 2/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6630 - accuracy: 0.6221 - val_loss: 0.6720 - val_accuracy: 0.6039\nEpoch 3/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6616 - accuracy: 0.6221 - val_loss: 0.6694 - val_accuracy: 0.6039\nEpoch 4/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6622 - accuracy: 0.6221 - val_loss: 0.6688 - val_accuracy: 0.6039\nEpoch 5/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6609 - accuracy: 0.6221 - val_loss: 0.6680 - val_accuracy: 0.6039\nEpoch 6/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6601 - accuracy: 0.6221 - val_loss: 0.6693 - val_accuracy: 0.6039\nEpoch 7/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6591 - accuracy: 0.6221 - val_loss: 0.6658 - val_accuracy: 0.6039\nEpoch 8/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.6591 - accuracy: 0.6221 - val_loss: 0.6647 - val_accuracy: 0.6039\nEpoch 9/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6575 - accuracy: 0.6221 - val_loss: 0.6638 - val_accuracy: 0.6039\nEpoch 10/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6563 - accuracy: 0.6221 - val_loss: 0.6624 - val_accuracy: 0.6039\nEpoch 11/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6553 - accuracy: 0.6221 - val_loss: 0.6607 - val_accuracy: 0.6039\nEpoch 12/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6537 - accuracy: 0.6221 - val_loss: 0.6578 - val_accuracy: 0.6039\nEpoch 13/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6524 - accuracy: 0.6221 - val_loss: 0.6574 - val_accuracy: 0.6039\nEpoch 14/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6503 - accuracy: 0.6221 - val_loss: 0.6543 - val_accuracy: 0.6039\nEpoch 15/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6478 - accuracy: 0.6221 - val_loss: 0.6517 - val_accuracy: 0.6039\nEpoch 16/200\n85/85 [==============================] - 0s 1ms/step - 
loss: 0.6454 - accuracy: 0.6221 - val_loss: 0.6475 - val_accuracy: 0.6039\nEpoch 17/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6424 - accuracy: 0.6221 - val_loss: 0.6438 - val_accuracy: 0.6039\nEpoch 18/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6401 - accuracy: 0.6221 - val_loss: 0.6407 - val_accuracy: 0.6039\nEpoch 19/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6376 - accuracy: 0.6221 - val_loss: 0.6376 - val_accuracy: 0.6039\nEpoch 20/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6340 - accuracy: 0.6221 - val_loss: 0.6318 - val_accuracy: 0.6098\nEpoch 21/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6305 - accuracy: 0.6232 - val_loss: 0.6280 - val_accuracy: 0.6098\nEpoch 22/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6269 - accuracy: 0.6251 - val_loss: 0.6239 - val_accuracy: 0.6142\nEpoch 23/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6233 - accuracy: 0.6281 - val_loss: 0.6243 - val_accuracy: 0.6039\nEpoch 24/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6202 - accuracy: 0.6329 - val_loss: 0.6143 - val_accuracy: 0.6217\nEpoch 25/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6165 - accuracy: 0.6399 - val_loss: 0.6120 - val_accuracy: 0.6187\nEpoch 26/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6136 - accuracy: 0.6385 - val_loss: 0.6057 - val_accuracy: 0.6320\nEpoch 27/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6101 - accuracy: 0.6481 - val_loss: 0.6009 - val_accuracy: 0.6558\nEpoch 28/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6081 - accuracy: 0.6526 - val_loss: 0.6006 - val_accuracy: 0.6291\nEpoch 29/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6042 - accuracy: 0.6611 - val_loss: 0.5934 - val_accuracy: 0.6706\nEpoch 30/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.6030 - accuracy: 0.6622 - val_loss: 0.5914 - val_accuracy: 0.6499\nEpoch 31/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5990 - accuracy: 0.6626 - val_loss: 0.5873 - val_accuracy: 0.6706\nEpoch 32/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5970 - accuracy: 0.6659 - val_loss: 0.5862 - val_accuracy: 0.6528\nEpoch 33/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5944 - accuracy: 0.6715 - val_loss: 0.5867 - val_accuracy: 0.6454\nEpoch 34/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5925 - accuracy: 0.6730 - val_loss: 0.5853 - val_accuracy: 0.6439\nEpoch 35/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5915 - accuracy: 0.6745 - val_loss: 0.5797 - val_accuracy: 0.6691\nEpoch 36/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5894 - accuracy: 0.6711 - val_loss: 0.5776 - val_accuracy: 0.6706\nEpoch 37/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5881 - accuracy: 0.6759 - val_loss: 0.5722 - val_accuracy: 0.7047\nEpoch 38/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5866 - accuracy: 0.6752 - val_loss: 0.5715 - val_accuracy: 0.6914\nEpoch 39/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5846 - accuracy: 0.6785 - val_loss: 0.5690 - val_accuracy: 0.7003\nEpoch 40/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5833 - accuracy: 0.6774 - val_loss: 0.5704 - val_accuracy: 0.6825\nEpoch 41/200\n85/85 
[==============================] - 0s 1ms/step - loss: 0.5834 - accuracy: 0.6756 - val_loss: 0.5674 - val_accuracy: 0.6929\nEpoch 42/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5809 - accuracy: 0.6800 - val_loss: 0.5643 - val_accuracy: 0.7033\nEpoch 43/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5793 - accuracy: 0.6797 - val_loss: 0.5621 - val_accuracy: 0.7255\nEpoch 44/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.5786 - accuracy: 0.6759 - val_loss: 0.5625 - val_accuracy: 0.7003\nEpoch 45/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5776 - accuracy: 0.6852 - val_loss: 0.5591 - val_accuracy: 0.7240\nEpoch 46/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.5759 - accuracy: 0.6860 - val_loss: 0.5583 - val_accuracy: 0.7151\nEpoch 47/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5743 - accuracy: 0.6908 - val_loss: 0.5565 - val_accuracy: 0.7196\nEpoch 48/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5738 - accuracy: 0.6845 - val_loss: 0.5554 - val_accuracy: 0.7166\nEpoch 49/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5725 - accuracy: 0.6893 - val_loss: 0.5565 - val_accuracy: 0.7047\nEpoch 50/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5712 - accuracy: 0.6867 - val_loss: 0.5606 - val_accuracy: 0.6855\nEpoch 51/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5693 - accuracy: 0.6893 - val_loss: 0.5514 - val_accuracy: 0.7270\nEpoch 52/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5695 - accuracy: 0.6978 - val_loss: 0.5497 - val_accuracy: 0.7285\nEpoch 53/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5676 - accuracy: 0.6919 - val_loss: 0.5515 - val_accuracy: 0.7092\nEpoch 54/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5658 - accuracy: 0.6978 - val_loss: 0.5480 - val_accuracy: 0.7270\nEpoch 55/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5643 - accuracy: 0.6938 - val_loss: 0.5460 - val_accuracy: 0.7329\nEpoch 56/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5642 - accuracy: 0.6952 - val_loss: 0.5464 - val_accuracy: 0.7196\nEpoch 57/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5619 - accuracy: 0.7004 - val_loss: 0.5442 - val_accuracy: 0.7285\nEpoch 58/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5616 - accuracy: 0.6990 - val_loss: 0.5427 - val_accuracy: 0.7344\nEpoch 59/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5605 - accuracy: 0.7019 - val_loss: 0.5411 - val_accuracy: 0.7359\nEpoch 60/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5583 - accuracy: 0.7045 - val_loss: 0.5409 - val_accuracy: 0.7285\nEpoch 61/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5567 - accuracy: 0.7030 - val_loss: 0.5426 - val_accuracy: 0.7136\nEpoch 62/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5566 - accuracy: 0.7034 - val_loss: 0.5406 - val_accuracy: 0.7166\nEpoch 63/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5548 - accuracy: 0.7086 - val_loss: 0.5371 - val_accuracy: 0.7300\nEpoch 64/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5520 - accuracy: 0.7108 - val_loss: 0.5389 - val_accuracy: 0.7226\nEpoch 65/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5512 - accuracy: 0.7123 - val_loss: 
0.5343 - val_accuracy: 0.7359\nEpoch 66/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5496 - accuracy: 0.7142 - val_loss: 0.5329 - val_accuracy: 0.7359\nEpoch 67/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5482 - accuracy: 0.7138 - val_loss: 0.5293 - val_accuracy: 0.7448\nEpoch 68/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5474 - accuracy: 0.7164 - val_loss: 0.5291 - val_accuracy: 0.7374\nEpoch 69/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5445 - accuracy: 0.7168 - val_loss: 0.5265 - val_accuracy: 0.7433\nEpoch 70/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5435 - accuracy: 0.7153 - val_loss: 0.5274 - val_accuracy: 0.7418\nEpoch 71/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5417 - accuracy: 0.7201 - val_loss: 0.5274 - val_accuracy: 0.7433\nEpoch 72/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5394 - accuracy: 0.7231 - val_loss: 0.5227 - val_accuracy: 0.7404\nEpoch 73/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5378 - accuracy: 0.7235 - val_loss: 0.5244 - val_accuracy: 0.7433\nEpoch 74/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5370 - accuracy: 0.7283 - val_loss: 0.5228 - val_accuracy: 0.7433\nEpoch 75/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5343 - accuracy: 0.7309 - val_loss: 0.5195 - val_accuracy: 0.7522\nEpoch 76/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.5321 - accuracy: 0.7305 - val_loss: 0.5161 - val_accuracy: 0.7478\nEpoch 77/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.5308 - accuracy: 0.7313 - val_loss: 0.5131 - val_accuracy: 0.7493\nEpoch 78/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5290 - accuracy: 0.7309 - val_loss: 0.5114 - val_accuracy: 0.7507\nEpoch 79/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5263 - accuracy: 0.7346 - val_loss: 0.5184 - val_accuracy: 0.7389\nEpoch 80/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5252 - accuracy: 0.7342 - val_loss: 0.5098 - val_accuracy: 0.7582\nEpoch 81/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5239 - accuracy: 0.7390 - val_loss: 0.5127 - val_accuracy: 0.7448\nEpoch 82/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5216 - accuracy: 0.7454 - val_loss: 0.5036 - val_accuracy: 0.7626\nEpoch 83/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5199 - accuracy: 0.7465 - val_loss: 0.5080 - val_accuracy: 0.7552\nEpoch 84/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.5171 - accuracy: 0.7442 - val_loss: 0.5000 - val_accuracy: 0.7671\nEpoch 85/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5150 - accuracy: 0.7454 - val_loss: 0.5034 - val_accuracy: 0.7567\nEpoch 86/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5128 - accuracy: 0.7539 - val_loss: 0.4982 - val_accuracy: 0.7641\nEpoch 87/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5104 - accuracy: 0.7509 - val_loss: 0.4946 - val_accuracy: 0.7700\nEpoch 88/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5085 - accuracy: 0.7595 - val_loss: 0.4945 - val_accuracy: 0.7641\nEpoch 89/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5058 - accuracy: 0.7532 - val_loss: 0.4905 - val_accuracy: 0.7730\nEpoch 90/200\n85/85 [==============================] - 0s 1ms/step 
- loss: 0.5043 - accuracy: 0.7602 - val_loss: 0.4961 - val_accuracy: 0.7537\nEpoch 91/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.5016 - accuracy: 0.7610 - val_loss: 0.4881 - val_accuracy: 0.7730\nEpoch 92/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4993 - accuracy: 0.7647 - val_loss: 0.4848 - val_accuracy: 0.7804\nEpoch 93/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4974 - accuracy: 0.7650 - val_loss: 0.4845 - val_accuracy: 0.7760\nEpoch 94/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4947 - accuracy: 0.7695 - val_loss: 0.4803 - val_accuracy: 0.7878\nEpoch 95/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4922 - accuracy: 0.7713 - val_loss: 0.4780 - val_accuracy: 0.7893\nEpoch 96/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4902 - accuracy: 0.7773 - val_loss: 0.4789 - val_accuracy: 0.7730\nEpoch 97/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4878 - accuracy: 0.7777 - val_loss: 0.4748 - val_accuracy: 0.7893\nEpoch 98/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4859 - accuracy: 0.7791 - val_loss: 0.4726 - val_accuracy: 0.7953\nEpoch 99/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4845 - accuracy: 0.7814 - val_loss: 0.4733 - val_accuracy: 0.7789\nEpoch 100/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4805 - accuracy: 0.7858 - val_loss: 0.4706 - val_accuracy: 0.7804\nEpoch 101/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4785 - accuracy: 0.7873 - val_loss: 0.4665 - val_accuracy: 0.7953\nEpoch 102/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4766 - accuracy: 0.7843 - val_loss: 0.4646 - val_accuracy: 0.7938\nEpoch 103/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4744 - accuracy: 0.7936 - val_loss: 0.4655 - val_accuracy: 0.7834\nEpoch 104/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4728 - accuracy: 0.7799 - val_loss: 0.4636 - val_accuracy: 0.7834\nEpoch 105/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4707 - accuracy: 0.7932 - val_loss: 0.4644 - val_accuracy: 0.7715\nEpoch 106/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4679 - accuracy: 0.7929 - val_loss: 0.4584 - val_accuracy: 0.7878\nEpoch 107/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4657 - accuracy: 0.7944 - val_loss: 0.4562 - val_accuracy: 0.7878\nEpoch 108/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4626 - accuracy: 0.7984 - val_loss: 0.4529 - val_accuracy: 0.8012\nEpoch 109/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4619 - accuracy: 0.7977 - val_loss: 0.4524 - val_accuracy: 0.7893\nEpoch 110/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4591 - accuracy: 0.8025 - val_loss: 0.4506 - val_accuracy: 0.7908\nEpoch 111/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4573 - accuracy: 0.8025 - val_loss: 0.4476 - val_accuracy: 0.8012\nEpoch 112/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4543 - accuracy: 0.8044 - val_loss: 0.4489 - val_accuracy: 0.7804\nEpoch 113/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4549 - accuracy: 0.8066 - val_loss: 0.4446 - val_accuracy: 0.7982\nEpoch 114/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4510 - accuracy: 0.8003 - val_loss: 0.4466 - val_accuracy: 0.7849\nEpoch 
115/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4496 - accuracy: 0.8070 - val_loss: 0.4435 - val_accuracy: 0.7893\nEpoch 116/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4466 - accuracy: 0.8088 - val_loss: 0.4388 - val_accuracy: 0.8131\nEpoch 117/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4458 - accuracy: 0.8036 - val_loss: 0.4372 - val_accuracy: 0.8131\nEpoch 118/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4436 - accuracy: 0.8103 - val_loss: 0.4362 - val_accuracy: 0.8042\nEpoch 119/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4418 - accuracy: 0.8092 - val_loss: 0.4362 - val_accuracy: 0.7953\nEpoch 120/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4413 - accuracy: 0.8070 - val_loss: 0.4393 - val_accuracy: 0.7864\nEpoch 121/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4380 - accuracy: 0.8155 - val_loss: 0.4362 - val_accuracy: 0.7864\nEpoch 122/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4367 - accuracy: 0.8099 - val_loss: 0.4367 - val_accuracy: 0.7864\nEpoch 123/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4345 - accuracy: 0.8163 - val_loss: 0.4346 - val_accuracy: 0.7893\nEpoch 124/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4333 - accuracy: 0.8099 - val_loss: 0.4272 - val_accuracy: 0.8160\nEpoch 125/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4312 - accuracy: 0.8151 - val_loss: 0.4310 - val_accuracy: 0.7938\nEpoch 126/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4310 - accuracy: 0.8122 - val_loss: 0.4344 - val_accuracy: 0.7834\nEpoch 127/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4299 - accuracy: 0.8129 - val_loss: 0.4303 - val_accuracy: 0.7923\nEpoch 128/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4269 - accuracy: 0.8196 - val_loss: 0.4236 - val_accuracy: 0.8027\nEpoch 129/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4267 - accuracy: 0.8151 - val_loss: 0.4283 - val_accuracy: 0.7908\nEpoch 130/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4244 - accuracy: 0.8226 - val_loss: 0.4226 - val_accuracy: 0.8027\nEpoch 131/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4235 - accuracy: 0.8155 - val_loss: 0.4227 - val_accuracy: 0.7982\nEpoch 132/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4209 - accuracy: 0.8218 - val_loss: 0.4178 - val_accuracy: 0.8131\nEpoch 133/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4202 - accuracy: 0.8207 - val_loss: 0.4173 - val_accuracy: 0.8027\nEpoch 134/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4190 - accuracy: 0.8226 - val_loss: 0.4180 - val_accuracy: 0.8056\nEpoch 135/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4191 - accuracy: 0.8237 - val_loss: 0.4248 - val_accuracy: 0.7849\nEpoch 136/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4168 - accuracy: 0.8233 - val_loss: 0.4141 - val_accuracy: 0.8071\nEpoch 137/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4155 - accuracy: 0.8196 - val_loss: 0.4200 - val_accuracy: 0.7982\nEpoch 138/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4149 - accuracy: 0.8189 - val_loss: 0.4140 - val_accuracy: 0.8101\nEpoch 139/200\n85/85 [==============================] - 0s 2ms/step - loss: 
0.4127 - accuracy: 0.8226 - val_loss: 0.4138 - val_accuracy: 0.8056\nEpoch 140/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4115 - accuracy: 0.8255 - val_loss: 0.4100 - val_accuracy: 0.8190\nEpoch 141/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4119 - accuracy: 0.8222 - val_loss: 0.4097 - val_accuracy: 0.8101\nEpoch 142/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4095 - accuracy: 0.8237 - val_loss: 0.4168 - val_accuracy: 0.7908\nEpoch 143/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4087 - accuracy: 0.8267 - val_loss: 0.4083 - val_accuracy: 0.8086\nEpoch 144/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4079 - accuracy: 0.8255 - val_loss: 0.4082 - val_accuracy: 0.8116\nEpoch 145/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4068 - accuracy: 0.8233 - val_loss: 0.4068 - val_accuracy: 0.8101\nEpoch 146/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4050 - accuracy: 0.8293 - val_loss: 0.4080 - val_accuracy: 0.8101\nEpoch 147/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4045 - accuracy: 0.8300 - val_loss: 0.4087 - val_accuracy: 0.8116\nEpoch 148/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.4031 - accuracy: 0.8274 - val_loss: 0.4067 - val_accuracy: 0.8145\nEpoch 149/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4027 - accuracy: 0.8248 - val_loss: 0.4037 - val_accuracy: 0.8175\nEpoch 150/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4031 - accuracy: 0.8263 - val_loss: 0.4028 - val_accuracy: 0.8131\nEpoch 151/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.4021 - accuracy: 0.8233 - val_loss: 0.4078 - val_accuracy: 0.8027\nEpoch 152/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3994 - accuracy: 0.8293 - val_loss: 0.4030 - val_accuracy: 0.8175\nEpoch 153/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3987 - accuracy: 0.8289 - val_loss: 0.4039 - val_accuracy: 0.8160\nEpoch 154/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3982 - accuracy: 0.8318 - val_loss: 0.4023 - val_accuracy: 0.8175\nEpoch 155/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3981 - accuracy: 0.8300 - val_loss: 0.4031 - val_accuracy: 0.8145\nEpoch 156/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3962 - accuracy: 0.8248 - val_loss: 0.4078 - val_accuracy: 0.8012\nEpoch 157/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3956 - accuracy: 0.8293 - val_loss: 0.3981 - val_accuracy: 0.8131\nEpoch 158/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3966 - accuracy: 0.8307 - val_loss: 0.4005 - val_accuracy: 0.8131\nEpoch 159/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3941 - accuracy: 0.8337 - val_loss: 0.3991 - val_accuracy: 0.8190\nEpoch 160/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3939 - accuracy: 0.8304 - val_loss: 0.3965 - val_accuracy: 0.8145\nEpoch 161/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3932 - accuracy: 0.8263 - val_loss: 0.4037 - val_accuracy: 0.8042\nEpoch 162/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3925 - accuracy: 0.8318 - val_loss: 0.3961 - val_accuracy: 0.8190\nEpoch 163/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3922 - accuracy: 0.8304 - val_loss: 0.4019 - val_accuracy: 0.8027\nEpoch 
164/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3920 - accuracy: 0.8285 - val_loss: 0.3952 - val_accuracy: 0.8190\nEpoch 165/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3903 - accuracy: 0.8267 - val_loss: 0.3985 - val_accuracy: 0.8101\nEpoch 166/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3905 - accuracy: 0.8307 - val_loss: 0.3944 - val_accuracy: 0.8190\nEpoch 167/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3890 - accuracy: 0.8315 - val_loss: 0.3981 - val_accuracy: 0.8086\nEpoch 168/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3889 - accuracy: 0.8318 - val_loss: 0.4013 - val_accuracy: 0.8012\nEpoch 169/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3884 - accuracy: 0.8333 - val_loss: 0.3934 - val_accuracy: 0.8190\nEpoch 170/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3879 - accuracy: 0.8337 - val_loss: 0.3920 - val_accuracy: 0.8160\nEpoch 171/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3866 - accuracy: 0.8278 - val_loss: 0.3926 - val_accuracy: 0.8160\nEpoch 172/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3860 - accuracy: 0.8382 - val_loss: 0.3913 - val_accuracy: 0.8175\nEpoch 173/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3856 - accuracy: 0.8348 - val_loss: 0.3920 - val_accuracy: 0.8160\nEpoch 174/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3852 - accuracy: 0.8318 - val_loss: 0.3956 - val_accuracy: 0.8071\nEpoch 175/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3849 - accuracy: 0.8326 - val_loss: 0.3904 - val_accuracy: 0.8205\nEpoch 176/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3855 - accuracy: 0.8337 - val_loss: 0.3936 - val_accuracy: 0.8175\nEpoch 177/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3849 - accuracy: 0.8330 - val_loss: 0.3924 - val_accuracy: 0.8205\nEpoch 178/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3834 - accuracy: 0.8370 - val_loss: 0.3894 - val_accuracy: 0.8234\nEpoch 179/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3827 - accuracy: 0.8307 - val_loss: 0.3905 - val_accuracy: 0.8190\nEpoch 180/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3819 - accuracy: 0.8344 - val_loss: 0.3905 - val_accuracy: 0.8190\nEpoch 181/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3826 - accuracy: 0.8344 - val_loss: 0.3886 - val_accuracy: 0.8234\nEpoch 182/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3824 - accuracy: 0.8285 - val_loss: 0.3890 - val_accuracy: 0.8220\nEpoch 183/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3813 - accuracy: 0.8367 - val_loss: 0.3881 - val_accuracy: 0.8220\nEpoch 184/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3804 - accuracy: 0.8385 - val_loss: 0.3887 - val_accuracy: 0.8234\nEpoch 185/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3808 - accuracy: 0.8307 - val_loss: 0.3880 - val_accuracy: 0.8205\nEpoch 186/200\n85/85 [==============================] - 0s 2ms/step - loss: 0.3805 - accuracy: 0.8326 - val_loss: 0.3887 - val_accuracy: 0.8249\nEpoch 187/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3795 - accuracy: 0.8363 - val_loss: 0.3877 - val_accuracy: 0.8220\nEpoch 188/200\n85/85 [==============================] - 0s 1ms/step - loss: 
0.3798 - accuracy: 0.8356 - val_loss: 0.3957 - val_accuracy: 0.8012\nEpoch 189/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3796 - accuracy: 0.8393 - val_loss: 0.3890 - val_accuracy: 0.8264\nEpoch 190/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3793 - accuracy: 0.8352 - val_loss: 0.3889 - val_accuracy: 0.8264\nEpoch 191/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3787 - accuracy: 0.8344 - val_loss: 0.3865 - val_accuracy: 0.8279\nEpoch 192/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3785 - accuracy: 0.8341 - val_loss: 0.3935 - val_accuracy: 0.8012\nEpoch 193/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3778 - accuracy: 0.8326 - val_loss: 0.3929 - val_accuracy: 0.8027\nEpoch 194/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3766 - accuracy: 0.8374 - val_loss: 0.4021 - val_accuracy: 0.7967\nEpoch 195/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3773 - accuracy: 0.8330 - val_loss: 0.3877 - val_accuracy: 0.8264\nEpoch 196/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3784 - accuracy: 0.8385 - val_loss: 0.3970 - val_accuracy: 0.7997\nEpoch 197/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3763 - accuracy: 0.8359 - val_loss: 0.3855 - val_accuracy: 0.8249\nEpoch 198/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3769 - accuracy: 0.8333 - val_loss: 0.3851 - val_accuracy: 0.8264\nEpoch 199/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3761 - accuracy: 0.8389 - val_loss: 0.3913 - val_accuracy: 0.8056\nEpoch 200/200\n85/85 [==============================] - 0s 1ms/step - loss: 0.3747 - accuracy: 0.8389 - val_loss: 0.3933 - val_accuracy: 0.8027\nFold 1, 200 epochs, 25 sec\n"
]
],
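[
[
"Added illustrative note: a quick sanity check for the model file just written by model2.save. The sketch below assumes it is run immediately after the cell above (so FILENAME, X_train and y_train still refer to the 1Kb-2Kb slice) and that the saved model carries an accuracy metric, as the training log suggests; it is not part of the original pipeline.",
"_____no_output_____"
]
],
[
[
"# Illustrative reload check (assumption: run right after the cell above, so\n# FILENAME, X_train and y_train still refer to the 1Kb-2Kb slice).\nfrom tensorflow import keras\n\nreloaded = keras.models.load_model(FILENAME + '.medium.model')\nloss, acc = reloaded.evaluate(X_train, y_train, verbose=0)\nprint('Reloaded medium model -- loss: %.4f, accuracy: %.4f' % (loss, acc))",
"_____no_output_____"
]
],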
[
[
"## Len 2Kb-3Kb",
"_____no_output_____"
]
],
[
[
"MINLEN=2000\nMAXLEN=3000\n\nprint (\"Compile the model\")\nmodel=build_model(MAXLEN,EMBED_DIMEN)\nprint (\"Summarize the model\")\nprint(model.summary()) # Print this only once\n\nprint(\"Working on full training set, slice by sequence length.\")\nprint(\"Slice size range [%d - %d)\"%(MINLEN,MAXLEN))\nsubset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y\n\nprint (\"Sequence to Kmer\")\n(X_train,y_train)=make_kmers(MAXLEN,subset)\nX_train\nX_train=make_frequencies(X_train)\nX_train\nprint (\"Cross valiation\")\nmodel3 = do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN)\nmodel3.save(FILENAME+'.long.model')",
"Compile the model\nCOMPILE...\n...COMPILED\nSummarize the model\nModel: \"sequential_40\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_160 (Dense) (None, 16) 4128 \n_________________________________________________________________\ndense_161 (Dense) (None, 16) 272 \n_________________________________________________________________\ndense_163 (Dense) (None, 1) 17 \n=================================================================\nTotal params: 4,417\nTrainable params: 4,417\nNon-trainable params: 0\n_________________________________________________________________\nNone\nWorking on full training set, slice by sequence length.\nSlice size range [2000 - 3000)\noriginal (30290, 4)\nno short (3221, 4)\nno long, no short (1351, 4)\nSequence to Kmer\n<class 'pandas.core.frame.DataFrame'>\n(1351, 1)\nsequence GTCATTCTAGCTGCCTGCTGCCTCCGCAGCGTCCCCCCAGCTCTCC...\nName: 19713, dtype: object\n2039\ntransform...\n<class 'pandas.core.frame.DataFrame'>\n<class 'numpy.ndarray'>\n[[180 224 78 ... 0 0 0]\n [ 5 36 159 ... 0 0 0]\n [ 46 181 243 ... 0 0 0]\n ...\n [ 51 202 5 ... 0 0 0]\n [145 99 138 ... 0 0 0]\n [ 47 138 56 ... 0 0 0]]\nCross valiation\nBUILD MODEL\nCOMPILE...\n...COMPILED\nFIT\nEpoch 1/200\n34/34 [==============================] - 0s 5ms/step - loss: 0.8595 - accuracy: 0.2917 - val_loss: 0.7417 - val_accuracy: 0.3210\nEpoch 2/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6907 - accuracy: 0.5398 - val_loss: 0.6542 - val_accuracy: 0.6790\nEpoch 3/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6276 - accuracy: 0.7083 - val_loss: 0.6308 - val_accuracy: 0.6790\nEpoch 4/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6088 - accuracy: 0.7083 - val_loss: 0.6275 - val_accuracy: 0.6790\nEpoch 5/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6045 - accuracy: 0.7083 - val_loss: 0.6280 - val_accuracy: 0.6790\nEpoch 6/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6036 - accuracy: 0.7083 - val_loss: 0.6288 - val_accuracy: 0.6790\nEpoch 7/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6032 - accuracy: 0.7083 - val_loss: 0.6286 - val_accuracy: 0.6790\nEpoch 8/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6032 - accuracy: 0.7083 - val_loss: 0.6284 - val_accuracy: 0.6790\nEpoch 9/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6028 - accuracy: 0.7083 - val_loss: 0.6288 - val_accuracy: 0.6790\nEpoch 10/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6031 - accuracy: 0.7083 - val_loss: 0.6283 - val_accuracy: 0.6790\nEpoch 11/200\n34/34 [==============================] - 0s 1ms/step - loss: 0.6026 - accuracy: 0.7083 - val_loss: 0.6280 - val_accuracy: 0.6790\nEpoch 12/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6023 - accuracy: 0.7083 - val_loss: 0.6282 - val_accuracy: 0.6790\nEpoch 13/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6021 - accuracy: 0.7083 - val_loss: 0.6274 - val_accuracy: 0.6790\nEpoch 14/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6023 - accuracy: 0.7083 - val_loss: 0.6278 - val_accuracy: 0.6790\nEpoch 15/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6022 - accuracy: 0.7083 - val_loss: 0.6274 - val_accuracy: 0.6790\nEpoch 16/200\n34/34 [==============================] - 0s 2ms/step - loss: 
0.6016 - accuracy: 0.7083 - val_loss: 0.6277 - val_accuracy: 0.6790\nEpoch 17/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6014 - accuracy: 0.7083 - val_loss: 0.6268 - val_accuracy: 0.6790\nEpoch 18/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6011 - accuracy: 0.7083 - val_loss: 0.6273 - val_accuracy: 0.6790\nEpoch 19/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6010 - accuracy: 0.7083 - val_loss: 0.6270 - val_accuracy: 0.6790\nEpoch 20/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6009 - accuracy: 0.7083 - val_loss: 0.6264 - val_accuracy: 0.6790\nEpoch 21/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6006 - accuracy: 0.7083 - val_loss: 0.6256 - val_accuracy: 0.6790\nEpoch 22/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6003 - accuracy: 0.7083 - val_loss: 0.6258 - val_accuracy: 0.6790\nEpoch 23/200\n34/34 [==============================] - 0s 1ms/step - loss: 0.6003 - accuracy: 0.7083 - val_loss: 0.6255 - val_accuracy: 0.6790\nEpoch 24/200\n34/34 [==============================] - 0s 1ms/step - loss: 0.6000 - accuracy: 0.7083 - val_loss: 0.6244 - val_accuracy: 0.6790\nEpoch 25/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.6007 - accuracy: 0.7083 - val_loss: 0.6259 - val_accuracy: 0.6790\nEpoch 26/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5997 - accuracy: 0.7083 - val_loss: 0.6240 - val_accuracy: 0.6790\nEpoch 27/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5991 - accuracy: 0.7083 - val_loss: 0.6245 - val_accuracy: 0.6790\nEpoch 28/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5989 - accuracy: 0.7083 - val_loss: 0.6239 - val_accuracy: 0.6790\nEpoch 29/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5986 - accuracy: 0.7083 - val_loss: 0.6244 - val_accuracy: 0.6790\nEpoch 30/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5981 - accuracy: 0.7083 - val_loss: 0.6235 - val_accuracy: 0.6790\nEpoch 31/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5980 - accuracy: 0.7083 - val_loss: 0.6229 - val_accuracy: 0.6790\nEpoch 32/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5977 - accuracy: 0.7083 - val_loss: 0.6231 - val_accuracy: 0.6790\nEpoch 33/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5974 - accuracy: 0.7083 - val_loss: 0.6228 - val_accuracy: 0.6790\nEpoch 34/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5974 - accuracy: 0.7083 - val_loss: 0.6215 - val_accuracy: 0.6790\nEpoch 35/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5967 - accuracy: 0.7083 - val_loss: 0.6218 - val_accuracy: 0.6790\nEpoch 36/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5963 - accuracy: 0.7083 - val_loss: 0.6219 - val_accuracy: 0.6790\nEpoch 37/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5962 - accuracy: 0.7083 - val_loss: 0.6205 - val_accuracy: 0.6790\nEpoch 38/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5956 - accuracy: 0.7083 - val_loss: 0.6203 - val_accuracy: 0.6790\nEpoch 39/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5954 - accuracy: 0.7083 - val_loss: 0.6211 - val_accuracy: 0.6790\nEpoch 40/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5955 - accuracy: 0.7083 - val_loss: 0.6197 - val_accuracy: 0.6790\nEpoch 41/200\n34/34 
[==============================] - 0s 2ms/step - loss: 0.5947 - accuracy: 0.7083 - val_loss: 0.6186 - val_accuracy: 0.6790\nEpoch 42/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5939 - accuracy: 0.7083 - val_loss: 0.6189 - val_accuracy: 0.6790\nEpoch 43/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5944 - accuracy: 0.7083 - val_loss: 0.6174 - val_accuracy: 0.6790\nEpoch 44/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5930 - accuracy: 0.7083 - val_loss: 0.6183 - val_accuracy: 0.6790\nEpoch 45/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5931 - accuracy: 0.7083 - val_loss: 0.6174 - val_accuracy: 0.6790\nEpoch 46/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5925 - accuracy: 0.7083 - val_loss: 0.6168 - val_accuracy: 0.6790\nEpoch 47/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5917 - accuracy: 0.7083 - val_loss: 0.6157 - val_accuracy: 0.6790\nEpoch 48/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5916 - accuracy: 0.7083 - val_loss: 0.6152 - val_accuracy: 0.6790\nEpoch 49/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5904 - accuracy: 0.7083 - val_loss: 0.6157 - val_accuracy: 0.6790\nEpoch 50/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5899 - accuracy: 0.7083 - val_loss: 0.6142 - val_accuracy: 0.6790\nEpoch 51/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5899 - accuracy: 0.7083 - val_loss: 0.6134 - val_accuracy: 0.6790\nEpoch 52/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5899 - accuracy: 0.7083 - val_loss: 0.6127 - val_accuracy: 0.6790\nEpoch 53/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5881 - accuracy: 0.7083 - val_loss: 0.6116 - val_accuracy: 0.6790\nEpoch 54/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5875 - accuracy: 0.7083 - val_loss: 0.6121 - val_accuracy: 0.6790\nEpoch 55/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5867 - accuracy: 0.7083 - val_loss: 0.6107 - val_accuracy: 0.6790\nEpoch 56/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5861 - accuracy: 0.7083 - val_loss: 0.6085 - val_accuracy: 0.6790\nEpoch 57/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5853 - accuracy: 0.7083 - val_loss: 0.6088 - val_accuracy: 0.6790\nEpoch 58/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5843 - accuracy: 0.7083 - val_loss: 0.6074 - val_accuracy: 0.6790\nEpoch 59/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5836 - accuracy: 0.7083 - val_loss: 0.6066 - val_accuracy: 0.6790\nEpoch 60/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5839 - accuracy: 0.7083 - val_loss: 0.6062 - val_accuracy: 0.6790\nEpoch 61/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5821 - accuracy: 0.7083 - val_loss: 0.6043 - val_accuracy: 0.6790\nEpoch 62/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5811 - accuracy: 0.7083 - val_loss: 0.6039 - val_accuracy: 0.6790\nEpoch 63/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5806 - accuracy: 0.7083 - val_loss: 0.6025 - val_accuracy: 0.6790\nEpoch 64/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5798 - accuracy: 0.7083 - val_loss: 0.6042 - val_accuracy: 0.6790\nEpoch 65/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5780 - accuracy: 0.7083 - val_loss: 
0.5999 - val_accuracy: 0.6790\nEpoch 66/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5777 - accuracy: 0.7083 - val_loss: 0.5986 - val_accuracy: 0.6790\nEpoch 67/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5763 - accuracy: 0.7083 - val_loss: 0.5985 - val_accuracy: 0.6790\nEpoch 68/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5757 - accuracy: 0.7083 - val_loss: 0.5970 - val_accuracy: 0.6790\nEpoch 69/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5745 - accuracy: 0.7083 - val_loss: 0.5960 - val_accuracy: 0.6790\nEpoch 70/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5733 - accuracy: 0.7083 - val_loss: 0.5942 - val_accuracy: 0.6790\nEpoch 71/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5725 - accuracy: 0.7083 - val_loss: 0.5930 - val_accuracy: 0.6790\nEpoch 72/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5711 - accuracy: 0.7083 - val_loss: 0.5923 - val_accuracy: 0.6790\nEpoch 73/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5709 - accuracy: 0.7083 - val_loss: 0.5900 - val_accuracy: 0.6790\nEpoch 74/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5702 - accuracy: 0.7083 - val_loss: 0.5886 - val_accuracy: 0.6790\nEpoch 75/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5682 - accuracy: 0.7083 - val_loss: 0.5879 - val_accuracy: 0.6790\nEpoch 76/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5674 - accuracy: 0.7083 - val_loss: 0.5881 - val_accuracy: 0.6790\nEpoch 77/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5666 - accuracy: 0.7083 - val_loss: 0.5853 - val_accuracy: 0.6790\nEpoch 78/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5647 - accuracy: 0.7083 - val_loss: 0.5835 - val_accuracy: 0.6790\nEpoch 79/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5643 - accuracy: 0.7083 - val_loss: 0.5838 - val_accuracy: 0.6790\nEpoch 80/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5627 - accuracy: 0.7083 - val_loss: 0.5828 - val_accuracy: 0.6790\nEpoch 81/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5621 - accuracy: 0.7083 - val_loss: 0.5809 - val_accuracy: 0.6790\nEpoch 82/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5606 - accuracy: 0.7083 - val_loss: 0.5805 - val_accuracy: 0.6790\nEpoch 83/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5595 - accuracy: 0.7083 - val_loss: 0.5783 - val_accuracy: 0.6827\nEpoch 84/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5582 - accuracy: 0.7083 - val_loss: 0.5765 - val_accuracy: 0.6827\nEpoch 85/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5583 - accuracy: 0.7083 - val_loss: 0.5768 - val_accuracy: 0.6827\nEpoch 86/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5568 - accuracy: 0.7083 - val_loss: 0.5736 - val_accuracy: 0.6827\nEpoch 87/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5567 - accuracy: 0.7093 - val_loss: 0.5725 - val_accuracy: 0.6827\nEpoch 88/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5544 - accuracy: 0.7093 - val_loss: 0.5732 - val_accuracy: 0.6827\nEpoch 89/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5553 - accuracy: 0.7111 - val_loss: 0.5721 - val_accuracy: 0.6827\nEpoch 90/200\n34/34 [==============================] - 0s 2ms/step 
- loss: 0.5523 - accuracy: 0.7093 - val_loss: 0.5697 - val_accuracy: 0.6863\nEpoch 91/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5517 - accuracy: 0.7093 - val_loss: 0.5692 - val_accuracy: 0.6863\nEpoch 92/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5505 - accuracy: 0.7120 - val_loss: 0.5669 - val_accuracy: 0.6863\nEpoch 93/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5500 - accuracy: 0.7139 - val_loss: 0.5666 - val_accuracy: 0.6863\nEpoch 94/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5490 - accuracy: 0.7130 - val_loss: 0.5669 - val_accuracy: 0.6863\nEpoch 95/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5484 - accuracy: 0.7130 - val_loss: 0.5642 - val_accuracy: 0.6900\nEpoch 96/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5478 - accuracy: 0.7111 - val_loss: 0.5642 - val_accuracy: 0.6900\nEpoch 97/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5459 - accuracy: 0.7148 - val_loss: 0.5615 - val_accuracy: 0.6863\nEpoch 98/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5457 - accuracy: 0.7139 - val_loss: 0.5604 - val_accuracy: 0.6900\nEpoch 99/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5449 - accuracy: 0.7148 - val_loss: 0.5615 - val_accuracy: 0.6900\nEpoch 100/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5435 - accuracy: 0.7139 - val_loss: 0.5592 - val_accuracy: 0.6863\nEpoch 101/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5428 - accuracy: 0.7157 - val_loss: 0.5594 - val_accuracy: 0.6863\nEpoch 102/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5422 - accuracy: 0.7167 - val_loss: 0.5568 - val_accuracy: 0.6900\nEpoch 103/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5416 - accuracy: 0.7176 - val_loss: 0.5556 - val_accuracy: 0.6900\nEpoch 104/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5411 - accuracy: 0.7130 - val_loss: 0.5568 - val_accuracy: 0.6900\nEpoch 105/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5402 - accuracy: 0.7231 - val_loss: 0.5537 - val_accuracy: 0.6974\nEpoch 106/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5386 - accuracy: 0.7194 - val_loss: 0.5537 - val_accuracy: 0.6900\nEpoch 107/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5384 - accuracy: 0.7204 - val_loss: 0.5520 - val_accuracy: 0.7011\nEpoch 108/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5376 - accuracy: 0.7259 - val_loss: 0.5505 - val_accuracy: 0.7085\nEpoch 109/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5362 - accuracy: 0.7278 - val_loss: 0.5503 - val_accuracy: 0.7011\nEpoch 110/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5359 - accuracy: 0.7259 - val_loss: 0.5506 - val_accuracy: 0.6974\nEpoch 111/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5351 - accuracy: 0.7278 - val_loss: 0.5484 - val_accuracy: 0.7122\nEpoch 112/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5344 - accuracy: 0.7278 - val_loss: 0.5471 - val_accuracy: 0.7159\nEpoch 113/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5337 - accuracy: 0.7315 - val_loss: 0.5484 - val_accuracy: 0.7011\nEpoch 114/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5333 - accuracy: 0.7241 - val_loss: 0.5464 - val_accuracy: 0.7122\nEpoch 
115/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5323 - accuracy: 0.7315 - val_loss: 0.5455 - val_accuracy: 0.7122\nEpoch 116/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5313 - accuracy: 0.7333 - val_loss: 0.5456 - val_accuracy: 0.7122\nEpoch 117/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5305 - accuracy: 0.7306 - val_loss: 0.5443 - val_accuracy: 0.7122\nEpoch 118/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5306 - accuracy: 0.7380 - val_loss: 0.5442 - val_accuracy: 0.7122\nEpoch 119/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5290 - accuracy: 0.7315 - val_loss: 0.5407 - val_accuracy: 0.7122\nEpoch 120/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5293 - accuracy: 0.7324 - val_loss: 0.5398 - val_accuracy: 0.7122\nEpoch 121/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5275 - accuracy: 0.7361 - val_loss: 0.5400 - val_accuracy: 0.7122\nEpoch 122/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5268 - accuracy: 0.7389 - val_loss: 0.5386 - val_accuracy: 0.7159\nEpoch 123/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5261 - accuracy: 0.7352 - val_loss: 0.5382 - val_accuracy: 0.7122\nEpoch 124/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5262 - accuracy: 0.7333 - val_loss: 0.5392 - val_accuracy: 0.7122\nEpoch 125/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5248 - accuracy: 0.7389 - val_loss: 0.5367 - val_accuracy: 0.7159\nEpoch 126/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5249 - accuracy: 0.7343 - val_loss: 0.5372 - val_accuracy: 0.7159\nEpoch 127/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5231 - accuracy: 0.7407 - val_loss: 0.5353 - val_accuracy: 0.7122\nEpoch 128/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5226 - accuracy: 0.7361 - val_loss: 0.5342 - val_accuracy: 0.7122\nEpoch 129/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5215 - accuracy: 0.7380 - val_loss: 0.5334 - val_accuracy: 0.7159\nEpoch 130/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5206 - accuracy: 0.7417 - val_loss: 0.5321 - val_accuracy: 0.7196\nEpoch 131/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5205 - accuracy: 0.7417 - val_loss: 0.5318 - val_accuracy: 0.7196\nEpoch 132/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5196 - accuracy: 0.7407 - val_loss: 0.5298 - val_accuracy: 0.7380\nEpoch 133/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5194 - accuracy: 0.7398 - val_loss: 0.5288 - val_accuracy: 0.7343\nEpoch 134/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5183 - accuracy: 0.7444 - val_loss: 0.5312 - val_accuracy: 0.7122\nEpoch 135/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5177 - accuracy: 0.7380 - val_loss: 0.5279 - val_accuracy: 0.7343\nEpoch 136/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5160 - accuracy: 0.7454 - val_loss: 0.5270 - val_accuracy: 0.7380\nEpoch 137/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5159 - accuracy: 0.7343 - val_loss: 0.5266 - val_accuracy: 0.7343\nEpoch 138/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5153 - accuracy: 0.7454 - val_loss: 0.5272 - val_accuracy: 0.7232\nEpoch 139/200\n34/34 [==============================] - 0s 2ms/step - loss: 
0.5141 - accuracy: 0.7454 - val_loss: 0.5254 - val_accuracy: 0.7306\nEpoch 140/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5130 - accuracy: 0.7500 - val_loss: 0.5229 - val_accuracy: 0.7343\nEpoch 141/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5124 - accuracy: 0.7463 - val_loss: 0.5234 - val_accuracy: 0.7380\nEpoch 142/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5120 - accuracy: 0.7500 - val_loss: 0.5215 - val_accuracy: 0.7380\nEpoch 143/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5106 - accuracy: 0.7463 - val_loss: 0.5222 - val_accuracy: 0.7343\nEpoch 144/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5100 - accuracy: 0.7435 - val_loss: 0.5217 - val_accuracy: 0.7343\nEpoch 145/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5093 - accuracy: 0.7481 - val_loss: 0.5199 - val_accuracy: 0.7380\nEpoch 146/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5089 - accuracy: 0.7500 - val_loss: 0.5186 - val_accuracy: 0.7417\nEpoch 147/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5073 - accuracy: 0.7463 - val_loss: 0.5189 - val_accuracy: 0.7380\nEpoch 148/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5068 - accuracy: 0.7519 - val_loss: 0.5165 - val_accuracy: 0.7454\nEpoch 149/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5063 - accuracy: 0.7444 - val_loss: 0.5153 - val_accuracy: 0.7528\nEpoch 150/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5051 - accuracy: 0.7546 - val_loss: 0.5166 - val_accuracy: 0.7417\nEpoch 151/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5048 - accuracy: 0.7509 - val_loss: 0.5145 - val_accuracy: 0.7454\nEpoch 152/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5032 - accuracy: 0.7509 - val_loss: 0.5121 - val_accuracy: 0.7565\nEpoch 153/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5031 - accuracy: 0.7583 - val_loss: 0.5163 - val_accuracy: 0.7306\nEpoch 154/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5030 - accuracy: 0.7519 - val_loss: 0.5117 - val_accuracy: 0.7491\nEpoch 155/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5006 - accuracy: 0.7546 - val_loss: 0.5103 - val_accuracy: 0.7638\nEpoch 156/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5002 - accuracy: 0.7583 - val_loss: 0.5117 - val_accuracy: 0.7454\nEpoch 157/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.5000 - accuracy: 0.7519 - val_loss: 0.5108 - val_accuracy: 0.7491\nEpoch 158/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4991 - accuracy: 0.7472 - val_loss: 0.5065 - val_accuracy: 0.7601\nEpoch 159/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4988 - accuracy: 0.7574 - val_loss: 0.5066 - val_accuracy: 0.7601\nEpoch 160/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4972 - accuracy: 0.7620 - val_loss: 0.5046 - val_accuracy: 0.7638\nEpoch 161/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4964 - accuracy: 0.7602 - val_loss: 0.5037 - val_accuracy: 0.7675\nEpoch 162/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4948 - accuracy: 0.7639 - val_loss: 0.5057 - val_accuracy: 0.7528\nEpoch 163/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4933 - accuracy: 0.7639 - val_loss: 0.5035 - val_accuracy: 0.7601\nEpoch 
164/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4927 - accuracy: 0.7620 - val_loss: 0.5013 - val_accuracy: 0.7638\nEpoch 165/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4917 - accuracy: 0.7657 - val_loss: 0.5029 - val_accuracy: 0.7601\nEpoch 166/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4911 - accuracy: 0.7657 - val_loss: 0.5015 - val_accuracy: 0.7638\nEpoch 167/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4903 - accuracy: 0.7630 - val_loss: 0.4983 - val_accuracy: 0.7675\nEpoch 168/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4916 - accuracy: 0.7546 - val_loss: 0.4963 - val_accuracy: 0.7565\nEpoch 169/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4882 - accuracy: 0.7685 - val_loss: 0.4967 - val_accuracy: 0.7675\nEpoch 170/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4869 - accuracy: 0.7657 - val_loss: 0.4978 - val_accuracy: 0.7638\nEpoch 171/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4861 - accuracy: 0.7667 - val_loss: 0.4960 - val_accuracy: 0.7675\nEpoch 172/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4849 - accuracy: 0.7722 - val_loss: 0.4961 - val_accuracy: 0.7638\nEpoch 173/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4841 - accuracy: 0.7657 - val_loss: 0.4937 - val_accuracy: 0.7675\nEpoch 174/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4826 - accuracy: 0.7704 - val_loss: 0.4913 - val_accuracy: 0.7675\nEpoch 175/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4819 - accuracy: 0.7667 - val_loss: 0.4937 - val_accuracy: 0.7675\nEpoch 176/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4817 - accuracy: 0.7694 - val_loss: 0.4891 - val_accuracy: 0.7712\nEpoch 177/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4806 - accuracy: 0.7722 - val_loss: 0.4872 - val_accuracy: 0.7638\nEpoch 178/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4798 - accuracy: 0.7704 - val_loss: 0.4915 - val_accuracy: 0.7565\nEpoch 179/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4787 - accuracy: 0.7713 - val_loss: 0.4885 - val_accuracy: 0.7749\nEpoch 180/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4773 - accuracy: 0.7713 - val_loss: 0.4863 - val_accuracy: 0.7749\nEpoch 181/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4772 - accuracy: 0.7704 - val_loss: 0.4831 - val_accuracy: 0.7712\nEpoch 182/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4745 - accuracy: 0.7713 - val_loss: 0.4832 - val_accuracy: 0.7712\nEpoch 183/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4733 - accuracy: 0.7787 - val_loss: 0.4813 - val_accuracy: 0.7712\nEpoch 184/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4723 - accuracy: 0.7741 - val_loss: 0.4819 - val_accuracy: 0.7749\nEpoch 185/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4710 - accuracy: 0.7806 - val_loss: 0.4800 - val_accuracy: 0.7749\nEpoch 186/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4706 - accuracy: 0.7741 - val_loss: 0.4816 - val_accuracy: 0.7749\nEpoch 187/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4688 - accuracy: 0.7787 - val_loss: 0.4788 - val_accuracy: 0.7749\nEpoch 188/200\n34/34 [==============================] - 0s 2ms/step - loss: 
0.4676 - accuracy: 0.7815 - val_loss: 0.4778 - val_accuracy: 0.7749\nEpoch 189/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4669 - accuracy: 0.7796 - val_loss: 0.4772 - val_accuracy: 0.7749\nEpoch 190/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4664 - accuracy: 0.7741 - val_loss: 0.4723 - val_accuracy: 0.7749\nEpoch 191/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4663 - accuracy: 0.7806 - val_loss: 0.4735 - val_accuracy: 0.7675\nEpoch 192/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4631 - accuracy: 0.7806 - val_loss: 0.4741 - val_accuracy: 0.7749\nEpoch 193/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4629 - accuracy: 0.7787 - val_loss: 0.4704 - val_accuracy: 0.7823\nEpoch 194/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4622 - accuracy: 0.7852 - val_loss: 0.4684 - val_accuracy: 0.7823\nEpoch 195/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4602 - accuracy: 0.7861 - val_loss: 0.4706 - val_accuracy: 0.7749\nEpoch 196/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4589 - accuracy: 0.7880 - val_loss: 0.4667 - val_accuracy: 0.7823\nEpoch 197/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4574 - accuracy: 0.7889 - val_loss: 0.4678 - val_accuracy: 0.7786\nEpoch 198/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4564 - accuracy: 0.7898 - val_loss: 0.4648 - val_accuracy: 0.7786\nEpoch 199/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4559 - accuracy: 0.7907 - val_loss: 0.4648 - val_accuracy: 0.7897\nEpoch 200/200\n34/34 [==============================] - 0s 2ms/step - loss: 0.4546 - accuracy: 0.7898 - val_loss: 0.4646 - val_accuracy: 0.7823\nFold 1, 200 epochs, 13 sec\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbb9e5355f8a300acf343fbb392c95f0f754e915
| 21,540 |
ipynb
|
Jupyter Notebook
|
tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
|
HQhalo/NeMo
|
1407d014be4191c6a64b23941887b7e109075873
|
[
"Apache-2.0"
] | 4,145 |
2019-09-13T08:29:43.000Z
|
2022-03-31T18:31:44.000Z
|
tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
|
HQhalo/NeMo
|
1407d014be4191c6a64b23941887b7e109075873
|
[
"Apache-2.0"
] | 2,031 |
2019-09-17T16:51:39.000Z
|
2022-03-31T23:52:41.000Z
|
tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
|
HQhalo/NeMo
|
1407d014be4191c6a64b23941887b7e109075873
|
[
"Apache-2.0"
] | 1,041 |
2019-09-13T10:08:21.000Z
|
2022-03-30T06:37:38.000Z
| 34.136292 | 505 | 0.641411 |
[
[
[
"\"\"\"\nYou can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n\nInstructions for setting up Colab are as follows:\n1. Open a new Python 3 notebook.\n2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n4. Run this cell to set up dependencies.\n\"\"\"\n# If you're using Google Colab and not running locally, run this cell.\n\n## Install dependencies\n!pip install wget\n!apt-get install sox libsndfile1 ffmpeg\n!pip install unidecode\n\n# ## Install NeMo\nBRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n\n## Install TorchAudio\n!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html",
"_____no_output_____"
]
],
[
[
"## Introduction\nWho Speaks When? Speaker Diarization is the task of segmenting audio recordings by speaker labels. \nA diarization system consists of Voice Activity Detection (VAD) model to get the time stamps of audio where speech is being spoken ignoring the background and Speaker Embeddings model to get speaker embeddings on segments that were previously time stamped. These speaker embeddings would then be clustered into clusters based on number of speakers present in the audio recording.\n\nIn NeMo we support both **oracle VAD** and **non-oracle VAD** diarization. \n\nIn this tutorial, we shall first demonstrate how to perform diarization with a oracle VAD time stamps (we assume we already have speech time stamps) and pretrained speaker verification model which can be found in tutorial for [Speaker Identification and Verification in NeMo](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb).\n\nIn ORACLE-VAD-DIARIZATION we show how to perform VAD and then diarization if ground truth timestamped speech were not available (non-oracle VAD). We also have tutorials for [VAD training in NeMo](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Voice_Activity_Detection.ipynb) and [online offline microphone inference](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb), where you can custom your model and training/finetuning on your own data.\n\nFor demonstration purposes we would be using simulated audio from [an4 dataset](http://www.speech.cs.cmu.edu/databases/an4/)",
"_____no_output_____"
]
],
[
[
"import os\nimport wget\nROOT = os.getcwd()\ndata_dir = os.path.join(ROOT,'data')\nos.makedirs(data_dir, exist_ok=True)\nan4_audio = os.path.join(data_dir,'an4_diarize_test.wav')\nan4_rttm = os.path.join(data_dir,'an4_diarize_test.rttm')\nif not os.path.exists(an4_audio):\n an4_audio_url = \"https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav\"\n an4_audio = wget.download(an4_audio_url, data_dir)\nif not os.path.exists(an4_rttm):\n an4_rttm_url = \"https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.rttm\"\n an4_rttm = wget.download(an4_rttm_url, data_dir)",
"_____no_output_____"
]
],
[
[
"Let's plot and listen to the audio and visualize the RTTM speaker labels",
"_____no_output_____"
]
],
[
[
"import IPython\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport librosa\n\nsr = 16000\nsignal, sr = librosa.load(an4_audio,sr=sr) \n\nfig,ax = plt.subplots(1,1)\nfig.set_figwidth(20)\nfig.set_figheight(2)\nplt.plot(np.arange(len(signal)),signal,'gray')\nfig.suptitle('Reference merged an4 audio', fontsize=16)\nplt.xlabel('time (secs)', fontsize=18)\nax.margins(x=0)\nplt.ylabel('signal strength', fontsize=16);\na,_ = plt.xticks();plt.xticks(a,a/sr);\n\nIPython.display.Audio(an4_audio)",
"_____no_output_____"
]
],
[
[
"We would use [pyannote_metrics](https://pyannote.github.io/pyannote-metrics/) for visualization and score calculation purposes. Hence all the labels in rttm formats would eventually be converted to pyannote objects, we created two helper functions rttm_to_labels (for NeMo intermediate processing) and labels_to_pyannote_object for scoring and visualization format",
"_____no_output_____"
]
],
[
[
"from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels, labels_to_pyannote_object",
"_____no_output_____"
]
],
[
[
"Let's load ground truth RTTM labels and view the reference Annotation timestamps visually",
"_____no_output_____"
]
],
[
[
"# view the sample rttm file\n!cat {an4_rttm}",
"_____no_output_____"
],
[
"labels = rttm_to_labels(an4_rttm)\nreference = labels_to_pyannote_object(labels)\nprint(labels)\nreference",
"_____no_output_____"
]
],
[
[
"Speaker Diarization scripts commonly expects following arguments:\n1. manifest_filepath : Path to manifest file containing json lines of format: {'audio_filepath': /path/to/audio_file, 'offset': 0, 'duration':None, 'label': 'infer', 'text': '-', 'num_speakers': None, 'rttm_filepath': /path/to/rttm/file, 'uem_filepath'='/path/to/uem/filepath'}\n2. out_dir : directory where outputs and intermediate files are stored. \n3. oracle_vad: If this is true then we extract speech activity labels from rttm files, if False then either \n4. vad.model_path or external_manifestpath containing speech activity labels has to be passed. \n\nMandatory fields are audio_filepath, offset, duration, label and text. For the rest if you would like to evaluate with known number of speakers pass the value else None. If you would like to score the system with known rttms then that should be passed as well, else None. uem file is used to score only part of your audio for evaluation purposes, hence pass if you would like to evaluate on it else None.\n\n\n**Note** we expect audio and corresponding RTTM have **same base name** and the name should be **unique**. \n\nFor eg: if audio file name is **test_an4**.wav, if provided we expect corresponding rttm file name to be **test_an4**.rttm (note the matching **test_an4** base name)\n",
"_____no_output_____"
],
[
"Lets create manifest with the an4 audio and rttm available. If you have more than one files you may also use the script `pathsfiles_to_manifest.py` to generate manifest file from list of audio files and optionally rttm files ",
"_____no_output_____"
]
],
[
[
"# Create a manifest for input with below format. \n# {'audio_filepath': /path/to/audio_file, 'offset': 0, 'duration':None, 'label': 'infer', 'text': '-', \n# 'num_speakers': None, 'rttm_filepath': /path/to/rttm/file, 'uem_filepath'='/path/to/uem/filepath'}\nimport json\nmeta = {\n 'audio_filepath': an4_audio, \n 'offset': 0, \n 'duration':None, \n 'label': 'infer', \n 'text': '-', \n 'num_speakers': 2, \n 'rttm_filepath': an4_rttm, \n 'uem_filepath' : None\n}\nwith open('data/input_manifest.json','w') as fp:\n json.dump(meta,fp)\n fp.write('\\n')\n\n!cat data/input_manifest.json\n\noutput_dir = os.path.join(ROOT, 'oracle_vad')\nos.makedirs(output_dir,exist_ok=True)",
"_____no_output_____"
]
],
[
[
"# ORACLE-VAD DIARIZATION",
"_____no_output_____"
],
[
"Oracle-vad diarization is to compute speaker embeddings from known speech label timestamps rather than depending on VAD output. This step can also be used to run speaker diarization with rttms generated from any external VAD, not just VAD model from NeMo.\n\nFor it, the first step is to start converting reference audio rttm(vad) time stamps to oracle manifest file. This manifest file would be sent to our speaker diarizer to extract embeddings.\n\nThis is just an argument in our config, and system automatically computes oracle manifest based on the rttms provided through input manifest file",
"_____no_output_____"
],
[
"Our config file is based on [hydra](https://hydra.cc/docs/intro/). \nWith hydra config, we ask users to provide values to variables that were filled with **???**, these are mandatory fields and scripts expect them for successful runs. And notice some variables were filled with **null** are optional variables. Those could be provided if needed but are not mandatory.",
"_____no_output_____"
]
],
[
[
"from omegaconf import OmegaConf\nMODEL_CONFIG = os.path.join(data_dir,'offline_diarization.yaml')\nif not os.path.exists(MODEL_CONFIG):\n config_url = \"https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization.yaml\"\n MODEL_CONFIG = wget.download(config_url,data_dir)\n\nconfig = OmegaConf.load(MODEL_CONFIG)\nprint(OmegaConf.to_yaml(config))",
"_____no_output_____"
]
],
[
[
"Now we can perform speaker diarization based on timestamps generated from ground truth rttms rather than generating through VAD",
"_____no_output_____"
]
],
[
[
"pretrained_speaker_model='ecapa_tdnn'\nconfig.diarizer.manifest_filepath = 'data/input_manifest.json'\nconfig.diarizer.out_dir = output_dir #Directory to store intermediate files and prediction outputs\n\nconfig.diarizer.speaker_embeddings.model_path = pretrained_speaker_model\nconfig.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5\nconfig.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75\nconfig.diarizer.oracle_vad = True # ----> ORACLE VAD \nconfig.diarizer.clustering.parameters.oracle_num_speakers = True",
"_____no_output_____"
],
[
"from nemo.collections.asr.models import ClusteringDiarizer\noracle_model = ClusteringDiarizer(cfg=config)",
"_____no_output_____"
],
[
"# And lets diarize\noracle_model.diarize()",
"_____no_output_____"
]
],
[
[
"With DER 0 -> means it clustered speaker embeddings correctly. Let's view ",
"_____no_output_____"
]
],
[
[
"!cat {output_dir}/pred_rttms/an4_diarize_test.rttm",
"_____no_output_____"
],
[
"pred_labels = rttm_to_labels(output_dir+'/pred_rttms/an4_diarize_test.rttm')\nhypothesis = labels_to_pyannote_object(pred_labels)\nhypothesis",
"_____no_output_____"
],
[
"reference",
"_____no_output_____"
]
],
[
[
"# VAD DIARIZATION",
"_____no_output_____"
],
[
"In this method we compute VAD time stamps using NeMo VAD model on input manifest file and then use these time stamps of speech label to find speaker embeddings followed by clustering them into num of speakers",
"_____no_output_____"
],
[
"Before we proceed let's look at the speaker diarization config, which we would be depending up on for vad computation\nand speaker embedding extraction",
"_____no_output_____"
]
],
[
[
"print(OmegaConf.to_yaml(config))",
"_____no_output_____"
]
],
[
[
"As can be seen most of the variables in config are self explanatory \nwith VAD variables under vad section and speaker related variables under speaker embeddings section. ",
"_____no_output_____"
],
[
"To perform VAD based diarization we can ignore `oracle_vad_manifest` in `speaker_embeddings` section for now and needs to fill up the rest. We also needs to provide pretrained `model_path` of vad and speaker embeddings .nemo models",
"_____no_output_____"
]
],
[
[
"pretrained_vad = 'vad_marblenet'\npretrained_speaker_model = 'ecapa_tdnn'",
"_____no_output_____"
]
],
[
[
"Note in this tutorial, we use the VAD model MarbleNet-3x2 introduced and published in [ICASSP MarbleNet](https://arxiv.org/pdf/2010.13886.pdf). You might need to tune on dev set similar to your dataset if you would like to improve the performance.\n\nAnd the speakerNet-M-Diarization model achieves 7.3% confusion error rate on CH109 set with oracle vad. This model is trained on voxceleb1, voxceleb2, Fisher, SwitchBoard datasets. So for more improved performance specific to your dataset, finetune speaker verification model with a devset similar to your test set.",
"_____no_output_____"
]
],
[
[
"output_dir = os.path.join(ROOT,'outputs')\nconfig.diarizer.manifest_filepath = 'data/input_manifest.json'\nconfig.diarizer.out_dir = output_dir #Directory to store intermediate files and prediction outputs\n\nconfig.diarizer.speaker_embeddings.model_path = pretrained_speaker_model\nconfig.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5\nconfig.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75\nconfig.diarizer.oracle_vad = False # compute VAD provided with model_path to vad config\nconfig.diarizer.clustering.parameters.oracle_num_speakers=True\n\n#Here we use our inhouse pretrained NeMo VAD \nconfig.diarizer.vad.model_path = pretrained_vad\nconfig.diarizer.vad.window_length_in_sec = 0.15\nconfig.diarizer.vad.shift_length_in_sec = 0.01\nconfig.diarizer.vad.parameters.onset = 0.8 \nconfig.diarizer.vad.parameters.offset = 0.6\nconfig.diarizer.vad.parameters.min_duration_on = 0.1\nconfig.diarizer.vad.parameters.min_duration_off = 0.4",
"_____no_output_____"
]
],
[
[
"Now that we passed all the variables we needed lets initialize the clustering model with above config",
"_____no_output_____"
]
],
[
[
"from nemo.collections.asr.models import ClusteringDiarizer\nsd_model = ClusteringDiarizer(cfg=config)",
"_____no_output_____"
]
],
[
[
"And Diarize with single line of code",
"_____no_output_____"
]
],
[
[
"sd_model.diarize()",
"_____no_output_____"
]
],
[
[
"As can be seen, we first performed VAD, then with the timestamps created in `{output_dir}/vad_outputs` by VAD we calculated speaker embeddings (`{output_dir}/speaker_outputs/embeddings/`) which are then clustered using spectral clustering. ",
"_____no_output_____"
],
[
"To generate VAD predicted time step. We perform VAD inference to have frame level prediction → (optional: use decision smoothing) → given `threshold`, write speech segment to RTTM-like time stamps manifest.\n\nwe use vad decision smoothing (87.5% overlap median) as described [here](https://github.com/NVIDIA/NeMo/blob/stable/nemo/collections/asr/parts/utils/vad_utils.py)\n\nyou can also tune the threshold on your dev set. Use this provided [script](https://github.com/NVIDIA/NeMo/blob/stable/scripts/voice_activity_detection/vad_tune_threshold.py)",
"_____no_output_____"
]
],
[
[
"# VAD predicted time stamps\n# you can also use single threshold(=onset=offset) for binarization and plot here\nfrom nemo.collections.asr.parts.utils.vad_utils import plot\nplot(\n an4_audio,\n 'outputs/vad_outputs/overlap_smoothing_output_median_0.875/an4_diarize_test.median', \n an4_rttm,\n per_args = config.diarizer.vad.parameters, #threshold\n ) \n\nprint(f\"postprocessing_params: {config.diarizer.vad.parameters}\")",
"_____no_output_____"
]
],
[
[
"Predicted outputs are written to `output_dir/pred_rttms` and see how we predicted along with VAD prediction",
"_____no_output_____"
]
],
[
[
"!cat outputs/pred_rttms/an4_diarize_test.rttm",
"_____no_output_____"
],
[
"pred_labels = rttm_to_labels('outputs/pred_rttms/an4_diarize_test.rttm')\nhypothesis = labels_to_pyannote_object(pred_labels)\nhypothesis",
"_____no_output_____"
],
[
"reference",
"_____no_output_____"
]
],
[
[
"# Storing and Restoring models",
"_____no_output_____"
],
[
"Now we can save the whole config and model parameters in a single .nemo and restore from it anytime.",
"_____no_output_____"
]
],
[
[
"oracle_model.save_to(os.path.join(output_dir,'diarize.nemo'))",
"_____no_output_____"
]
],
[
[
"Restore from saved model",
"_____no_output_____"
]
],
[
[
"del oracle_model\nimport nemo.collections.asr as nemo_asr\nrestored_model = nemo_asr.models.ClusteringDiarizer.restore_from(os.path.join(output_dir,'diarize.nemo'))",
"_____no_output_____"
]
],
[
[
"# ADD ON - ASR ",
"_____no_output_____"
]
],
[
[
"IPython.display.Audio(an4_audio)",
"_____no_output_____"
],
[
"quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name=\"QuartzNet15x5Base-En\")\nfor fname, transcription in zip([an4_audio], quartznet.transcribe(paths2audio_files=[an4_audio])):\n print(f\"Audio in {fname} was recognized as:\\n{transcription}\")",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cbb9ec33597158172a86af5610103faf96476791
| 72,045 |
ipynb
|
Jupyter Notebook
|
Chapter04/Activity4.01/Activity04_01.ipynb
|
PacktWorkshops/The-Reinforcement-Learning-Workshop
|
04e8c72bc9e46d66846b748c074b26a1b724fae0
|
[
"MIT"
] | 24 |
2020-04-08T01:57:02.000Z
|
2022-03-24T18:36:14.000Z
|
Chapter04/Activity4.01/Activity04_01.ipynb
|
PacktWorkshops/The-Reinforcement-Learning-Workshop
|
04e8c72bc9e46d66846b748c074b26a1b724fae0
|
[
"MIT"
] | 10 |
2020-03-24T19:49:14.000Z
|
2022-03-12T00:33:01.000Z
|
Chapter04/Activity4.01/Activity04_01.ipynb
|
PacktWorkshops/The-Reinforcement-Learning-Workshop
|
04e8c72bc9e46d66846b748c074b26a1b724fae0
|
[
"MIT"
] | 32 |
2020-04-08T12:07:11.000Z
|
2022-03-25T15:49:10.000Z
| 45.626979 | 294 | 0.494469 |
[
[
[
"from baselines.ppo2.ppo2 import learn\nfrom baselines.ppo2 import defaults\nfrom baselines.common.vec_env import VecEnv, VecFrameStack\nfrom baselines.common.cmd_util import make_vec_env, make_env\nfrom baselines.common.models import register\nimport tensorflow as tf",
"_____no_output_____"
],
[
"@register(\"custom_cnn\")\ndef custom_cnn():\n def network_fn(input_shape, **conv_kwargs):\n \"\"\"\n Custom CNN\n \"\"\"\n print('input shape is {}'.format(input_shape))\n x_input = tf.keras.Input(shape=input_shape, dtype=tf.uint8)\n h = x_input\n h = tf.cast(h, tf.float32) / 255.\n \n h = tf.keras.layers.Conv2D(filters=32, kernel_size=8, strides=4, padding='valid',\n data_format='channels_last', activation='relu')(h)\n h2 = tf.keras.layers.Conv2D(filters=64, kernel_size=4, strides=2, padding='valid',\n data_format='channels_last', activation='relu')(h)\n h3 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='valid',\n data_format='channels_last', activation='relu')(h2) \n h3 = tf.keras.layers.Flatten()(h3)\n h3 = tf.keras.layers.Dense(units=512, name='fc1', activation='relu')(h3)\n \n network = tf.keras.Model(inputs=[x_input], outputs=[h3])\n network.summary()\n return network\n\n return network_fn",
"_____no_output_____"
],
[
"def build_env(env_id, env_type):\n\n if env_type in {'atari', 'retro'}:\n env = make_vec_env(env_id, env_type, 1, None, gamestate=None, reward_scale=1.0)\n env = VecFrameStack(env, 4)\n\n else:\n env = make_vec_env(env_id, env_type, 1, None, reward_scale=1.0, flatten_dict_observations=True)\n\n return env",
"_____no_output_____"
],
[
"env_id = 'PongNoFrameskip-v0'\nenv_type = 'atari'\nprint(\"Env type = \", env_type)\n\nenv = build_env(env_id, env_type)\n\nmodel = learn(network=\"custom_cnn\", env=env, total_timesteps=1e4)",
"Env type = atari\nLogging to /tmp/openai-2020-05-11-16-19-42-770612\ninput shape is (84, 84, 4)\nModel: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) [(None, 84, 84, 4)] 0 \n_________________________________________________________________\ntf_op_layer_Cast (TensorFlow [(None, 84, 84, 4)] 0 \n_________________________________________________________________\ntf_op_layer_truediv (TensorF [(None, 84, 84, 4)] 0 \n_________________________________________________________________\nconv2d (Conv2D) (None, 20, 20, 32) 8224 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 9, 9, 64) 32832 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 7, 7, 64) 36928 \n_________________________________________________________________\nflatten (Flatten) (None, 3136) 0 \n_________________________________________________________________\nfc1 (Dense) (None, 512) 1606144 \n=================================================================\nTotal params: 1,684,128\nTrainable params: 1,684,128\nNon-trainable params: 0\n_________________________________________________________________\n--------------------------------------------\n| eplenmean | 1e+03 |\n| eprewmean | -20 |\n| fps | 213 |\n| loss/approxkl | 0.00012817292 |\n| loss/clipfrac | 0.0 |\n| loss/policy_entropy | 1.7916294 |\n| loss/policy_loss | -0.00050599687 |\n| loss/value_loss | 0.06880974 |\n| misc/explained_variance | 0.000675 |\n| misc/nupdates | 1 |\n| misc/serial_timesteps | 2048 |\n| misc/time_elapsed | 9.6 |\n| misc/total_timesteps | 2048 |\n--------------------------------------------\n"
],
[
"obs = env.reset()\nif not isinstance(env, VecEnv):\n obs = np.expand_dims(np.array(obs), axis=0)\n\nepisode_rew = 0\n \nwhile True:\n actions, _, state, _ = model.step(obs)\n obs, reward, done, info = env.step(actions.numpy())\n if not isinstance(env, VecEnv):\n obs = np.expand_dims(np.array(obs), axis=0)\n env.render()\n print(\"Reward = \", reward)\n episode_rew += reward\n \n if done:\n print('Episode Reward = {}'.format(episode_rew))\n break\n\nenv.close()",
"Reward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = 
[0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = 
[0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [-1.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\nReward = [0.]\n"
],
[
"!python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v0 --num_timesteps=1e4 --save_path=./models/Pong_20M_ppo2 --log_path=./logs/Pong/",
"Logging to ./logs/Pong/\nenv_type: atari\nTraining ppo2 on atari:PongNoFrameskip-v0 with arguments \n{'nsteps': 128, 'nminibatches': 4, 'lam': 0.95, 'gamma': 0.99, 'noptepochs': 4, 'log_interval': 1, 'ent_coef': 0.01, 'lr': <function atari.<locals>.<lambda> at 0x7f5e0ec33950>, 'cliprange': 0.1, 'network': 'cnn'}\ninput shape is (84, 84, 4)\n2020-05-11 16:18:14.491437: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n2020-05-11 16:18:14.508280: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.508628: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1555] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: GeForce GTX 1050 Ti computeCapability: 6.1\ncoreClock: 1.4175GHz coreCount: 6 deviceMemorySize: 3.94GiB deviceMemoryBandwidth: 104.43GiB/s\n2020-05-11 16:18:14.508836: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:18:14.510313: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:18:14.511828: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-05-11 16:18:14.512132: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-05-11 16:18:14.513568: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-05-11 16:18:14.514753: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-05-11 16:18:14.520089: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:18:14.520719: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.521119: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.521378: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1697] Adding visible gpu devices: 0\n2020-05-11 16:18:14.521710: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2020-05-11 16:18:14.545966: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3000000000 Hz\n2020-05-11 16:18:14.546276: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x56164e81a380 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices:\n2020-05-11 16:18:14.546323: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n2020-05-11 16:18:14.546585: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.546950: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1555] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: GeForce GTX 1050 Ti computeCapability: 6.1\ncoreClock: 1.4175GHz coreCount: 6 deviceMemorySize: 3.94GiB deviceMemoryBandwidth: 104.43GiB/s\n2020-05-11 16:18:14.547185: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:18:14.547209: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:18:14.547224: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-05-11 16:18:14.547239: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-05-11 16:18:14.547253: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-05-11 16:18:14.547267: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-05-11 16:18:14.547283: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:18:14.547413: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.547771: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.547976: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1697] Adding visible gpu devices: 0\n2020-05-11 16:18:14.548017: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:18:14.590032: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1096] Device interconnect StreamExecutor with strength 1 edge matrix:\n2020-05-11 16:18:14.590059: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1102] 0 \n2020-05-11 16:18:14.590066: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] 0: N \n2020-05-11 16:18:14.590247: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.590574: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.590843: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:14.591095: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1241] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 3131 MB memory) -> physical 
GPU (device: 0, name: GeForce GTX 1050 Ti, pci bus id: 0000:01:00.0, compute capability: 6.1)\n2020-05-11 16:18:14.592840: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x561651657520 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n2020-05-11 16:18:14.593060: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): GeForce GTX 1050 Ti, Compute Capability 6.1\nStepping environment...\n2020-05-11 16:18:16.005779: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:18:16.165020: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:18:18.085070: W tensorflow/python/util/util.cc:319] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n-------------------------------------------\n| eplenmean | nan |\n| eprewmean | nan |\n| fps | 144 |\n| loss/approxkl | 0.0033949534 |\n| loss/clipfrac | 0.21191406 |\n| loss/policy_entropy | 1.7889717 |\n| loss/policy_loss | -0.0017111386 |\n| loss/value_loss | 2.8520613 |\n| misc/explained_variance | -0.156 |\n| misc/nupdates | 1 |\n| misc/serial_timesteps | 128 |\n| misc/time_elapsed | 3.55 |\n| misc/total_timesteps | 512 |\n-------------------------------------------\nStepping environment...\n--------------------------------------------\n| eplenmean | nan |\n| eprewmean | nan |\n| fps | 509 |\n| loss/approxkl | 0.000985355 |\n| loss/clipfrac | 0.0390625 |\n| loss/policy_entropy | 1.7866873 |\n| loss/policy_loss | -0.00015214668 |\n| loss/value_loss | 0.14466678 |\n| misc/explained_variance | -0.00307 |\n| misc/nupdates | 2 |\n| misc/serial_timesteps | 256 |\n| misc/time_elapsed | 4.56 |\n| misc/total_timesteps | 1024 |\n--------------------------------------------\nStepping environment...\n"
],
[
"!python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v0 --num_timesteps=0 --load_path=./models/Pong_20M_ppo2 --play",
"Logging to /tmp/openai-2020-05-11-16-18-47-074034\nenv_type: atari\nTraining ppo2 on atari:PongNoFrameskip-v0 with arguments \n{'nsteps': 128, 'nminibatches': 4, 'lam': 0.95, 'gamma': 0.99, 'noptepochs': 4, 'log_interval': 1, 'ent_coef': 0.01, 'lr': <function atari.<locals>.<lambda> at 0x7f26893c39e0>, 'cliprange': 0.1, 'load_path': './models/Pong_20M_ppo2', 'network': 'cnn'}\ninput shape is (84, 84, 4)\n2020-05-11 16:18:49.304445: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n2020-05-11 16:18:49.320904: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.321414: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1555] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: GeForce GTX 1050 Ti computeCapability: 6.1\ncoreClock: 1.4175GHz coreCount: 6 deviceMemorySize: 3.94GiB deviceMemoryBandwidth: 104.43GiB/s\n2020-05-11 16:18:49.321570: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:18:49.322987: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:18:49.324291: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-05-11 16:18:49.324509: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-05-11 16:18:49.325942: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-05-11 16:18:49.326768: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-05-11 16:18:49.329802: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:18:49.329992: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.330588: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.330842: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1697] Adding visible gpu devices: 0\n2020-05-11 16:18:49.331101: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2020-05-11 16:18:49.353962: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3000000000 Hz\n2020-05-11 16:18:49.354406: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55819a43cb30 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices:\n2020-05-11 16:18:49.354430: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n2020-05-11 16:18:49.354631: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.355148: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1555] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: GeForce GTX 1050 Ti computeCapability: 6.1\ncoreClock: 1.4175GHz coreCount: 6 deviceMemorySize: 3.94GiB deviceMemoryBandwidth: 104.43GiB/s\n2020-05-11 16:18:49.355216: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:18:49.355236: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:18:49.355251: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-05-11 16:18:49.355266: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-05-11 16:18:49.355281: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-05-11 16:18:49.355295: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-05-11 16:18:49.355310: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:18:49.355399: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.355954: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.356290: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1697] Adding visible gpu devices: 0\n2020-05-11 16:18:49.356329: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:18:49.407762: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1096] Device interconnect StreamExecutor with strength 1 edge matrix:\n2020-05-11 16:18:49.407792: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1102] 0 \n2020-05-11 16:18:49.407800: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] 0: N \n2020-05-11 16:18:49.407962: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.408267: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.408516: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:18:49.408744: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1241] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 3131 MB memory) -> physical 
GPU (device: 0, name: GeForce GTX 1050 Ti, pci bus id: 0000:01:00.0, compute capability: 6.1)\n2020-05-11 16:18:49.410237: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55819b2b50f0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n2020-05-11 16:18:49.410264: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): GeForce GTX 1050 Ti, Compute Capability 6.1\nRunning trained model\n2020-05-11 16:18:51.005881: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:18:51.166422: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\nepisode_rew=-21.0\nepisode_rew=-20.0\nepisode_rew=-20.0\nepisode_rew=-19.0\n"
],
[
"!wget -O pong_20M_ppo2.tar.gz https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/blob/master/Chapter04/pong_20M_ppo2.tar.gz?raw=true",
"--2020-05-11 16:19:08-- https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/blob/master/Chapter04/pong_20M_ppo2.tar.gz?raw=true\nResolving github.com (github.com)... 140.82.118.3\nConnecting to github.com (github.com)|140.82.118.3|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/raw/master/Chapter04/pong_20M_ppo2.tar.gz [following]\n--2020-05-11 16:19:09-- https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/raw/master/Chapter04/pong_20M_ppo2.tar.gz\nReusing existing connection to github.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/master/Chapter04/pong_20M_ppo2.tar.gz [following]\n--2020-05-11 16:19:09-- https://raw.githubusercontent.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/master/Chapter04/pong_20M_ppo2.tar.gz\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.192.133, 151.101.128.133, 151.101.64.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.192.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 18284569 (17M) [application/octet-stream]\nSaving to: ‘pong_20M_ppo2.tar.gz’\n\npong_20M_ppo2.tar.g 100%[===================>] 17,44M 15,1MB/s in 1,2s \n\n2020-05-11 16:19:11 (15,1 MB/s) - ‘pong_20M_ppo2.tar.gz’ saved [18284569/18284569]\n\n"
],
[
"!tar xvzf pong_20M_ppo2.tar.gz",
"pong_20M_ppo2/ckpt-1.data-00000-of-00001\npong_20M_ppo2/ckpt-1.index\npong_20M_ppo2/\npong_20M_ppo2/checkpoint\n"
],
[
"!python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v0 --num_timesteps=0 --load_path=./pong_20M_ppo2 --play",
"Logging to /tmp/openai-2020-05-11-16-19-18-194254\nenv_type: atari\nTraining ppo2 on atari:PongNoFrameskip-v0 with arguments \n{'nsteps': 128, 'nminibatches': 4, 'lam': 0.95, 'gamma': 0.99, 'noptepochs': 4, 'log_interval': 1, 'ent_coef': 0.01, 'lr': <function atari.<locals>.<lambda> at 0x7f1c91994950>, 'cliprange': 0.1, 'load_path': './pong_20M_ppo2', 'network': 'cnn'}\ninput shape is (84, 84, 4)\n2020-05-11 16:19:20.445156: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n2020-05-11 16:19:20.460675: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.460956: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1555] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: GeForce GTX 1050 Ti computeCapability: 6.1\ncoreClock: 1.4175GHz coreCount: 6 deviceMemorySize: 3.94GiB deviceMemoryBandwidth: 104.43GiB/s\n2020-05-11 16:19:20.461143: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:19:20.462636: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:19:20.463881: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-05-11 16:19:20.464122: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-05-11 16:19:20.465510: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-05-11 16:19:20.466348: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-05-11 16:19:20.469364: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:19:20.469530: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.469859: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.470079: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1697] Adding visible gpu devices: 0\n2020-05-11 16:19:20.470393: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2020-05-11 16:19:20.493956: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3000000000 Hz\n2020-05-11 16:19:20.494186: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55bdb90ac750 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices:\n2020-05-11 16:19:20.494209: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n2020-05-11 16:19:20.494427: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.494694: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1555] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: GeForce GTX 1050 Ti computeCapability: 6.1\ncoreClock: 1.4175GHz coreCount: 6 deviceMemorySize: 3.94GiB deviceMemoryBandwidth: 104.43GiB/s\n2020-05-11 16:19:20.494767: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:19:20.494789: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:19:20.494808: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-05-11 16:19:20.494828: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-05-11 16:19:20.494847: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-05-11 16:19:20.494866: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-05-11 16:19:20.494886: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-05-11 16:19:20.494969: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.495259: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.495473: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1697] Adding visible gpu devices: 0\n2020-05-11 16:19:20.495522: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-05-11 16:19:20.537306: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1096] Device interconnect StreamExecutor with strength 1 edge matrix:\n2020-05-11 16:19:20.537336: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1102] 0 \n2020-05-11 16:19:20.537343: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] 0: N \n2020-05-11 16:19:20.537520: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.537848: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.538161: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-05-11 16:19:20.538422: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1241] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 3131 MB memory) -> physical 
GPU (device: 0, name: GeForce GTX 1050 Ti, pci bus id: 0000:01:00.0, compute capability: 6.1)\n2020-05-11 16:19:20.540099: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55bdb9f09850 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n2020-05-11 16:19:20.540149: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): GeForce GTX 1050 Ti, Compute Capability 6.1\nRunning trained model\n2020-05-11 16:19:22.014722: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-05-11 16:19:22.172007: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbb9fbcf10ffd7c5a43e3f8c63d5b80e8e41588a
| 5,293 |
ipynb
|
Jupyter Notebook
|
notebooks/machine_learning/raw/ex2.ipynb
|
guesswhohaha/learntools
|
c1bd607ade5227f8c8977ff05bf9d04d0a8b7732
|
[
"Apache-2.0"
] | 359 |
2018-03-23T15:57:52.000Z
|
2022-03-25T21:56:28.000Z
|
notebooks/machine_learning/raw/ex2.ipynb
|
guesswhohaha/learntools
|
c1bd607ade5227f8c8977ff05bf9d04d0a8b7732
|
[
"Apache-2.0"
] | 84 |
2018-06-14T00:06:52.000Z
|
2022-02-08T17:25:54.000Z
|
notebooks/machine_learning/raw/ex2.ipynb
|
guesswhohaha/learntools
|
c1bd607ade5227f8c8977ff05bf9d04d0a8b7732
|
[
"Apache-2.0"
] | 213 |
2018-05-02T19:06:31.000Z
|
2022-03-20T15:40:34.000Z
| 26.20297 | 183 | 0.581901 |
[
[
[
"This exercise will test your ability to read a data file and understand statistics about the data.\n\nIn later exercises, you will apply techniques to filter the data, build a machine learning model, and iteratively improve your model.\n\nThe course examples use data from Melbourne. To ensure you can apply these techniques on your own, you will have to apply them to a new dataset (with house prices from Iowa).\n\n# Exercises\n\nRun the following cell to set up code-checking, which will verify your work as you go.",
"_____no_output_____"
]
],
[
[
"# Set up code checking\nfrom learntools.core import binder\nbinder.bind(globals())\nfrom learntools.machine_learning.ex2 import *\nprint(\"Setup Complete\")",
"_____no_output_____"
]
],
[
[
"## Step 1: Loading Data\nRead the Iowa data file into a Pandas DataFrame called `home_data`.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\n# Path of the file to read\niowa_file_path = '../input/home-data-for-ml-course/train.csv'\n\n# Fill in the line below to read the file into a variable home_data\nhome_data = ____\n\n# Check your answer\nstep_1.check()",
"_____no_output_____"
],
[
"#%%RM_IF(PROD)%%\n\n# Path of the file to read\niowa_file_path = '../input/home-data-for-ml-course/train.csv'\n\n# Fill in the line below to read the file into a variable home_data\nhome_data = 0\n\n# Call line below with no argument to check that you've loaded the data correctly\nstep_1.assert_check_failed()",
"_____no_output_____"
],
[
"#%%RM_IF(PROD)%%\n\n# Fill in the line below to read the file into a variable home_data\nhome_data = pd.DataFrame()\n\n# Call line below with no argument to check that you've loaded the data correctly\nstep_1.assert_check_failed()\n\nhome_data = pd.read_csv(iowa_file_path)\nstep_1.assert_check_passed()",
"_____no_output_____"
],
[
"# Lines below will give you a hint or solution code\n#_COMMENT_IF(PROD)_\nstep_1.hint()\n#_COMMENT_IF(PROD)_\nstep_1.solution()",
"_____no_output_____"
]
],
[
[
"## Step 2: Review The Data\nUse the command you learned to view summary statistics of the data. Then fill in variables to answer the following questions",
"_____no_output_____"
]
],
[
[
"# Print summary statistics in next line\n____",
"_____no_output_____"
],
[
"# What is the average lot size (rounded to nearest integer)?\navg_lot_size = ____\n\n# As of today, how old is the newest home (current year - the date in which it was built)\nnewest_home_age = ____\n\n# Check your answers\nstep_2.check()",
"_____no_output_____"
],
[
"#step_2.hint()\n#step_2.solution()",
"_____no_output_____"
]
],
[
[
"## Think About Your Data\n\nThe newest house in your data isn't that new. A few potential explanations for this:\n1. They haven't built new houses where this data was collected.\n1. The data was collected a long time ago. Houses built after the data publication wouldn't show up.\n\nIf the reason is explanation #1 above, does that affect your trust in the model you build with this data? What about if it is reason #2?\n\nHow could you dig into the data to see which explanation is more plausible?\n\nCheck out this **[discussion thread](https://www.kaggle.com/learn-forum/60581)** to see what others think or to add your ideas.\n\n#$KEEP_GOING$",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
cbb9fcf8ea50980f0ba45d06e08568620ecb3476
| 8,308 |
ipynb
|
Jupyter Notebook
|
hrbook/files/models/random_forest.ipynb
|
patilpushkarp/hra_book
|
4a9a6cc6d6710213da59c09dc5160ebe31d4f26f
|
[
"MIT"
] | null | null | null |
hrbook/files/models/random_forest.ipynb
|
patilpushkarp/hra_book
|
4a9a6cc6d6710213da59c09dc5160ebe31d4f26f
|
[
"MIT"
] | null | null | null |
hrbook/files/models/random_forest.ipynb
|
patilpushkarp/hra_book
|
4a9a6cc6d6710213da59c09dc5160ebe31d4f26f
|
[
"MIT"
] | null | null | null | 26.208202 | 204 | 0.501324 |
[
[
[
"# Random Forest Classifier",
"_____no_output_____"
]
],
[
[
"# Load the packages\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report",
"_____no_output_____"
],
[
"# Load the data\ntrain_df = pd.read_csv('./../../../../data/train/train.csv')\ntest_df = pd.read_csv('./../../../../data/test/test.csv')",
"_____no_output_____"
],
[
"# Load the feature selection result\nfeature_selector = pd.read_csv('./../../../../data/feature_ranking.csv')\nfeature_selector.set_index('Unnamed: 0', inplace=True)",
"_____no_output_____"
],
[
"# Separate feature space from target variable\ny_train = train_df['Attrition']\nX_train = train_df.drop('Attrition', axis=1)\ny_test = test_df['Attrition']\nX_test = test_df.drop('Attrition', axis=1)",
"_____no_output_____"
]
],
[
[
"We will be running models for different set of features and evaluate their performances. We start with complete dataset and then start with meaximum feature score of 8 to 5.",
"_____no_output_____"
]
],
[
[
"# Declare the model paramters for searching\nparam_grid = dict(\n n_estimators = [50, 100, 200, 400],\n max_depth = [10, 20, 40],\n min_samples_split = [2, 5, 10]\n)",
"_____no_output_____"
],
[
"# Declare and train the model\nrf_clf = RandomForestClassifier(class_weight=\"balanced\", max_features=None, bootstrap=False)\nrf = GridSearchCV(estimator=rf_clf, param_grid=param_grid, scoring='f1', n_jobs=-1)",
"_____no_output_____"
]
],
[
[
"## Complete data",
"_____no_output_____"
]
],
[
[
"# Train the model\nrf.fit(X_train, y_train)",
"_____no_output_____"
],
[
"# Get the parameters for the best model\nrf.best_estimator_",
"_____no_output_____"
],
[
"# Predict using model\ny_pred = rf.predict(X_test)",
"_____no_output_____"
],
[
"# Make the classification report\nprint(classification_report(y_test, y_pred))",
" precision recall f1-score support\n\n False 0.88 0.89 0.88 255\n True 0.24 0.23 0.23 39\n\n accuracy 0.80 294\n macro avg 0.56 0.56 0.56 294\nweighted avg 0.80 0.80 0.80 294\n\n"
]
],
[
[
"The results not better than that of logistic regression. The precision, recall and f1 of attrition is not at all good.",
"_____no_output_____"
],
[
"## Feature score of 8",
"_____no_output_____"
]
],
[
[
"# Create the new dataset\n\n# Get features with feature score of 8\nfeatures = feature_selector[feature_selector['Total']==8].index.tolist()\nX_train_8 = X_train.loc[:, features]\nX_test_8 = X_test.loc[:, features]",
"_____no_output_____"
],
[
"# Train the model\nrf.fit(X_train_8, y_train)",
"_____no_output_____"
],
[
"# Predict with model\ny_pred_8 = rf.predict(X_test_8)",
"_____no_output_____"
],
[
"# Make the report\nprint(classification_report(y_test, y_pred))",
" precision recall f1-score support\n\n False 0.88 0.89 0.88 255\n True 0.24 0.23 0.23 39\n\n accuracy 0.80 294\n macro avg 0.56 0.56 0.56 294\nweighted avg 0.80 0.80 0.80 294\n\n"
]
],
[
[
"There is no improvement in the result. But since this model uses less number of features, it better to use it in production in order to improve the retraining and inferencing with huge load of data.",
"_____no_output_____"
],
[
"Since the least number of features that could be used gave the same performance as all the features, it is better to skip the other scores since the chance of improvement in result is quite less.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cbba0090adc6442f296b121579a3ba0919f75c33
| 81,739 |
ipynb
|
Jupyter Notebook
|
design_by_reinforcement_learning_and_finite_element_analysis/Bionic_partition_v03_2.ipynb
|
gigatskhondia/engineering_design_by_artificial_intelligence
|
d3bf4a015e98ae9f66372a9c4677d06d9b0a3a2d
|
[
"MIT"
] | 6 |
2021-01-24T23:58:50.000Z
|
2022-03-15T15:06:08.000Z
|
design_by_reinforcement_learning_and_finite_element_analysis/Bionic_partition_v03_2.ipynb
|
gigatskhondia/engineering_design_by_artificial_intelligence
|
d3bf4a015e98ae9f66372a9c4677d06d9b0a3a2d
|
[
"MIT"
] | null | null | null |
design_by_reinforcement_learning_and_finite_element_analysis/Bionic_partition_v03_2.ipynb
|
gigatskhondia/engineering_design_by_artificial_intelligence
|
d3bf4a015e98ae9f66372a9c4677d06d9b0a3a2d
|
[
"MIT"
] | 2 |
2021-01-06T01:59:21.000Z
|
2022-02-10T13:34:51.000Z
| 54.239549 | 22,372 | 0.574047 |
[
[
[
"import numpy as np\nimport math\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import fully_connected\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport heapq\nfrom mpl_toolkits.mplot3d import Axes3D",
"_____no_output_____"
],
[
"tf.VERSION",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Finite Element Model of the Space Frame Element",
"_____no_output_____"
]
],
[
[
"def PlaneTrussElementLength(x1,y1,z1,x2,y2,z2):\n return math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1)+(z2-z1)*(z2-z1))",
"_____no_output_____"
],
[
"def SpaceFrameElementStiffness(E,G,A,Iy,Iz,J,x1,y1,z1,x2,y2,z2):\n L = PlaneTrussElementLength(x1,y1,z1,x2,y2,z2)\n w1 = E*A/L\n w2 = 12*E*Iz/(L*L*L)\n w3 = 6*E*Iz/(L*L)\n w4 = 4*E*Iz/L\n w5 = 2*E*Iz/L\n w6 = 12*E*Iy/(L*L*L)\n w7 = 6*E*Iy/(L*L)\n w8 = 4*E*Iy/L\n w9 = 2*E*Iy/L\n w10 = G*J/L\n \n kprime = np.array([[w1, 0, 0, 0, 0, 0, -w1, 0, 0, 0, 0, 0],\n [0, w2, 0, 0, 0, w3, 0, -w2, 0, 0, 0, w3], \n [0, 0, w6, 0, -w7, 0, 0, 0, -w6, 0, -w7, 0],\n [0, 0, 0, w10, 0, 0, 0, 0, 0, -w10, 0, 0],\n [0, 0, -w7, 0, w8, 0, 0, 0, w7, 0, w9, 0],\n [0, w3, 0, 0, 0, w4, 0, -w3, 0, 0, 0, w5],\n [-w1, 0, 0, 0, 0, 0, w1, 0, 0, 0, 0, 0],\n [0, -w2, 0, 0, 0, -w3, 0, w2, 0, 0, 0, -w3],\n [0, 0, -w6, 0, w7, 0, 0, 0, w6, 0, w7, 0],\n [0, 0, 0, -w10, 0, 0, 0, 0, 0, w10, 0, 0],\n [0, 0, -w7, 0, w9, 0, 0, 0, w7, 0, w8, 0],\n [0, w3, 0, 0, 0, w5, 0, -w3, 0, 0, 0, w4]]) \n \n \n if x1 == x2 and y1 == y2:\n if z2 > z1:\n Lambda = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])\n else:\n Lambda = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])\n else:\n CXx = (x2-x1)/L\n CYx = (y2-y1)/L\n CZx = (z2-z1)/L\n D = math.sqrt(CXx*CXx + CYx*CYx)\n CXy = -CYx/D\n CYy = CXx/D\n CZy = 0\n CXz = -CXx*CZx/D\n CYz = -CYx*CZx/D\n CZz = D\n Lambda = np.array([[CXx, CYx, CZx], [CXy, CYy, CZy], [CXz, CYz, CZz]])\n \n \n R = np.array([np.concatenate((np.concatenate((Lambda,np.zeros((3,3)),np.zeros((3,3)),np.zeros((3,3))),axis=1),\n np.concatenate((np.zeros((3,3)), Lambda, np.zeros((3,3)), np.zeros((3,3))),axis=1) ,\n np.concatenate((np.zeros((3,3)), np.zeros((3,3)), Lambda, np.zeros((3,3))),axis=1), \n np.concatenate((np.zeros((3,3)), np.zeros((3,3)), np.zeros((3,3)), Lambda),axis=1)))])[0]\n return np.dot(np.dot(R.T,kprime),R) ",
"_____no_output_____"
],
[
"def SpaceFrameAssemble(K,k,i,j):\n K[6*i,6*i] = K[6*i,6*i] + k[0,0]\n K[6*i,6*i+1] = K[6*i,6*i+1] + k[0,1]\n K[6*i,6*i+2] = K[6*i,6*i+2] + k[0,2]\n K[6*i,6*i+3] = K[6*i,6*i+3] + k[0,3]\n K[6*i,6*i+4] = K[6*i,6*i+4] + k[0,4]\n K[6*i,6*i+5] = K[6*i,6*i+5] + k[0,5]\n K[6*i,6*j] = K[6*i,6*j] + k[0,6]\n K[6*i,6*j+1] = K[6*i,6*j+1] + k[0,7]\n K[6*i,6*j+2] = K[6*i,6*j+2] + k[0,8]\n K[6*i,6*j+3] = K[6*i,6*j+3] + k[0,9]\n K[6*i,6*j+4] = K[6*i,6*j+4] + k[0,10]\n K[6*i,6*j+5] = K[6*i,6*j+5] + k[0,11]\n K[6*i+1,6*i] = K[6*i+1,6*i] + k[1,0]\n K[6*i+1,6*i+1] = K[6*i+1,6*i+1] + k[1,1]\n K[6*i+1,6*i+2] = K[6*i+1,6*i+2] + k[1,2]\n K[6*i+1,6*i+3] = K[6*i+1,6*i+3] + k[1,3]\n K[6*i+1,6*i+4] = K[6*i+1,6*i+4] + k[1,4]\n K[6*i+1,6*i+5] = K[6*i+1,6*i+5] + k[1,5]\n K[6*i+1,6*j] = K[6*i+1,6*j] + k[1,6]\n K[6*i+1,6*j+1] = K[6*i+1,6*j+1] + k[1,7]\n K[6*i+1,6*j+2] = K[6*i+1,6*j+2] + k[1,8]\n K[6*i+1,6*j+3] = K[6*i+1,6*j+3] + k[1,9]\n K[6*i+1,6*j+4] = K[6*i+1,6*j+4] + k[1,10]\n K[6*i+1,6*j+5] = K[6*i+1,6*j+5] + k[1,11]\n K[6*i+2,6*i] = K[6*i+2,6*i] + k[2,0]\n K[6*i+2,6*i+1] = K[6*i+2,6*i+1] + k[2,1]\n K[6*i+2,6*i+2] = K[6*i+2,6*i+2] + k[2,2]\n K[6*i+2,6*i+3] = K[6*i+2,6*i+3] + k[2,3]\n K[6*i+2,6*i+4] = K[6*i+2,6*i+4] + k[2,4]\n K[6*i+2,6*i+5] = K[6*i+2,6*i+5] + k[2,5]\n K[6*i+2,6*j] = K[6*i+2,6*j] + k[2,6]\n K[6*i+2,6*j+1] = K[6*i+2,6*j+1] + k[2,7]\n K[6*i+2,6*j+2] = K[6*i+2,6*j+2] + k[2,8]\n K[6*i+2,6*j+3] = K[6*i+2,6*j+3] + k[2,9]\n K[6*i+2,6*j+4] = K[6*i+2,6*j+4] + k[2,10]\n K[6*i+2,6*j+5] = K[6*i+2,6*j+5] + k[2,11]\n K[6*i+3,6*i] = K[6*i+3,6*i] + k[3,0]\n K[6*i+3,6*i+1] = K[6*i+3,6*i+1] + k[3,1]\n K[6*i+3,6*i+2] = K[6*i+3,6*i+2] + k[3,2]\n K[6*i+3,6*i+3] = K[6*i+3,6*i+3] + k[3,3]\n K[6*i+3,6*i+4] = K[6*i+3,6*i+4] + k[3,4]\n K[6*i+3,6*i+5] = K[6*i+3,6*i+5] + k[3,5]\n K[6*i+3,6*j] = K[6*i+3,6*j] + k[3,6]\n K[6*i+3,6*j+1] = K[6*i+3,6*j+1] + k[3,7]\n K[6*i+3,6*j+2] = K[6*i+3,6*j+2] + k[3,8] \n K[6*i+3,6*j+3] = K[6*i+3,6*j+3] + k[3,9]\n K[6*i+3,6*j+4] = K[6*i+3,6*j+4] + k[3,10]\n K[6*i+3,6*j+5] = K[6*i+3,6*j+5] + k[3,11]\n K[6*i+4,6*i] = K[6*i+4,6*i] + k[4,0]\n K[6*i+4,6*i+1] = K[6*i+4,6*i+1] + k[4,1]\n K[6*i+4,6*i+2] = K[6*i+4,6*i+2] + k[4,2]\n K[6*i+4,6*i+3] = K[6*i+4,6*i+3] + k[4,3]\n K[6*i+4,6*i+4] = K[6*i+4,6*i+4] + k[4,4]\n K[6*i+4,6*i+5] = K[6*i+4,6*i+5] + k[4,5]\n K[6*i+4,6*j] = K[6*i+4,6*j] + k[4,6]\n K[6*i+4,6*j+1] = K[6*i+4,6*j+1] + k[4,7]\n K[6*i+4,6*j+2] = K[6*i+4,6*j+2] + k[4,8]\n K[6*i+4,6*j+3] = K[6*i+4,6*j+3] + k[4,9]\n K[6*i+4,6*j+4] = K[6*i+4,6*j+4] + k[4,10]\n K[6*i+4,6*j+5] = K[6*i+4,6*j+5] + k[4,11]\n K[6*i+5,6*i] = K[6*i+5,6*i] + k[5,0]\n K[6*i+5,6*i+1] = K[6*i+5,6*i+1] + k[5,1]\n K[6*i+5,6*i+2] = K[6*i+5,6*i+2] + k[5,2]\n K[6*i+5,6*i+3] = K[6*i+5,6*i+3] + k[5,3]\n K[6*i+5,6*i+4] = K[6*i+5,6*i+4] + k[5,4]\n K[6*i+5,6*i+5] = K[6*i+5,6*i+5] + k[5,5]\n K[6*i+5,6*j] = K[6*i+5,6*j] + k[5,6]\n K[6*i+5,6*j+1] = K[6*i+5,6*j+1] + k[5,7]\n K[6*i+5,6*j+2] = K[6*i+5,6*j+2] + k[5,8]\n K[6*i+5,6*j+3] = K[6*i+5,6*j+3] + k[5,9]\n K[6*i+5,6*j+4] = K[6*i+5,6*j+4] + k[5,10]\n K[6*i+5,6*j+5] = K[6*i+5,6*j+5] + k[5,11]\n K[6*j,6*i] = K[6*j,6*i] + k[6,0]\n K[6*j,6*i+1] = K[6*j,6*i+1] + k[6,1]\n K[6*j,6*i+2] = K[6*j,6*i+2] + k[6,2]\n K[6*j,6*i+3] = K[6*j,6*i+3] + k[6,3]\n K[6*j,6*i+4] = K[6*j,6*i+4] + k[6,4]\n K[6*j,6*i+5] = K[6*j,6*i+5] + k[6,5]\n K[6*j,6*j] = K[6*j,6*j] + k[6,6]\n K[6*j,6*j+1] = K[6*j,6*j+1] + k[6,7]\n K[6*j,6*j+2] = K[6*j,6*j+2] + k[6,8]\n K[6*j,6*j+3] = K[6*j,6*j+3] + k[6,9]\n K[6*j,6*j+4] = K[6*j,6*j+4] + k[6,10]\n K[6*j,6*j+5] = K[6*j,6*j+5] + k[6,11]\n K[6*j+1,6*i] = K[6*j+1,6*i] + k[7,0]\n 
K[6*j+1,6*i+1] = K[6*j+1,6*i+1] + k[7,1]\n K[6*j+1,6*i+2] = K[6*j+1,6*i+2] + k[7,2]\n K[6*j+1,6*i+3] = K[6*j+1,6*i+3] + k[7,3]\n K[6*j+1,6*i+4] = K[6*j+1,6*i+4] + k[7,4]\n K[6*j+1,6*i+5] = K[6*j+1,6*i+5] + k[7,5]\n K[6*j+1,6*j] = K[6*j+1,6*j] + k[7,6]\n K[6*j+1,6*j+1] = K[6*j+1,6*j+1] + k[7,7]\n K[6*j+1,6*j+2] = K[6*j+1,6*j+2] + k[7,8]\n K[6*j+1,6*j+3] = K[6*j+1,6*j+3] + k[7,9]\n K[6*j+1,6*j+4] = K[6*j+1,6*j+4] + k[7,10]\n K[6*j+1,6*j+5] = K[6*j+1,6*j+5] + k[7,11]\n K[6*j+2,6*i] = K[6*j+2,6*i] + k[8,0]\n K[6*j+2,6*i+1] = K[6*j+2,6*i+1] + k[8,1]\n K[6*j+2,6*i+2] = K[6*j+2,6*i+2] + k[8,2]\n K[6*j+2,6*i+3] = K[6*j+2,6*i+3] + k[8,3]\n K[6*j+2,6*i+4] = K[6*j+2,6*i+4] + k[8,4]\n K[6*j+2,6*i+5] = K[6*j+2,6*i+5] + k[8,5]\n K[6*j+2,6*j] = K[6*j+2,6*j] + k[8,6]\n K[6*j+2,6*j+1] = K[6*j+2,6*j+1] + k[8,7]\n K[6*j+2,6*j+2] = K[6*j+2,6*j+2] + k[8,8]\n K[6*j+2,6*j+3] = K[6*j+2,6*j+3] + k[8,9]\n K[6*j+2,6*j+4] = K[6*j+2,6*j+4] + k[8,10]\n K[6*j+2,6*j+5] = K[6*j+2,6*j+5] + k[8,11]\n K[6*j+3,6*i] = K[6*j+3,6*i] + k[9,0]\n K[6*j+3,6*i+1] = K[6*j+3,6*i+1] + k[9,1]\n K[6*j+3,6*i+2] = K[6*j+3,6*i+2] + k[9,2]\n K[6*j+3,6*i+3] = K[6*j+3,6*i+3] + k[9,3]\n K[6*j+3,6*i+4] = K[6*j+3,6*i+4] + k[9,4]\n K[6*j+3,6*i+5] = K[6*j+3,6*i+5] + k[9,5]\n K[6*j+3,6*j] = K[6*j+3,6*j] + k[9,6]\n K[6*j+3,6*j+1] = K[6*j+3,6*j+1] + k[9,7]\n K[6*j+3,6*j+2] = K[6*j+3,6*j+2] + k[9,8]\n K[6*j+3,6*j+3] = K[6*j+3,6*j+3] + k[9,9]\n K[6*j+3,6*j+4] = K[6*j+3,6*j+4] + k[9,10]\n K[6*j+3,6*j+5] = K[6*j+3,6*j+5] + k[9,11]\n K[6*j+4,6*i] = K[6*j+4,6*i] + k[10,0]\n K[6*j+4,6*i+1] = K[6*j+4,6*i+1] + k[10,1]\n K[6*j+4,6*i+2] = K[6*j+4,6*i+2] + k[10,2]\n K[6*j+4,6*i+3] = K[6*j+4,6*i+3] + k[10,3]\n K[6*j+4,6*i+4] = K[6*j+4,6*i+4] + k[10,4]\n K[6*j+4,6*i+5] = K[6*j+4,6*i+5] + k[10,5]\n K[6*j+4,6*j] = K[6*j+4,6*j] + k[10,6]\n K[6*j+4,6*j+1] = K[6*j+4,6*j+1] + k[10,7]\n K[6*j+4,6*j+2] = K[6*j+4,6*j+2] + k[10,8]\n K[6*j+4,6*j+3] = K[6*j+4,6*j+3] + k[10,9]\n K[6*j+4,6*j+4] = K[6*j+4,6*j+4] + k[10,10]\n K[6*j+4,6*j+5] = K[6*j+4,6*j+5] + k[10,11]\n K[6*j+5,6*i] = K[6*j+5,6*i] + k[11,0]\n K[6*j+5,6*i+1] = K[6*j+5,6*i+1] + k[11,1]\n K[6*j+5,6*i+2] = K[6*j+5,6*i+2] + k[11,2]\n K[6*j+5,6*i+3] = K[6*j+5,6*i+3] + k[11,3]\n K[6*j+5,6*i+4] = K[6*j+5,6*i+4] + k[11,4]\n K[6*j+5,6*i+5] = K[6*j+5,6*i+5] + k[11,5]\n K[6*j+5,6*j] = K[6*j+5,6*j] + k[11,6]\n K[6*j+5,6*j+1] = K[6*j+5,6*j+1] + k[11,7]\n K[6*j+5,6*j+2] = K[6*j+5,6*j+2] + k[11,8]\n K[6*j+5,6*j+3] = K[6*j+5,6*j+3] + k[11,9]\n K[6*j+5,6*j+4] = K[6*j+5,6*j+4] + k[11,10]\n K[6*j+5,6*j+5] = K[6*j+5,6*j+5] + k[11,11]\n \n return K",
"_____no_output_____"
],
[
"def FEA_u(coord, elcon, bc_u_elim, f_after_u_elim, E=210e6,G=84e6,A=2e-2,Iy=10e-5,Iz=20e-5,J=5e-5):\n coord=np.array(coord)\n elcon=np.array(elcon)\n \n K=np.zeros(shape=(6*(np.max(elcon)+1),6*(np.max(elcon)+1)))\n for el in elcon:\n k=SpaceFrameElementStiffness(E,G,A,Iy,Iz,J,coord[el[0]][0],coord[el[0]][1],coord[el[0]][2],\\\n coord[el[1]][0],coord[el[1]][1],coord[el[1]][2])\n K=SpaceFrameAssemble(K,k,el[0],el[1])\n \n K=np.delete(K,bc_u_elim,0)\n K=np.delete(K,bc_u_elim,1) \n d=np.dot(np.linalg.inv(K),f_after_u_elim) \n u=np.zeros(shape=(6*len(coord)))\n\n j=0\n for i in range(len(u)):\n if i not in bc_u_elim:\n u[i]=d[j]\n j+=1\n if j>len(d)-1:\n break\n return u",
"_____no_output_____"
]
],
[
[
"## Utils",
"_____no_output_____"
]
],
[
[
"def total_length(coord,elcon):\n coord=np.array(coord)\n elcon=np.array(elcon)\n t_length=0\n for i in range(len(elcon)):\n l=PlaneTrussElementLength(coord[elcon[i][0]][0],\\\n coord[elcon[i][0]][1],\\\n coord[elcon[i][0]][2],\\\n coord[elcon[i][1]][0],\\\n coord[elcon[i][1]][1],\\\n coord[elcon[i][1]][2])\n t_length+=l \n return t_length ",
"_____no_output_____"
],
[
"def possible_lines_dic(n,m,dx,dy):\n A=[(-dx,0),(-dx,dy),(0,dy),(dx,dy),(dx,0),(dx,-dy),(0,-dy),(-dx,-dy)]\n dic={}\n t=0\n for i in range(n):\n for j in range(m):\n for item in A:\n x,y=j*dx,i*dy\n x1,y1=x+item[0],y+item[1]\n if x1>=0 and x1<=(m-1)*dx and y1>=0 and y1<=(n-1)*dy and (x1,y1,x,y) not in dic:\n dic[(x,y,x1,y1)]=t\n t+=1\n return dic ",
"_____no_output_____"
],
[
"# def possible_elcon_dic(n,m,dx,dy):\n# dic={}\n# t=0\n# for i in range(n):\n# for j in range(m):\n# x,y=j*dx,i*dy\n# dic[(x,y)]=t\n# t+=1\n \n# return dic \n ",
"_____no_output_____"
],
[
"# lines_dic(5,5,1,1)",
"_____no_output_____"
],
[
"# possible_elcon_dic(5,5,1,1)",
"_____no_output_____"
],
[
"# def pad(l, content, width):\n# l.extend([content] * (width - len(l)))\n# return l",
"_____no_output_____"
],
[
"# def triangle(elcon):\n# c=random.randint(0,np.max(elcon))\n\n# T=[]\n# for i in range(len(elcon)):\n# if np.any(np.not_equal(elcon[i],elcon[c])) and elcon[c][1] in elcon[i]:\n# T+=list(elcon[i]) \n \n# T=set(T)\n# T=list(T)\n \n# for j in range(len(T)):\n# if ([elcon[c][0],T[j]] in elcon.tolist() or [T[j],elcon[c][0]] in elcon.tolist()) \\\n# and np.any(np.not_equal(np.array([elcon[c][0],T[j]]),elcon[c])):\n# return [elcon[c][0],elcon[c][1],T[j]]",
"_____no_output_____"
],
[
"# def mid_point_triangle(tr_edges,coord):\n# ox=(coord[tr_edges[0]][0]+coord[tr_edges[1]][0]+coord[tr_edges[2]][0])/3\n# oy=(coord[tr_edges[0]][1]+coord[tr_edges[1]][1]+coord[tr_edges[2]][1])/3\n# oz=0\n# return list((ox,oy,oz))",
"_____no_output_____"
],
[
"# def new_connections(tr_edges,coord): \n# K=tr_edges \n# return [[len(coord),K[0]],[len(coord),K[1]],[len(coord),K[2]]]\n# list(range(0,6))",
"_____no_output_____"
]
],
[
[
"## Model",
"_____no_output_____"
]
],
[
[
"class Model:\n def __init__(self,n=5,m=5,dx=1,dy=1, force=-500,\n E=210e6, G=84e6, A=2e-2, Iy=10e-5, Iz=20e-5, J=5e-5, break_flag=False):\n # n,m,dx,dy - grid parameters \n self.E=E\n self.G=G\n self.A=A\n self.Iy=Iy\n self.Iz=Iz\n self.J=J\n self.n=n\n self.m=m\n self.dx=dx\n self.dy=dy\n self.dic_lines=possible_lines_dic(self.n,self.m,self.dx,self.dy)\n self.line_list=len(self.dic_lines)*[0]\n self.break_flag=break_flag\n self.coord=[[2,2,0]] \n self.elcon=[] \n self.el_dic={(2,2):0}\n self.max_el=0\n self.bc_u_elim=[] \n self.force=force\n self.f_after_u_elim=[0,self.force,0,0,0,0]\n self.old_weight=float(\"inf\")\n self.old_strength=-float(\"inf\")\n self.visit_list = [0,0,0,0] # number of checkpoints is 4\n \n def reset(self,break_flag,force):\n self.dic_lines=possible_lines_dic(self.n,self.m,self.dx,self.dy)\n self.line_list=len(self.dic_lines)*[0]\n self.break_flag=break_flag\n self.coord=[[2,2,0]] \n self.elcon=[] \n self.el_dic={(2,2):0}\n self.max_el=0\n self.bc_u_elim=[] \n self.force=force\n self.f_after_u_elim=[0,self.force,0,0,0,0]\n# self.old_weight=-float(\"inf\")\n# self.old_strength=-float(\"inf\")\n self.visit_list = [0,0,0,0] # number of checkpoints is 4\n \n def FEA(self):\n return FEA_u(self.coord, self.elcon, self.bc_u_elim, self.f_after_u_elim,\n self.E,self.G,self.A,self.Iy,self.Iz,self.J)\n \n def max_u(self,FEA_output_arr):\n t=1\n A=[]\n while t<len(FEA_output_arr):\n A.append(FEA_output_arr[t])\n t+=6 \n return min(A) \n \n \n def length(self):\n return total_length(self.coord,self.elcon)\n \n \n def move_w(self,x,y):\n # x,y - current location\n x_new=x-self.dx\n y_new=y\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n# if (x_new,y_new)!=(0,0) and (x_new,y_new)!=((self.m-1)*self.dx,0) and \\\n# (x_new,y_new)!=((self.m-1)*self.dx,(self.n-1)*self.dy) and \\\n# (x_new,y_new)!=(0,(self.n-1)*self.dy):\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])\n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1\n \n return x_new, y_new\n \n def move_nw(self,x,y):\n # x,y - current location\n x_new=x-self.dx\n y_new=y+self.dy\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) 
not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]]) \n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n \n return x_new, y_new\n \n def move_n(self,x,y):\n # x,y - current location\n x_new=x\n y_new=y+self.dy\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])\n \n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n \n return x_new, y_new\n \n \n def move_ne(self,x,y):\n # x,y - current location\n x_new=x+self.dx\n y_new=y+self.dy\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not 
in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])\n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n \n return x_new, y_new\n \n def move_e(self,x,y):\n # x,y - current location\n x_new=x+self.dx\n y_new=y\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]]) \n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n return x_new, y_new\n \n def move_se(self,x,y):\n # x,y - current location\n x_new=x+self.dx\n y_new=y-self.dy\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])\n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n \n return x_new, y_new\n \n def move_s(self,x,y):\n # x,y - current location\n x_new=x\n y_new=y-self.dy\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True \n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n 
except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])\n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n \n return x_new, y_new\n \n def move_sw(self,x,y):\n # x,y - current location\n x_new=x-self.dx\n y_new=y-self.dy\n if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \\\n or 3 in self.line_list:\n self.break_flag=True\n else:\n try:\n self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1\n except KeyError:\n self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1\n \n if (x_new,y_new) not in self.el_dic:\n self.max_el+=1\n self.el_dic[(x_new,y_new)]=self.max_el\n self.coord.append([x_new,y_new,0])\n if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \\\n (x_new,y_new)!=(self.m-2,self.n-2) and \\\n (x_new,y_new)!=(1,self.n-2):\n self.f_after_u_elim+=[0,self.force,0,0,0,0]\n elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \\\n (x_new,y_new)==(self.m-2,self.n-2) or \\\n (x_new,y_new)==(1,self.n-2):\n self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))\n \n if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \\\n (self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:\n self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])\n \n \n if (x_new,y_new) in self.el_dic:\n if (x_new,y_new)==(1,1):\n self.visit_list[0]+=1\n elif (x_new,y_new)==(self.m-2,1):\n self.visit_list[1]+=1\n elif (x_new,y_new)==(self.m-2,self.n-2):\n self.visit_list[2]+=1\n elif (x_new,y_new)==(1,self.n-2):\n self.visit_list[3]+=1 \n \n return x_new, y_new\n \n \n def action_space(self,action,x0,y0):\n if action==0:\n return self.move_w(x0,y0)\n elif action==1: \n return self.move_nw(x0,y0)\n elif action==2: \n return self.move_n(x0,y0)\n elif action==3:\n return self.move_ne(x0,y0)\n elif action==4:\n return self.move_e(x0,y0)\n elif action==5:\n return self.move_se(x0,y0)\n elif action==6:\n return self.move_s(x0,y0)\n elif action==7:\n return self.move_sw(x0,y0)\n \n \n def nn_input(self,x,y): \n return self.line_list+[x,y] \n \n def reward_(self,x_new,y_new,n_steps):\n reward=2*n_steps\n# reward=2\n if all([x>=1 for x in self.visit_list]):\n reward+=10000\n weight=self.length()\n \n# self.draw(\"green\")\n FEA_output_arr=self.FEA()\n max_=self.max_u(FEA_output_arr)\n strength=max_\n# print(weight,strength) \n if weight<=self.old_weight:\n reward+=50000\n self.old_weight=weight\n if strength>=self.old_strength: \n reward+=100000000\n self.old_strength=strength\n# print(self.old_weight, 
self.old_strength) \n self.break_flag=True \n return reward \n# elif any([x==1 for x in self.visit_list]):\n# reward+=250*(self.visit_list[0]+self.visit_list[1]+self.visit_list[2]+self.visit_list[3]) \n# return reward \n# if x_new<0+1 or x_new>(self.m-1)*self.dx-1 or y_new<0+1 or y_new>(self.n-1)*self.dy-1:\n# reward-=50\n# return reward\n return reward \n \n def draw(self,color):\n c=self.coord\n e=self.elcon\n c=np.array(c)\n e=np.array(e)\n coord=c.reshape(np.max(e)+1,3)\n fig=plt.figure(figsize=(13,5))\n for item in e:\n ax = fig.gca(projection='3d') \n ax.plot([coord[item[0]][0],coord[item[1]][0]],\\\n [coord[item[0]][1],coord[item[1]][1]],\\\n [coord[item[0]][2],coord[item[1]][2]],\n color=color) \n# ax.view_init(70,300)\n ax.view_init(-90,90)\n# ax1 = plt.subplot(131)\n ax.set_xlim([0, 5])\n ax.set_ylim([0, 5])\n plt.show() \n \n ",
"_____no_output_____"
]
],
[
[
"## Neural Network Policy - Policy Gradients",
"_____no_output_____"
]
],
[
[
"n_inputs = 74 \nn_hidden = 100 \nn_outputs = 8 \ninitializer = tf.contrib.layers.variance_scaling_initializer()\n\nlearning_rate = 0.0001\n\n# Build the neural network\nX_ = tf.placeholder(tf.float64, shape=[None, n_inputs], name=\"X_\")\nhidden = fully_connected(X_, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)\nhidden1 = fully_connected(hidden, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)\nhidden2 = fully_connected(hidden1, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)\nlogits = fully_connected(hidden2, n_outputs, activation_fn=None, weights_initializer=initializer)\noutputs = tf.nn.softmax(logits, name=\"Y_proba\")\n# outputs = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), -1)\n\n\n# Select a random action based on the estimated probabilities\naction = tf.random.multinomial(tf.log(outputs), num_samples=1,output_dtype=tf.int64)\n\n\ny=tf.reshape(tf.one_hot(action,depth=8,dtype=tf.float64),[8,1])\nxentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=tf.transpose(logits))\n\noptimizer = tf.train.AdamOptimizer(learning_rate)\ngrads_and_vars = optimizer.compute_gradients(xentropy)\ngradients = [grad for grad, variable in grads_and_vars]\ngradient_placeholders = []\ngrads_and_vars_feed = []\nfor grad, variable in grads_and_vars:\n gradient_placeholder = tf.placeholder(tf.float64, shape=grad.get_shape())\n gradient_placeholders.append(gradient_placeholder)\n grads_and_vars_feed.append((gradient_placeholder, variable))\n\ntraining_op = optimizer.apply_gradients(grads_and_vars_feed)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()",
"WARNING:tensorflow:\nThe TensorFlow contrib module will not be included in TensorFlow 2.0.\nFor more information, please see:\n * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n * https://github.com/tensorflow/addons\n * https://github.com/tensorflow/io (for I/O related ops)\nIf you depend on functionality not listed there, please file an issue.\n\nWARNING:tensorflow:From /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow_core/contrib/layers/python/layers/layers.py:1866: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `layer.__call__` method instead.\nWARNING:tensorflow:From <ipython-input-18-a2b49fee14ad>:19: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.random.categorical` instead.\nWARNING:tensorflow:From /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n"
],
[
"# with tf.Session() as sess:\n# A=sess.run(tf.exp([-0.39514669, -0.9955475, 0.36458025, 0.02534027, 1.11079987, -0.25412942,\n# 0.68900028, 0.42532931]) /tf.reduce_sum(tf.exp([-0.39514669, -0.9955475, 0.36458025, 0.02534027, 1.11079987, -0.25412942,\n# 0.68900028, 0.42532931]), -1))",
"_____no_output_____"
],
[
"# A",
"_____no_output_____"
],
[
"def discount_rewards(rewards, discount_rate=0.99):\n discounted_rewards = np.empty(len(rewards))\n cumulative_rewards = 0\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate\n discounted_rewards[step] = cumulative_rewards\n return discounted_rewards",
"_____no_output_____"
],
[
"def discount_and_normalize_rewards(all_rewards, discount_rate=0.99):\n all_discounted_rewards = [discount_rewards(rewards) for rewards in all_rewards]\n flat_rewards = np.concatenate(all_discounted_rewards)\n reward_mean = flat_rewards.mean()\n reward_std = flat_rewards.std()\n return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]",
"_____no_output_____"
],
[
"# coord=np.array([0,0,0,0,7,0,7,7,0,7,0,0,3.5,3.5,0,4,4,0,3,4,0])\n# elcon=np.array([[0,1],[1,2],[2,3],[0,3],[1,6],[2,6],[2,5],[4,5],[4,6],[0,4],[3,4],[3,5],[5,6],[0,6]])\n# bc_u_elim=list(range(30,42))\n# f_after_u_elim=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-500,0,0,0,0]",
"_____no_output_____"
],
[
"# G=Geometry(coord,elcon,bc_u_elim,f_after_u_elim)\nM=Model() ",
"_____no_output_____"
],
[
"n_iterations = 1001 # number of training iterations\nn_max_steps = 500 # max steps per episode\nn_games_per_update = 10 # train the policy every 10 episodes\nsave_iterations = 100 # save the model every 10 training iterations\n\n\nwith tf.Session() as sess:\n start=time.time()\n init.run() \n \n# saver.restore(sess, tf.train.latest_checkpoint(\"C:\\\\Temp\\\\tf_save\\\\policy0\\\\\")) \n# tf.get_default_graph()\n \n for iteration in range(n_iterations):\n \n all_rewards = [] # all sequences of raw rewards for each episode\n all_gradients = [] # gradients saved at each step of each episode\n \n \n for game in range(n_games_per_update):\n# tf.random.set_random_seed(game)\n# init.run() # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n current_rewards = [] # all raw rewards from the current episode\n current_gradients = [] # all gradients from the current episode\n \n \n M.reset(False,-500)\n \n x0,y0=2,2\n obs=M.nn_input(x0,y0)\n \n for step in range(n_max_steps):\n \n action_val, gradients_val = sess.run([action, gradients],\n feed_dict={X_: np.array(obs).reshape(1,n_inputs)})\n \n# outputs_, gradients_val = sess.run([X_, gradients],\n# feed_dict={X_: np.array(obs).reshape(1,n_inputs)})\n x_new,y_new=x0,y0 \n# print(outputs_)\n# print(x_new,y_new)\n# print(action_val[0][0])\n \n x_new,y_new=M.action_space(action_val[0][0],x_new,y_new)\n\n \n \n obs=M.nn_input(x_new,y_new)\n \n \n reward=M.reward_(x_new,y_new,step)\n x0,y0 = x_new,y_new\n \n if M.break_flag:\n reward-=10000\n \n current_rewards.append(reward)\n current_gradients.append(gradients_val)\n \n if M.break_flag:\n break \n\n all_rewards.append(current_rewards)\n all_gradients.append(current_gradients)\n\n \n \n # At this point we have run the policy for 10 episodes, and we are\n # ready for a policy update using the algorithm described earlier.\n all_rewards = discount_and_normalize_rewards(all_rewards)\n \n \n \n feed_dict = {}\n for var_index, grad_placeholder in enumerate(gradient_placeholders):\n # multiply the gradients by the action scores, and compute the mean\n mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index] \n for game_index, rewards in enumerate(all_rewards)\n for step, reward in enumerate(rewards)],axis=0)\n feed_dict[grad_placeholder] = mean_gradients\n \n \n sess.run(training_op, feed_dict=feed_dict)\n \n if iteration % save_iterations == 0:\n# print(\"Saving {} iteration\".format(iteration))\n print('Time taken for {} epoch {} sec\\n'.format(iteration, time.time() - start))\n saver.save(sess, \"./bionic_ckpt/bionic0.ckpt\")\n\n# end=time.time()",
"Time taken for 0 epoch 0.9260900020599365 sec\n\nTime taken for 100 epoch 20.42711877822876 sec\n\nTime taken for 200 epoch 49.124107837677 sec\n\nTime taken for 300 epoch 73.34570980072021 sec\n\nTime taken for 400 epoch 99.64313888549805 sec\n\nTime taken for 500 epoch 128.86554503440857 sec\n\nTime taken for 600 epoch 167.33722496032715 sec\n\nTime taken for 700 epoch 206.74694299697876 sec\n\nTime taken for 800 epoch 246.65562677383423 sec\n\nTime taken for 900 epoch 288.9546959400177 sec\n\nTime taken for 1000 epoch 332.23919796943665 sec\n\n"
]
],
[
[
"## AI designing the bionic partition",
"_____no_output_____"
]
],
[
[
"def predict(G2):\n with tf.Session() as sess:\n saver = tf.train.import_meta_graph('./bionic_ckpt/bionic0.ckpt.meta')\n saver.restore(sess, \"./bionic_ckpt/bionic0.ckpt\") \n\n graph = tf.get_default_graph()\n outputs = graph.get_tensor_by_name(\"Y_proba:0\") \n X_ = graph.get_tensor_by_name(\"X_:0\") \n \n# G2.reset(np.array([0,0,0,0,7,0,7,7,0,7,0,0,3.5,3.5,0,4,4,0,3,4,0]),\\\n# np.array([[0,1],[1,2],[2,3],[0,3],[1,6],[2,6],[2,5],[4,5],[4,6],[0,4],[3,4],[3,5],[5,6],[0,6]]),\\\n# list(range(30,42)),[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-500,0,0,0,0]) \n \n# G2.position = random.randint(5,np.max(G2.elcon))\n\n x0,y0=2,2\n obs=G2.nn_input(x0,y0)\n \n for step in range(100):\n action_val= sess.run([outputs],feed_dict={X_: np.array(obs).reshape(1,n_inputs)})\n action_val=np.log(action_val)\n print(np.argmax(action_val))\n \n x_new,y_new=x0,y0 \n \n x_new,y_new=G2.action_space(np.argmax(action_val),x_new,y_new)\n print(x_new, y_new)\n \n \n if G2.break_flag:\n obs=G2.nn_input(x_new,y_new)\n break \n \n obs=G2.nn_input(x_new,y_new)\n x0,y0=x_new,y_new\n# print(obs[-1])\n \n# G2.position = random.randint(5,np.max(G2.elcon))\n\n G2.draw('blue')\n \n return obs[-1], obs[-2]",
"_____no_output_____"
],
[
"# coord=np.array([0,0,0,0,7,0,7,7,0,7,0,0,3.5,3.5,0,4,4,0,3,4,0])\n# elcon=np.array([[0,1],[1,2],[2,3],[0,3],[1,6],[2,6],[2,5],[4,5],[4,6],[0,4],[3,4],[3,5],[5,6],[0,6]])\n# bc_u_elim=list(range(30,42))\n# f_after_u_elim=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-500,0,0,0,0]",
"_____no_output_____"
],
[
"M=Model()",
"_____no_output_____"
],
[
"# G3.draw('green')",
"_____no_output_____"
],
[
"# pr=G3.nn_input()",
"_____no_output_____"
],
[
"# pr[-1],pr[-2]",
"_____no_output_____"
],
[
"# G3.add_node()\n# G3.add_node()\n# G3.add_node()\n# G3.add_node()\n# G3.add_node()\n# G3.add_node()\n# G3.add_node()\n# G3.add_node()",
"_____no_output_____"
],
[
"predict(M)",
"INFO:tensorflow:Restoring parameters from ./bionic_ckpt/bionic0.ckpt\n0\n1 2\n6\n1 1\n6\n1 0\n3\n2 1\n6\n2 0\n3\n3 1\n6\n3 0\n1\n2 1\n1\n1 2\n6\n1 1\n3\n2 2\n6\n2 1\n3\n3 2\n6\n3 1\n1\n2 2\n6\n2 1\n3\n3 2\n1\n2 3\n6\n2 2\n3\n3 3\n6\n3 2\n1\n2 3\n6\n2 2\n3\n3 3\n6\n3 2\n1\n2 3\n6\n2 2\n"
],
[
"M.length()",
"_____no_output_____"
],
[
"FEA_output_arr=M.FEA()\nM.max_u(FEA_output_arr)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbba17483c9ac7d3dd0df1b2f6af4af5019ca29c
| 155,984 |
ipynb
|
Jupyter Notebook
|
src/contextual-choice.ipynb
|
jeffgong047/dnd-lstm
|
270bc458f5915da6a6332024fd2c81f3646853c9
|
[
"MIT"
] | 30 |
2019-06-14T11:22:42.000Z
|
2022-03-06T11:11:22.000Z
|
src/contextual-choice.ipynb
|
jeffgong047/dnd-lstm
|
270bc458f5915da6a6332024fd2c81f3646853c9
|
[
"MIT"
] | 4 |
2019-04-23T22:08:25.000Z
|
2021-11-18T16:41:59.000Z
|
src/contextual-choice.ipynb
|
jeffgong047/dnd-lstm
|
270bc458f5915da6a6332024fd2c81f3646853c9
|
[
"MIT"
] | 8 |
2019-10-23T20:44:31.000Z
|
2022-03-23T15:36:07.000Z
| 374.961538 | 64,160 | 0.927896 |
[
[
[
"#### demo: training a DND LSTM on a contextual choice task\n\nThis is an implementation of the following paper: \n```\nRitter, S., Wang, J. X., Kurth-Nelson, Z., Jayakumar, S. M., Blundell, C., Pascanu, R., & Botvinick, M. (2018). \nBeen There, Done That: Meta-Learning with Episodic Recall. arXiv [stat.ML]. \nRetrieved from http://arxiv.org/abs/1805.09692\n```",
"_____no_output_____"
]
],
[
[
"'''\nIf you are using google colab, uncomment and run the following lines! \nwhich grabs the dependencies from github\n'''\n# !git clone https://github.com/qihongl/dnd-lstm.git\n# !cd dnd-lstm/src/\n# import os\n# os.chdir('dnd-lstm/src/')",
"_____no_output_____"
],
[
"import time\nimport torch\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom task import ContextualChoice\nfrom model import DNDLSTM as Agent\nfrom utils import compute_stats, to_sqnp\nfrom model.DND import compute_similarities\nfrom model.utils import get_reward, compute_returns, compute_a2c_loss\n\nsns.set(style='white', context='talk', palette='colorblind')\nseed_val = 0\ntorch.manual_seed(seed_val)\nnp.random.seed(seed_val)",
"_____no_output_____"
],
[
"'''init task'''\nn_unique_example = 50\nn_trials = 2 * n_unique_example\n# n time steps of a trial\ntrial_length = 10\n# after `tp_corrupt`, turn off the noise\nt_noise_off = 5\n# input/output/hidden/memory dim\nobs_dim = 32\ntask = ContextualChoice(\n obs_dim=obs_dim, trial_length=trial_length,\n t_noise_off=t_noise_off\n)",
"_____no_output_____"
],
[
"'''init model'''\n# set params\ndim_hidden = 32\ndim_output = 2\ndict_len = 100\nlearning_rate = 5e-4\nn_epochs = 20\n# init agent / optimizer\nagent = Agent(task.x_dim, dim_hidden, dim_output, dict_len)\noptimizer = torch.optim.Adam(agent.parameters(), lr=learning_rate)",
"_____no_output_____"
],
[
"'''train'''\nlog_return = np.zeros(n_epochs,)\nlog_loss_value = np.zeros(n_epochs,)\nlog_loss_policy = np.zeros(n_epochs,)\n\nlog_Y = np.zeros((n_epochs, n_trials, trial_length))\nlog_Y_hat = np.zeros((n_epochs, n_trials, trial_length))\n\n# loop over epoch\nfor i in range(n_epochs):\n time_start = time.time()\n # get data for this epoch\n X, Y = task.sample(n_unique_example)\n # flush hippocampus\n agent.reset_memory()\n agent.turn_on_retrieval()\n\n # loop over the training set\n for m in range(n_trials):\n # prealloc\n cumulative_reward = 0\n probs, rewards, values = [], [], []\n h_t, c_t = agent.get_init_states()\n\n # loop over time, for one training example\n for t in range(trial_length):\n # only save memory at the last time point\n agent.turn_off_encoding()\n if t == trial_length-1 and m < n_unique_example:\n agent.turn_on_encoding()\n # recurrent computation at time t\n output_t, _ = agent(X[m][t].view(1, 1, -1), h_t, c_t)\n a_t, prob_a_t, v_t, h_t, c_t = output_t\n # compute immediate reward\n r_t = get_reward(a_t, Y[m][t])\n # log\n probs.append(prob_a_t)\n rewards.append(r_t)\n values.append(v_t)\n # log\n cumulative_reward += r_t\n log_Y_hat[i, m, t] = a_t.item()\n\n returns = compute_returns(rewards)\n loss_policy, loss_value = compute_a2c_loss(probs, values, returns)\n loss = loss_policy + loss_value\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # log\n log_Y[i] = np.squeeze(Y.numpy())\n log_return[i] += cumulative_reward / n_trials\n log_loss_value[i] += loss_value.item() / n_trials\n log_loss_policy[i] += loss_policy.item() / n_trials\n\n # print out some stuff\n time_end = time.time()\n run_time = time_end - time_start\n print(\n 'Epoch %3d | return = %.2f | loss: val = %.2f, pol = %.2f | time = %.2f' %\n (i, log_return[i], log_loss_value[i], log_loss_policy[i], run_time)\n )",
"Epoch 0 | return = 5.86 | loss: val = 1.82, pol = 1.12 | time = 2.39\nEpoch 1 | return = 7.45 | loss: val = 1.06, pol = -0.64 | time = 2.55\nEpoch 2 | return = 7.85 | loss: val = 0.89, pol = -0.64 | time = 2.41\nEpoch 3 | return = 7.72 | loss: val = 0.83, pol = -0.75 | time = 2.51\nEpoch 4 | return = 7.64 | loss: val = 0.83, pol = -0.41 | time = 2.50\nEpoch 5 | return = 7.78 | loss: val = 0.79, pol = -0.31 | time = 2.57\nEpoch 6 | return = 8.27 | loss: val = 0.71, pol = -0.36 | time = 2.54\nEpoch 7 | return = 8.13 | loss: val = 0.76, pol = -0.33 | time = 2.52\nEpoch 8 | return = 8.15 | loss: val = 0.65, pol = -0.09 | time = 2.45\nEpoch 9 | return = 8.08 | loss: val = 0.71, pol = -0.28 | time = 2.37\nEpoch 10 | return = 8.05 | loss: val = 0.64, pol = -0.27 | time = 2.38\nEpoch 11 | return = 8.34 | loss: val = 0.68, pol = -0.18 | time = 2.37\nEpoch 12 | return = 8.33 | loss: val = 0.60, pol = -0.35 | time = 2.31\nEpoch 13 | return = 8.14 | loss: val = 0.69, pol = -0.12 | time = 2.47\nEpoch 14 | return = 8.14 | loss: val = 0.64, pol = -0.20 | time = 2.42\nEpoch 15 | return = 8.35 | loss: val = 0.60, pol = -0.39 | time = 2.42\nEpoch 16 | return = 8.29 | loss: val = 0.68, pol = -0.45 | time = 2.40\nEpoch 17 | return = 8.41 | loss: val = 0.59, pol = -0.15 | time = 2.27\nEpoch 18 | return = 8.33 | loss: val = 0.62, pol = -0.28 | time = 2.31\nEpoch 19 | return = 8.10 | loss: val = 0.71, pol = -0.20 | time = 2.39\n"
],
[
"'''learning curve'''\nf, axes = plt.subplots(1, 2, figsize=(8, 3))\naxes[0].plot(log_return)\naxes[0].set_ylabel('Return')\naxes[0].set_xlabel('Epoch')\naxes[1].plot(log_loss_value)\naxes[1].set_ylabel('Value loss')\naxes[1].set_xlabel('Epoch')\nsns.despine()\nf.tight_layout()",
"_____no_output_____"
],
[
"'''show behavior'''\ncorrects = log_Y_hat[-1] == log_Y[-1]\nacc_mu_no_memory, acc_se_no_memory = compute_stats(\n corrects[:n_unique_example])\nacc_mu_has_memory, acc_se_has_memory = compute_stats(\n corrects[n_unique_example:])\n\nn_se = 2\nf, ax = plt.subplots(1, 1, figsize=(7, 4))\nax.errorbar(range(trial_length), y=acc_mu_no_memory,\n yerr=acc_se_no_memory * n_se, label='w/o memory')\nax.errorbar(range(trial_length), y=acc_mu_has_memory,\n yerr=acc_se_has_memory * n_se, label='w/ memory')\nax.axvline(t_noise_off, label='turn off noise', color='grey', linestyle='--')\nax.set_xlabel('Time')\nax.set_ylabel('Correct rate')\nax.set_title('Choice accuracy by condition')\nf.legend(frameon=False, bbox_to_anchor=(1, .6))\nsns.despine()\nf.tight_layout()",
"_____no_output_____"
],
[
"'''visualize keys and values'''\nkeys, vals = agent.get_all_mems()\nn_mems = len(agent.dnd.keys)\ndmat_kk, dmat_vv = np.zeros((n_mems, n_mems)), np.zeros((n_mems, n_mems))\nfor i in range(n_mems):\n dmat_kk[i, :] = to_sqnp(compute_similarities(\n keys[i], keys, agent.dnd.kernel))\n dmat_vv[i, :] = to_sqnp(compute_similarities(\n vals[i], vals, agent.dnd.kernel))\n\n# plot\ndmats = {'key': dmat_kk, 'value': dmat_vv}\nf, axes = plt.subplots(1, 2, figsize=(12, 5))\nfor i, (label_i, dmat_i) in enumerate(dmats.items()):\n sns.heatmap(dmat_i, cmap='viridis', square=True, ax=axes[i])\n axes[i].set_xlabel(f'id, {label_i} i')\n axes[i].set_ylabel(f'id, {label_i} j')\n axes[i].set_title(\n f'{label_i}-{label_i} similarity, metric = {agent.dnd.kernel}'\n )\nf.tight_layout()",
"_____no_output_____"
],
[
"'''project memory content to low dim space'''\n\n# convert the values to a np array, #memories x mem_dim\nvals_np = np.vstack([to_sqnp(vals[i]) for i in range(n_mems)])\n# project to PC space\nvals_centered = (vals_np - np.mean(vals_np, axis=0, keepdims=True))\nU, S, _ = np.linalg.svd(vals_centered, full_matrices=False)\nvals_pc = np.dot(U, np.diag(S))\n\n# pick pcs\npc_x = 0\npc_y = 1\n\n# plot\nf, ax = plt.subplots(1, 1, figsize=(7, 5))\nY_phase2 = to_sqnp(Y[:n_unique_example, 0])\nfor y_val in np.unique(Y_phase2):\n ax.scatter(\n vals_pc[Y_phase2 == y_val, pc_x],\n vals_pc[Y_phase2 == y_val, pc_y],\n marker='o', alpha=.7,\n )\nax.set_title(f'Each point is a memory (i.e. value)')\nax.set_xlabel(f'PC {pc_x}')\nax.set_ylabel(f'PC {pc_y}')\nax.legend(['left trial', 'right trial'], bbox_to_anchor=(.6, .3))\nsns.despine(offset=20)\nf.tight_layout()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbba19fb335c88f01f9dad6b563fc7efd1693012
| 13,247 |
ipynb
|
Jupyter Notebook
|
tutorials/Certification_Trainings/Healthcare/Spark_NLP_Clinical_NER_Playground_Streamlit_app.ipynb
|
Rock-ass/spark-nlp-workshop
|
dbcb1f4c504bd5d0e7ed85310307db0d24a9575e
|
[
"Apache-2.0"
] | 1 |
2022-02-22T20:52:58.000Z
|
2022-02-22T20:52:58.000Z
|
tutorials/Certification_Trainings/Healthcare/Spark_NLP_Clinical_NER_Playground_Streamlit_app.ipynb
|
Tommyhappy01/6-SPARK_NLP
|
4081b67c1d7f857ba1853c001293917d30fe9c35
|
[
"Apache-2.0"
] | null | null | null |
tutorials/Certification_Trainings/Healthcare/Spark_NLP_Clinical_NER_Playground_Streamlit_app.ipynb
|
Tommyhappy01/6-SPARK_NLP
|
4081b67c1d7f857ba1853c001293917d30fe9c35
|
[
"Apache-2.0"
] | null | null | null | 59.403587 | 7,053 | 0.753227 |
[
[
[
"",
"_____no_output_____"
],
[
"[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/Spark_NLP_Clinical_NER_Playground_Streamlit_app.ipynb)",
"_____no_output_____"
],
[
"# Spark NLP Clinical NER Playground - Streamlit app\n\n",
"_____no_output_____"
],
[
"### run the following the cell and upload your license keys",
"_____no_output_____"
]
],
[
[
"import json\n\nfrom google.colab import files\n\nlicense_keys = files.upload()\n\nwith open(list(license_keys.keys())[0]) as f:\n license_keys = json.load(f)",
"_____no_output_____"
]
],
[
[
"### run the following cells and click on the ***ngrok url*** printed at the end (it may take a few minutes to load everything). It will open up a new tab on your browser to let you start play with the stremalit app.",
"_____no_output_____"
]
],
[
[
"%%capture\n\nfor k,v in license_keys.items(): \n %set_env $k=$v\n\n!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh\n!bash jsl_colab_setup.sh\n\n! pip install spark-nlp-display\n! pip install streamlit\n! pip install pyngrok\n",
"_____no_output_____"
],
[
"!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/streamlit_notebooks/healthcare/sparknlp_ner_playground.py\n\n!streamlit run /content/sparknlp_ner_playground.py&>/dev/null&\n\nimport time\n\ntime.sleep(3)\n\nfrom pyngrok import ngrok\n\npublic_url = ngrok.connect(addr='8501')\n\nprint (public_url)",
"_____no_output_____"
]
],
[
[
"### if you want to kill the ngrok session and restart",
"_____no_output_____"
]
],
[
[
"!killall ngrok\n\npublic_url = ngrok.connect(addr='8501')\n\nprint (public_url)",
"NgrokTunnel: \"http://35b72624af3c.ngrok.io\" -> \"http://localhost:8501\"\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbba23772638e14b0e46c5ffe7cf46a6a85fdf97
| 137,434 |
ipynb
|
Jupyter Notebook
|
RBF/rbf_test.ipynb
|
v-i-s-h/dl-vi-comm
|
80032588a5e6e13bdc397ce9bd51fed73a6045bf
|
[
"MIT"
] | 4 |
2020-07-30T10:45:03.000Z
|
2022-01-21T01:03:53.000Z
|
RBF/rbf_test.ipynb
|
v-i-s-h/dl-vi-comm
|
80032588a5e6e13bdc397ce9bd51fed73a6045bf
|
[
"MIT"
] | null | null | null |
RBF/rbf_test.ipynb
|
v-i-s-h/dl-vi-comm
|
80032588a5e6e13bdc397ce9bd51fed73a6045bf
|
[
"MIT"
] | 1 |
2021-02-26T10:55:27.000Z
|
2021-02-26T10:55:27.000Z
| 164.002387 | 100,508 | 0.829344 |
[
[
[
"# Simulation of BLER in RBF channel",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pickle\nfrom itertools import cycle, product\nimport dill\n\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import cdist",
"_____no_output_____"
]
],
[
[
"Simulation Configuration",
"_____no_output_____"
]
],
[
[
"blkSize = 8\nchDim = 4\n\n# Input\ninVecDim = 2 ** blkSize # 1-hot vector length for block\nencDim = 2*chDim\n\nSNR_range_dB = np.arange( 0.0, 40.1, 2.0 )",
"_____no_output_____"
]
],
[
[
"Simulation Utility functions",
"_____no_output_____"
]
],
[
[
"def rbf_channel(txBlk, n0):\n N, dim = txBlk.shape\n \n p1 = txBlk[:,:]\n p2 = np.hstack((-txBlk[:,dim//2:],txBlk[:,:dim//2]))\n \n h1 = np.sqrt(1.0/encDim) * np.random.randn(N)\n h2 = np.sqrt(1.0/encDim) * np.random.randn(N)\n \n outBlk = h1[:,None]*p1 + h2[:,None]*p2 + np.random.normal(scale=np.sqrt(n0), size=txBlk.shape)\n \n return outBlk",
"_____no_output_____"
],
[
"def add_pilot(txBlk, pilotSym):\n blkWithPilot = np.insert(txBlk, [0,chDim], values=pilotSym, axis=1)\n \n return blkWithPilot",
"_____no_output_____"
],
[
"def equalizer(rxBlk, pilotSym):\n \n N, dim = rxBlk.shape\n \n p1 = rxBlk[:,:]\n p2 = np.hstack((-rxBlk[:,dim//2:],rxBlk[:,:dim//2]))\n \n rxPilots = rxBlk[:,[0,1+chDim]]\n \n h1_hat = (pilotSym[1]*rxPilots[:,1]+pilotSym[0]*rxPilots[:,0])/(pilotSym[1]**2+pilotSym[0]**2)\n h2_hat = (pilotSym[0]*rxPilots[:,1]-pilotSym[1]*rxPilots[:,0])/(pilotSym[1]**2+pilotSym[0]**2)\n \n z1_hat = rxBlk[:,:dim//2]\n z2_hat = rxBlk[:,dim//2:]\n \n zR = (h1_hat[:,None]*z1_hat+h2_hat[:,None]*z2_hat) / (h1_hat[:,None]**2+h2_hat[:,None]**2)\n zI = (h1_hat[:,None]*z2_hat-h2_hat[:,None]*z1_hat) / (h1_hat[:,None]**2+h2_hat[:,None]**2)\n \n outBlk = np.hstack((zR[:,1:],zI[:,1:]))\n \n return outBlk",
"_____no_output_____"
]
],
[
[
"To store results",
"_____no_output_____"
]
],
[
[
"results = {}",
"_____no_output_____"
]
],
[
[
"## QAM System",
"_____no_output_____"
]
],
[
[
"qam_map_unscaled = np.array(list(map(list, product([-1.0, +1.0], repeat=blkSize))))\nqam_sym_pow_unscaled = np.mean(np.sum(qam_map_unscaled*qam_map_unscaled,axis=1))\nprint( \"Unscaled QAM Block Avg. Tx Power:\", qam_sym_pow_unscaled )",
"Unscaled QAM Block Avg. Tx Power: 8.0\n"
]
],
[
[
"### Block Symbol power scaled to block length\nHere the whole symbol block is scaled such a way that the power of the whole block is equal to the block length ie., poewr per dimension is $1$.",
"_____no_output_____"
]
],
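The cell below only rescales the unscaled QAM map and derives the pilot from the resulting block power. As a worked restatement (the symbols $L$, $\bar{P}$ and $d_\mathrm{ch}$ are notation introduced here for `blkSize`, the unscaled average block energy and `chDim`):

$$
\tilde{x} = \sqrt{\frac{L}{\bar{P}}}\, x
\qquad\Rightarrow\qquad
\mathbb{E}\big[\lVert \tilde{x} \rVert^2\big] = L = 8 ,
$$

so the average power per real dimension is $1$, and the pilot component is $\sqrt{L/(2 d_\mathrm{ch})} = \sqrt{8/8} = 1$, matching the printed pilot value.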
[
[
"qam_map_norm = np.sqrt(blkSize/qam_sym_pow_unscaled) * qam_map_unscaled\nqam_sym_pow_norm = np.mean(np.sum(qam_map_norm*qam_map_norm,axis=1))\nprint(\"Normalized to block length QAM Avg. Tx Power:\", qam_sym_pow_norm)\n\n# calculate the pilot symbol\nqam_pilot_sym_norm = np.sqrt(qam_sym_pow_norm/encDim) * np.ones(2)\nprint(\"Pilot Signal :\", qam_pilot_sym_norm)",
"Normalized to block length QAM Avg. Tx Power: 8.0\nPilot Signal : [1. 1.]\n"
]
],
[
[
"The noise target SNR is assumed to affect the whoel block. So the noise power per component is calculated and the noise is added accordingly.",
"_____no_output_____"
]
],
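As a worked restatement of the next cell (with $P_\mathrm{blk}$ the average block power, $N_0$ the total block noise power and $d_\mathrm{ch}$ = `chDim`; this notation is introduced here, not taken from the code):

$$
N_0 = P_\mathrm{blk}\, 10^{-\mathrm{SNR_{dB}}/10},
\qquad
n_0 = \frac{N_0}{2 d_\mathrm{ch} + 2} = \frac{N_0}{10},
$$

where the denominator counts the $2 d_\mathrm{ch} = 8$ data components plus the $2$ pilot components of the transmitted block. For example, at $\mathrm{SNR_{dB}} = 10$ with $P_\mathrm{blk} = 8$ this gives $N_0 = 0.8$ and $n_0 = 0.08$ per component.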
[
[
"noisePower = qam_sym_pow_norm * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim+2)\n\nerr = []\nfor n0 in n0_per_comp:\n thisErr = 0\n thisCount = 0\n while thisErr < 500 and thisCount < 100000:\n txSym = np.random.randint(inVecDim, size=1000)\n symBlk = qam_map_norm[txSym]\n txTest = add_pilot(symBlk, qam_pilot_sym_norm)\n rxTest = rbf_channel(txTest, n0)\n rxEqualized = equalizer(rxTest, qam_pilot_sym_norm)\n rxDecode = cdist(rxEqualized, qam_map_norm)\n rxSym = np.argmin(rxDecode,axis=1)\n thisErr += np.sum(rxSym!=txSym)\n thisCount += 1000\n err.append(thisErr/thisCount)\nresults[\"QAM (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))\".format(qam_sym_pow_norm, *qam_pilot_sym_norm)] = np.array(err)",
"_____no_output_____"
]
],
[
[
"### Block Symbol Power scaled to unit power",
"_____no_output_____"
]
],
[
[
"qam_map_unit = np.sqrt(1.0/qam_sym_pow_unscaled) * qam_map_unscaled\nqam_sym_pow_unit = np.mean(np.sum(qam_map_unit*qam_map_unit,axis=1))\nprint( \"Normalized to block length QAM Avg. Tx Power:\", qam_sym_pow_unit )\n\n# calculate the pilot symbol\nqam_pilot_sym_unit = np.sqrt(qam_sym_pow_unit/encDim) * np.ones(2)",
"Normalized to block length QAM Avg. Tx Power: 1.0000000000000002\n"
],
[
"noisePower = qam_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim+2)\n\nerr = []\nfor n0 in n0_per_comp:\n thisErr = 0\n thisCount = 0\n while thisErr < 500 and thisCount < 100000:\n txSym = np.random.randint(inVecDim, size=1000)\n symBlk = qam_map_unit[txSym]\n txTest = add_pilot(symBlk, qam_pilot_sym_unit)\n rxTest = rbf_channel(txTest, n0)\n rxEqualized = equalizer(rxTest, qam_pilot_sym_unit)\n rxDecode = cdist(rxEqualized, qam_map_unit)\n rxSym = np.argmin(rxDecode,axis=1)\n thisErr += np.sum(rxSym!=txSym)\n thisCount += 1000\n err.append(thisErr/thisCount)\nresults[\"QAM (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))\".format(qam_sym_pow_unit, *qam_pilot_sym_unit)] = np.array(err)",
"_____no_output_____"
]
],
[
[
"### Block Symbol Power is scaled to unit power and Pilot power is $1$ per component\nIn this case, the power of pilots are high and hence they will may experience a high SNR than rest of the block.",
"_____no_output_____"
]
],
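As a rough sanity check (this sketch is not part of the original simulation; it just reuses `chDim` from the configuration above and assumes the unit block energy is spread evenly over the $2 d_\mathrm{ch}$ data components):

```python
import numpy as np

chDim = 4                                  # channel dimension from the configuration above
data_power_per_comp = 1.0 / (2 * chDim)    # unit block energy spread over 2*chDim data components
pilot_power_per_comp = 1.0                 # each pilot component is transmitted with power 1
advantage_dB = 10 * np.log10(pilot_power_per_comp / data_power_per_comp)
print(advantage_dB)                        # ~9.03 dB higher SNR on the pilot components
```

So the channel estimate obtained from the pilots is noticeably less noisy than in the previous configuration, at the cost of a higher total transmit power per block.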
[
[
"qam_pilot_sym_1 = np.ones(2)\n\nnoisePower = qam_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim+2)\n\nerr = []\nfor n0 in n0_per_comp:\n thisErr = 0\n thisCount = 0\n while thisErr < 500 and thisCount < 100000:\n txSym = np.random.randint(inVecDim, size=1000)\n symBlk = qam_map_unit[txSym]\n txTest = add_pilot(symBlk, qam_pilot_sym_1)\n rxTest = rbf_channel(txTest, n0)\n rxEqualized = equalizer(rxTest, qam_pilot_sym_1)\n rxDecode = cdist(rxEqualized, qam_map_unit)\n rxSym = np.argmin(rxDecode,axis=1)\n thisErr += np.sum(rxSym!=txSym)\n thisCount += 1000\n err.append(thisErr/thisCount)\nresults[\"QAM (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))\".format(qam_sym_pow_unit, *qam_pilot_sym_1)] = np.array(err)",
"_____no_output_____"
]
],
[
[
"## Agrell",
"_____no_output_____"
]
],
[
[
"agrell_map_unscaled = []\nif blkSize==2 and chDim==1:\n agrell_map_unscaled = np.array([\n [ -1.0, -1.0 ],\n [ -1.0, 1.0 ],\n [ 1.0, -1.0 ],\n [ 1.0, 1.0 ]\n ])\nelif blkSize==4 and chDim==2:\n agrell_map_unscaled = np.array([\n [2.148934030042627, 0.0, 0.0, 0.0],\n [0.7347204676695321, 1.4142135623730951, 0.0, 0.0],\n [0.7347204676695321, -1.4142135623730951, 0.0, 0.0],\n [0.7347204676695321, 0.0, 1.4142135623730951, 0.0],\n [0.7347204676695321, 0.0, -1.4142135623730951, 0.0],\n [0.7347204676695321, 0.0, 0.0, 1.4142135623730951],\n [0.7347204676695321, 0.0, 0.0, -1.4142135623730951],\n [-0.6174729817844246, 1.0, 1.0, 1.0],\n [-0.6174729817844246, 1.0, 1.0, -1.0],\n [-0.6174729817844246, 1.0, -1.0, 1.0],\n [-0.6174729817844246, 1.0, -1.0, -1.0],\n [-0.6174729817844246, -1.0, 1.0, 1.0],\n [-0.6174729817844246, -1.0, 1.0, -1.0],\n [-0.6174729817844246, -1.0, -1.0, 1.0],\n [-0.6174729817844246, -1.0, -1.0, -1.0],\n [-1.6174729817844242, 0.0, 0.0, 0.0]\n ])\nelif blkSize==8 and chDim==4:\n agrell_map_unscaled = np.array([\n [ -256.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -256.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -370.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -373.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 373.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, -120.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, 
142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, -128.0, 136.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, -120.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ -128.0, 128.0, 136.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 0.0, -256.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, -256.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 
-8.0, -242.0, -245.0, 245.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 248.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 248.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, -248.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, -248.0, 248.0, -8.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, -248.0, 248.0, -8.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, -248.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -264.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -264.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, -242.0, -245.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, -8.0, 248.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, -242.0, -245.0, 245.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, 248.0, -242.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 8.0, 248.0, 248.0, -242.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 8.0, 248.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 
267.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 0.0, 264.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 0.0, 256.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, -120.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, -128.0, 136.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],\n 
[ 128.0, 128.0, -120.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -370.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -373.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 373.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, -120.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],\n [ 128.0, 128.0, 136.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],\n [ 256.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],\n [ 256.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ] ])\nelse:\n raise NotImplementedError(\"Not implemented (blkSize={},chDim={})\".format(blkSize,chDim))",
"_____no_output_____"
]
],
[
[
"### Block Symbol Power scaled to block length",
"_____no_output_____"
]
],
[
[
"agrell_sym_pow_unscaled = np.mean(np.sum(agrell_map_unscaled*agrell_map_unscaled,axis=1))\nprint( \"Unscaled Agrell Avg. Tx Power:\", agrell_sym_pow_unscaled )\n\nagrell_map_norm = np.sqrt(blkSize/agrell_sym_pow_unscaled) * agrell_map_unscaled\nagrell_sym_pow_norm = np.mean(np.sum(agrell_map_norm*agrell_map_norm,axis=1))\nprint( \"Normalized Agrell Avg. Tx Power:\", agrell_sym_pow_norm )\n\n# calculate the pilot symbol\nagrell_pilot_sym_norm = np.sqrt(agrell_sym_pow_norm/encDim) * np.ones(2)",
"Unscaled Agrell Avg. Tx Power: 137610.0\nNormalized Agrell Avg. Tx Power: 7.999999999999997\n"
],
[
"noisePower = agrell_sym_pow_norm * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim+2)\n\nerr = []\nfor n0 in n0_per_comp:\n thisErr = 0\n thisCount = 0\n while thisErr < 500 and thisCount < 100000:\n txSym = np.random.randint(inVecDim, size=1000)\n symBlk = agrell_map_norm[txSym]\n txTest = add_pilot(symBlk, agrell_pilot_sym_norm)\n rxTest = rbf_channel(txTest, n0)\n rxEqualized = equalizer(rxTest, agrell_pilot_sym_norm)\n rxDecode = cdist(rxEqualized, agrell_map_norm)\n rxSym = np.argmin(rxDecode,axis=1)\n thisErr += np.sum(rxSym!=txSym)\n thisCount += 1000\n err.append(thisErr/thisCount)\nresults[\"Agrell (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))\".format(agrell_sym_pow_norm, *agrell_pilot_sym_norm)] = np.array(err)",
"_____no_output_____"
]
],
[
[
"### Block Symbol Power scaled to unit power",
"_____no_output_____"
]
],
[
[
"agrell_sym_pow_unscaled = np.mean(np.sum(agrell_map_unscaled*agrell_map_unscaled,axis=1))\nprint( \"Unscaled Agrell Avg. Tx Power:\", agrell_sym_pow_unscaled )\n\nagrell_map_unit = np.sqrt(1.0/agrell_sym_pow_unscaled) * agrell_map_unscaled\nagrell_sym_pow_unit = np.mean(np.sum(agrell_map_unit*agrell_map_unit,axis=1))\nprint( \"Normalized Agrell Avg. Tx Power:\", agrell_sym_pow_unit )\n\n# calculate the pilot symbol\nagrell_pilot_sym_unit = np.sqrt(agrell_sym_pow_unit/encDim) * np.ones(2)",
"Unscaled Agrell Avg. Tx Power: 137610.0\nNormalized Agrell Avg. Tx Power: 1.0000000000000004\n"
],
[
"noisePower = agrell_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim+2)\n\nerr = []\nfor n0 in n0_per_comp:\n thisErr = 0\n thisCount = 0\n while thisErr < 500 and thisCount < 100000:\n txSym = np.random.randint(inVecDim, size=1000)\n symBlk = agrell_map_unit[txSym]\n txTest = add_pilot(symBlk, agrell_pilot_sym_unit)\n rxTest = rbf_channel(txTest, n0)\n rxEqualized = equalizer(rxTest, agrell_pilot_sym_unit)\n rxDecode = cdist(rxEqualized, agrell_map_unit)\n rxSym = np.argmin(rxDecode,axis=1)\n thisErr += np.sum(rxSym!=txSym)\n thisCount += 1000\n err.append(thisErr/thisCount)\nresults[\"Agrell (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))\".format(agrell_sym_pow_unit, *agrell_pilot_sym_unit)] = np.array(err)",
"_____no_output_____"
]
],
[
[
"### Block Symbol Power is scaled to unit power and Pilot power is $1$ per component",
"_____no_output_____"
]
],
[
[
"agrell_pilot_sym_1 = np.ones(2)\n\nnoisePower = agrell_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)\nn0_per_comp = noisePower/(2*chDim+2)\n\nerr = []\nfor n0 in n0_per_comp:\n thisErr = 0\n thisCount = 0\n while thisErr < 500 and thisCount < 100000:\n txSym = np.random.randint(inVecDim, size=1000)\n symBlk = agrell_map_unit[txSym]\n txTest = add_pilot(symBlk, agrell_pilot_sym_1)\n rxTest = rbf_channel(txTest, n0)\n rxEqualized = equalizer(rxTest, agrell_pilot_sym_1)\n rxDecode = cdist(rxEqualized, agrell_map_unit)\n rxSym = np.argmin(rxDecode,axis=1)\n thisErr += np.sum(rxSym!=txSym)\n thisCount += 1000\n err.append(thisErr/thisCount)\nresults[\"Agrell (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))\".format(agrell_sym_pow_unit, *agrell_pilot_sym_1)] = np.array(err)",
"_____no_output_____"
]
],
[
[
"## Plot results",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(12,9))\nfor (l,v) in results.items():\n plt.semilogy(SNR_range_dB, v, label=l, linewidth=2)\nplt.legend(loc=\"lower left\", prop={'size':14})\nplt.grid()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbba4a3425c01074c9a420176020a0d0c228e6e7
| 25,035 |
ipynb
|
Jupyter Notebook
|
ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb
|
bdoohan-goog/ai-platform-samples
|
4022651010de466a3e966c7ca34bbaeb89619460
|
[
"Apache-2.0"
] | 1 |
2021-06-30T17:41:23.000Z
|
2021-06-30T17:41:23.000Z
|
ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb
|
bdoohan-goog/ai-platform-samples
|
4022651010de466a3e966c7ca34bbaeb89619460
|
[
"Apache-2.0"
] | null | null | null |
ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb
|
bdoohan-goog/ai-platform-samples
|
4022651010de466a3e966c7ca34bbaeb89619460
|
[
"Apache-2.0"
] | null | null | null | 33.424566 | 290 | 0.536928 |
[
[
[
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"<table align=\"left\">\n\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/ai-platform-samples/raw/master/ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb\">\n Open in Google Cloud Notebooks\n </a>\n </td> \n</table>",
"_____no_output_____"
],
[
"# Vertex Pipelines: AutoML Images pipelines using google-cloud-pipeline-components\n",
"_____no_output_____"
],
[
"## Overview\n\nThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Images workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines).\n\n### Objective\n\nIn this example, you'll learn how to use components from `google_cloud_pipeline_components` to:\n- create a _Dataset_\n- train an AutoML Images model\n- deploy the trained model to an _endpoint_ for serving\n\nThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.html#module-google_cloud_pipeline_components.aiplatform).\n\n### Costs \n\nThis tutorial uses billable components of Google Cloud:\n\n* Vertex AI Training and Serving\n* Cloud Storage\n\nLearn about pricing for [Vertex AI](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage](https://cloud.google.com/storage/pricing), and use the [Pricing\nCalculator](https://cloud.google.com/products/calculator/)\nto generate a cost estimate based on your projected usage.",
"_____no_output_____"
],
[
"### Set up your local development environment\n\n**If you are using Colab or Google Cloud Notebooks**, your environment already meets\nall the requirements to run this notebook. You can skip this step.",
"_____no_output_____"
],
[
"**Otherwise**, make sure your environment meets this notebook's requirements.\nYou need the following:\n\n* The Google Cloud SDK\n* Git\n* Python 3\n* virtualenv\n* Jupyter notebook running in a virtual environment with Python 3\n\nThe Google Cloud guide to [Setting up a Python development\nenvironment](https://cloud.google.com/python/setup) and the [Jupyter\ninstallation guide](https://jupyter.org/install) provide detailed instructions\nfor meeting these requirements. The following steps provide a condensed set of\ninstructions:\n\n1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)\n\n1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)\n\n1. [Install\n virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)\n and create a virtual environment that uses Python 3. Activate the virtual environment.\n\n1. To install Jupyter, run `pip install jupyter` on the\ncommand-line in a terminal shell.\n\n1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.\n\n1. Open this notebook in the Jupyter Notebook Dashboard.",
"_____no_output_____"
],
[
"### Install additional packages\n",
"_____no_output_____"
]
],
[
[
"import sys\n\nif \"google.colab\" in sys.modules:\n USER_FLAG = \"\"\nelse:\n USER_FLAG = \"--user\"",
"_____no_output_____"
],
[
"!pip3 install {USER_FLAG} google-cloud-aiplatform==1.0.0 --upgrade\n!pip3 install {USER_FLAG} kfp google-cloud-pipeline-components==0.1.1 --upgrade",
"_____no_output_____"
]
],
[
[
"### Restart the kernel\n\nAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.",
"_____no_output_____"
]
],
[
[
"# Automatically restart kernel after installs\nimport os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"Check the versions of the packages you installed. The KFP SDK version should be >=1.6.",
"_____no_output_____"
]
],
[
[
"!python3 -c \"import kfp; print('KFP SDK version: {}'.format(kfp.__version__))\"",
"_____no_output_____"
]
],
[
[
"## Before you begin\n\nThis notebook does not require a GPU runtime.",
"_____no_output_____"
],
[
"### Set up your Google Cloud project\n\n**The following steps are required, regardless of your notebook environment.**\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n\n1. [Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com). \n\n1. Follow the \"**Configuring your project**\" instructions from the AI Platform Pipelines documentation.\n\n1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).\n\n1. Enter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.",
"_____no_output_____"
],
[
"#### Set your project ID\n\n**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.",
"_____no_output_____"
]
],
[
[
"import os\n\nPROJECT_ID = \"\"\n\n# Get your Google Cloud project ID from gcloud\nif not os.getenv(\"IS_TESTING\"):\n shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID: \", PROJECT_ID)",
"_____no_output_____"
]
],
[
[
"Otherwise, set your project ID here.",
"_____no_output_____"
]
],
[
[
"if PROJECT_ID == \"\" or PROJECT_ID is None:\n PROJECT_ID = \"python-docs-samples-tests\" # @param {type:\"string\"}",
"_____no_output_____"
]
],
[
[
"#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"_____no_output_____"
]
],
[
[
"### Authenticate your Google Cloud account\n\n**If you are using AI Platform Notebooks**, your environment is already\nauthenticated. Skip this step.",
"_____no_output_____"
],
[
"**If you are using Colab**, run the cell below and follow the instructions\nwhen prompted to authenticate your account via oAuth.\n\n**Otherwise**, follow these steps:\n\n1. In the Cloud Console, go to the [**Create service account key**\n page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).\n\n2. Click **Create service account**.\n\n3. In the **Service account name** field, enter a name, and\n click **Create**.\n\n4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type \"AI Platform\"\ninto the filter box, and select\n **AI Platform Administrator**. Type \"Storage Object Admin\" into the filter box, and select **Storage Object Admin**.\n\n5. Click *Create*. A JSON file that contains your key downloads to your\nlocal environment.\n\n6. Enter the path to your service account key as the\n`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\n\n# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# If on AI Platform, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''",
"_____no_output_____"
]
],
[
[
"### Create a Cloud Storage bucket as necessary\n\nYou need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.\n\n\nSet the name of your Cloud Storage bucket below. It must be unique across all\nCloud Storage buckets.\n\nYou can also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services are\navailable](https://cloud.google.com/ai-platform-unified/docs/general/locations#available_regions). You may\nnot use a Multi-Regional Storage bucket for training with AI Platform (Unified) Pipelines.",
"_____no_output_____"
]
],
[
[
"BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}\nREGION = \"us-central1\" # @param {type:\"string\"}",
"_____no_output_____"
],
[
"if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP",
"_____no_output_____"
]
],
[
[
"**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! gsutil mb -l $REGION $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"Finally, validate access to your Cloud Storage bucket by examining its contents:",
"_____no_output_____"
]
],
[
[
"! gsutil ls -al $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"### Import libraries and define constants",
"_____no_output_____"
],
[
"<**TODO**: for preview, we shouldn't need the **API KEY**>.\n\nDefine some constants. \n",
"_____no_output_____"
]
],
[
[
"PATH=%env PATH\n%env PATH={PATH}:/home/jupyter/.local/bin\n\nUSER = \"your-user-name\" # <---CHANGE THIS\nPIPELINE_ROOT = \"{}/pipeline_root/{}\".format(BUCKET_NAME, USER)\nAPI_KEY = \"your-api-key\" # <---CHANGE THIS\n\nPIPELINE_ROOT",
"_____no_output_____"
]
],
[
[
"Do some imports:",
"_____no_output_____"
]
],
[
[
"import kfp\nfrom google.cloud import aiplatform\nfrom google_cloud_pipeline_components import aiplatform as gcc_aip\nfrom kfp.v2 import compiler\nfrom kfp.v2.google.client import AIPlatformClient",
"_____no_output_____"
]
],
[
[
"## Define an AutoML Image classification pipeline that uses components from `google_cloud_pipeline_components`\n",
"_____no_output_____"
],
[
"Create a managed image dataset from a CSV file and train it using AutoML Image Training.\n",
"_____no_output_____"
],
[
"Define the pipeline:",
"_____no_output_____"
]
],
[
[
"@kfp.dsl.pipeline(name=\"automl-image-training-v2\")\ndef pipeline(project: str = PROJECT_ID):\n ds_op = gcc_aip.ImageDatasetCreateOp(\n project=project,\n display_name=\"flowers\",\n gcs_source=\"gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv\",\n import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,\n )\n\n training_job_run_op = gcc_aip.AutoMLImageTrainingJobRunOp(\n project=project,\n display_name=\"train-iris-automl-mbsdk-1\",\n prediction_type=\"classification\",\n model_type=\"CLOUD\",\n base_model=None,\n dataset=ds_op.outputs[\"dataset\"],\n model_display_name=\"iris-classification-model-mbsdk\",\n training_fraction_split=0.6,\n validation_fraction_split=0.2,\n test_fraction_split=0.2,\n budget_milli_node_hours=8000,\n )\n endpoint_op = gcc_aip.ModelDeployOp( # noqa: F841\n project=project, model=training_job_run_op.outputs[\"model\"]\n )",
"_____no_output_____"
]
],
[
[
"## Compile and run the pipeline\n\nCompile the pipeline:",
"_____no_output_____"
]
],
[
[
"from kfp.v2 import compiler # noqa: F811\n\ncompiler.Compiler().compile(\n pipeline_func=pipeline, package_path=\"image_classif_pipeline.json\"\n)",
"_____no_output_____"
]
],
[
[
"The pipeline compilation generates the `image_classif_pipeline.json` job spec file.\n\nNext, instantiate an API client object:",
"_____no_output_____"
]
],
[
[
"from kfp.v2.google.client import AIPlatformClient # noqa: F811\n\napi_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)",
"_____no_output_____"
]
],
[
[
"Then, you run the defined pipeline like this: ",
"_____no_output_____"
]
],
[
[
"response = api_client.create_run_from_job_spec(\n \"image_classif_pipeline.json\",\n pipeline_root=PIPELINE_ROOT,\n parameter_values={\"project\": PROJECT_ID},\n)",
"_____no_output_____"
]
],
[
[
"Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running:\n\n<a href=\"https://storage.googleapis.com/amy-jo/images/mp/automl_image_classif.png\" target=\"_blank\"><img src=\"https://storage.googleapis.com/amy-jo/images/mp/automl_image_classif.png\" width=\"40%\"/></a>",
"_____no_output_____"
],
[
"## Cleaning up\n\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\n- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.\n- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint.\n",
"_____no_output_____"
]
],
[
[
"# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.\n# ! gsutil -m rm -r $PIPELINE_ROOT",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
cbba6349012712db4d01dc35c5be94a75c204e35
| 928 |
ipynb
|
Jupyter Notebook
|
HelloGithub.ipynb
|
krules/dw_matrix
|
59ae100eed4b911fb29431f72ff4e1ff37b2a2ac
|
[
"MIT"
] | null | null | null |
HelloGithub.ipynb
|
krules/dw_matrix
|
59ae100eed4b911fb29431f72ff4e1ff37b2a2ac
|
[
"MIT"
] | null | null | null |
HelloGithub.ipynb
|
krules/dw_matrix
|
59ae100eed4b911fb29431f72ff4e1ff37b2a2ac
|
[
"MIT"
] | null | null | null | 928 | 928 | 0.709052 |
[
[
[
"print(\"Hello Github\")",
"Hello Github\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
cbba66fb858241346173d052fdd10091e4917ef0
| 4,820 |
ipynb
|
Jupyter Notebook
|
tensorflow/04 - Neural Network Basic/01 - Classification.ipynb
|
novemberde/practice_deeplearning
|
08f964f2fee215278ffb2222070e560fd7f3ca6b
|
[
"MIT"
] | null | null | null |
tensorflow/04 - Neural Network Basic/01 - Classification.ipynb
|
novemberde/practice_deeplearning
|
08f964f2fee215278ffb2222070e560fd7f3ca6b
|
[
"MIT"
] | null | null | null |
tensorflow/04 - Neural Network Basic/01 - Classification.ipynb
|
novemberde/practice_deeplearning
|
08f964f2fee215278ffb2222070e560fd7f3ca6b
|
[
"MIT"
] | null | null | null | 27.078652 | 88 | 0.468465 |
[
[
[
"# 털과 날개가 있는지 없는지에 따라, 포유류인지 조류인지 분류하는 신경망 모델을 만들어봅니다.\n\nimport tensorflow as tf\nimport numpy as np",
"_____no_output_____"
],
[
"# [털, 날개]\nx_data = np.array([\n [0, 0],\n [1, 0],\n [1, 1],\n [0, 0],\n [0, 0],\n [0, 1]\n])\n\n# [기타, 포유류, 조류]\n# 다음과 같은 형식을 one-hot 형식의 데이터라고 합니다.\ny_data = np.array([\n [1, 0, 0], # 기타\n [0, 1, 0], # 포유류\n [0, 0, 1], # 조류\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]\n])\n\n## 신경망 모델 구성\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n# 신경망은 2차원으로 [입력층(특성), 출력층(레이블)] -> [2, 3] 으로 정합니다.\nW = tf.Variable(tf.random_uniform([2, 3], -1., 1.))\n\n# 편향은 각 레이어의 아웃풋 갯수로 설정\n# 편향은 아웃풋의 갯수, 즉 최종 결과값의 분류 갯수인 3으로 설정합니다.\nb = tf.Variable(tf.zeros([3]))",
"_____no_output_____"
],
[
"# 신경망에 가중치 W과 편향 b를 적용합니다.\nL = tf.add(tf.matmul(X, W), b)\n# 가중치와 편향을 이용해 계산한 결과 값에\n# 텐서플로우에서 기본적으로 제공하는 활성화 함수인 ReLU 함수를 적용합니다.\nL = tf.nn.relu(L)",
"_____no_output_____"
],
[
"# 마지막으로 softmax 함수를 이용하여 출력값을 사용하기 쉽게 만듭니다.\n# softmax 함수는 다음처럼 결과값을 전체합이 1인 확률로 만들어주는 함수입니다.\n# 예) [8.04, 2.76, -6.52] -> [0.53, 0.24, 0.23]\nmodel = tf.nn.softmax(L)",
"_____no_output_____"
],
[
"# 신경망을 최적화하기 위한 비용 합수를 작성합니다.\n# 각 개별 결과에 대한 합을 구한 뒤 평균을 내는 방식을 사용합니다.\n# 전체 합이 아닌, 개별 결과를 구한 뒤 평균을 내는 방식을 사용하기 위해 axis 옵션을 사용합니다.\n# axis 옵션이 없으면 -1.09처럼 총합인 스칼라값으로 출력됩니다.\n# Y model Y * tf.log(model) reduce_sum(axis=1)\n# 예) [[1 0 0] [[0.1 0.7 0.2] -> [[-1.0 0 0] -> [-1.0, -0.09]\n# [0 1 0]] [0.2 0.8 0.0]] [ 0 -0.09 0]]\n# 즉, 이것은 예측값과 실제값 사이의 확률 분포의 차이를 비용으로 계산한 것이며,\n# 이것을 Cross-Entropy 라고 합니다.\ncost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(model), axis=1))\n\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\ntrain_op = optimizer.minimize(cost)",
"_____no_output_____"
],
[
"## 신경망 모델 학습\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfor step in range(100):\n sess.run(train_op, feed_dict={X: x_data, Y: y_data})\n\n if (step + 1) % 10 == 0:\n print(step + 1, sess.run(cost, feed_dict={X: x_data, Y: y_data}))\n\n\n#########\n# 결과 확인\n# 0: 기타 1: 포유류, 2: 조류\n######\n# tf.argmax: 예측값과 실제값의 행렬에서 tf.argmax 를 이용해 가장 큰 값을 가져옵니다.\n# 예) [[0 1 0] [1 0 0]] -> [1 0]\n# [[0.2 0.7 0.1] [0.9 0.1 0.]] -> [1 0]\nprediction = tf.argmax(model, 1)\ntarget = tf.argmax(Y, 1)\nprint('예측값:', sess.run(prediction, feed_dict={X: x_data}))\nprint('실제값:', sess.run(target, feed_dict={Y: y_data}))\n\nis_correct = tf.equal(prediction, target)\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\nprint('정확도: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))",
"(10, 1.1680274)\n(20, 1.1560332)\n(30, 1.144474)\n(40, 1.134127)\n(50, 1.1241394)\n(60, 1.1144934)\n(70, 1.1051711)\n(80, 1.0961555)\n(90, 1.0874302)\n(100, 1.0789798)\n('\\xec\\x98\\x88\\xec\\xb8\\xa1\\xea\\xb0\\x92:', array([2, 0, 1, 2, 2, 1]))\n('\\xec\\x8b\\xa4\\xec\\xa0\\x9c\\xea\\xb0\\x92:', array([0, 1, 2, 0, 0, 2]))\n정확도: 0.00\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbba72e712282f0a545d9a34ad6da7e0a8fd51a5
| 8,809 |
ipynb
|
Jupyter Notebook
|
notebooks/Exploratory Analysis.ipynb
|
IBMDeveloperUK/AUTO-AI
|
9ccc61e2334e9d4aca9745dd659c8fd81405cf91
|
[
"Apache-2.0"
] | 9 |
2020-06-08T16:22:18.000Z
|
2021-07-12T15:01:05.000Z
|
notebooks/Exploratory Analysis.ipynb
|
IBMDeveloperUK/AUTO-AI
|
9ccc61e2334e9d4aca9745dd659c8fd81405cf91
|
[
"Apache-2.0"
] | null | null | null |
notebooks/Exploratory Analysis.ipynb
|
IBMDeveloperUK/AUTO-AI
|
9ccc61e2334e9d4aca9745dd659c8fd81405cf91
|
[
"Apache-2.0"
] | 7 |
2020-05-07T17:26:30.000Z
|
2022-03-25T16:06:23.000Z
| 22.880519 | 294 | 0.542173 |
[
[
[
"# Exploratory Analysis",
"_____no_output_____"
],
[
"## 1) Reading the data",
"_____no_output_____"
]
],
[
[
"\nimport types\nimport pandas as pd\n\ndf_claim = pd.read_csv('https://raw.githubusercontent.com/IBMDeveloperUK/Machine-Learning-Models-with-AUTO-AI/master/Data/insurance.csv')\ndf_claim.head()\n",
"_____no_output_____"
],
[
"df_data = pd.read_csv('https://raw.githubusercontent.com/IBMDeveloperUK/Machine-Learning-Models-with-AUTO-AI/master/Data/patientdata_v2.csv')\ndf_data.head()",
"_____no_output_____"
],
[
"above_30 = df_data[df_data[\"BMI\"] > 33]",
"_____no_output_____"
]
],
[
[
"## 2) Importing visualization libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nimport matplotlib.patches as mpatches\n",
"_____no_output_____"
]
],
[
[
"## 3) Exploring data",
"_____no_output_____"
],
[
"### statistical description:\n\nDescribe Generate various summary statistics, excluding NaN values",
"_____no_output_____"
]
],
[
[
"df_claim.describe()",
"_____no_output_____"
]
],
[
[
"#### A) Q: Is there a relationship between BMI and claim amount?",
"_____no_output_____"
]
],
[
[
"sns.jointplot(x=df_claim[\"expenses\"], y=df_claim[\"bmi\"])\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### A) A: There is NO relationship between BMI and claim amount",
"_____no_output_____"
],
[
"#### B) Q: Is there a relationship between gender and claim amount?",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (5, 5))\nsns.boxplot(x = 'sex', y = 'expenses', data = df_claim)",
"_____no_output_____"
]
],
[
[
"#### B) A: On average claims from male and female are the same with slightly bigger proportion of higher amounts for male",
"_____no_output_____"
],
[
"#### C) Q: Is there a relationship between region and claim amount?",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (10, 5))\nsns.boxplot(x = 'region', y = 'expenses', data = df_claim)",
"_____no_output_____"
]
],
[
[
"#### C) A: On average claims from regions are the same with slightly bigger proportion of higher amounts from Southeast",
"_____no_output_____"
],
[
"#### D) Q: Is there a relationship between claim amount for smokers and non-smokers?",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize = (5, 5))\nsns.boxplot(x = 'smoker', y = 'expenses', data = df_claim)",
"_____no_output_____"
]
],
[
[
"#### D) A: There is a strong relationship between smokers and non-smokers with claims, being much higher for smokers",
"_____no_output_____"
],
[
" \n #### Is the smoker group well represented?",
"_____no_output_____"
]
],
[
[
"sns.countplot(x='smoker', data=df_claim)",
"_____no_output_____"
]
],
[
[
"### E) Q: Is there a relationship between claim amount and age?",
"_____no_output_____"
]
],
[
[
"sns.jointplot(x=df_claim[\"expenses\"], y=df_claim[\"age\"], kind='scatter')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### E) A: Claim amounts increase with age and tend to form groups around 1.2K, up to 3K, and more than 3K",
"_____no_output_____"
],
[
"## 4) Understanding data",
"_____no_output_____"
],
[
"###### based on observations above let's bring several variables together to observe the difference\n##### IMPACT OF SMOKING",
"_____no_output_____"
]
],
[
[
"claim_pplot=df_claim[['age', 'bmi', 'children', 'smoker', 'expenses']]\nclaim_pplot.head()\nsns.pairplot(claim_pplot, kind=\"scatter\", hue = \"smoker\" , markers=[\"o\", \"s\"], palette=\"Set1\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### GENDER IMPACT",
"_____no_output_____"
]
],
[
[
"claim_pplot=df_claim[['age', 'bmi', 'children', 'sex', 'expenses']]\nclaim_pplot.head()\nsns.pairplot(claim_pplot, kind=\"scatter\", hue = \"sex\" , markers=[\"o\", \"s\"], palette=\"Set1\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Summary: Gender has very little impact on the charges",
"_____no_output_____"
],
[
"#### REGION IMPACT",
"_____no_output_____"
]
],
[
[
"claim_pplot=df_claim[['age', 'bmi', 'children', 'region', 'expenses']]\nclaim_pplot.head()\nsns.pairplot(claim_pplot, kind=\"scatter\", hue = \"region\" , markers=[\"o\", \"s\",\"x\",\"+\"], palette=\"Set1\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Summary: Region does have some imact on the charges; however we can see some trends as it relates to BMI",
"_____no_output_____"
],
[
"### Summary: The charges are highly affected by the claimant being a smoker or non-smoker. Smokers tend to have BMIs above average, 30. Region and gender might play some role in determining the amount charged",
"_____no_output_____"
],
[
"<div class=\"alert alert-success\">\n\n <h1> Using the above Visualisations as reference, try creating similar charts for the second dataset </h1> <br/> \n \n</div> \n\n> *Tip*: If you want to run these in separate cells, activate the below cell by clicking on it and then click on the + at the top of the notebook. This will add extra cells. Click on the buttons with the upwards and downwards arrows to move the cells up and down to change their order.\n ",
"_____no_output_____"
]
],
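[
[
"# Not part of the original notebook: one more hedged example chart for the second dataset,\n# using only columns already referenced above in df_data (BMI, HEARTFAILURE).\nplt.figure(figsize=(5, 5))\nsns.boxplot(x='HEARTFAILURE', y='BMI', data=df_data)\nplt.title('BMI by heart-failure outcome')\nplt.show()",
"_____no_output_____"
]
],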
[
[
"plt.figure(figsize=(15,5))\nsns.barplot(x=\"EXERCISEMINPERWEEK\",y=\"CHOLESTEROL\",data=above_30,hue=\"HEARTFAILURE\",)\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
cbba7669d1580daebb9eda8b6499d352f4f793f8
| 10,003 |
ipynb
|
Jupyter Notebook
|
example/leaning-tower-of-muons/04_cosmics_coverage.ipynb
|
LLR-ILD/cosmics
|
0a37ffae25db3ec0cb4d7110057bb9d35c46dcbc
|
[
"Apache-2.0"
] | null | null | null |
example/leaning-tower-of-muons/04_cosmics_coverage.ipynb
|
LLR-ILD/cosmics
|
0a37ffae25db3ec0cb4d7110057bb9d35c46dcbc
|
[
"Apache-2.0"
] | null | null | null |
example/leaning-tower-of-muons/04_cosmics_coverage.ipynb
|
LLR-ILD/cosmics
|
0a37ffae25db3ec0cb4d7110057bb9d35c46dcbc
|
[
"Apache-2.0"
] | null | null | null | 32.796721 | 136 | 0.474558 |
[
[
[
"from pathlib import Path\n\nimport awkward as ak\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tqdm.notebook as tqdm\nimport uproot\n\nrun_name = \"run_050016_10192021_21h49min_Ascii_build\"\n# run_name = \"build\"\nraw_path = Path(\"data/raw\") / f\"{run_name}.root\"\nimg_path = Path(\"img/coverage\")\nimg_path.mkdir(exist_ok=True, parents=True)\n\n\nb_cosmic = uproot.open(raw_path)\nprint(\"trees:\", b_cosmic.keys())\ntree = b_cosmic[\"ecal\"]\nprint(len(tree))\ntree.show()",
"_____no_output_____"
],
[
"x, y, z, is_hit, is_masked, is_commissioned = ak.unzip(\n tree.arrays(filter_name=[\"hit_[xyz]\", \"hit_is*\"], entry_stop=1e6)\n)",
"_____no_output_____"
],
[
"tree.num_entries",
"_____no_output_____"
],
[
"n_layers = 15\ndistance_layers_mm = 15\n\nxy_pos = np.linspace(3.8, 86.3, 16)\nxy = np.concatenate([-xy_pos[::-1], xy_pos])\ndelta_xy = np.mean(xy_pos[1:] - xy_pos[:-1])\nxy_bins_pos = np.concatenate([[xy_pos[0] - delta_xy / 2], xy_pos + delta_xy / 2])\nxy_bins = np.concatenate([-xy_bins_pos[::-1], xy_bins_pos])\n\nlayer_maps = {\n i_z: np.zeros(2 * (len(xy) + 1,))\n for i_z in distance_layers_mm * np.arange(n_layers)\n}\nlayer_maps_is_hit = {\n i_z: np.zeros(2 * (len(xy) + 1,))\n for i_z in distance_layers_mm * np.arange(n_layers)\n}\nwith tqdm.tqdm(unit=\"M events\", total=tree.num_entries / 1e6) as p_bar:\n for batch in uproot.iterate(\n tree, filter_name=[\"hit_[xyz]\", \"hit_isHit\"], step_size=\"10 MB\"\n ):\n x, y, z, is_hit = ak.unzip(batch)\n for i_z in layer_maps:\n layer_maps[i_z] += np.histogram2d(\n *map(lambda v: ak.flatten(v[z == i_z]).to_numpy(), [x, y]), bins=xy_bins\n )[0]\n layer_maps_is_hit[i_z] += np.histogram2d(\n *map(\n lambda v: ak.flatten(v[(z == i_z) & (is_hit == 1)]).to_numpy(),\n [x, y],\n ),\n bins=xy_bins,\n )[0]\n p_bar.update(len(z) / 1e6)",
"_____no_output_____"
],
[
"len(z)",
"_____no_output_____"
],
[
"def _my_format(val):\n \"\"\"Enforce a format style with 5 digits maximum including the decimal dot.\"\"\"\n if val < 1e2:\n return f\"{val:.2f}\"\n if val < 1e3:\n return f\"{val:.1f}\"\n\n if val < 1e6:\n v_new, suffix = val / 1e3, \"k\"\n elif val < 1e9:\n v_new, suffix = val / 1e6, \"M\"\n elif val < 1e12:\n v_new, suffix = val / 1e9, \"B\"\n else:\n raise NotImplementedError(f\"Value is larger than forseen: {val}.\")\n\n if v_new < 10:\n return f\"{v_new:.2f}{suffix}\"\n elif v_new < 100:\n return f\"{v_new:.1f}{suffix}\"\n else:\n return f\"{v_new:.0f}{suffix}\"\n\n\ndef hit_map(layer_counts, log_norm=False, with_numbers=True):\n norm_type = colors.LogNorm if log_norm else colors.Normalize\n max_counts = max(np.max(v) for v in layer_counts.values())\n max_counts = 20000\n norm = norm_type(vmin=0, vmax=max_counts)\n fig, axs = plt.subplots(1 + (len(layer_counts) - 1) // 5, ncols=5, figsize=(15, 10))\n for i, z in enumerate(layer_counts.values()):\n ax = axs.flatten()[i]\n ax.imshow(z.T, cmap=plt.get_cmap(\"Greens\"), norm=norm)\n if with_numbers:\n boxes = [(0, 8), (8, 16), (17, 25), (25, 33)]\n for x_box in boxes:\n for y_box in boxes:\n box_vals = z[x_box[0] : x_box[1], y_box[0] : y_box[1]]\n val = np.mean(box_vals)\n assert np.std(box_vals) == 0\n color = \"black\"\n if val > 0.7 * max_counts:\n color = \"white\"\n elif log_norm and np.log(val) > 0.7 * np.log(max_counts):\n color = \"white\"\n ax.text(\n np.mean(x_box),\n np.mean(y_box),\n _my_format(val),\n ha=\"center\",\n va=\"center\",\n color=color,\n fontsize=14,\n )\n ax.set_title(f\"layer {i}\")\n ax.set_xticks([])\n ax.set_yticks([])\n fig.tight_layout()\n return fig\n\n\nfor log_norm in [True, False]:\n name = \"hit_map\"\n if log_norm:\n name += \"_log\"\n fig = hit_map(layer_maps, log_norm=log_norm)\n fig.savefig(img_path / f\"{name}.png\", dpi=300)\n name += \"_isHit\"\n fig = hit_map(layer_maps_is_hit, with_numbers=False, log_norm=log_norm)\n fig.savefig(img_path / f\"{name}.png\", dpi=300)",
"_____no_output_____"
],
[
"counts = np.concatenate([v.flatten() for v in layer_maps_is_hit.values()])\nbins = np.concatenate(\n [\n np.arange(0, 10, 1),\n np.exp(np.linspace(np.log(10), np.log(max(counts)), 100)),\n ]\n)\nhist_data, _ = np.histogram(counts, density=True, bins=bins)",
"_____no_output_____"
],
[
"fig, axs = plt.subplots(nrows=2, ncols=2, sharex=\"col\", sharey=\"row\")\nbin_centers = (bins[1:] + bins[:-1]) / 2\nbin_widths = bins[1:] - bins[:-1]\nfor is_log_x in [True, False]:\n for is_log_y in [True, False]:\n ax = axs[int(is_log_y), int(is_log_x)]\n ax.bar(bin_centers, hist_data, width=bin_widths)\n if is_log_x:\n ax.set_xscale(\"log\")\n else:\n ax.set_xlim(0, 2000)\n if is_log_y:\n ax.set_yscale(\"log\")\n ax.set_ylim((0.5 * min(hist_data[hist_data > 0]), 1.1 * max(hist_data)))\n else:\n ax.set_ylim(0, 0.003)\n lam = np.mean(counts)\n ax.plot(\n bin_centers,\n bin_widths / sum(bin_widths) / lam * np.exp(-bin_centers / lam),\n ls=\":\",\n color=\"black\",\n )\n\nfig.suptitle(\n f\"{100 * sum(counts == 0) / len(counts):.3g}% of cells were never hit\\n\"\n f\"({int(sum(counts))} hits in {len(counts)} cells)\"\n)\naxs[1][0].set_xlabel(\"cell hits [linear, lower part]\")\naxs[1][1].set_xlabel(\"cell hits [log]\")\naxs[0][0].set_ylabel(\"[linear, lower part]\")\naxs[1][0].set_ylabel(\"cell hit density [log]\")\nfig.tight_layout()\nfig.savefig(img_path / \"cell_counts.png\", dpi=400)",
"_____no_output_____"
],
[
"plt.hist(counts, density=True, bins=np.exp(np.log(np.arange(1, max(counts), 20))))\nplt.title(\n f\"{100 * sum(counts == 0) / len(counts):.3g}% of cells were never hit\\n({int(sum(counts))} hits in {len(counts)} cells)\"\n)\nplt.xscale(\"log\")",
"_____no_output_____"
],
[
"ak.mean(is_hit), ak.mean(is_masked), ak.mean(is_commissioned)",
"_____no_output_____"
],
[
"print(f\"{ak.count(is_hit):>10} all\")\nprint(f\"{ak.sum(is_hit == 1):>10} is_hit\")\nprint(f\"{ak.sum(is_masked == 1):>10} is_masked\")\nprint(f\"{ak.sum(is_commissioned == 1):>10} is_commissioned\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbba80e25193530a60689fb3bd1f43f7ff6a9e2e
| 124,976 |
ipynb
|
Jupyter Notebook
|
upwork-devs/Pappaterra-Lucia/Architectural Diagram.ipynb
|
lucia15/k8-vmware-visualisation
|
17be5d789e39ea9b6390df988423caaf0545ff69
|
[
"Apache-2.0"
] | null | null | null |
upwork-devs/Pappaterra-Lucia/Architectural Diagram.ipynb
|
lucia15/k8-vmware-visualisation
|
17be5d789e39ea9b6390df988423caaf0545ff69
|
[
"Apache-2.0"
] | null | null | null |
upwork-devs/Pappaterra-Lucia/Architectural Diagram.ipynb
|
lucia15/k8-vmware-visualisation
|
17be5d789e39ea9b6390df988423caaf0545ff69
|
[
"Apache-2.0"
] | null | null | null | 169.804348 | 19,050 | 0.84233 |
[
[
[
"# Architectural Diagram of the K8-VMWare Visualisations",
"_____no_output_____"
]
],
[
[
"%%capture\nimport sys\n!{sys.executable} -m pip install iplantuml",
"_____no_output_____"
],
[
"import iplantuml\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:70% !important; }</style>\"))",
"_____no_output_____"
],
[
"HTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\n<form action=\"javascript:code_toggle()\"><input type=\"submit\" value=\"Click here to toggle on/off the raw code.\"></form>''')",
"_____no_output_____"
]
],
[
[
"## Simple \"handwritten\" workflow",
"_____no_output_____"
]
],
[
[
"%%plantuml\n\n@startuml\n\nskinparam handwritten true\n\nrectangle WorkFlow {\n usecase \"VMware\" \n usecase \"GitHub\"\n usecase \"AWS Lambda\" as AWS_Lambda\n usecase \"AWS Lambda\" as AWS_Lambda2\n usecase \"AWS S3 Bucket\" as AWS_S3_Bucket\n usecase \"Elastic\"\n usecase \"Jupyter Notebook\" as Jupyter_Notebook\n usecase \"Visjs visualisations\" as Visjs\n}\n\nVMware -right-> AWS_Lambda :\"Pull info\"\nGitHub -left-> AWS_Lambda :\"Pull info\"\nAWS_Lambda -down-> AWS_S3_Bucket :\"Store it in\\n JSON and\\n put it to\"\nAWS_S3_Bucket -left-> AWS_Lambda2 :\"Push JSON\"\nAWS_Lambda2 -left-> Elastic :\"Push JSON\"\nAWS_S3_Bucket -right-> Jupyter_Notebook\nJupyter_Notebook -right-> Visjs\n\n@enduml ",
"_____no_output_____"
]
],
[
[
"## Using icons",
"_____no_output_____"
]
],
[
[
"%%plantuml\n\n@startuml\n\n!include <awslib/AWSCommon>\n' Uncomment the following line to create simplified view\n'!include <awslib/AWSSimplified>\n\n!include <awslib/Compute/Lambda>\n\n!include <aws/common>\n!include <aws/Storage/AmazonS3/AmazonS3>\n!include <aws/Storage/AmazonS3/bucket/bucket>\n\n!include <elastic/common>\n!include <elastic/elasticsearch/elasticsearch>\n\nLambda(AWS_Lambda, \"AWS\", \"Pulls info\")\nLambda(AWS_Lambda2, \"AWS\", \"Push info\")\nAMAZONS3(AWS_S3_Bucket, \"Bucket\")\nELASTICSEARCH(Elastic, \"\", agent) \n\n\nfile VMware as \"<img:https://i.ibb.co/FYNpM1x/vmware.png>\"\nfile Jupyter_Notebook as \"<img:https://i.ibb.co/Qm02pzy/jupyter.png>\"\nfile Visjs as \"<img:https://i.ibb.co/SRGGXtK/visjs.png>\"\nfile GitHub as \"<img:https://i.ibb.co/hYJcFhJ/github.png>\"\n\n\nVMware -right-> AWS_Lambda :\"Pull info\"\nGitHub -left-> AWS_Lambda :\"Pull info\"\nAWS_Lambda -down-> AWS_S3_Bucket :\"Store it in\\n JSON and\\n put it to\"\nAWS_S3_Bucket -left-> AWS_Lambda2 :\"Push JSON\"\nAWS_Lambda2 -left-> Elastic :\"Push JSON\"\nAWS_S3_Bucket -right-> Jupyter_Notebook\nJupyter_Notebook -right-> Visjs\n\n@enduml",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbba8310a28515c002067905dd3bf5b66c26ea32
| 556,341 |
ipynb
|
Jupyter Notebook
|
codici/backprop.ipynb
|
tvml/ml2122
|
290ac378b19ec5bbdd2094e42e3c39cd91867c9e
|
[
"MIT"
] | null | null | null |
codici/backprop.ipynb
|
tvml/ml2122
|
290ac378b19ec5bbdd2094e42e3c39cd91867c9e
|
[
"MIT"
] | null | null | null |
codici/backprop.ipynb
|
tvml/ml2122
|
290ac378b19ec5bbdd2094e42e3c39cd91867c9e
|
[
"MIT"
] | null | null | null | 1,186.228145 | 520,706 | 0.952606 |
[
[
[
"<a href=\"https://colab.research.google.com/github/tvml/ml2122/blob/master/codici/backprop.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## Rete neurale per riconoscere caratteri. Backpropagation implementata.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nimport warnings\nwarnings.filterwarnings('ignore')\n\n%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nfrom scipy import io\nimport scipy.misc as mi\nimport scipy.special as sp",
"_____no_output_____"
],
[
"import urllib.request\n\nfilepath = \"../dataset/\"\nurl = \"https://tvml.github.io/ml2122/dataset/\"\n\ndef get_file(filename):\n IS_COLAB = ('google.colab' in str(get_ipython()))\n if IS_COLAB:\n urllib.request.urlretrieve (url+filename, filename)\n return filename\n else:\n return filepath+filename",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport matplotlib.cm as cm\nimport seaborn as sns\n\nplt.style.use('fivethirtyeight')\n\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams['font.serif'] = 'Ubuntu'\nplt.rcParams['font.monospace'] = 'Ubuntu Mono'\nplt.rcParams['font.size'] = 10\nplt.rcParams['axes.labelsize'] = 10\nplt.rcParams['axes.labelweight'] = 'bold'\nplt.rcParams['axes.titlesize'] = 10\nplt.rcParams['xtick.labelsize'] = 8\nplt.rcParams['ytick.labelsize'] = 8\nplt.rcParams['legend.fontsize'] = 10\nplt.rcParams['figure.titlesize'] = 12\nplt.rcParams['image.cmap'] = 'jet'\nplt.rcParams['image.interpolation'] = 'none'\nplt.rcParams['figure.figsize'] = (16, 8)\nplt.rcParams['lines.linewidth'] = 2\nplt.rcParams['lines.markersize'] = 8\n\ncolors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', \n'#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', \n'#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09']\n\ncmap = mcolors.LinearSegmentedColormap.from_list(\"\", [\"#82cafc\", \"#069af3\", \"#0485d1\", colors[0], colors[8]])",
"_____no_output_____"
]
],
[
[
"Function to render the raster image of a single digit, together with the corresponding value",
"_____no_output_____"
]
],
[
[
"def plotData(X, Y, c):\n m, n = X.shape\n image = np.array(X[c,:])\n plt.figure(figsize = (2,2))\n plt.imshow((image.reshape(20, 20)).T, cmap='Greys')\n plt.show()\n print(\"True number is \" + str(Y[c].item()))",
"_____no_output_____"
]
],
[
[
"Function to display a grid of digit raster images ",
"_____no_output_____"
]
],
[
[
"# visualizza dati\ndef displayData(X, t, rows=10, cols=10, img_ind=None, size =16, class_value = False):\n if len(X)>rows*cols:\n img_ind = np.random.permutation(len(X))[0:rows * cols]\n else:\n img_ind = range(rows*cols)\n fig = plt.figure(figsize = (size,size))\n fig.patch.set_facecolor('white')\n ax = fig.gca()\n for i in range(100):\n plt.subplot(10,10,i+1)\n plt.imshow([255-x for x in X[img_ind[i]].T], cmap='gray', interpolation='gaussian')\n if class_value:\n plt.title(\"{}\".format(t[img_ind[i]]),fontsize = 20)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.axis('off')\n plt.subplots_adjust(top=1)\n plt.show()",
"_____no_output_____"
]
],
[
[
"Function to return statistics for test set x,t",
"_____no_output_____"
]
],
[
[
"def statistics(x,t, rows=10, cols=10):\n predclass = np.array([classify(x_)[1] for x_ in x])\n c = np.where(predclass==t.ravel(), 0, 1)\n d = np.nonzero(c)[0]\n print(predclass.reshape(rows, cols))\n print(str(np.sum(c))+' misclassifications out of '+str(len(x))+' items')\n for i in d:\n print('At ('+str(i/cols+1)+', '+str(i%cols+1)+'): '+str(t[i])+' classified as '+str(predclass[i]))\n print(\"Accuracy: %.2f%%\" % (np.mean(predclass==t) * 100.0))",
"_____no_output_____"
],
[
"def sigmoid(theta, x):\n return sp.expit(np.dot(x, theta.T))",
"_____no_output_____"
],
[
"def softmax(theta, x):\n a = np.exp(np.dot(x, theta.T))\n s=np.sum(a,axis=1).reshape(-1,1)\n return a/s",
"_____no_output_____"
],
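[
"# Not part of the original notebook: a quick sanity check (with random shapes) that each\n# row of the softmax output is a probability distribution, i.e. it sums to 1.\n_theta = np.random.randn(10, 26)\n_x = np.random.randn(5, 26)\nprint(np.sum(softmax(_theta, _x), axis=1))",
"_____no_output_____"
],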
[
"# funzione di costo regolarizzata \ndef cost(theta1, theta2, X, t):\n _,_,_,probs, y = classify(theta1,theta2,X)\n classes = np.arange(1, probs.shape[1] + 1)\n # rappresentazione 1-su-K delle classi predette\n P1 = (classes == y.reshape(-1,1)).astype(int)\n # rappresentazione complementare\n P0 = (classes != y.reshape(-1,1)).astype(int)\n # calcolo log-verosimiglianza\n lprobs1=-np.log(probs)\n lprobs0=-np.log(1.0-probs)\n term1 = np.trace(np.dot(lprobs1,P1.T))\n term2 = np.trace(np.dot(lprobs0,P0.T))\n c = term1+term2\n return c",
"_____no_output_____"
],
[
"# classificazione mediante softmax\ndef classify(theta1, theta2, X):\n m = len(X)\n x1 = np.column_stack((np.ones(m), X))\n z1 = sigmoid(theta1,x1)\n z1 = np.column_stack((np.ones(m), z1))\n z2 = softmax(theta2, z1)\n predictions = 1+np.argmax(z2, axis=1)\n return x1,z1,z2, predictions",
"_____no_output_____"
],
[
"# inizializza theta(w_out,w_in) con valori casuali\ndef init_theta(w_in, w_out):\n eps = np.sqrt(6)/np.sqrt(w_in+w_out)\n return np.random.uniform(-eps, eps, (w_in, w_out)) ",
"_____no_output_____"
],
[
"def bp_step(theta1, theta2, X, t):\n theta1_grad = np.zeros_like(theta1)\n theta2_grad = np.zeros_like(theta2)\n m =len(X)\n classes = np.arange(1, theta2.shape[0] + 1)\n c = 0.0\n x1,z1,z2, predictions=classify(theta1,theta2,X)\n tk= (classes == t.reshape(-1,1)).astype(int)\n delta2 = z2-tk\n delta1 = z1*(1-z1)*np.dot(delta2, theta2)\n delta1 = delta1[:, 1:]\n for i in range(m):\n theta2_grad+=np.outer(delta2[i,:],z1[i,:])\n theta1_grad+=np.outer(delta1[i,:],x1[i,:])\n c += np.sum(-tk[i] * np.log(z2[i,:]) - (1.0 - tk[i]) * np.log(1.0 - z2[i,:]))\n theta1_grad /= m\n theta2_grad /= m\n c /= m\n return c, theta1_grad, theta2_grad",
"_____no_output_____"
],
[
"def backpropagation(hidden_layer_size, nclasses, X, t, alpha, iterations):\n theta1 = init_theta(hidden_layer_size, X.shape[1]+1)\n theta2 = init_theta(nclasses, hidden_layer_size+1)\n c_history = []\n acc_history = []\n for k in range(iterations):\n c, theta1_grad, theta2_grad = bp_step(theta1, theta2, X, t)\n theta1 -= alpha*theta1_grad\n theta2 -= alpha*theta2_grad\n c_history.append(c)\n _,_,probs, predictions = classify(theta1,theta2, X)\n acc_history.append(np.mean(np.where(predictions!=t.ravel(),0,1)))\n return theta1, theta2, c_history, acc_history",
"_____no_output_____"
]
],
[
[
"Read dataset from a file",
"_____no_output_____"
]
],
[
[
"data = io.loadmat(get_file('digits.mat'))",
"_____no_output_____"
]
],
[
[
"Extract feature matrix and target array. Set the label of class 10 to 0.",
"_____no_output_____"
]
],
[
[
"X, t = data['X'], data['y']\nm, n = np.shape(X)\nnclasses = 10\nindx = np.random.permutation(5000)[0:100]\nXt = X[np.ix_(indx)].reshape([-1,20,20])\ntt=t[np.ix_(indx)].reshape(1,-1)[0]",
"_____no_output_____"
],
[
"displayData(Xt, tt, size=20)",
"_____no_output_____"
],
[
"# dimensione rete neurale\ninput_layer_size = n\nhidden_layer_size = 25\nnum_labels = nclasses",
"_____no_output_____"
],
[
"alpha = 1",
"_____no_output_____"
],
[
"theta1, theta2, c_history, acc_history = backpropagation(hidden_layer_size, nclasses, X, t, alpha, 100)",
"_____no_output_____"
],
[
"_,_,probs, predictions = classify(theta1, theta2, X)\n\nacc = np.mean(np.where(predictions!=t.ravel(),0,1))\nprint(acc)",
"0.8864\n"
],
[
"plt.plot(acc_history)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbba8b57290424273351abb0056a101d71096a27
| 10,116 |
ipynb
|
Jupyter Notebook
|
bronze/.ipynb_checkpoints/B50_Drawing_a_Qubit_Solutions-checkpoint.ipynb
|
dilyaraahmetshina/quantum_computings
|
a618bae55def65b17974f3ad402ce27817f91842
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
bronze/.ipynb_checkpoints/B50_Drawing_a_Qubit_Solutions-checkpoint.ipynb
|
dilyaraahmetshina/quantum_computings
|
a618bae55def65b17974f3ad402ce27817f91842
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
bronze/.ipynb_checkpoints/B50_Drawing_a_Qubit_Solutions-checkpoint.ipynb
|
dilyaraahmetshina/quantum_computings
|
a618bae55def65b17974f3ad402ce27817f91842
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | 30.74772 | 309 | 0.518782 |
[
[
[
"<table>\n <tr><td align=\"right\" style=\"background-color:#ffffff;\">\n <img src=\"../images/logo.jpg\" width=\"20%\" align=\"right\">\n </td></tr>\n <tr><td align=\"right\" style=\"color:#777777;background-color:#ffffff;font-size:12px;\">\n Abuzer Yakaryilmaz | April 15, 2019 (updated) \n </td></tr>\n <tr><td align=\"right\" style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;\">\n This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros.\n </td></tr>\n</table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\vhadamardzero}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\vhadamardone}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $",
"_____no_output_____"
],
[
"<h2> <font color=\"blue\"> Solutions for </font>Drawing a Qubit</h2>",
"_____no_output_____"
],
[
"<a id=\"task1\"></a>\n<h3> Task 1 </h3>\n\nWrite a function that returns a randomly created 2-dimensional (real-valued) quantum state.\n\n<table align=\"left\"><tr><td><i>\nYou may use your code written for <a href=\"B28_Quantum_State.ipynb#task2\">a task given in notebook \"Quantum States\"</a>.\n</i></td></tr></table>\n<br><br>\n\nCreate 100 random quantum states by using your function, and then draw all of them as points.",
"_____no_output_____"
],
[
"<h3>Solution</h3>",
"_____no_output_____"
],
[
"A function for randomly creating a 2-dimensional quantum state:",
"_____no_output_____"
]
],
[
[
"# randomly create a 2-dimensional quantum state\nfrom random import randrange\ndef random_quantum_state():\n first_entry = randrange(101)\n first_entry = first_entry/100\n first_entry = first_entry**0.5 \n if randrange(2) == 0: \n first_entry = -1 * first_entry\n second_entry = 1 - (first_entry**2)\n second_entry = second_entry**0.5\n if randrange(2) == 0: \n second_entry = -1 * second_entry\n return [first_entry,second_entry]",
"_____no_output_____"
]
],
[
[
"Drawing randomly created 100 quantum states as blue points:",
"_____no_output_____"
]
],
[
[
"# import the drawing methods\nfrom matplotlib.pyplot import plot, figure\n\n# draw a figure\nfigure(figsize=(6,6), dpi=60) \n\n# draw the origin\nplot(0,0,'ro') \n\nfor i in range(100):\n # create a random quantum state\n quantum_state = random_quantum_state(); \n # draw a blue point for the random quantum state\n x = quantum_state[0];\n y = quantum_state[1];\n plot(x,y,'bo') ",
"_____no_output_____"
]
],
[
[
"<a id=\"task2\"></a>\n<h3> Task 2 </h3>\n\nRepeat the previous task by drawing the quantum states as vectors (arrows) instead of points.\n\n<i>Please keep the codes below for drawing axes for getting a better visual focus.</i>",
"_____no_output_____"
],
[
"<h3>Solution</h3>",
"_____no_output_____"
],
[
"A function for randomly creating a 2-dimensional quantum state:",
"_____no_output_____"
]
],
[
[
"# randomly create a 2-dimensional quantum state\nfrom random import randrange\ndef random_quantum_state():\n first_entry = randrange(101)\n first_entry = first_entry/100\n first_entry = first_entry**0.5 \n if randrange(2) == 0: \n first_entry = -1 * first_entry\n second_entry = 1 - (first_entry**2)\n second_entry = second_entry**0.5\n if randrange(2) == 0: \n second_entry = -1 * second_entry\n return [first_entry,second_entry]",
"_____no_output_____"
]
],
[
[
"Drawing randomly created 100 quantum states as blue vectors (arrows):",
"_____no_output_____"
]
],
[
[
"# import the drawing methods\nfrom matplotlib.pyplot import plot, figure, arrow\n\n%run qlatvia.py\n\n# draw a figure\nfigure(figsize=(6,6), dpi=60) \n\ndraw_axes();\n\n# draw the origin\nplot(0,0,'ro') \n\nfor i in range(100):\n # create a random quantum state\n quantum_state = random_quantum_state(); \n # draw a blue vector for the random quantum state\n x = quantum_state[0];\n y = quantum_state[1];\n # shorten the line length to 0.92\n # line_length + head_length (0.08) should be 1\n x = 0.92 * x\n y = 0.92 * y\n arrow(0,0,x,y,head_width=0.04,head_length=0.08,color=\"blue\")",
"_____no_output_____"
]
],
[
[
"<a id=\"task3\"></a>\n<h3> Task 3 </h3>\n\nWrite a function that displays a quantum state with a name.\n\nThe parameters of the function should be (x,y,name).\n\nRandomly pick a quantum state and display it with the axes and the unit circle.\n\nThe arrow head should be on the unit circle. \n\nThe name of the quantum state should be displayed out of the unit circle.\n\nTest your function with 6 random quantum states.\n\nSave your function for later usage.",
"_____no_output_____"
],
[
"<h3>Solution</h3>",
"_____no_output_____"
]
],
[
[
"# %%writefile FILENAME.py\n# import the drawing methods\nfrom matplotlib.pyplot import figure, arrow, text\n\ndef display_quantum_state(x,y,name):\n x1 = 0.92 * x\n y1 = 0.92 * y\n arrow(0,0,x1,y1,head_width=0.04,head_length=0.08,color=\"blue\")\n x2 = 1.15 * x\n y2 = 1.15 * y\n text(x2,y2,name)",
"_____no_output_____"
],
[
"#\n# test your function\n#\n\n# import the drawing methods\nfrom matplotlib.pyplot import figure\n\nfigure(figsize=(6,6), dpi=80) # size of the figure\n\n# include our predefined functions\n%run qlatvia.py\n\n# draw axes\ndraw_axes()\n\n# draw the unit circle\ndraw_unit_circle()\n\nfor i in range(6):\n s = random_quantum_state()\n display_quantum_state(s[0],s[1],\"v\"+str(i))\n #draw_quantum_state(s[0],s[1],\"v\"+str(i))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
cbbaa1325ae578a280aefefc130e7a79ab72d974
| 53,791 |
ipynb
|
Jupyter Notebook
|
kuster.ipynb
|
esia0120/fisicarocas
|
87bed0caecfce854e2054534dcff30934535dd54
|
[
"Apache-2.0"
] | null | null | null |
kuster.ipynb
|
esia0120/fisicarocas
|
87bed0caecfce854e2054534dcff30934535dd54
|
[
"Apache-2.0"
] | null | null | null |
kuster.ipynb
|
esia0120/fisicarocas
|
87bed0caecfce854e2054534dcff30934535dd54
|
[
"Apache-2.0"
] | null | null | null | 471.850877 | 49,823 | 0.923296 |
[
[
[
"Kuster and Toksöz",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nplt.style.use(['science', 'notebook', 'grid'])",
"_____no_output_____"
],
[
"ks = 37e9\r\nki = 2.2e9\r\nmus =44e9\r\nmui = 0\r\nrhom = 2.65e3 \r\nrhof = 1e3 \r\nalfa = 2e-1\r\ndecision=2\r\nBs=(mus*(3*ks+mus)/(3*ks+4*mus))/1e9\r\nZs=((mus/6)*((9*ks+8*mus)/(ks+2*mus)))/1e9\r\nif decision==1:\r\n #spheres\r\n Psi=(ks+4*mus/3)/(ki+4*mus/3)\r\n Qsi=(mus/1e9+Zs)/Zs\r\nelif decision==2:\r\n #penny shaped\r\n Psi=(ks/1e9)/(ki/1e9+np.pi*alfa*Bs)\r\n Qsi=(1+8*(mus/1e9)/(np.pi*alfa*(mus/1e9+2*Bs))+2*(ki/1e9+(2/3)*mus/1e9)/(ki/1e9+np.pi*alfa*Bs))/5",
"_____no_output_____"
],
[
"phi=np.zeros([51])\r\nVp=np.zeros([51])\r\nVs=np.zeros([51])\r\n\r\nfor b in range(0,51): #el 51 viene de las variables de arriba phi, Vp, Vs\r\n phi[b]=b/100\r\n kkt=(ks+phi[b]*Psi*(4/3)*mus*(ki-ks)/(ks+(4/3)*mus))/(1-phi[b]*Psi*(ki-ks)/(ks+(4/3)*mus))\r\n mukt=mus*(1-phi[b]*Qsi*Zs/(mus/1e9+Zs))/(1+phi[b]*Qsi*(mus/1e9)/((mus/1e9)+Zs))\r\n rhobulk=(1-phi[b])*rhom+phi[b]*rhof\r\n Vp[b]=((3*kkt+4*mukt)/(3*rhobulk))**0.5\r\n Vs[b]=(mukt/rhobulk)**0.5",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\r\nax.plot(phi,Vp,label='Vp')\r\nax.plot(phi,Vs,label='Vs')\r\nlegend = ax.legend(loc='upper right', shadow=True, fontsize='x-large')\r\n\r\nplt.xlabel(\"Porosidad $\\it{\\phi}$\")\r\nplt.ylabel(\"Velocidad (m/s)\")\r\nplt.title('Modelo Kuster and Toksöz', fontsize=20)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cbbab9d4f6f7819f03dcbfe808e46e7ce6abbfe2
| 10,528 |
ipynb
|
Jupyter Notebook
|
notebooks/linear.ipynb
|
ranocha/Dispersive-wave-error-growth-notebooks
|
cffe67961db325291a02258118d3c7261fcce788
|
[
"MIT"
] | null | null | null |
notebooks/linear.ipynb
|
ranocha/Dispersive-wave-error-growth-notebooks
|
cffe67961db325291a02258118d3c7261fcce788
|
[
"MIT"
] | null | null | null |
notebooks/linear.ipynb
|
ranocha/Dispersive-wave-error-growth-notebooks
|
cffe67961db325291a02258118d3c7261fcce788
|
[
"MIT"
] | null | null | null | 35.931741 | 129 | 0.533245 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
cbbabdbe6f2fc61877cf67ce6f76539227fac4d1
| 197,042 |
ipynb
|
Jupyter Notebook
|
MLPReg.ipynb
|
Mxyur/CV-using-MLP
|
f51158f8e3872a7866d29dae348d1b0b4dab2635
|
[
"MIT"
] | null | null | null |
MLPReg.ipynb
|
Mxyur/CV-using-MLP
|
f51158f8e3872a7866d29dae348d1b0b4dab2635
|
[
"MIT"
] | null | null | null |
MLPReg.ipynb
|
Mxyur/CV-using-MLP
|
f51158f8e3872a7866d29dae348d1b0b4dab2635
|
[
"MIT"
] | null | null | null | 143.931337 | 28,488 | 0.853133 |
[
[
[
"from sklearn.neural_network import MLPRegressor\nimport numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import classification_report, confusion_matrix \nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"# Let's start by naming the features\nnames = ['V', 'ScanRate', 'OR', 'A']",
"_____no_output_____"
],
[
"# Reading the dataset through a Pandas function\n# Let's start by naming the features\nnames = ['V', 'ScanRate', 'OR', 'A']\ndf = pd.read_csv('CVSG0cur.csv', names=names) \ndf",
"_____no_output_____"
],
[
"from sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\ndata = scaler.fit_transform(df)\ndf1 = pd.DataFrame(data, columns = ['V','ScanRate','OR','A'])",
"_____no_output_____"
],
[
"print(scaler.data_max_)",
"[ 0.40119 100. 1. 12.23 ]\n"
],
[
"data\ndf1",
"_____no_output_____"
],
[
"# Takes first 4 columns and assign them to variable \"X\"\nX = df1.iloc[:, 0:3]\n# Takes first 5th columns and assign them to variable \"Y\". Object dtype refers to strings.\ny = df1.iloc[:, 3:4] ",
"_____no_output_____"
],
[
"X.head()",
"_____no_output_____"
],
[
"y.head()",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)",
"_____no_output_____"
],
[
"mlpregressor = MLPRegressor(activation= 'relu', alpha= 0.05, hidden_layer_sizes= (150,50,100), learning_rate= 'constant', solver= 'adam', random_state=42, max_iter=400)\nmlpregressor.fit(X_train, y_train.values.ravel())\nmlpregressor",
"_____no_output_____"
],
[
"predictions=mlpregressor.predict(X_test)\npredictions",
"_____no_output_____"
],
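[
"# Not part of the original notebook: a minimal sketch of converting the scaled predictions\n# of the 'A' column back to the original current units, using the MinMaxScaler fitted above\n# (column index 3 corresponds to 'A').\na_min, a_max = scaler.data_min_[3], scaler.data_max_[3]\nunscaled_predictions = predictions * (a_max - a_min) + a_min\nunscaled_predictions[:5]",
"_____no_output_____"
],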
[
"predictions.shape",
"_____no_output_____"
],
[
"X_test.shape",
"_____no_output_____"
],
[
"df1 = pd.DataFrame(predictions)\ndf2 = pd.DataFrame(X_test)",
"_____no_output_____"
],
[
"df2.head()",
"_____no_output_____"
],
[
"plt.scatter(df2['V'],y_test)\nplt.xlabel('V')\nplt.ylabel('y_test')\nplt.title('V test vs y_test')\nplt.show()",
"_____no_output_____"
],
[
"plt.scatter(df2['V'],predictions)\nplt.xlabel('V test')\nplt.ylabel('pred')\nplt.title('V test vs pred')\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.metrics import mean_squared_error\nMSE = mean_squared_error(y_test,predictions)\n# report error\nprint(MSE)",
"0.010454813597067016\n"
],
[
"from sklearn.metrics import r2_score\nr2 = r2_score(y_test, predictions)\nprint(r2)",
"0.5131582621489573\n"
],
[
"# calculate errors\n# example of calculate the mean absolute error\nfrom sklearn.metrics import mean_absolute_error\nMAE = mean_absolute_error(y_test, predictions)\n# report error\nprint(MAE)",
"0.0859180898996887\n"
],
[
"score=mlpregressor.score(X_test,y_test)\nscore",
"_____no_output_____"
],
[
"rmse = np.sqrt(MSE)\nrmse",
"_____no_output_____"
],
[
"history = mlpregressor.fit(X_train, y_train, epoch=34, batch_size=1, validation_data=(X_val, y_val))\nloss_train = history.history['train_loss']\nloss_val = history.history['val_loss']\nepochs = range(1,35)\nplt.plot(epochs, loss_train, 'g', label='Training loss')\nplt.plot(epochs, loss_val, 'b', label='validation loss')\nplt.title('Training and Validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"rmse = []\nlist_k = list(range(1,200))\nfor i in list_k:\n mlpregressor = MLPRegressor(activation= 'relu', alpha= 0.05, hidden_layer_sizes= (i), learning_rate= 'constant', solver= 'adam', random_state=0, max_iter=400)\n mlpregressor.fit(X_train, y_train.values.ravel())\n predictions=mlpregressor.predict(X_test)\n rmsec = np.sqrt(mean_squared_error(y_test,predictions))\n rmse.append(rmsec);\nplt.figure(figsize=(6,6))\nplt.plot(list_k, rmse, '-o')\nplt.title('rmse')\nplt.xlabel('neuron')\nplt.ylabel('rmse')",
"_____no_output_____"
],
[
"mse = []\nlist_k = list(range(1,150))\nfor i in list_k:\n mlpregressor = MLPRegressor(activation= 'relu', alpha= 0.05, hidden_layer_sizes= (i), learning_rate= 'constant', solver= 'adam', random_state=0, max_iter=400)\n mlpregressor.fit(X_train, y_train.values.ravel())\n predictions=mlpregressor.predict(X_test)\n mse1 = mean_squared_error(y_test,predictions)\n mse.append(mse1);\n \nplt.figure(figsize=(6,6))\nplt.plot(list_k, mse)\nplt.title('MSE')\nplt.xlabel('neuron')\nplt.ylabel('MSE')",
"_____no_output_____"
],
[
"r2 = []\nlist_k = list(range(1,150))\nfor i in list_k:\n mlpregressor = MLPRegressor(activation= 'relu', alpha= 0.05, hidden_layer_sizes= (i), learning_rate= 'constant', solver= 'adam', random_state=42, max_iter=400)\n mlpregressor.fit(X_train, y_train.values.ravel())\n predictions=mlpregressor.predict(X_test)\n rsq = r2_score(y_test, predictions)\n r2.append(rsq);\n \nplt.figure(figsize=(6,6))\nplt.plot(list_k, r2, '-o')\nplt.title('r square')\nplt.xlabel('neuron')\nplt.ylabel('r2')",
"_____no_output_____"
],
[
"plt.scatter(predictions,y_test)\nplt.xlabel('predict')\nplt.ylabel('y_test')\nplt.title('predict vs y_test')\nplt.show()",
"_____no_output_____"
],
[
"y_pred = mlpregressor.predict(X_test)\nplt.scatter(df2['V'],y_test)\nplt.plot(X_test, y_pred, c='g')\nplt.xlabel('WEPotential(V)')\nplt.ylabel('WECurrent(A)')\nplt.title('Plot V vs A')",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split, cross_val_score, validation_curve\ncrossval = cross_val_score(mlpregressor, X_test, y_test.values.ravel(), cv=5)\ncrossval\ncheck_parameters = { 'hidden_layer_sizes': [(50,50), (100,50)], 'activation': ['sigmoid', 'relu'], 'solver': ['sgd', 'adam'], 'alpha': [0.0001, 0.05], 'learning_rate': ['constant','adaptive'], }\n",
"_____no_output_____"
]
],
[
[
"check_parameters = {\n 'hidden_layer_sizes': [(50,50), (100,50)],\n 'activation': ['tanh', 'relu'],\n 'solver': ['sgd', 'adam'],\n 'alpha': [0.0001, 0.05],\n 'learning_rate': ['constant','adaptive'],\n}",
"_____no_output_____"
]
],
[
[
"gridsearchcv = GridSearchCV(mlpregressor, check_parameters, n_jobs=-1, cv=5)\ngridsearchcv.fit(X_train, y_train.values.ravel())",
"_____no_output_____"
],
[
"print('Best parameters found:\\n', gridsearchcv.best_params_)",
"Best parameters found:\n {'activation': 'relu', 'alpha': 0.05, 'hidden_layer_sizes': (100, 50), 'learning_rate': 'constant', 'solver': 'adam'}\n"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cbbaede553277cf93b4c83e8caed13de37529067
| 29,528 |
ipynb
|
Jupyter Notebook
|
Untitled.ipynb
|
cazsol/ThinkBayes2
|
cf3dbdb4cde114d53e2802007656edb398778020
|
[
"MIT"
] | null | null | null |
Untitled.ipynb
|
cazsol/ThinkBayes2
|
cf3dbdb4cde114d53e2802007656edb398778020
|
[
"MIT"
] | null | null | null |
Untitled.ipynb
|
cazsol/ThinkBayes2
|
cf3dbdb4cde114d53e2802007656edb398778020
|
[
"MIT"
] | null | null | null | 242.032787 | 26,672 | 0.921431 |
[
[
[
"\"\"\"This file contains code used in \"Think Bayes\",\nby Allen B. Downey, available from greenteapress.com\n\nCopyright 2018 Allen B. Downey\nLicense: GNU GPLv3 http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nfrom __future__ import print_function, division\n\nimport math\nimport numpy\n\nfrom matplotlib import pyplot\n\nimport thinkbayes2\nimport thinkplot\n\n\ndef RenderPdf(mu, sigma, n=101):\n \"\"\"Makes xs and ys for a normal PDF with (mu, sigma).\n\n n: number of places to evaluate the PDF\n \"\"\"\n xs = numpy.linspace(mu-4*sigma, mu+4*sigma, n)\n ys = [thinkbayes2.EvalNormalPdf(x, mu, sigma) for x in xs]\n return xs, ys\n\n\ndef main():\n xs, ys = RenderPdf(100, 15)\n\n n = 34\n pyplot.fill_between(xs[-n:], ys[-n:], y2=0.0001, color='blue', alpha=0.2)\n s = 'Congratulations!\\nIf you got this far,\\nyou must be here.'\n d = dict(shrink=0.05)\n pyplot.annotate(s, [127, 0.002], xytext=[80, 0.005], arrowprops=d)\n\n thinkplot.Plot(xs, ys)\n thinkplot.Show(title='Distribution of Persistence',\n xlabel='Persistence quotient',\n ylabel='PDF',\n legend=False)\n\n\nif __name__ == \"__main__\":\n main()\n\n",
"_____no_output_____"
],
[
"import thinkbayes2\nimport thinkplot",
"_____no_output_____"
],
[
"from thinkbayes2 import Hist, Pmf, Suite",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
cbbaef0538a3905ca16f66d50e7b4a80931d42f0
| 69,717 |
ipynb
|
Jupyter Notebook
|
TASK 1 (1).ipynb
|
H2001-hj/Prediction-using-Supervised-ML
|
850d1ec5b418da8ac069bfd124d11dc429058b78
|
[
"Apache-2.0"
] | null | null | null |
TASK 1 (1).ipynb
|
H2001-hj/Prediction-using-Supervised-ML
|
850d1ec5b418da8ac069bfd124d11dc429058b78
|
[
"Apache-2.0"
] | null | null | null |
TASK 1 (1).ipynb
|
H2001-hj/Prediction-using-Supervised-ML
|
850d1ec5b418da8ac069bfd124d11dc429058b78
|
[
"Apache-2.0"
] | null | null | null | 94.08502 | 14,908 | 0.829611 |
[
[
[
"__GRIP at The Sparks Foundation Internship Task #1__",
"_____no_output_____"
],
[
"__Author :- Harshada Jadhav__",
"_____no_output_____"
],
[
"## **Linear Regression with Python Scikit Learn**\nIn this section we will see how the Python Scikit-Learn library for machine learning can be used to implement regression functions. We will start with simple linear regression involving two variables.\n\n### **Simple Linear Regression**\nIn this regression task we will predict the percentage of marks that a student is expected to score based upon the number of hours they studied. This is a simple linear regression task as it involves just two variables.",
"_____no_output_____"
],
[
"**Step 1** :- Importing all the liabraries and describing the dataset",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt \n%matplotlib inline",
"_____no_output_____"
],
[
"# Reading data from remote link\nurl = \"http://bit.ly/w-data\"\ndf = pd.read_csv(url)\nprint(\"Data imported successfully\")\n\ndf.head(10)",
"Data imported successfully\n"
],
[
"# columns in dataset\ndf.columns",
"_____no_output_____"
],
[
"#Summarized description of dataset\ndf.describe()",
"_____no_output_____"
],
[
"##number of rows and columns\ndf.shape",
"_____no_output_____"
]
],
[
[
"__Step 2:- Visualization__",
"_____no_output_____"
],
[
"Let's plot our data points on 2-D graph to eyeball our dataset and see if we can manually find any relationship between the data. We can create the plot with the following script:",
"_____no_output_____"
]
],
[
[
"# Plotting the distribution of scores\ndf.plot(x='Hours', y='Scores', style='o') \nplt.title('Hours vs Percentage') \nplt.xlabel('Hours Studied') \nplt.ylabel('Percentage Score') \nplt.show()",
"_____no_output_____"
]
],
[
[
"**From the graph above, we can clearly see that there is a positive linear relation between the number of hours studied and percentage of score.**",
"_____no_output_____"
],
[
"__Step 3__:-Preparing the data and to dividing the data into \"attributes\" (inputs) and \"labels\" (outputs).",
"_____no_output_____"
]
],
[
[
"#X will take all the values except for the last column which is our dependent variable (target variable)\nX = df.iloc[:, :-1].values \ny = df.iloc[:, 1].values ",
"_____no_output_____"
]
],
[
[
"Now that we have our attributes and labels, the next step is to split this data into training and test sets. We'll do this by using Scikit-Learn's built-in train_test_split() method:",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size=0.2, random_state=0) ",
"_____no_output_____"
]
],
[
[
"__Step 4__ :-Training the Algorithm and then we have split our data into training and testing sets, and now is finally the time to train our algorithm. ",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression \nregressor = LinearRegression() \nregressor.fit(X_train, y_train) \n\nprint(\"Training complete.\")",
"Training complete.\n"
],
[
"# Plotting the regression line\nline = regressor.coef_*X+regressor.intercept_\n\n# Plotting for the test data\nplt.scatter(X, y)\nplt.plot(X, line,color = 'purple');\nplt.show()",
"_____no_output_____"
]
],
[
[
"__Step 5__:-Making Predictions",
"_____no_output_____"
]
],
[
[
"print(X_test) # Testing data - In Hours\ny_pred = regressor.predict(X_test) # Predicting the scores",
"[[1.5]\n [3.2]\n [7.4]\n [2.5]\n [5.9]]\n"
],
[
"#Predicting the Test set results\ny_pred = regressor.predict(X_test)\nprint(y_pred)",
"[16.88414476 33.73226078 75.357018 26.79480124 60.49103328]\n"
],
[
"#Visualising the Training set results\nplt.scatter(X_train, y_train, color = 'purple')\nplt.plot(X_train, regressor.predict(X_train), color = 'pink')\nplt.title('Hours vs. Percentage (Training set)')\nplt.xlabel('Hours studied')\nplt.ylabel('Percentage of marks')\nplt.show()\n",
"_____no_output_____"
],
[
"#Visualising the Test set results\nplt.scatter(X_test, y_test, color = 'black')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Hours vs. Percentage (Test set)')\nplt.xlabel('Hours studied')\nplt.ylabel('Percentage of marks')\nplt.show()\n",
"_____no_output_____"
],
[
"# Comparing Actual vs Predicted\ndf = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}) \ndf ",
"_____no_output_____"
],
[
"# You can also test with your own data\ndataset = np.array(9.25)\ndataset = dataset.reshape(-1, 1)\npred = regressor.predict(dataset)\nprint(\"If the student studies for 9.25 hours/day, the score is {}.\".format(pred))",
"If the student studies for 9.25 hours/day, the score is [93.69173249].\n"
]
],
[
[
"### **Evaluating the model**\n\nThe final step is to evaluate the performance of algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. For simplicity here, we have chosen the mean square error. There are many such metrics.",
"_____no_output_____"
]
],
[
[
"from sklearn import metrics \nprint('Mean Absolute Error:', \n metrics.mean_absolute_error(y_test, y_pred)) ",
"Mean Absolute Error: 4.183859899002975\n"
],
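[
"# Not part of the original notebook: two more common regression metrics, shown as a small\n# hedged example using the same y_test and y_pred as above.\nfrom sklearn.metrics import mean_squared_error\nmse = mean_squared_error(y_test, y_pred)\nprint('Mean Squared Error:', mse)\nprint('Root Mean Squared Error:', np.sqrt(mse))",
"_____no_output_____"
],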
[
"from sklearn.metrics import r2_score\nprint(\"The R-Square of the model is: \",r2_score(y_test,y_pred))",
"The R-Square of the model is: 0.9454906892105356\n"
]
],
[
[
"### **Conclusion**",
"_____no_output_____"
],
[
"__We used a Linear Regression Model to predict the score of a student if he/she studies for 9.25 hours/day and the Predicted Score came out to be 93.69.__",
"_____no_output_____"
],
[
"_Thanks!!_",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
cbbaf831ff024853d7cfc1762aa4dc09bfffa970
| 67,104 |
ipynb
|
Jupyter Notebook
|
code/day_6/6 - SVM with Sklearn.ipynb
|
anjijava16/data-analytics-machine-learning-big-data
|
4c69f54ed549745b66257d9189db0f99d3af4d72
|
[
"Apache-2.0"
] | 8 |
2018-02-21T10:57:51.000Z
|
2021-09-28T07:16:13.000Z
|
code/day_6/6 - SVM with Sklearn.ipynb
|
rhender007/data-analytics-machine-learning-big-data
|
4c69f54ed549745b66257d9189db0f99d3af4d72
|
[
"Apache-2.0"
] | 1 |
2017-12-23T13:22:54.000Z
|
2017-12-23T13:22:54.000Z
|
code/day_6/6 - SVM with Sklearn.ipynb
|
rhender007/data-analytics-machine-learning-big-data
|
4c69f54ed549745b66257d9189db0f99d3af4d72
|
[
"Apache-2.0"
] | 15 |
2018-01-03T17:05:19.000Z
|
2021-09-03T08:38:08.000Z
| 225.939394 | 59,808 | 0.923224 |
[
[
[
"# Support Vector Machines (SVM) with Sklearn",
"_____no_output_____"
],
[
"This notebook creates and measures an [LinearSVC with Sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC). This has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples compared to SVC.\n\n* Method: LinearSVC\n* Dataset: Iris",
"_____no_output_____"
],
[
"## Imports",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.metrics import accuracy_score\n\nimport matplotlib.pyplot as plt\n\nfrom mlxtend.evaluate import confusion_matrix\nfrom mlxtend.plotting import plot_confusion_matrix\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Load and Prepare the Data",
"_____no_output_____"
]
],
[
[
"# Load the data\ndata = load_iris()",
"_____no_output_____"
],
[
"# Show the information about the dataset\nprint(data.DESCR)",
"_____no_output_____"
],
[
"# Split the data into labels (targets) and features\nlabel_names = data['target_names']\nlabels = data['target']\n\nfeature_names = data['feature_names']\nfeatures = data['data']\n\n# View the data\nprint(label_names)\nprint(labels[0])\nprint(\"\")\nprint(feature_names)\nprint(features[0])",
"_____no_output_____"
],
[
"# Create test and training sets\nX_train, X_test, Y_train, Y_test = train_test_split(features,\n labels,\n test_size=0.33,\n random_state=42)",
"_____no_output_____"
]
],
[
[
"## Fit a LinearSVC Model\n\nParameters\n* C: tells the SVM optimization how much you want to avoid misclassifying each training example\n * If C is large: the hyperplane does a better job of getting all the training points classified correctly\n * If C is small: the optimizer will look for a larger-margin separating hyperplane even if that hyperplane misclassifies more points\n* random_state: seed of the pseudo random number generator to use when shuffling the data",
"_____no_output_____"
]
],
[
[
"# Create an instance of the GaussianNB classifier\nmodel = LinearSVC(C=1.0, random_state=42)\n\n# Train the model\nmodel.fit(X_train, Y_train)\nmodel",
"_____no_output_____"
],
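[
"# Not part of the original notebook: a minimal sketch of how the C parameter described\n# above trades margin width against training misclassification, on the same split.\nfor c in [0.01, 1.0, 100.0]:\n    m = LinearSVC(C=c, random_state=42).fit(X_train, Y_train)\n    print('C=%g -> training accuracy %.3f' % (c, m.score(X_train, Y_train)))",
"_____no_output_____"
],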
[
"# Show the intercepts\nprint(\"Intercepts: {}\".format(model.intercept_))",
"_____no_output_____"
]
],
[
[
"## Create Predictions",
"_____no_output_____"
]
],
[
[
"# Create predictions\npredictions = model.predict(X_test)\nprint(predictions)",
"_____no_output_____"
],
[
"# Create a plot to compare actual labels (Y_test) and the predicted labels (predictions)\nfig = plt.figure(figsize=(20,10))\nplt.scatter(Y_test, predictions)\nplt.xlabel(\"Actual Label: $Y_i$\")\nplt.ylabel(\"Predicted Label: $\\hat{Y}_i$\")\nplt.title(\"Actual vs. Predicted Label: $Y_i$ vs. $\\hat{Y}_i$\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Model Evaluation",
"_____no_output_____"
],
[
"### Accuracy\n\nThe accuracy score is either the fraction (default) or the count (normalize=False) of correct predictions.",
"_____no_output_____"
]
],
[
[
"print(\"Accuracy Score: %.2f\" % accuracy_score(Y_test, predictions))",
"_____no_output_____"
]
],
[
[
"### K-Fold Cross Validation\n\nThis estimates the accuracy of an SVM model by splitting the data, fitting a model and computing the score 5 consecutive times. The result is a list of the scores from each consecutive run.",
"_____no_output_____"
]
],
[
[
"# Get scores for 5 folds over the data\nclf = LinearSVC(C=1.0, random_state=42)\nscores = cross_val_score(clf, data.data, data.target, cv=5)\nprint(scores)",
"_____no_output_____"
]
],
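[
[
"# Not part of the original notebook: summarize the five cross-validation scores above as\n# mean +/- standard deviation, which is the usual way to report k-fold results.\nprint('CV accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))",
"_____no_output_____"
]
],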
[
[
"### Confusion Matrix",
"_____no_output_____"
],
[
"**Confusion Matrix for Binary Label**\n\n",
"_____no_output_____"
]
],
[
[
"# Plot the multi-label confusion matrix\nprint(\"Labels:\")\nfor label in label_names:\n i, = np.where(label_names == label)\n print(\"{}: {}\".format(i, label))\n\ncm = confusion_matrix(y_target=Y_test, \n y_predicted=predictions, \n binary=False)\n\nfig, ax = plot_confusion_matrix(conf_mat=cm)\nplt.title(\"Confusion Matrix\")\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
cbbaf9ff3aea54e8fcb698be4127f14c75ccba46
| 177,145 |
ipynb
|
Jupyter Notebook
|
unused/jsma_attack.ipynb
|
sanglee325/metric-train
|
df0598bc1d8632479b56f8ec0261e787d2af7c66
|
[
"MIT"
] | null | null | null |
unused/jsma_attack.ipynb
|
sanglee325/metric-train
|
df0598bc1d8632479b56f8ec0261e787d2af7c66
|
[
"MIT"
] | null | null | null |
unused/jsma_attack.ipynb
|
sanglee325/metric-train
|
df0598bc1d8632479b56f8ec0261e787d2af7c66
|
[
"MIT"
] | null | null | null | 16.997217 | 7,448 | 0.348409 |
[
[
[
"## 1. Requirements",
"_____no_output_____"
]
],
[
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torchvision.datasets import MNIST\nfrom torchvision import datasets, transforms\n\nfrom advertorch.attacks import JacobianSaliencyMapAttack as JSMA\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## 2. MNIST model",
"_____no_output_____"
]
],
[
[
"class MnistModel(nn.Module):\n def __init__(self):\n super(MnistModel, self).__init__()\n # mnist의 경우 28*28의 흑백이미지(input channel=1)이다.\n self.conv1 = nn.Conv2d(1, 32, kernel_size = 5, padding=2)\n # feature map의 크기는 14*14가 된다\n # 첫번재 convolution layer에서 나온 output channel이 32이므로 2번째 input도 32\n self.conv2 = nn.Conv2d(32, 64, kernel_size = 5, padding=2)\n # feature map의 크기는 7*7이 된다\n # fc -> fully connected, fc는 모든 weight를 고려해서 만들기 때문에 cnn에서는 locally connected를 이용하여 만든다.\n # nn.Linear에서는 conv를 거친 feature map을 1차원으로 전부 바꿔서 input을 한다. 이게 64*7*7\n self.fc1 = nn.Linear(64*7*7, 1024)\n self.fc2 = nn.Linear(1024, 10)\n\n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, 64*7*7) # linear에 들어갈 수 있도록 reshape\n x = F.relu(self.fc1(x)) # fully connected에 relu 적용\n x = F.dropout(x, training=self.training) # 가중치 감소만으로는 overfit을 해결하기가 어려움, 그래서 뉴런의 연결을 임의로 삭제\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)",
"_____no_output_____"
]
],
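[
[
"# Hedged sketch (added, not part of the original notebook): quick sanity check that the\n# model above maps a batch of 1x28x28 MNIST images to 10 log-probabilities per example.\nimport torch\n_check_model = MnistModel()\n_dummy = torch.zeros(4, 1, 28, 28)\nprint(_check_model(_dummy).shape)  # expected: torch.Size([4, 10])",
"_____no_output_____"
]
],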
[
[
"## 3. JSMA attack",
"_____no_output_____"
]
],
[
[
"def jsma_attack(um, device, loader):\n adversary = JSMA(um, num_classes=10, clip_min=0.0, clip_max=1.0,\n loss_fn=None, theta=1.0, gamma=0.3, comply_cleverhans=False)\n\n perturbed_images = []\n for idx, (image, label) in enumerate(loader):\n print(idx)\n image, label = image.to(device), label.to(device)\n perturbed_image = adversary.perturb(image, label)\n perturbed_images.append((perturbed_image, label))\n\n return perturbed_images",
"_____no_output_____"
]
],
[
[
"## 4. test function",
"_____no_output_____"
]
],
[
[
"def test(model, device, images):\n # Accuracy counter\n correct = 0\n adv_examples = []\n\n # test set의 모든 예제를 test한다\n for image, label in images:\n # cpu나 gpu로 데이터를 전송한다\n image, label = image.to(device), label.to(device)\n\n # Re-classify the perturbed image\n output = model(image)\n\n # Check for success\n final_pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n if final_pred.item() == label.item():\n correct += 1\n if (len(adv_examples) < 5):\n adv_ex = image.squeeze().detach().cpu().numpy()\n adv_examples.append( (label.item(), final_pred.item(), adv_ex) )\n else:\n # Save some adv examples for visualization later\n if len(adv_examples) < 5:\n adv_ex = image.squeeze().detach().cpu().numpy()\n adv_examples.append( (label.item(), final_pred.item(), adv_ex) )\n\n # final_acc = correct/idx\n final_acc = correct/float(len(images))\n print(\"Test Accuracy = {} / {} = {}\".format(correct, len(images), final_acc))\n\n # Return the accuracy and an adversarial example\n return final_acc, adv_examples",
"_____no_output_____"
]
],
[
[
"## 5. Load dataset and pretrained model, Set device",
"_____no_output_____"
]
],
[
[
"test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('./data', train=False, download=True, transform=transforms.Compose([\n transforms.ToTensor(),\n ])),\n batch_size=1, shuffle=True)\n\nmnist_transform = transforms.Compose([\n transforms.ToTensor(),\n])\n\ndownload_path = './data'\ntrain_dataset = MNIST(download_path, transform=mnist_transform, train=True, download=True)\ntest_dataset = MNIST(download_path, transform=mnist_transform, train=False, download=True)",
"_____no_output_____"
],
[
"is_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if is_cuda else 'cpu')\nprint(\"CUDA Available:\", is_cuda)",
"CUDA Available: True\n"
],
[
"# pretrained_model: 이전에 training한 mnist 모델\npretrained_model = './model/mnist_um.pth'\ndefense_model = './model/mnist_jsma_model.pth'\n\nmodel_normal = MnistModel().eval().to(device)\nmodel_normal.load_state_dict(torch.load(pretrained_model, map_location='cpu'))\n\nmodel_jsma = MnistModel().to(device)\nmodel_jsma.load_state_dict(torch.load(defense_model, map_location='cpu'))",
"_____no_output_____"
]
],
[
[
"## 6. Run test",
"_____no_output_____"
]
],
[
[
"um = MnistModel().to(device)\nperturbed_images = jsma_attack(um, device, test_loader)",
"0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\n532\n533\n534\n535\n536\n537\n538\n539\n540\n541\n542\n543\n544\n545\n546\n547\n548\n549\n550\n551\n552\n553\n554\n555\n556\n557\n558\n559\n560\n561\n562\n563\n564\n565\n566\n567\n568\n569\n570\n571\n572\n573\n574\n575\n576\n577\n578\n579\n580\n581\n582\n583\n584\n585\n586\n587\n588\n589\n590\n591\n592\n593\n594\n595\n596\n597\n598\n599\n600\n601\n602\n603\n604\n605\n606\n607\n608\n609\n610\n611\n612\n613\n614\n615\n616\n617\n618\n619\n620\n621\n622\n623\n624\n625\n626\n627\n628\n629\n630\n631\n632\n633\n634\n635\n636\n637\n638\n639\n640\n641\n642\n643\n644\n645\n646\n647\n648\n649\n650\n651\n652\n653\n654\n655\n656\n657\n658\n659\n660\n661\n662\n663\n664\n665\n666\n667\n668\n669\n670\n671\n672\n673\n674\n675\n676\n677\n678\n679\n680\n681\n682\n683\n684\n685\n686\n687\n688\n689\n690\n691\n692\n693\n694\n695\n696\n697\n698\n699\n700\n701\n702\n703\n704\n705\n706\n707\n708\n709\n710\n711\n712\n713\n714\n715\n716\n717\n718\n719\n720\n721\n722\n723\n724\n725\n726\n727\n728\n729\n730\n731\n732\
n733\n734\n735\n736\n737\n738\n739\n740\n741\n742\n743\n744\n745\n746\n747\n748\n749\n750\n751\n752\n753\n754\n755\n756\n757\n758\n759\n760\n761\n762\n763\n764\n765\n766\n767\n768\n769\n770\n771\n772\n773\n774\n775\n776\n777\n778\n779\n780\n781\n782\n783\n784\n785\n786\n787\n788\n789\n790\n791\n792\n793\n794\n795\n796\n797\n798\n799\n800\n801\n802\n803\n804\n805\n806\n807\n808\n809\n810\n811\n812\n813\n814\n815\n816\n817\n818\n819\n820\n821\n822\n823\n824\n825\n826\n827\n828\n829\n830\n831\n832\n833\n834\n835\n836\n837\n838\n839\n840\n841\n842\n843\n844\n845\n846\n847\n848\n849\n850\n851\n852\n853\n854\n855\n856\n857\n858\n859\n860\n861\n862\n863\n864\n865\n866\n867\n868\n869\n870\n871\n872\n873\n874\n875\n876\n877\n878\n879\n880\n881\n882\n883\n884\n885\n886\n887\n888\n889\n890\n891\n892\n893\n894\n895\n896\n897\n898\n899\n900\n901\n902\n903\n904\n905\n906\n907\n908\n909\n910\n911\n912\n913\n914\n915\n916\n917\n918\n919\n920\n921\n922\n923\n924\n925\n926\n927\n928\n929\n930\n931\n932\n933\n934\n935\n936\n937\n938\n939\n940\n941\n942\n943\n944\n945\n946\n947\n948\n949\n950\n951\n952\n953\n954\n955\n956\n957\n958\n959\n960\n961\n962\n963\n964\n965\n966\n967\n968\n969\n970\n971\n972\n973\n974\n975\n976\n977\n978\n979\n980\n981\n982\n983\n984\n985\n986\n987\n988\n989\n990\n991\n992\n993\n994\n995\n996\n997\n998\n999\n1000\n1001\n1002\n1003\n1004\n1005\n1006\n1007\n1008\n1009\n1010\n1011\n1012\n1013\n1014\n1015\n1016\n1017\n1018\n1019\n1020\n1021\n1022\n1023\n1024\n1025\n1026\n1027\n1028\n1029\n1030\n1031\n1032\n1033\n1034\n1035\n1036\n1037\n1038\n1039\n1040\n1041\n1042\n1043\n1044\n1045\n1046\n1047\n1048\n1049\n1050\n1051\n1052\n1053\n1054\n1055\n1056\n1057\n1058\n1059\n1060\n1061\n1062\n1063\n1064\n1065\n1066\n1067\n1068\n1069\n1070\n1071\n1072\n1073\n1074\n1075\n1076\n1077\n1078\n1079\n1080\n1081\n1082\n1083\n1084\n1085\n1086\n1087\n1088\n1089\n1090\n1091\n1092\n1093\n1094\n1095\n1096\n1097\n1098\n1099\n1100\n1101\n1102\n1103\n1104\n1105\n1106\n1107\n1108\n1109\n1110\n1111\n1112\n1113\n1114\n1115\n1116\n1117\n1118\n1119\n1120\n1121\n1122\n1123\n1124\n1125\n1126\n1127\n1128\n1129\n1130\n1131\n1132\n1133\n1134\n1135\n1136\n1137\n1138\n1139\n1140\n1141\n1142\n1143\n1144\n1145\n1146\n1147\n1148\n1149\n1150\n1151\n1152\n1153\n1154\n1155\n1156\n1157\n1158\n1159\n1160\n1161\n1162\n1163\n1164\n1165\n1166\n1167\n1168\n1169\n1170\n1171\n1172\n1173\n1174\n1175\n1176\n1177\n1178\n1179\n1180\n1181\n1182\n1183\n1184\n1185\n1186\n1187\n1188\n1189\n1190\n1191\n1192\n1193\n1194\n1195\n1196\n1197\n1198\n1199\n1200\n1201\n1202\n1203\n1204\n1205\n1206\n1207\n1208\n1209\n1210\n1211\n1212\n1213\n1214\n1215\n1216\n1217\n1218\n1219\n1220\n1221\n1222\n1223\n1224\n1225\n1226\n1227\n1228\n1229\n1230\n1231\n1232\n1233\n1234\n1235\n1236\n1237\n1238\n1239\n1240\n1241\n1242\n1243\n1244\n1245\n1246\n1247\n1248\n1249\n1250\n1251\n1252\n1253\n1254\n1255\n1256\n1257\n1258\n1259\n1260\n1261\n1262\n1263\n1264\n1265\n1266\n1267\n1268\n1269\n1270\n1271\n1272\n1273\n1274\n1275\n1276\n1277\n1278\n1279\n1280\n1281\n1282\n1283\n1284\n1285\n1286\n1287\n1288\n1289\n1290\n1291\n1292\n1293\n1294\n1295\n1296\n1297\n1298\n1299\n1300\n1301\n1302\n1303\n1304\n1305\n1306\n1307\n1308\n1309\n1310\n1311\n1312\n1313\n1314\n1315\n1316\n1317\n1318\n1319\n1320\n1321\n1322\n1323\n1324\n1325\n1326\n1327\n1328\n1329\n1330\n1331\n1332\n1333\n1334\n1335\n1336\n1337\n1338\n1339\n1340\n1341\n1342\n1343\n1344\n1345\n1346\n1347\n1348\n1349\n1350\n1351\n1352\n1353\n1354\n1355\n1356\n1357\n1358\n1359\n1360\n1361\n1362\n1363\n1364\n1365\n1366\n1367\n1368\n1369\
n1370\n1371\n1372\n1373\n1374\n1375\n1376\n1377\n1378\n1379\n1380\n1381\n1382\n1383\n1384\n1385\n1386\n1387\n1388\n1389\n1390\n1391\n1392\n1393\n1394\n1395\n1396\n1397\n1398\n1399\n1400\n1401\n1402\n1403\n1404\n1405\n1406\n1407\n1408\n1409\n1410\n1411\n1412\n1413\n1414\n1415\n1416\n1417\n1418\n1419\n1420\n1421\n1422\n1423\n1424\n1425\n1426\n1427\n1428\n1429\n1430\n1431\n1432\n1433\n1434\n1435\n1436\n1437\n1438\n1439\n1440\n1441\n1442\n1443\n1444\n1445\n1446\n1447\n1448\n1449\n1450\n1451\n1452\n1453\n1454\n1455\n1456\n1457\n1458\n1459\n1460\n1461\n1462\n1463\n1464\n1465\n1466\n1467\n1468\n1469\n1470\n1471\n1472\n1473\n1474\n1475\n1476\n1477\n1478\n1479\n1480\n1481\n1482\n1483\n1484\n1485\n1486\n1487\n1488\n1489\n1490\n1491\n1492\n1493\n1494\n1495\n1496\n1497\n1498\n1499\n1500\n1501\n1502\n1503\n1504\n1505\n1506\n1507\n1508\n1509\n1510\n1511\n1512\n1513\n1514\n1515\n1516\n1517\n1518\n1519\n1520\n1521\n1522\n1523\n1524\n1525\n1526\n1527\n1528\n1529\n1530\n1531\n1532\n1533\n1534\n1535\n1536\n1537\n1538\n1539\n1540\n1541\n1542\n1543\n1544\n1545\n1546\n1547\n1548\n1549\n1550\n1551\n1552\n1553\n1554\n1555\n1556\n1557\n1558\n1559\n1560\n1561\n1562\n1563\n1564\n1565\n1566\n1567\n1568\n1569\n1570\n1571\n1572\n1573\n1574\n1575\n1576\n1577\n1578\n1579\n1580\n1581\n1582\n1583\n1584\n1585\n1586\n1587\n1588\n1589\n1590\n1591\n1592\n1593\n1594\n1595\n1596\n1597\n1598\n1599\n1600\n1601\n1602\n1603\n1604\n1605\n1606\n1607\n1608\n1609\n1610\n1611\n1612\n1613\n1614\n1615\n1616\n1617\n1618\n1619\n1620\n1621\n1622\n1623\n1624\n1625\n1626\n1627\n1628\n1629\n1630\n1631\n1632\n1633\n1634\n1635\n1636\n1637\n1638\n1639\n1640\n1641\n1642\n1643\n1644\n1645\n1646\n1647\n1648\n1649\n1650\n1651\n1652\n1653\n1654\n1655\n1656\n1657\n1658\n1659\n1660\n1661\n1662\n1663\n1664\n1665\n1666\n1667\n1668\n1669\n1670\n1671\n1672\n1673\n1674\n1675\n1676\n1677\n1678\n1679\n1680\n1681\n1682\n1683\n1684\n1685\n1686\n1687\n1688\n1689\n1690\n1691\n1692\n1693\n1694\n1695\n1696\n1697\n1698\n1699\n1700\n1701\n1702\n1703\n1704\n1705\n1706\n1707\n1708\n1709\n1710\n1711\n1712\n1713\n1714\n1715\n1716\n1717\n1718\n1719\n1720\n1721\n1722\n1723\n1724\n1725\n1726\n1727\n1728\n1729\n1730\n1731\n1732\n1733\n1734\n1735\n1736\n1737\n1738\n1739\n1740\n1741\n1742\n1743\n1744\n1745\n1746\n1747\n1748\n1749\n1750\n1751\n1752\n1753\n1754\n1755\n1756\n1757\n1758\n1759\n1760\n1761\n1762\n1763\n1764\n1765\n1766\n1767\n1768\n1769\n1770\n1771\n1772\n1773\n1774\n1775\n1776\n1777\n1778\n1779\n1780\n1781\n1782\n1783\n1784\n1785\n1786\n1787\n1788\n1789\n1790\n1791\n1792\n1793\n1794\n1795\n1796\n1797\n1798\n1799\n1800\n1801\n1802\n1803\n1804\n1805\n1806\n1807\n1808\n1809\n1810\n1811\n1812\n1813\n1814\n1815\n1816\n1817\n1818\n1819\n1820\n1821\n1822\n1823\n1824\n1825\n1826\n1827\n1828\n1829\n1830\n1831\n1832\n1833\n1834\n1835\n1836\n1837\n1838\n1839\n1840\n1841\n1842\n1843\n1844\n1845\n1846\n1847\n1848\n1849\n1850\n1851\n1852\n1853\n1854\n1855\n1856\n1857\n1858\n1859\n1860\n1861\n"
],
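[
"# Hedged sketch (added): quick look at the perturbed images produced above. The attack was\n# configured with clip_min=0.0 and clip_max=1.0, so pixel values should stay in that range.\n# Assumes `perturbed_images` from the previous cell.\nadv0, label0 = perturbed_images[0]\nprint('label:', label0.item(), '| min pixel:', adv0.min().item(), '| max pixel:', adv0.max().item())",
"_____no_output_____"
],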
[
"normal_accuracies = []\nexamples = []\n\nprint('Normal:')\nacc, ex = test(model_normal, device, perturbed_images)\nnormal_accuracies.append(acc)\nexamples.append(ex)",
"Normal:\nTest Accuracy = 9801 / 10000 = 0.9801\n"
],
[
"jsma_accuracies = []\n\nprint('JSMA defense:') \nacc, ex = test(model_jsma, device, perturbed_images)\njsma_accuracies.append(acc)",
"JSMA defense:\nTest Accuracy = 7788 / 10000 = 0.7788\n"
],
[
"Linf_model = './model/mnist_fgsm_model.pth'\nmodel_fgsm = MnistModel().to(device)\nmodel_fgsm.load_state_dict(torch.load(Linf_model, map_location='cpu'))\n\nfgsm_accuracies = []\nprint('FGSM defense:')\nacc, ex = test(model_fgsm, device, perturbed_images)\nfgsm_accuracies.append(acc)",
"FGSM defense:\nTest Accuracy = 8335 / 10000 = 0.8335\n"
]
],
[
[
"# Results",
"_____no_output_____"
],
[
"## Accuracy",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(3,5))\nplt.bar(['UM'], normal_accuracies, label='UM', color='b')\nplt.bar(['JSMA'], jsma_accuracies, label='JSMA', color='g')\nplt.bar(['FGSM'], fgsm_accuracies, label='FGSM', color='r')\nplt.yticks(np.arange(0.0, 1.1, step=0.1))\nplt.title(\"Comparison\")\nplt.ylabel(\"Accuracy\")\nplt.show()",
"_____no_output_____"
],
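[
"# Hedged sketch (added): shows the first saved adversarial example inline rather than only\n# writing it to disk in the next cell. Assumes `examples` from the test() call above.\nimport matplotlib.pyplot as plt\ntrue_label, pred_label, adv_img = examples[0][0]\nplt.imshow(adv_img, cmap='gray')\nplt.title('true: {} / predicted: {}'.format(true_label, pred_label))\nplt.show()",
"_____no_output_____"
],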
[
"from torchvision.utils import save_image\n\nfor idx, (label, pred, image) in enumerate(examples[0]):\n title = str(idx) + '_' + str(label) + '_' + str(pred) + '.png'\n image_t = torch.from_numpy(image)\n save_image(image_t, title)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
cbbafd7e7df5d2d7138b2a784d141f2b7f8b9d92
| 416,872 |
ipynb
|
Jupyter Notebook
|
notebooks/SummarizeFEL.ipynb
|
aglucaci/AnalysisOfOrthologousCollections
|
9f19c6039dee7f236fdb40e27ae516a9a968fb36
|
[
"MIT"
] | 1 |
2021-09-18T13:45:47.000Z
|
2021-09-18T13:45:47.000Z
|
notebooks/SummarizeFEL.ipynb
|
aglucaci/AnalysisOfOrthologousCollections
|
9f19c6039dee7f236fdb40e27ae516a9a968fb36
|
[
"MIT"
] | null | null | null |
notebooks/SummarizeFEL.ipynb
|
aglucaci/AnalysisOfOrthologousCollections
|
9f19c6039dee7f236fdb40e27ae516a9a968fb36
|
[
"MIT"
] | null | null | null | 195.990597 | 61,712 | 0.589039 |
[
[
[
"import pandas as pd\nimport numpy as np\nimport os\nimport json\nimport altair as alt",
"_____no_output_____"
],
[
"JSON_FILE = \"../results/BDNF/Recombinants/BDNF_codons_RDP_recombinationFree.fas.FEL.json\"\npvalueThreshold = 0.1",
"_____no_output_____"
],
[
"def getFELData(json_file):\n with open(json_file, \"r\") as in_d:\n json_data = json.load(in_d)\n return json_data[\"MLE\"][\"content\"][\"0\"]\n#end method\n\ndef getFELHeaders(json_file):\n with open(json_file, \"r\") as in_d:\n json_data = json.load(in_d)\n return json_data[\"MLE\"][\"headers\"]\n#end method",
"_____no_output_____"
],
[
"columns = getFELHeaders(JSON_FILE)\nheaders = [x[0] for x in columns]\nheaders",
"_____no_output_____"
],
[
"data = getFELData(JSON_FILE)",
"_____no_output_____"
]
],
[
[
"### Selected Sites -- Tables",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(getFELData(JSON_FILE), columns=headers, dtype = float)\ndf[\"omega\"] = df[\"beta\"] / df[\"alpha\"]\ndf.index += 1\ndf[\"Site\"] = df.index\ndf",
"_____no_output_____"
],
[
"df_results = df[df[\"p-value\"] <= pvalueThreshold]\ndf_results",
"_____no_output_____"
],
[
"positive_sites = df_results[df_results[\"omega\"] > 1.0]\npositive_sites = positive_sites.reset_index()\npositive_sites.index += 1\npositive_sites.drop('index', axis=1, inplace=True)\npositive_sites",
"_____no_output_____"
],
[
"negative_sites = df_results[df_results[\"omega\"] < 1.0]\nnegative_sites = negative_sites.reset_index()\nnegative_sites.index += 1\nnegative_sites.drop('index', axis=1, inplace=True)\nnegative_sites",
"_____no_output_____"
],
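[
"# Hedged sketch (added): quick counts of the site classes defined above (omega = beta/alpha,\n# filtered at the p-value threshold). Assumes df, df_results, positive_sites and\n# negative_sites from the earlier cells.\nprint('total sites:', len(df))\nprint('sites with p-value <=', pvalueThreshold, ':', len(df_results))\nprint('positively selected (omega > 1):', len(positive_sites))\nprint('negatively selected (omega < 1):', len(negative_sites))",
"_____no_output_____"
],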
[
"#df = pd.DataFrame(getFELData(JSON_FILE), columns=headers, dtype = float)\n#df.index += 1\n\n# Save the DF here.\n#OUTPUT = JSON_FILE.split(\"/\")[-1].replace(\".FEL.json\", \".csv\")\n#print(\"# Saving:\", OUTPUT)\n#df.to_csv(OUTPUT)\n\n#df[\"Site\"] = df.index\n#df[\"omega\"] = df[\"beta\"] / df[\"alpha\"]\n#df[\"Site\"] = df.index\n#df",
"_____no_output_____"
]
],
[
[
"## Visualizations",
"_____no_output_____"
]
],
[
[
"#source = df[df[\"omega\"] < 10]\nsource = df\n\nline = alt.Chart(source).mark_line().encode(\n x='Site',\n y='omega', \n).properties(\n width=800,\n height=600)\n\nline",
"_____no_output_____"
],
[
"\nsource = df\n\nline = alt.Chart(source).mark_line().encode(\n x='Site',\n y=alt.Y('omega',\n scale=alt.Scale(domain=(0, 10), clamp=True)),\n \n \n).properties(\n width=800,\n height=600)\n\nline",
"_____no_output_____"
],
[
"import numpy as np\ndf[\"log10(omega)\"] = np.log10(df[\"omega\"])\n\nsource = df\n\nline = alt.Chart(source).mark_bar().encode(\n x='Site',\n y='log10(omega)',\n color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True))\n \n).properties(\n width=800,\n height=600)\n\nline",
"c:\\python39\\lib\\site-packages\\pandas\\core\\arraylike.py:358: RuntimeWarning: divide by zero encountered in log10\n result = getattr(ufunc, method)(*inputs, **kwargs)\n"
],
[
"import numpy as np\nnegative_sites[\"log10(omega)\"] = np.log10(negative_sites[\"omega\"])\n\nsource = negative_sites\n\nline = alt.Chart(source).mark_bar().encode(\n x='Site',\n y='log10(omega)',\n color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True))\n \n).properties(\n width=800,\n height=600)\n\nline",
"_____no_output_____"
],
[
"import numpy as np\nsource = negative_sites\n\nline = alt.Chart(source).mark_point().encode(\n x='Site',\n y='omega',\n color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True)),\n size=alt.Size('p-value', scale=alt.Scale(reverse=True))\n \n).properties(\n width=800,\n height=600)\n\nline",
"_____no_output_____"
]
],
[
[
"## Go with this one for now",
"_____no_output_____"
]
],
[
[
"# Only the negative sites\nsource = negative_sites\n\nline = alt.Chart(source).mark_circle().encode(\n x='Site',\n y='omega',\n color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True)),\n size=alt.Size('p-value', scale=alt.Scale(reverse=True))\n \n).properties(\n width=800,\n height=600)\n\nline",
"_____no_output_____"
],
[
"import numpy as np\nsource = negative_sites\n\nline = alt.Chart(source).mark_point().encode(\n x='Site',\n y='omega',\n color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True)),\n size=alt.Size('p-value', scale=alt.Scale(reverse=True))\n \n).properties(\n width=800,\n height=600)\n\nline",
"_____no_output_____"
]
],
[
[
"## Figure legend.",
"_____no_output_____"
]
],
[
[
"## Summary\n\na = len(df[\"omega\"])\nb = len(negative_sites[\"omega\"])\n\nc = round((b/a) * 100, 3)\n\nprint(\"FEL analysis of your gene of interest found \" + str(b) + \" of \" + str(a) + \" sites to be statistically significant (p-value <= \" + str(pvalueThreshold) + \") for pervasive negative/purifying selection\" )\nprint(c)",
"FEL analysis of your gene of interest found 174 of 261 sites to be statisically significant (p-value <= 0.1) for pervasive negative/purifying selection\n66.667\n"
]
],
[
[
"## Table\n",
"_____no_output_____"
]
],
[
[
"negative_sites",
"_____no_output_____"
],
[
"df_AlnMap = pd.read_csv(\"BDNF_AlignmentMap.csv\")\ndf_AlnMap",
"_____no_output_____"
],
[
"mapping = []\n\nfor site in negative_sites[\"Site\"].to_list():\n if site in df_AlnMap[\"AlignmentSite\"].to_list():\n for n, item in enumerate(df_AlnMap[\"AlignmentSite\"].to_list()):\n if item == site:\n mapping.append(n+1)\n break\n #end if\n #end for\n else:\n mapping.append(np.nan)\n #end if\n#end for\n\nnegative_sites[\"HumanBDNF\"] = mapping\nnegative_sites",
"_____no_output_____"
],
[
"try:\n negative_sites = negative_sites.drop(['log10(omega)'], axis=1)\nexcept:\n pass\n\nnegative_sites.to_csv(\"BDNF_FEL_Negative_Table.csv\", index=False)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cbbb06a91671da8da34010f11f2dc2ab802401b5
| 3,221 |
ipynb
|
Jupyter Notebook
|
examples/example.ipynb
|
KUASWoodyLIN/Udacity_self_driving_car_challenge_4
|
36ca5ed50f74c49645b43ffcab1b27055540d8e5
|
[
"MIT"
] | null | null | null |
examples/example.ipynb
|
KUASWoodyLIN/Udacity_self_driving_car_challenge_4
|
36ca5ed50f74c49645b43ffcab1b27055540d8e5
|
[
"MIT"
] | null | null | null |
examples/example.ipynb
|
KUASWoodyLIN/Udacity_self_driving_car_challenge_4
|
36ca5ed50f74c49645b43ffcab1b27055540d8e5
|
[
"MIT"
] | null | null | null | 29.824074 | 120 | 0.572493 |
[
[
[
"## Advanced Lane Finding Project\n\nThe goals / steps of this project are the following:\n\n* Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.\n* Apply a distortion correction to raw images.\n* Use color transforms, gradients, etc., to create a thresholded binary image.\n* Apply a perspective transform to rectify binary image (\"birds-eye view\").\n* Detect lane pixels and fit to find the lane boundary.\n* Determine the curvature of the lane and vehicle position with respect to center.\n* Warp the detected lane boundaries back onto the original image.\n* Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.\n\n---\n## First, I'll compute the camera calibration using chessboard images",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\n%matplotlib qt\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*9,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('../camera_cal/calibration*.jpg')\n\n# Step through the list and search for chessboard corners\nfor fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9,6),None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (9,6), corners, ret)\n cv2.imshow('img',img)\n cv2.waitKey(500)\n\ncv2.destroyAllWindows()",
"_____no_output_____"
]
],
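[
[
"# Hedged sketch (added, not part of the original notebook): uses the objpoints/imgpoints\n# collected above to compute the camera matrix and undistort a sample image - the\n# 'distortion correction' step listed in the project goals. File paths are illustrative.\nimg = cv2.imread('../camera_cal/calibration1.jpg')\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1], None, None)\nundistorted = cv2.undistort(img, mtx, dist, None, mtx)\ncv2.imwrite('../output_images/calibration1_undistorted.jpg', undistorted)",
"_____no_output_____"
]
],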
[
[
"## And so on and so forth...",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cbbb136fac99dd5428a55162d175c03e725514d5
| 56,155 |
ipynb
|
Jupyter Notebook
|
python/pytorch/pytorch.ipynb
|
saint1991/samples
|
923237eb22f3799619875dfce1cc92116c207870
|
[
"MIT"
] | null | null | null |
python/pytorch/pytorch.ipynb
|
saint1991/samples
|
923237eb22f3799619875dfce1cc92116c207870
|
[
"MIT"
] | null | null | null |
python/pytorch/pytorch.ipynb
|
saint1991/samples
|
923237eb22f3799619875dfce1cc92116c207870
|
[
"MIT"
] | null | null | null | 73.501309 | 25,316 | 0.751028 |
[
[
[
"# enable user scoped libraries",
"_____no_output_____"
]
],
[
[
"import site\nsite.addsitedir(site.USER_SITE)",
"_____no_output_____"
]
],
[
[
"# import basic packages",
"_____no_output_____"
]
],
[
[
"import math\nimport numpy as np\nimport torch",
"_____no_output_____"
]
],
[
[
"# params",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE=128\nEPOCHS = 30\nVALIDATION_RATIO = 0.2\nRANDOM_SEED = 1",
"_____no_output_____"
]
],
[
[
"# function to preprocess datasets",
"_____no_output_____"
]
],
[
[
"from torchvision import transforms",
"_____no_output_____"
],
[
"preprocess = transforms.Compose([\n transforms.ToTensor()\n])",
"_____no_output_____"
]
],
[
[
"# load training datasets",
"_____no_output_____"
]
],
[
[
"from torchvision import datasets\nfrom torch.utils.data import DataLoader\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"mnist = datasets.MNIST('../data', download=True, transform=preprocess)\ndataset_shape = mnist.train_data.size()\ndataset_shape ",
"_____no_output_____"
]
],
[
[
"# train validation split",
"_____no_output_____"
]
],
[
[
"from torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.dataset import random_split\nfrom torch.utils.data.sampler import SubsetRandomSampler",
"_____no_output_____"
],
[
"sample_count = dataset_shape[0]\nvalidation_count = math.floor(VALIDATION_RATIO * sample_count)\ntrain_count = sample_count - validation_count",
"_____no_output_____"
],
[
"tset, vset = random_split(mnist, [train_count, validation_count])",
"_____no_output_____"
],
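[
"# Hedged sketch (added): verifies that random_split produced the split implied by\n# VALIDATION_RATIO. Assumes tset, vset, train_count and validation_count from the cells above.\nprint('training samples:', len(tset), '(expected:', train_count, ')')\nprint('validation samples:', len(vset), '(expected:', validation_count, ')')",
"_____no_output_____"
],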
[
"trainingset = DataLoader(tset, batch_size=BATCH_SIZE, shuffle=True)\nvalidationset = DataLoader(vset, batch_size=len(vset))",
"_____no_output_____"
]
],
[
[
"### content of the first image",
"_____no_output_____"
]
],
[
[
"data, label = trainingset.dataset[0]",
"_____no_output_____"
],
[
"data[0]",
"_____no_output_____"
],
[
"plt.imshow(data[0].numpy())",
"_____no_output_____"
]
],
[
[
"### label of the first image",
"_____no_output_____"
]
],
[
[
"label",
"_____no_output_____"
]
],
[
[
"# definition of network",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nimport torch.nn.functional as F",
"_____no_output_____"
],
[
"class FeedForwardNetwork(nn.Module):\n \n def __init__(self, data_size, out_size):\n super(FeedForwardNetwork, self).__init__()\n \n self.data_dim = self._num_flat_features(data_size)\n \n self.in_layer = nn.Linear(in_features=self.data_dim, out_features=128)\n self.in_dropout = nn.Dropout(p=0.2)\n self.in_normalize = nn.BatchNorm1d(num_features=128, affine=True)\n self.in_activate = nn.ReLU()\n \n self.hidden1_layer = nn.Linear(in_features=128, out_features=128)\n self.hidden1_dropout = nn.Dropout(p=0.2)\n self.hidden1_normalize = nn.BatchNorm1d(num_features=128, affine=True)\n self.hidden1_activate = nn.ReLU()\n \n self.hidden2_layer = nn.Linear(in_features=128, out_features=128)\n self.hidden2_dropout = nn.Dropout(p=0.2)\n self.hidden2_normalize = nn.BatchNorm1d(num_features=128, affine=True)\n self.hidden2_activate = nn.ReLU()\n \n self.out_layer = nn.Linear(in_features=128, out_features=out_size)\n self.out_normalize = nn.BatchNorm1d(num_features=10, affine=True)\n \n def forward(self, x):\n x = torch.reshape(x, (x.size(0), self.data_dim))\n \n # input layer\n x = self.in_activate(self.in_normalize(self.in_dropout(self.in_layer(x))))\n \n # hidden layers\n x = self.hidden1_activate(self.hidden1_normalize(self.hidden1_dropout(self.hidden1_layer(x))))\n x = self.hidden2_activate(self.hidden2_normalize(self.hidden2_dropout(self.hidden2_layer(x))))\n \n # output layer\n x = F.softmax(self.out_normalize(self.out_layer(x)), dim=1) \n return x\n \n def _num_flat_features(self, size):\n num = 1\n for dim in size[1:]:\n num *= dim\n return num\n\nnetwork = FeedForwardNetwork(data_size=dataset_shape, out_size=10)",
"_____no_output_____"
],
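[
"# Hedged sketch (added): counts the trainable parameters of the FeedForwardNetwork defined\n# above. Assumes `network` from the previous cell.\nn_params = sum(p.numel() for p in network.parameters() if p.requires_grad)\nprint('trainable parameters:', n_params)",
"_____no_output_____"
],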
[
"network",
"_____no_output_____"
]
],
[
[
"# training",
"_____no_output_____"
]
],
[
[
"import torch.optim as optim",
"_____no_output_____"
],
[
"loss_fn = F.cross_entropy\noptimizer = optim.Adam(network.parameters(), lr=0.01, betas=[0.9, 0.999])",
"_____no_output_____"
],
[
"training_losses = []\nvalidation_losses = []\n\nfor epoch in range(EPOCHS):\n for data in trainingset:\n x, t = data\n\n optimizer.zero_grad()\n y = network(x)\n \n training_loss = loss_fn(y, t)\n training_loss.backward()\n optimizer.step()\n \n training_losses.append(training_loss.item())\n \n for data in validationset:\n x, t = data\n y = network(x)\n \n validation_loss = loss_fn(y, t)\n validation_losses.append(validation_loss.item())\n \n print('epoch: {0}/{1}, training_loss: {2}, validation_loss: {3}'.format(epoch + 1, EPOCHS, training_loss.item(), validation_loss.item()))",
"epoch: 1/30, training_loss: 1.5767312049865723, validation_loss: 1.534272313117981\nepoch: 2/30, training_loss: 1.5192404985427856, validation_loss: 1.519007682800293\nepoch: 3/30, training_loss: 1.4895987510681152, validation_loss: 1.5061911344528198\nepoch: 4/30, training_loss: 1.5002930164337158, validation_loss: 1.502048373222351\nepoch: 5/30, training_loss: 1.49653959274292, validation_loss: 1.5021532773971558\nepoch: 6/30, training_loss: 1.4830952882766724, validation_loss: 1.4969806671142578\nepoch: 7/30, training_loss: 1.5055198669433594, validation_loss: 1.4989650249481201\nepoch: 8/30, training_loss: 1.4886049032211304, validation_loss: 1.4999216794967651\nepoch: 9/30, training_loss: 1.4993969202041626, validation_loss: 1.4964265823364258\nepoch: 10/30, training_loss: 1.5231953859329224, validation_loss: 1.494498372077942\nepoch: 11/30, training_loss: 1.489976167678833, validation_loss: 1.492887258529663\nepoch: 12/30, training_loss: 1.475942850112915, validation_loss: 1.4933098554611206\nepoch: 13/30, training_loss: 1.4838476181030273, validation_loss: 1.4935801029205322\nepoch: 14/30, training_loss: 1.5001057386398315, validation_loss: 1.4934003353118896\nepoch: 15/30, training_loss: 1.5084761381149292, validation_loss: 1.4912742376327515\nepoch: 16/30, training_loss: 1.4838526248931885, validation_loss: 1.4945225715637207\nepoch: 17/30, training_loss: 1.4871407747268677, validation_loss: 1.4944355487823486\nepoch: 18/30, training_loss: 1.4807343482971191, validation_loss: 1.4934650659561157\nepoch: 19/30, training_loss: 1.4857882261276245, validation_loss: 1.4899749755859375\nepoch: 20/30, training_loss: 1.4687750339508057, validation_loss: 1.4912912845611572\nepoch: 21/30, training_loss: 1.4905977249145508, validation_loss: 1.4916164875030518\nepoch: 22/30, training_loss: 1.4759646654129028, validation_loss: 1.4929382801055908\nepoch: 23/30, training_loss: 1.511199712753296, validation_loss: 1.4909106492996216\nepoch: 24/30, training_loss: 1.471519947052002, validation_loss: 1.4897130727767944\nepoch: 25/30, training_loss: 1.4864840507507324, validation_loss: 1.4922690391540527\nepoch: 26/30, training_loss: 1.4757152795791626, validation_loss: 1.4897184371948242\nepoch: 27/30, training_loss: 1.4632033109664917, validation_loss: 1.4912234544754028\nepoch: 28/30, training_loss: 1.4828084707260132, validation_loss: 1.4911516904830933\nepoch: 29/30, training_loss: 1.4854902029037476, validation_loss: 1.4896271228790283\nepoch: 30/30, training_loss: 1.5066286325454712, validation_loss: 1.4907294511795044\n"
]
],
[
[
"# loss transition",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline ",
"_____no_output_____"
],
[
"x_axis = np.arange(EPOCHS)\nplt.plot(x_axis, training_losses, label='train')\nplt.plot(x_axis, validation_losses, label='validation')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# evaluation",
"_____no_output_____"
]
],
[
[
"test_mnist = datasets.MNIST('../data', download=True, transform=preprocess, train=False)\ntest_mnist.test_data.size()",
"_____no_output_____"
],
[
"testset = test_mnist.test_data.type(torch.FloatTensor)\ntest_labels = test_mnist.test_labels",
"_____no_output_____"
],
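[
"# Hedged sketch (added, not part of the original notebook): the raw test_data tensor above\n# holds pixel values in [0, 255], while training batches went through ToTensor() (values in\n# [0, 1]); rescaling keeps the evaluation input on the same scale as the training input.\ntestset_scaled = testset / 255.0\ny_scaled = network(testset_scaled)\nprint('accuracy with rescaled inputs:', (torch.argmax(y_scaled, dim=1) == test_labels).float().mean().item())",
"_____no_output_____"
],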
[
"y = network(testset)",
"_____no_output_____"
]
],
[
[
"## logloss",
"_____no_output_____"
]
],
[
[
"logloss = loss_fn(y, test_labels)\nlogloss.item()",
"_____no_output_____"
]
],
[
[
"## accuracy",
"_____no_output_____"
]
],
[
[
"y_labels = torch.argmax(y, dim=1)",
"_____no_output_____"
],
[
"correct_count = torch.sum(y_labels == test_labels, dim=0).item()",
"_____no_output_____"
],
[
"accuracy = correct_count / len(y_labels)\naccuracy",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbbb1804e7253fdec50fbf15e09b960ea12c461b
| 960,550 |
ipynb
|
Jupyter Notebook
|
kobert/pandas_news_test.ipynb
|
cateto/python4NLP
|
1d2d5086f907bf75be01762bf0b384c76d8f704e
|
[
"MIT"
] | 2 |
2021-12-16T22:38:27.000Z
|
2021-12-17T13:09:49.000Z
|
kobert/pandas_news_test.ipynb
|
cateto/python4NLP
|
1d2d5086f907bf75be01762bf0b384c76d8f704e
|
[
"MIT"
] | null | null | null |
kobert/pandas_news_test.ipynb
|
cateto/python4NLP
|
1d2d5086f907bf75be01762bf0b384c76d8f704e
|
[
"MIT"
] | null | null | null | 137.358787 | 128,389 | 0.643174 |
[
[
[
"<a href=\"https://colab.research.google.com/github/cateto/python4NLP/blob/main/kobert/pandas_news_test.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install pandas-profiling==2.7.1",
"Requirement already satisfied: pandas-profiling==2.7.1 in /usr/local/lib/python3.7/dist-packages (2.7.1)\nRequirement already satisfied: matplotlib>=3.2.0 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (3.2.2)\nRequirement already satisfied: tangled-up-in-unicode>=0.0.4 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (0.1.0)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (1.0.1)\nRequirement already satisfied: confuse>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (1.5.0)\nRequirement already satisfied: missingno>=0.4.2 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (0.5.0)\nRequirement already satisfied: requests>=2.23.0 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (2.23.0)\nRequirement already satisfied: visions[type_image_path]==0.4.1 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (0.4.1)\nRequirement already satisfied: htmlmin>=0.1.12 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (0.1.12)\nRequirement already satisfied: tqdm>=4.43.0 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (4.61.2)\nRequirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (1.7.0)\nRequirement already satisfied: numpy>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (1.19.5)\nRequirement already satisfied: ipywidgets>=7.5.1 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (7.6.3)\nRequirement already satisfied: astropy>=4.0 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (4.2.1)\nRequirement already satisfied: phik>=0.9.10 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (0.11.2)\nRequirement already satisfied: pandas!=1.0.0,!=1.0.1,!=1.0.2,>=0.25.3 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (1.1.5)\nRequirement already satisfied: jinja2>=2.11.1 in /usr/local/lib/python3.7/dist-packages (from pandas-profiling==2.7.1) (2.11.3)\nRequirement already satisfied: attrs>=19.3.0 in /usr/local/lib/python3.7/dist-packages (from visions[type_image_path]==0.4.1->pandas-profiling==2.7.1) (21.2.0)\nRequirement already satisfied: networkx>=2.4 in /usr/local/lib/python3.7/dist-packages (from visions[type_image_path]==0.4.1->pandas-profiling==2.7.1) (2.5.1)\nRequirement already satisfied: imagehash in /usr/local/lib/python3.7/dist-packages (from visions[type_image_path]==0.4.1->pandas-profiling==2.7.1) (4.2.1)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from visions[type_image_path]==0.4.1->pandas-profiling==2.7.1) (7.1.2)\nRequirement already satisfied: pyerfa in /usr/local/lib/python3.7/dist-packages (from astropy>=4.0->pandas-profiling==2.7.1) (2.0.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from confuse>=1.0.0->pandas-profiling==2.7.1) (3.13)\nRequirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.0.5)\nRequirement already satisfied: ipykernel>=4.5.1 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->pandas-profiling==2.7.1) (4.10.1)\nRequirement already satisfied: widgetsnbextension~=3.5.0 in /usr/local/lib/python3.7/dist-packages (from 
ipywidgets>=7.5.1->pandas-profiling==2.7.1) (3.5.1)\nRequirement already satisfied: jupyterlab-widgets>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->pandas-profiling==2.7.1) (1.0.0)\nRequirement already satisfied: nbformat>=4.2.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.1.3)\nRequirement already satisfied: ipython>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.5.0)\nRequirement already satisfied: tornado>=4.0 in /usr/local/lib/python3.7/dist-packages (from ipykernel>=4.5.1->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.1.1)\nRequirement already satisfied: jupyter-client in /usr/local/lib/python3.7/dist-packages (from ipykernel>=4.5.1->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.3.5)\nRequirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (1.0.18)\nRequirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (4.8.0)\nRequirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (2.6.1)\nRequirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (4.4.2)\nRequirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.7.5)\nRequirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (57.2.0)\nRequirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.8.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2>=2.11.1->pandas-profiling==2.7.1) (2.0.1)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.0->pandas-profiling==2.7.1) (2.8.1)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.0->pandas-profiling==2.7.1) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.0->pandas-profiling==2.7.1) (1.3.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=3.2.0->pandas-profiling==2.7.1) (2.4.7)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from cycler>=0.10->matplotlib>=3.2.0->pandas-profiling==2.7.1) (1.15.0)\nRequirement already satisfied: seaborn in /usr/local/lib/python3.7/dist-packages (from missingno>=0.4.2->pandas-profiling==2.7.1) (0.11.1)\nRequirement already satisfied: jupyter-core in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (4.7.1)\nRequirement already satisfied: ipython-genutils in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.2.0)\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /usr/local/lib/python3.7/dist-packages (from nbformat>=4.2.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (2.6.0)\nRequirement already 
satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas!=1.0.0,!=1.0.1,!=1.0.2,>=0.25.3->pandas-profiling==2.7.1) (2018.9)\nRequirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=4.0.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.2.5)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->pandas-profiling==2.7.1) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->pandas-profiling==2.7.1) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->pandas-profiling==2.7.1) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.23.0->pandas-profiling==2.7.1) (2021.5.30)\nRequirement already satisfied: notebook>=4.4.1 in /usr/local/lib/python3.7/dist-packages (from widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.3.1)\nRequirement already satisfied: Send2Trash in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (1.7.1)\nRequirement already satisfied: terminado>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.10.1)\nRequirement already satisfied: nbconvert in /usr/local/lib/python3.7/dist-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (5.6.1)\nRequirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.7/dist-packages (from jupyter-client->ipykernel>=4.5.1->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (22.1.0)\nRequirement already satisfied: ptyprocess in /usr/local/lib/python3.7/dist-packages (from terminado>=0.8.1->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.7.0)\nRequirement already satisfied: PyWavelets in /usr/local/lib/python3.7/dist-packages (from imagehash->visions[type_image_path]==0.4.1->pandas-profiling==2.7.1) (1.1.1)\nRequirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.8.4)\nRequirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (1.4.3)\nRequirement already satisfied: bleach in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (3.3.0)\nRequirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.3)\nRequirement already satisfied: defusedxml in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.7.1)\nRequirement already satisfied: testpath in /usr/local/lib/python3.7/dist-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.5.0)\nRequirement already satisfied: webencodings in /usr/local/lib/python3.7/dist-packages (from 
bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (0.5.1)\nRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from bleach->nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets>=7.5.1->pandas-profiling==2.7.1) (21.0)\n"
],
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
[
"import pandas as pd\nimport pandas_profiling\ncorpus_data = pd.read_csv('/content/drive/MyDrive/dataset/preprocessed_test2.csv', encoding='utf-8')",
"_____no_output_____"
],
[
"corpus_data[:2]",
"_____no_output_____"
],
[
"pr = corpus_data.profile_report()",
"_____no_output_____"
],
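[
"# Hedged sketch (added): the profile report can also be written to a standalone HTML file;\n# the output path here is illustrative.\npr.to_file('/content/drive/MyDrive/dataset/preprocessed_test2_profile.html')",
"_____no_output_____"
],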
[
"pr",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbb21c48b3ceb519d9321e6171cf5b2d5e70e90
| 753,596 |
ipynb
|
Jupyter Notebook
|
jupyter/2018-11-07(z-traces for disambiguation).ipynb
|
h-mayorquin/z-traces-project
|
f6f67db6abc2b57bf20c8cc19aec6671915af986
|
[
"MIT"
] | null | null | null |
jupyter/2018-11-07(z-traces for disambiguation).ipynb
|
h-mayorquin/z-traces-project
|
f6f67db6abc2b57bf20c8cc19aec6671915af986
|
[
"MIT"
] | null | null | null |
jupyter/2018-11-07(z-traces for disambiguation).ipynb
|
h-mayorquin/z-traces-project
|
f6f67db6abc2b57bf20c8cc19aec6671915af986
|
[
"MIT"
] | null | null | null | 607.73871 | 148,408 | 0.937761 |
[
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Preamble\" data-toc-modified-id=\"Preamble-1\"><span class=\"toc-item-num\">1 </span>Preamble</a></span><ul class=\"toc-item\"><li><span><a href=\"#General-imports\" data-toc-modified-id=\"General-imports-1.1\"><span class=\"toc-item-num\">1.1 </span>General imports</a></span></li><li><span><a href=\"#The-class-for-the-network\" data-toc-modified-id=\"The-class-for-the-network-1.2\"><span class=\"toc-item-num\">1.2 </span>The class for the network</a></span></li><li><span><a href=\"#Some-other-functions\" data-toc-modified-id=\"Some-other-functions-1.3\"><span class=\"toc-item-num\">1.3 </span>Some other functions</a></span><ul class=\"toc-item\"><li><ul class=\"toc-item\"><li><span><a href=\"#Create-overlapped-representations\" data-toc-modified-id=\"Create-overlapped-representations-1.3.0.1\"><span class=\"toc-item-num\">1.3.0.1 </span>Create overlapped representations</a></span></li></ul></li></ul></li><li><span><a href=\"#Some-general-parameters\" data-toc-modified-id=\"Some-general-parameters-1.4\"><span class=\"toc-item-num\">1.4 </span>Some general parameters</a></span></li></ul></li><li><span><a href=\"#z-variables-as-transfers\" data-toc-modified-id=\"z-variables-as-transfers-2\"><span class=\"toc-item-num\">2 </span>z-variables as transfers</a></span><ul class=\"toc-item\"><li><span><a href=\"#An-example-to-see-it-works\" data-toc-modified-id=\"An-example-to-see-it-works-2.1\"><span class=\"toc-item-num\">2.1 </span>An example to see it works</a></span></li><li><span><a href=\"#Disambiguation\" data-toc-modified-id=\"Disambiguation-2.2\"><span class=\"toc-item-num\">2.2 </span>Disambiguation</a></span></li></ul></li></ul></div>",
"_____no_output_____"
],
[
"# Preamble",
"_____no_output_____"
],
[
"## General imports",
"_____no_output_____"
]
],
[
[
"import pprint\nimport subprocess \nimport sys \nsys.path.append('../')\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport seaborn as sns\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (8.0, 6.0)\n\nnp.set_printoptions(suppress=True, precision=5)\n\nsns.set(font_scale=2.5)\n\nfrom network import Protocol\nfrom patterns_representation import PatternsRepresentation\nfrom analysis_functions import calculate_persistence_time, calculate_recall_quantities\nfrom plotting_functions import plot_weight_matrix, plot_network_activity_angle, plot_persistent_matrix",
"_____no_output_____"
]
],
[
[
"## The class for the network",
"_____no_output_____"
]
],
[
[
"from connectivity_functions import softmax, get_w_pre_post, get_beta, strict_max\nfrom connectivity_functions import create_weight_matrix\nfrom patterns_representation import create_canonical_activity_representation, build_network_representation\n",
"_____no_output_____"
],
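[
"# Hedged sketch (added, not part of the original notebook): the z-traces used by the Network\n# class below are exponential low-pass filters of the unit activity,\n# dz/dt = (o - z) / tau_z, discretized as z += (dt / tau_z) * (o - z).\n# This toy example filters a 100 ms square pulse with the pre- and post-synaptic time\n# constants used as defaults later (tau_z_pre = 50 ms, tau_z_post = 5 ms) to show how the\n# slower trace outlasts the activity it follows.\nimport numpy as np\ndt = 0.001\ntime = np.arange(0, 0.5, dt)\no = (time < 0.1).astype(float)  # unit active for the first 100 ms\nz_pre, z_post = 0.0, 0.0\nz_pre_trace, z_post_trace = [], []\nfor o_t in o:\n    z_pre += (dt / 0.050) * (o_t - z_pre)\n    z_post += (dt / 0.005) * (o_t - z_post)\n    z_pre_trace.append(z_pre)\n    z_post_trace.append(z_post)\nprint('z_pre at 200 ms:', z_pre_trace[200], '| z_post at 200 ms:', z_post_trace[200])",
"_____no_output_____"
],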
[
"class Network:\n def __init__(self, hypercolumns, minicolumns, G=1.0, tau_s=0.010, tau_z_pre=0.050, tau_z_post=0.005,\n tau_a=0.250, g_a=1.0, g_I=10.0, sigma_out=0.0, epsilon=1e-60, g_beta=1.0, prng=np.random,\n strict_maximum=True, perfect=False, normalized_currents=True):\n\n # Random number generator\n self.prng = prng\n self.sigma_out = sigma_out # The variance that the system would have on the steady state if were to have it\n self.sigma_in = sigma_out * np.sqrt(2 / tau_s) # Ornstein-Uhlenbeck process\n self.epsilon = epsilon\n\n # Network parameters\n self.hypercolumns = hypercolumns\n self.minicolumns = minicolumns\n\n self.n_units = self.hypercolumns * self.minicolumns\n\n # Network variables\n self.strict_maximum = strict_maximum\n self.perfect = perfect\n self.normalized_current = normalized_currents\n if self.normalized_current:\n self.normalized_constant = self.hypercolumns\n else:\n self.normalized_constant = 1.0\n\n # Dynamic Parameters\n self.tau_s = tau_s\n self.tau_a = tau_a\n self.r = self.tau_s / self.tau_a\n self.g_beta = g_beta\n self.g_a = g_a\n self.g_I = g_I\n self.tau_z_pre = tau_z_pre\n self.tau_z_post = tau_z_post\n self.G = G\n\n # State variables\n self.o = np.full(shape=self.n_units, fill_value=0.0)\n self.s = np.full(shape=self.n_units, fill_value=0.0)\n self.a = np.full(shape=self.n_units, fill_value=0.0)\n self.I = np.full(shape=self.n_units, fill_value=0.0)\n\n # Current values\n self.i = np.full(shape=self.n_units, fill_value=0.0)\n self.z_pre = np.full(shape=self.n_units, fill_value=0.0)\n self.z_post = np.full(shape=self.n_units, fill_value=0.0)\n self.z_co = np.full(shape=(self.n_units, self.n_units), fill_value=0.0)\n\n # Keeping track of the probability / connectivity\n self.t_p = 0.0\n self.p_pre = np.full(shape=self.n_units, fill_value=0.0)\n self.p_post = np.full(shape=self.n_units, fill_value=0.0)\n self.P = np.full(shape=(self.n_units, self.n_units), fill_value=0.0)\n self.w = np.full(shape=(self.n_units, self.n_units), fill_value=0.0)\n self.beta = np.full(shape=self.n_units, fill_value=0.0)\n\n def parameters(self):\n \"\"\"\n Get the parameters of the model\n\n :return: a dictionary with the parameters\n \"\"\"\n parameters = {'tau_s': self.tau_s, 'tau_z_post': self.tau_z_post, 'tau_z_pre': self.tau_z_pre,\n 'tau_a': self.tau_a, 'g_a': self.g_a, 'g_I':self.g_I, 'epsilon': self.epsilon,\n 'G': self.G, 'sigma_out':self.sigma_out, 'sigma_in': self.sigma_in,\n 'perfect': self.perfect, 'strict_maximum': self.strict_maximum}\n\n return parameters\n\n def reset_values(self, keep_connectivity=True):\n # State variables\n self.o = np.full(shape=self.n_units, fill_value=0.0)\n self.s = np.full(shape=self.n_units, fill_value=0.0)\n\n self.a = np.full(shape=self.n_units, fill_value=0.0)\n self.I = np.full(shape=self.n_units, fill_value=0.0)\n\n # Current values\n self.i = np.full(shape=self.n_units, fill_value=0.0)\n self.z_pre = np.full(shape=self.n_units, fill_value=0.0)\n self.z_post = np.full(shape=self.n_units, fill_value=0.0)\n self.z_co = np.full(shape=(self.n_units, self.n_units), fill_value=0.0)\n\n if not keep_connectivity:\n self.beta = np.full(shape=self.n_units, fill_value=0.0)\n self.w = np.full(shape=(self.n_units, self.n_units), fill_value=0.0)\n\n self.p_pre = np.full(shape=self.n_units, fill_vale=0.0)\n self.p_post = np.full(shape=self.n_units, fill_value=0.0)\n self.P = np.full(shape=(self.n_units, self.n_units), fill_value=0.0)\n\n def update_continuous(self, dt=1.0, sigma=None):\n # Get the noise\n if sigma is None:\n noise = 
self.sigma_in * np.sqrt(dt) * self.prng.normal(0, 1.0, self.n_units)\n else:\n noise = sigma\n\n # Calculate currents\n self.i = self.w @ self.z_pre / self.normalized_constant\n if self.perfect:\n self.s = self.i + self.g_beta * self.beta - self.g_a * self.a + self.g_I * self.I + noise\n else:\n self.s += (dt / self.tau_s) * (self.i # Current\n + self.g_beta * self.beta # Bias\n + self.g_I * self.I # Input current\n - self.g_a * self.a # Adaptation\n - self.s) # s follow all of the s above\n self.s += noise\n # Non-linearity\n if self.strict_maximum:\n self.o = strict_max(self.s, minicolumns=self.minicolumns)\n else:\n self.o = softmax(self.s, G=self.G, minicolumns=self.minicolumns)\n\n # Update the z-traces\n self.z_pre += (dt / self.tau_z_pre) * (self.o - self.z_pre)\n self.z_post += (dt / self.tau_z_post) * (self.o - self.z_post)\n self.z_co = np.outer(self.z_post, self.z_pre)\n \n # Update the adaptation\n self.a += (dt / self.tau_a) * (self.o - self.a)\n\n def update_probabilities(self, dt):\n if self.t_p > 0.0:\n time_factor = dt / self.t_p\n self.p_pre += time_factor * (self.z_pre - self.p_pre)\n self.p_post += time_factor * (self.z_post - self.p_post)\n self.P += time_factor * (self.z_co - self.P)\n self.t_p += dt\n\n def update_weights(self):\n # Update the connectivity\n self.beta = get_beta(self.p_post, self.epsilon)\n self.w = get_w_pre_post(self.P, self.p_pre, self.p_post, self.epsilon, diagonal_zero=False)\n\n\nclass NetworkManager:\n \"\"\"\n This class will run the Network. Everything from running, saving and calculating quantities should be\n methods in this class. In short this will do the running of the network, the learning protocols, etcera.\n\n Note that data analysis should be conducted into another class preferably.\n \"\"\"\n\n def __init__(self, nn=None, dt=0.001, values_to_save=[]):\n \"\"\"\n :param nn: A network instance\n :param time: A numpy array with the time to run\n :param values_to_save: a list with the values as strings of the state variables that should be saved\n \"\"\"\n\n self.nn = nn\n\n # Timing variables\n self.dt = dt\n self.T_training_total = 0.0\n self.T_recall_total = 0.0\n self.n_time_total = 0\n self.time = None\n\n # Initialize saving dictionary\n self.saving_dictionary = self.get_saving_dictionary(values_to_save)\n\n # Initialize the history dictionary for saving values\n self.history = None\n self.empty_history()\n\n # Get reference representations\n self.canonical_activity_representation = create_canonical_activity_representation(self.nn.minicolumns,\n self.nn.hypercolumns)\n self.canonical_network_representation = build_network_representation(self.canonical_activity_representation,\n self.nn.minicolumns)\n # Dictionary to see what has been taught to the network\n # self.n_patterns = 0\n self.patterns_dic = None\n self.network_representation = np.array([]).reshape(0, self.nn.n_units)\n\n # Training matrices\n self.B = None\n self.T = None\n self.w_diff = np.zeros_like(self.nn.w)\n self.beta_diff = np.zeros_like(self.nn.w)\n\n def get_saving_dictionary(self, values_to_save):\n \"\"\"\n This resets the saving dictionary and only activates the values in values_to_save\n \"\"\"\n\n # Reinitialize the dictionary\n saving_dictionary = {'o': False, 's': False, 'a': False,\n 'z_pre': False, 'z_post': False, 'z_co': False,\n 'p_pre': False, 'p_post': False, 'P': False,\n 'i': False, 'w': False, 'beta': False}\n\n # Activate the values passed to the function\n for state_variable in values_to_save:\n saving_dictionary[state_variable] = True\n\n 
return saving_dictionary\n\n def empty_history(self):\n \"\"\"\n A function to empty the history\n \"\"\"\n empty_array = np.array([]).reshape(0, self.nn.n_units)\n empty_array_square = np.array([]).reshape(0, self.nn.n_units, self.nn.n_units)\n\n self.history = {'o': empty_array, 's': empty_array, 'a': empty_array,\n 'z_pre': empty_array, 'z_post': empty_array, 'z_co': empty_array_square,\n 'p_pre': empty_array, 'p_post': empty_array, 'P': empty_array_square,\n 'i': empty_array, 'w': empty_array_square, 'beta': empty_array}\n\n def append_history(self, history, saving_dictionary):\n \"\"\"\n This function is used at every step of a process that is going to be saved. The items given by\n saving dictinoary will be appended to the elements of the history dictionary.\n\n :param history: is the dictionary with the saved values\n :param saving_dictionary: a saving dictionary with keys as the parameters that should be saved\n and items as boolean indicating whether that parameters should be saved or not\n \"\"\"\n\n # Dynamical variables\n if saving_dictionary['o']:\n history['o'].append(np.copy(self.nn.o))\n if saving_dictionary['s']:\n history['s'].append(np.copy(self.nn.s))\n if saving_dictionary['a']:\n history['a'].append(np.copy(self.nn.a))\n if saving_dictionary['i']:\n history['i'].append(np.copy(self.nn.i))\n if saving_dictionary['z_pre']:\n history['z_pre'].append(np.copy(self.nn.z_pre))\n if saving_dictionary['z_post']:\n history['z_post'].append(np.copy(self.nn.z_post))\n if saving_dictionary['z_co']:\n history['z_co'].append(np.copy(self.nn.z_co))\n if saving_dictionary['p_pre']:\n history['p_pre'].append(np.copy(self.nn.p_pre))\n if saving_dictionary['p_post']:\n history['p_post'].append(np.copy(self.nn.p_post))\n if saving_dictionary['P']:\n history['P'].append(np.copy(self.nn.P))\n if saving_dictionary['w']:\n history['w'].append(np.copy(self.nn.w))\n if saving_dictionary['beta']:\n history['beta'].append(np.copy(self.nn.beta))\n\n def update_patterns(self, nr):\n self.network_representation = np.concatenate((self.network_representation, nr))\n aux, indexes = np.unique(self.network_representation, axis=0, return_index=True)\n patterns_dic = {index: pattern for (index, pattern) in zip(indexes, aux)}\n self.patterns_dic = patterns_dic\n\n def run_network(self, time=None, I=None, train_network=False, plasticity_on=False):\n # Change the time if given\n\n if time is None or len(time) == 0:\n raise ValueError('Time should be given and be an array')\n\n # Load the clamping if available\n if I is None:\n self.nn.I = np.zeros_like(self.nn.o)\n elif isinstance(I, (float, int)):\n self.nn.I = self.patterns_dic[I]\n else:\n self.nn.I = I # The pattern is the input\n # Create a vector of noise\n noise = self.nn.prng.normal(loc=0, scale=1, size=(time.size, self.nn.n_units))\n noise *= self.nn.sigma_in * np.sqrt(self.dt)\n\n # Initialize run history\n step_history = {}\n\n # Create a list for the values that are in the saving dictionary\n for quantity, boolean in self.saving_dictionary.items():\n if boolean:\n step_history[quantity] = []\n\n # Run the simulation and save the values\n for index_t, t in enumerate(time):\n # Append the history first\n self.append_history(step_history, self.saving_dictionary)\n # Update the system dynamic variables\n self.nn.update_continuous(dt=self.dt, sigma=noise[index_t, :])\n # Update the learning variables\n if train_network:\n self.nn.update_z_values(dt=self.dt)\n self.nn.update_probabilities(dt=self.dt)\n # Update the weights\n if plasticity_on:\n 
self.nn.update_weights()\n\n # Concatenate with the past history and redefine dictionary\n for quantity, boolean in self.saving_dictionary.items():\n if boolean:\n self.history[quantity] = np.concatenate((self.history[quantity], step_history[quantity]))\n\n return self.history\n\n def run_network_protocol(self, protocol, plasticity_on=False, verbose=True,\n values_to_save_epoch=None, reset=True, empty_history=True):\n\n if empty_history:\n self.empty_history()\n self.T_training_total = 0\n if reset:\n self.nn.reset_values(keep_connectivity=True)\n\n # Updated the stored patterns\n self.update_patterns(protocol.network_representation)\n\n # Unpack the protocol\n times = protocol.times_sequence\n patterns_sequence = protocol.patterns_sequence\n learning_constants = protocol.learning_constants_sequence # The values of Kappa\n\n # Initialize dictionary for storage\n epoch_history = {}\n if values_to_save_epoch:\n saving_dictionary_epoch = self.get_saving_dictionary(values_to_save_epoch)\n # Create a list for the values that are in the saving dictionary\n for quantity, boolean in saving_dictionary_epoch.items():\n if boolean:\n epoch_history[quantity] = []\n\n # Run the protocol\n epochs = 0\n start_time = 0.0\n n_aux = 0\n for time, pattern, k in zip(times, patterns_sequence, learning_constants):\n # End of the epoch\n if pattern == epoch_end_string:\n # Store the values at the end of the epoch\n if values_to_save_epoch:\n self.append_history(epoch_history, saving_dictionary_epoch)\n\n if verbose:\n print('epochs', epochs)\n epochs += 1\n\n # Running step\n else:\n running_time = np.arange(start_time, start_time + time, self.dt)\n self.run_network(time=running_time, I=pattern, train_network=True, plasticity_on=plasticity_on)\n start_time += time\n n_aux += running_time.size\n\n # Get timings quantities\n self.T_training_total += start_time\n self.n_time_total += n_aux\n self.time = np.linspace(0, self.T_training_total, num=self.n_time_total)\n\n # Update weights\n if not plasticity_on:\n self.nn.update_weights()\n\n # Return the history if available\n if values_to_save_epoch:\n return epoch_history\n\n def run_network_protocol_offline(self, protocol):\n # Build time input\n timed_input = TimedInput(protocol, self.dt)\n timed_input.build_timed_input()\n timed_input.build_filtered_input_pre(tau_z=self.nn.tau_z_pre)\n timed_input.build_filtered_input_post(tau_z=self.nn.tau_z_post)\n # Calculate probabilities\n self.nn.p_pre, self.nn.p_post, self.nn.P = timed_input.calculate_probabilities_from_time_signal()\n # Store the connectivity values\n self.nn.beta = get_beta(self.nn.p_post, self.nn.epsilon)\n self.nn.w = get_w_pre_post(self.nn.P, self.nn.p_pre, self.nn.p_post, self.nn.epsilon, diagonal_zero=False)\n\n # Update the patterns\n self.update_patterns(protocol.network_representation)\n\n # Get timings quantities\n t_total, n_time_total, time = protocol.calculate_time_quantities(self.dt)\n self.T_training_total += t_total\n self.n_time_total += n_time_total\n self.time = np.linspace(0, self.T_training_total, num=self.n_time_total)\n\n return timed_input\n\n def run_artificial_protocol(self, ws=1.0, wn=0.25, wb=-3.0, alpha=0.5, alpha_back=None, cycle=False):\n \"\"\"\n This creates an artificial matrix\n :return: w, the weight matrix that was created\n \"\"\"\n minicolumns = self.nn.minicolumns\n extension = self.nn.minicolumns\n sequence = self.canonical_activity_representation\n if cycle:\n sequence = np.append(sequence, sequence[0]).reshape(self.nn.minicolumns + 1, self.nn.hypercolumns)\n\n w = 
create_weight_matrix(minicolumns, sequence, ws, wn, wb, alpha,\n alpha_back, extension, w=None)\n self.nn.w = w\n\n p = np.ones(self.nn.n_units) * (1.0/ len(sequence))\n self.nn.beta = get_beta(p, self.nn.epsilon)\n\n # Updated the patterns in the network\n nr = self.canonical_network_representation\n self.update_patterns(nr)\n\n return w\n\n def run_network_recall(self, T_recall=10.0, T_cue=0.0, I_cue=None, reset=True,\n empty_history=True, plasticity_on=False, stable_start=True):\n \"\"\"\n Run network free recall\n :param T_recall: The total time of recalling\n :param T_cue: the time that the cue is run\n :param I_cue: The current to give as the cue\n :param reset: Whether the state variables values should be returned\n :param empty_history: whether the history should be cleaned\n \"\"\"\n if T_recall < 0.0:\n raise ValueError('T_recall = ' + str(T_recall) + ' has to be positive')\n time_recalling = np.arange(0, T_recall, self.dt)\n time_cue = np.arange(0, T_cue, self.dt)\n\n if plasticity_on:\n train_network = True\n else:\n train_network = False\n\n if empty_history:\n self.empty_history()\n if reset:\n # Never destroy connectivity while recalling\n self.nn.reset_values(keep_connectivity=True)\n # Recall times\n self.T_recall_total = 0\n self.n_time_total = 0\n\n # Set initial conditions of the current to the clamping if available\n if stable_start:\n if I_cue is None:\n pass\n elif isinstance(I_cue, (float, int)):\n self.nn.s = self.nn.g_I * self.patterns_dic[I_cue].astype('float')\n self.nn.o = strict_max(self.nn.s, minicolumns=self.nn.minicolumns)\n self.nn.i = self.nn.w @ self.nn.o / self.nn.normalized_constant\n self.nn.s += self.nn.i + self.nn.beta - self.nn.g_a * self.nn.a\n else:\n self.nn.s = self.nn.g_I * I_cue # The pattern is the input\n self.nn.o = strict_max(self.nn.s, minicolumns=self.nn.minicolumns)\n self.nn.i = self.nn.w @ self.nn.o / self.nn.normalized_constant\n self.nn.s += self.nn.i + self.nn.beta - self.nn.g_a * self.nn.a\n\n # Run the cue\n if T_cue > 0.001:\n self.run_network(time=time_cue, I=I_cue, train_network=train_network, plasticity_on=plasticity_on)\n\n # Run the recall\n self.run_network(time=time_recalling, train_network=train_network, plasticity_on=plasticity_on)\n\n # Calculate total time\n self.T_recall_total += T_recall + T_cue\n self.n_time_total += self.history['o'].shape[0]\n self.time = np.linspace(0, self.T_recall_total, num=self.n_time_total)\n\n def set_persistent_time_with_adaptation_gain(self, T_persistence, from_state=2, to_state=3):\n \"\"\"\n This formula adjusts the adpatation gain g_a so the network with the current weights lasts for T_persistence\n when passing from `from_state' to `to_state'\n :param T_persistence: The persistent time necessary\n :param from_state: the state tat will last T_persistent seconds activated\n :param to_state: the state that it will go to\n :return: g_a the new adaptation\n \"\"\"\n\n delta_w = self.nn.w[from_state, from_state] - self.nn.w[to_state, from_state]\n delta_beta = self.nn.beta[from_state] - self.nn.beta[to_state]\n aux = 1 - np.exp(-T_persistence / self.nn.tau_a) / (1 - self.nn.r)\n g_a = (delta_w + delta_beta) / aux\n\n self.nn.g_a = g_a\n\n return g_a\n\n def calculate_persistence_time_matrix(self):\n\n self.w_diff = self.nn.w.diagonal() - self.nn.w\n self.beta_diff = (self.nn.beta[:, np.newaxis] - self.nn.beta[np.newaxis, :]).T\n\n self.B = (self.w_diff + self.beta_diff) / self.nn.g_a\n self.T = self.nn.tau_a * np.log(1 / (1 - self.B))\n if not self.nn.perfect:\n self.T += self.nn.tau_a * 
np.log(1 / (1 - self.nn.r))\n\n self.T[self.T < 0] = 0.0\n return self.T\n \nclass TimedInput:\n def __init__(self, protocol, dt):\n\n self.protocol = protocol\n self.n_patterns = protocol.n_patterns\n self.n_units = protocol.network_representation.shape[1]\n self.dt = dt\n\n self.T_protocol_total, self.n_time_total, self.time = protocol.calculate_time_quantities(self.dt)\n\n self.network_representation = protocol.network_representation\n self.O = np.zeros((self.n_units, self.n_time_total))\n self.z_pre = np.zeros_like(self.O)\n self.z_post = np.zeros_like(self.O)\n self.tau_z_pre = None\n self.tau_z_post = None\n\n def build_timed_input(self):\n end = 0\n for epoch in range(self.protocol.epochs):\n for pattern, (training_time, inter_pulse_interval) in \\\n enumerate(zip(self.protocol.training_times, self.protocol.inter_pulse_intervals)):\n\n pattern_length = int(training_time / self.dt)\n inter_pulse_interval_length = int(inter_pulse_interval / self.dt)\n start = end\n end = start + pattern_length\n # Add the input\n indexes = np.where(self.network_representation[pattern])[0]\n self.O[indexes, start:end] = 1\n end += inter_pulse_interval_length\n\n inter_sequence_interval_length = int(self.protocol.inter_sequence_interval / self.dt)\n end += inter_sequence_interval_length\n\n return self.O\n\n def build_filtered_input_pre(self, tau_z):\n self.tau_z_pre = tau_z\n for index, o in enumerate(self.O.T):\n if index == 0:\n self.z_pre[:, index] = (self.dt / tau_z) * (o - 0)\n else:\n self.z_pre[:, index] = self.z_pre[:, index - 1] + (self.dt / tau_z) * (o - self.z_pre[:, index - 1])\n\n return self.z_pre\n\n def build_filtered_input_post(self, tau_z):\n self.tau_z_post = tau_z\n for index, o in enumerate(self.O.T):\n if index == 0:\n self.z_post[:, index] = (self.dt / tau_z) * (o - 0)\n else:\n self.z_post[:, index] = self.z_post[:, index - 1] + (self.dt / tau_z) * (o - self.z_post[:, index - 1])\n\n return self.z_post\n\n def calculate_probabilities_from_time_signal(self, filtered=True):\n if filtered:\n y_pre = self.z_pre\n y_post = self.z_post\n else:\n y_pre = self.O\n y_post = self.O\n\n n_units = self.n_units\n n_time_total = self.n_time_total\n\n p_pre = sp.integrate.simps(y=y_pre, x=self.time, axis=1) / self.T_protocol_total\n p_post = sp.integrate.simps(y=y_post, x=self.time, axis=1) / self.T_protocol_total\n\n outer_product = np.zeros((n_units, n_units, n_time_total))\n for index, (s_pre, s_post) in enumerate(zip(y_pre.T, y_post.T)):\n outer_product[:, :, index] = s_post[:, np.newaxis] @ s_pre[np.newaxis, :]\n\n P = sp.integrate.simps(y=outer_product, x=self.time, axis=2) / self.T_protocol_total\n\n return p_pre, p_post, P",
"_____no_output_____"
]
],
[
[
"## Some other functions",
"_____no_output_____"
],
[
"#### Create overlapped representations",
"_____no_output_____"
]
],
[
[
"from copy import deepcopy\n\ndef create_overalaped_representation(manager, representation_overlap, sequence_overlap):\n x = deepcopy(manager.canonical_activity_representation)\n\n to_modify = int(representation_overlap * len(x[0]))\n sequence_size = int(0.5 * len(x))\n sequence_overlap_size = int(sequence_overlap * sequence_size)\n start_point = int(0.5 * sequence_size + sequence_size - np.floor(sequence_overlap_size/ 2.0))\n end_point = start_point + sequence_overlap_size\n\n for sequence_index in range(start_point, end_point):\n pattern = x[sequence_index]\n pattern[:to_modify] = manager.canonical_activity_representation[sequence_index - sequence_size][:to_modify]\n\n return x",
"_____no_output_____"
]
],
[
[
"## Some general parameters",
"_____no_output_____"
]
],
[
[
"epsilon = 1e-7\nvmin = -6.0\nremove = 0.010\n\nstrict_maximum = True\n\ndt = 0.001\ntau_s = 0.010\ntau_a = 0.250\ng_I = 3.0\ng_a = 2.0\nG = 50.0\n\nsns.set(font_scale=3.5)\nsns.set_style(\"whitegrid\", {'axes.grid': False})\nplt.rcParams['figure.figsize'] = (12, 8)\nlw = 10\nms = 22\nalpha_graph = 0.3\ncolors = sns.color_palette()",
"_____no_output_____"
]
],
[
[
"# z-variables as transfers",
"_____no_output_____"
],
[
"## An example to see it works",
"_____no_output_____"
]
],
[
[
"%%time\n\nsigma_out = 0.0\ntau_z_pre = 0.100\ntau_z_post = 0.005\n\nhypercolumns = 1\nminicolumns = 10\nn_patterns = minicolumns\n\n# Training protocol\ntraining_times_base = 0.100\ntraining_times = [training_times_base for i in range(n_patterns)]\nipi_base = 0.000\ninter_pulse_intervals = [ipi_base for i in range(n_patterns)]\ninter_sequence_interval = 0.0\nresting_time = 0.0\nepochs = 1\nT_persistence = 0.100\n\n# Manager properties\nvalues_to_save = ['o', 'i', 'z_pre', 's']\n\n# Neural Network\nnn = Network(hypercolumns, minicolumns, G=G, tau_s=tau_s, tau_z_pre=tau_z_pre, tau_z_post=tau_z_post,\n tau_a=tau_a, g_a=g_a, g_I=g_I, sigma_out=sigma_out, epsilon=epsilon, prng=np.random,\n strict_maximum=strict_maximum, perfect=False, normalized_currents=True)\n\n\n# Build the manager\nmanager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)\n# Build the representation\nrepresentation = PatternsRepresentation(manager.canonical_activity_representation, minicolumns=minicolumns)\n\n# Build the protocol\nprotocol = Protocol()\nprotocol.simple_protocol(representation, training_times=training_times, inter_pulse_intervals=inter_pulse_intervals,\n inter_sequence_interval=inter_sequence_interval, epochs=epochs, resting_time=resting_time)\n\n# Run the protocol\ntimed_input = manager.run_network_protocol_offline(protocol=protocol)\n\n\nsigma_number = 15\nsamples = 25\nsigma_max = 2",
"CPU times: user 20 ms, sys: 8 ms, total: 28 ms\nWall time: 27.2 ms\n"
],
[
"manager.set_persistent_time_with_adaptation_gain(T_persistence=T_persistence, from_state=1, to_state=2)\n\nT_cue = 1.0 * manager.nn.tau_s\nT_recall = 3 * T_persistence * n_patterns + T_cue\n\n# Success\nnr = representation.network_representation\naux = calculate_recall_quantities(manager, nr, T_recall, T_cue, remove=remove, reset=True, empty_history=True)\nsuccess, pattern_sequence, persistent_times, timings = aux\n\nprint('pattern sequence', pattern_sequence)\nprint('times', persistent_times)\nprint('T_calculated', np.mean(persistent_times[1:-1]))\nplot_network_activity_angle(manager, time_y=False);",
"pattern sequence [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\ntimes [0.073, 0.272, 0.321, 0.323, 0.323, 0.323, 0.323, 0.323, 0.387, 0.36]\nT_calculated 0.32437499999999997\n"
],
[
"I = manager.history['i']\nO = manager.history['o']\nZ_pre = manager.history['z_pre']\nS = manager.history['s']\n\nfig = plt.figure()\nax1 = fig.add_subplot(311)\nax2 = fig.add_subplot(312)\nax3 = fig.add_subplot(313)\n\nfor o in O.T:\n ax1.plot(manager.time, o)\n \nfor current in I.T:\n ax3.plot(manager.time, current, ls='--')\n \nfor z in Z_pre.T:\n ax2.plot(manager.time, z)",
"_____no_output_____"
],
[
"I = manager.history['i']\nO = manager.history['o']\nZ_pre = manager.history['z_pre']\n\nfig = plt.figure()\nax1 = fig.add_subplot(211)\nax2 = fig.add_subplot(212)\n\nfor o in O.T:\n ax1.plot(manager.time, o)\n \nfor index, current in enumerate(I.T):\n ax2.plot(manager.time, current, color=colors[index])\nfor index, s in enumerate(S.T):\n ax2.plot(manager.time, s, ls='--', color=colors[index])",
"_____no_output_____"
],
[
"fig = plt.figure()\nax1 = fig.add_subplot(111)\n\nfor index, current in enumerate(I.T):\n ax1.plot(manager.time, current, lw=lw, color=colors[index])\nfor index, s in enumerate(S.T):\n ax1.plot(manager.time, s, ls='--', lw=lw, color=colors[index])",
"_____no_output_____"
],
[
"fig = plt.figure()\nax1 = fig.add_subplot(111)\n\nfor index, current in enumerate(I.T):\n if index < 3:\n ax1.plot(manager.time, current, lw=lw, color=colors[index])\nfor index, s in enumerate(S.T):\n if index < 3:\n ax1.plot(manager.time, s, ls='--', lw=lw, color=colors[index])",
"_____no_output_____"
]
],
[
[
"## Disambiguation",
"_____no_output_____"
]
],
[
[
"%%time \nsigma_out = 0.05\ntau_z_pre = 0.050\ntau_z_post = 0.005\n\nhypercolumns = 1\nminicolumns = 20\nn_patterns = minicolumns\n\n# Training protocol\ntraining_times_base = 0.100\ntraining_times = [training_times_base for i in range(n_patterns)]\nipi_base = 0.000\ninter_pulse_intervals = [ipi_base for i in range(n_patterns)]\ninter_sequence_interval = 0.0\nresting_time = 0.0\nepochs = 1\nT_persistence = 0.025\n\n# Manager properties\nvalues_to_save = ['o', 'i', 'z_pre', 's']\n\n# Neural Network\nnn = Network(hypercolumns, minicolumns, G=G, tau_s=tau_s, tau_z_pre=tau_z_pre, tau_z_post=tau_z_post,\n tau_a=tau_a, g_a=g_a, g_I=g_I, sigma_out=sigma_out, epsilon=epsilon, prng=np.random,\n strict_maximum=strict_maximum, perfect=False, normalized_currents=True)\n\n\n# Build the manager\nmanager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)\n# Build the representation\nrepresentation = PatternsRepresentation(manager.canonical_activity_representation, minicolumns=minicolumns)\n\n# Build the protocol\nprotocol = Protocol()\nprotocol.simple_protocol(representation, training_times=training_times, inter_pulse_intervals=inter_pulse_intervals,\n inter_sequence_interval=inter_sequence_interval, epochs=epochs, resting_time=resting_time)\n\n# Run the protocol\ntimed_input = manager.run_network_protocol_offline(protocol=protocol)\n\n\nsigma_number = 15\nsamples = 25\nsigma_max = 2\n\n# Build the representation\nrepresentation_overlap = 1.0\nsequence_overlap = 0.3\npatterns_per_sequence = 10\nactivity_representation = create_overalaped_representation(manager, representation_overlap, sequence_overlap)\nrepresentation = PatternsRepresentation(activity_representation, minicolumns=minicolumns)\ninter_pulse_intervals[patterns_per_sequence - 1] = inter_sequence_interval\n\nprint(activity_representation)\n\n \n# Build the protocol\nprotocol = Protocol()\nprotocol.simple_protocol(representation, training_times=training_times, inter_pulse_intervals=inter_pulse_intervals,\n inter_sequence_interval=inter_sequence_interval, epochs=epochs, resting_time=resting_time)\n\n# Run the protocol\ntimed_input = manager.run_network_protocol_offline(protocol=protocol)\n# Set the persistent time\nmanager.set_persistent_time_with_adaptation_gain(T_persistence=T_persistence)\nplot_weight_matrix(manager)",
"[[ 0]\n [ 1]\n [ 2]\n [ 3]\n [ 4]\n [ 5]\n [ 6]\n [ 7]\n [ 8]\n [ 9]\n [10]\n [11]\n [12]\n [13]\n [ 4]\n [ 5]\n [ 6]\n [17]\n [18]\n [19]]\nCPU times: user 168 ms, sys: 40 ms, total: 208 ms\nWall time: 123 ms\n"
],
[
"T_cue = 2 * manager.nn.tau_s\nT_recall = 3 * T_persistence * patterns_per_sequence + T_cue\n\nnr1 = representation.network_representation[:patterns_per_sequence]\nnr2 = representation.network_representation[patterns_per_sequence:]\n\n# Success 1\naux1 = calculate_recall_quantities(manager, nr1, T_recall, T_cue, remove=remove, reset=True, empty_history=True)\nsuccess1, pattern_sequence1, persistent_times1, timings1 = aux1\nprint(success1)\nprint(pattern_sequence1)\nprint(persistent_times1)\n\nplot_network_activity_angle(manager, time_y=True);",
"1.0\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 4, 5, 6]\n[0.038, 0.035, 0.031, 0.042, 0.049, 0.044, 0.057, 0.042, 0.039, 0.045, 0.054, 0.045, 0.044, 0.047, 0.041, 0.052000000000000005, 0.051000000000000004]\n"
],
[
"# Success 2\naux2 = calculate_recall_quantities(manager, nr2, T_recall, T_cue, remove=remove, reset=True, empty_history=True)\nsuccess2, pattern_sequence2, persistent_times2, timings2 = aux2\nprint(success2)\nprint(pattern_sequence2)\nprint(persistent_times2)\n\nplot_network_activity_angle(manager, time_y=True);",
"1.0\n[10, 11, 12, 13, 4, 5, 6, 17, 18, 19, 18, 19]\n[0.041, 0.036000000000000004, 0.041, 0.049, 0.039, 0.047, 0.059000000000000004, 0.046, 0.049, 0.20700000000000002, 0.011, 0.107, 0.054]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbbb30146c6374529208fe45ebce495f113d1e62
| 25,661 |
ipynb
|
Jupyter Notebook
|
data/apiDataset.ipynb
|
zparis99/fantasy-football-predictor
|
e653719b4c6f35d9901b739d710edf8a88379329
|
[
"MIT"
] | null | null | null |
data/apiDataset.ipynb
|
zparis99/fantasy-football-predictor
|
e653719b4c6f35d9901b739d710edf8a88379329
|
[
"MIT"
] | null | null | null |
data/apiDataset.ipynb
|
zparis99/fantasy-football-predictor
|
e653719b4c6f35d9901b739d710edf8a88379329
|
[
"MIT"
] | null | null | null | 42.205592 | 373 | 0.52835 |
[
[
[
"import pandas as pd\nimport requests\n\nfrom fantasy import fantasy_points\nfrom creds import nfl_api_key",
"_____no_output_____"
],
[
"schedule_url = 'https://profootballapi.com/schedule'\ngame_url = 'https://profootballapi.com/game'",
"_____no_output_____"
],
[
"all_games = requests.post(schedule_url, params={'api_key': nfl_api_key, 'season_type': 'REG'}).json()",
"_____no_output_____"
]
],
[
[
"## Gather defensive data",
"_____no_output_____"
]
],
[
[
"curr_year = 2019\n# defensive_data = []\ntry:\n for game in all_games:\n new_response = requests.post(game_url, params={'api_key': nfl_api_key, 'game_id': game['id']})\n game_resp = new_response.json()\n if str(curr_year) == game['year']:\n print(curr_year)\n curr_year += 1\n\n for team, loc, opposition_loc in [(game['home'], 'home', 'away'), (game['away'], 'away', 'home')]:\n opp_pyds = game_resp[opposition_loc]['pyds']\n opp_ryds = game_resp[opposition_loc]['ryds']\n opp_pts = game[opposition_loc + '_score']\n\n # Calculate touchdowns\n rtds = 0\n ptds = 0\n passing_players = game_resp[opposition_loc]['stats']['passing']\n for passing_player in passing_players.keys():\n ptds += passing_players[passing_player]['touchdowns']\n\n rushing_players = game_resp[opposition_loc]['stats']['rushing']\n for rushing_player in rushing_players.keys():\n rtds += rushing_players[rushing_player]['touchdowns']\n\n # Calculate specific defense stats\n forced_fumbles = 0\n ints = 0\n sacks = 0\n defensive_players = game_resp[loc]['stats']['defense']\n for defensive_player in defensive_players.keys():\n forced_fumbles += defensive_players[defensive_player]['forced_fumbles']\n ints += defensive_players[defensive_player]['interceptions']\n sacks += defensive_players[defensive_player]['sacks']\n defensive_data.append([game['year'], game['week'], team, opp_pyds, opp_ryds, opp_pts, forced_fumbles, ints, sacks])\nexcept Exception as e:\n print('Error! {} vs {}. Year {} Week {}'.format(game['home'], game['away'], game['year'], game['week']))\n pass",
"_____no_output_____"
],
[
"defense_df = pd.DataFrame(defensive_data, columns=['Year', 'Week', 'Team', 'Opp Pass Yards', 'Opp Rush Yards', 'Opp Points', 'Forced Fumbles', 'Ints', 'Sacks'])\ndefense_df = defense_df.drop_duplicates()\ndefense_df['Year'] = defense_df['Year'].astype(int)\ndefense_df['Week'] = defense_df['Week'].astype(int)",
"_____no_output_____"
],
[
"defense_df.to_csv('defensive_data.csv', index=False)",
"_____no_output_____"
]
],
[
[
"### Individual Defense Stats",
"_____no_output_____"
]
],
[
[
"ind_defense_encountered = []",
"_____no_output_____"
],
[
"curr_year = 2009\nind_def_data = []\n\nfor game in all_games:\n try:\n game_id = str(game['year']) + str(game['week']) + game['home'] + game['away']\n if game_id in ind_defense_encountered:\n continue\n \n ind_defense_encountered.append(game_id)\n \n new_response = requests.post(game_url, params={'api_key': nfl_api_key, 'game_id': game['id']})\n game_resp = new_response.json()\n if str(curr_year) == game['year']:\n print(curr_year)\n curr_year += 1\n\n for team, loc, opp_loc in [(game['home'], 'home', 'away'), (game['away'], 'away', 'home')]:\n team_pts = game[loc + '_score']\n opp_team = game[opp_loc]\n opp_pts = game[opp_loc + '_score']\n\n # Gather player data\n players = {}\n\n if 'defense' in game_resp[loc]['stats'].keys():\n defense_players = game_resp[loc]['stats']['defense']\n for defense_player in defense_players.keys():\n curr_player = defense_players[defense_player]\n player_entry = { 'two_points': 0 }\n if defense_player in players.keys():\n player_entry = players[defense_player]\n player_entry['name'] = curr_player['name']\n player_entry['tackles'] = curr_player['tackles']\n player_entry['assisted_tackles'] = curr_player['assisted_tackles']\n player_entry['sacks'] = curr_player['sacks']\n player_entry['interceptions'] = curr_player['interceptions']\n player_entry['forced_fumbles'] = curr_player['forced_fumbles']\n players[defense_player] = player_entry\n\n for player_key in players.keys():\n player = players[player_key]\n ind_def_data.append([game['year'], game['week'], team, opp_team, player.get('name'),\n player.get('tackles', 0), player.get('assisted_tackles', 0), player.get('sacks', 0), player.get('interceptions', 0), player.get('forced_fumbles', 0),\n team_pts, opp_pts, player_key\n ])\n except Exception as e:\n print('Error! {} vs {}. Year {} Week {}'.format(game['home'], game['away'], game['year'], game['week']))\n print(getattr(e, 'message', repr(e)))\n pass",
"2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\nError! TEN vs WAS. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CAR vs ATL. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CLE vs CIN. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! DAL vs TB. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! DET vs MIN. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NE vs BUF. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NYJ vs GB. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! PHI vs HOU. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! LAC vs BAL. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! ARI vs LA. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SF vs CHI. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NO vs PIT. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SEA vs KC. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\n2019\nError! NE vs BUF. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SF vs LA. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! TB vs HOU. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! ATL vs JAX. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CLE vs BAL. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! IND vs CAR. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! MIA vs CIN. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NYJ vs PIT. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! TEN vs NO. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! WAS vs NYG. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! PHI vs DAL. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SEA vs ARI. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CHI vs KC. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\n"
],
[
"ind_def_df = pd.DataFrame(ind_def_data, columns=['Year', 'Week', 'Team', 'Opposing Team', 'Name', 'Tackles', 'Assisted Tackles', 'Sacks', 'Ints', 'Forced Fumbles', 'Team Score', 'Opposing Score', 'PlayerID'])\nind_def_df.to_csv('ind_defensive_data.csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Offensive Stats",
"_____no_output_____"
]
],
[
[
"offensive_encountered = []",
"_____no_output_____"
],
[
"curr_year = 2009\noffensive_data = []\n\nfor game in all_games:\n try:\n game_id = str(game['year']) + str(game['week']) + game['home'] + game['away']\n if game_id in offensive_encountered:\n continue\n \n offensive_encountered.append(game_id)\n \n new_response = requests.post(game_url, params={'api_key': nfl_api_key, 'game_id': game['id']})\n game_resp = new_response.json()\n if str(curr_year) == game['year']:\n print(curr_year)\n curr_year += 1\n\n for team, loc, opp_loc in [(game['home'], 'home', 'away'), (game['away'], 'away', 'home')]:\n team_pts = game[loc + '_score']\n opp_team = game[opp_loc]\n opp_pts = game[opp_loc + '_score']\n\n # Gather player data\n players = {}\n\n if 'passing' in game_resp[loc]['stats'].keys():\n passing_players = game_resp[loc]['stats']['passing']\n for passing_player in passing_players.keys():\n curr_player = passing_players[passing_player]\n player_entry = { 'two_points': 0 }\n if passing_player in players.keys():\n player_entry = players[passing_player]\n player_entry['name'] = curr_player['name']\n player_entry['pass_attempts'] = curr_player['attempts']\n player_entry['pass_tds'] = curr_player['touchdowns']\n player_entry['pass_yds'] = curr_player['yards']\n player_entry['interceptions'] = curr_player['interceptions']\n player_entry['two_points'] += curr_player['two_point_makes']\n players[passing_player] = player_entry\n\n if 'rushing' in game_resp[loc]['stats'].keys():\n rushing_players = game_resp[loc]['stats']['rushing']\n for rushing_player in rushing_players.keys():\n curr_player = rushing_players[rushing_player]\n player_entry = { 'two_points': 0 }\n if rushing_player in players.keys():\n player_entry = players[rushing_player]\n player_entry['name'] = curr_player['name']\n player_entry['rush_attempts'] = curr_player['attempts']\n player_entry['rush_tds'] = curr_player['touchdowns']\n player_entry['rush_yds'] = curr_player['yards']\n player_entry['rush_long'] = curr_player['long']\n player_entry['two_points'] += curr_player['two_point_makes']\n players[rushing_player] = player_entry\n\n if 'receiving' in game_resp[loc]['stats'].keys():\n receiving_players = game_resp[loc]['stats']['receiving']\n for receiving_player in receiving_players.keys():\n curr_player = receiving_players[receiving_player]\n player_entry = { 'two_points': 0 }\n if receiving_player in players.keys():\n player_entry = players[receiving_player]\n player_entry['name'] = curr_player['name']\n player_entry['receptions'] = curr_player['receptions']\n player_entry['rec_tds'] = curr_player['touchdowns']\n player_entry['rec_yds'] = curr_player['yards']\n player_entry['rec_long'] = curr_player['long']\n player_entry['two_points'] += curr_player['two_point_makes']\n players[receiving_player] = player_entry\n\n if 'fumbles' in game_resp[loc]['stats'].keys():\n fumble_players = game_resp[loc]['stats']['fumbles']\n for fumble_player in fumble_players.keys():\n curr_player = fumble_players[fumble_player]\n player_entry = { 'two_points': 0 }\n if fumble_player in players.keys():\n player_entry = players[fumble_player]\n player_entry['name'] = curr_player['name']\n player_entry['fumbles'] = curr_player['total_fumbles']\n player_entry['fumbles_lost'] = curr_player['fumbles_lost']\n players[fumble_player] = player_entry\n\n if 'kick_return' in game_resp[loc]['stats'].keys():\n kick_return_players = game_resp[loc]['stats']['kick_return']\n for kick_return_player in kick_return_players.keys():\n curr_player = kick_return_players[kick_return_player]\n player_entry = { 'two_points': 0 }\n if 
kick_return_player in players.keys():\n player_entry = players[kick_return_player]\n player_entry['name'] = curr_player['name']\n player_entry['kr_tds'] = curr_player['touchdowns']\n players[kick_return_player] = player_entry\n\n if 'punt_return' in game_resp[loc]['stats'].keys():\n punt_return_players = game_resp[loc]['stats']['punt_return']\n for punt_return_player in punt_return_players.keys():\n curr_player = punt_return_players[punt_return_player]\n player_entry = { 'two_points': 0 }\n if punt_return_player in players.keys():\n player_entry = players[punt_return_player]\n player_entry['name'] = curr_player['name']\n player_entry['pr_tds'] = curr_player['touchdowns']\n players[punt_return_player] = player_entry\n\n for player_key in players.keys():\n player = players[player_key]\n offensive_data.append([game['year'], game['week'], team, opp_team, player.get('name'), player.get('pass_attempts', 0), player.get('pass_yds', 0), player.get('pass_tds', 0),\n player.get('interceptions', 0), player.get('rush_attempts', 0), player.get('rush_yds', 0), player.get('rush_tds', 0), player.get('rush_long', 0),\n player.get('receptions', 0), player.get('rec_yds', 0), player.get('rec_tds', 0), player.get('rec_long', 0), player.get('two_points', 0), player.get('fumbles', 0), \n player.get('fumbles_lost', 0), player.get('kr_tds', 0), player.get('pr_tds', 0), team_pts, opp_pts, player_key\n ])\n except Exception as e:\n print('Error! {} vs {}. Year {} Week {}'.format(game['home'], game['away'], game['year'], game['week']))\n print(getattr(e, 'message', repr(e)))\n pass",
"2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\nError! TEN vs WAS. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CAR vs ATL. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CLE vs CIN. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! DAL vs TB. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! DET vs MIN. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NE vs BUF. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NYJ vs GB. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! PHI vs HOU. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! LAC vs BAL. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! ARI vs LA. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SF vs CHI. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NO vs PIT. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SEA vs KC. Year 2018 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\n2019\nError! NE vs BUF. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SF vs LA. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! TB vs HOU. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! ATL vs JAX. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CLE vs BAL. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! IND vs CAR. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! MIA vs CIN. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! NYJ vs PIT. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! TEN vs NO. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! WAS vs NYG. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! PHI vs DAL. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! SEA vs ARI. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\nError! CHI vs KC. Year 2019 Week 16\nJSONDecodeError('Expecting value: line 1 column 1 (char 0)')\n"
],
[
"offensive_df = pd.DataFrame(offensive_data, columns=['Year', 'Week', 'Team', 'Opposing Team', 'Name', 'Pass Attempts', 'Pass Yards', 'Pass TDs', 'Ints', 'Rush Attempts', 'Rush Yards', 'Rush TDs', 'Rush Long', 'Receptions', 'Rec Yards', 'Rec TDs', 'Rec Long', 'Two Points', 'Fumbles', 'Fumbles Lost', 'KR TDs', 'PR TDs', 'Team Score', 'Opposing Score', 'PlayerID'])\noffensive_df.to_csv('offensive_data.csv', index=False)",
"_____no_output_____"
],
[
"game_resp['home']['stats']['defense']",
"_____no_output_____"
]
],
[
[
"## Schedule Data",
"_____no_output_____"
]
],
[
[
"game_encountered = []",
"_____no_output_____"
],
[
"game_data = []\nfor game in all_games:\n game_id = str(game['year']) + str(game['week']) + game['home'] + game['away']\n if game_id in game_encountered:\n continue\n \n game_encountered.append(game_id)\n \n game_data.append([game['year'], game['week'], game['home'], game['home_score'], \n game['away'], game['away_score'], game['month'], game['day'], game['time']])",
"_____no_output_____"
],
[
"game_df = pd.DataFrame(game_data, columns=['Year', 'Week', 'Home Team', 'Home Score', 'Away Team', 'Away Score', 'Month', 'Day', 'Time'])\ngame_df.to_csv('schedule_data.csv', index=False)",
"_____no_output_____"
]
],
[
[
"## Weather Data",
"_____no_output_____"
]
],
[
[
"weather_url = 'http://history.openweathermap.org/data/2.5/history/city'\ncity_id = 3882428\ntype_call = 'hour'\nstart = 1252585800\ncnt = 2\nweather_api_key = '26aa50f82507f34fca4e6d212f2bd716'",
"_____no_output_____"
],
[
"requests.get(weather_url, params={ 'id': city_id, 'type': type_call, 'appid': weather_api_key,\n 'start': start, 'cnt': cnt})",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cbbb3056a015579bed8665e1ce10ab1380a5fe97
| 908,612 |
ipynb
|
Jupyter Notebook
|
src/7_time_series.ipynb
|
WilliamSimoni/Tennis-Matches-Data-mining
|
006054db3ef624322e35fe595bbd7419e178661b
|
[
"MIT"
] | null | null | null |
src/7_time_series.ipynb
|
WilliamSimoni/Tennis-Matches-Data-mining
|
006054db3ef624322e35fe595bbd7419e178661b
|
[
"MIT"
] | null | null | null |
src/7_time_series.ipynb
|
WilliamSimoni/Tennis-Matches-Data-mining
|
006054db3ef624322e35fe595bbd7419e178661b
|
[
"MIT"
] | null | null | null | 606.955244 | 838,107 | 0.931304 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
cbbb3cedd51dbc589fad9d040c0fbbc0490788a3
| 17,094 |
ipynb
|
Jupyter Notebook
|
nbs/dl2/10b_mixup_label_smoothing.ipynb
|
stuartzong/course-v3
|
496c8d06d401e53f5cd517e3805a85befa6795cc
|
[
"Apache-2.0"
] | null | null | null |
nbs/dl2/10b_mixup_label_smoothing.ipynb
|
stuartzong/course-v3
|
496c8d06d401e53f5cd517e3805a85befa6795cc
|
[
"Apache-2.0"
] | null | null | null |
nbs/dl2/10b_mixup_label_smoothing.ipynb
|
stuartzong/course-v3
|
496c8d06d401e53f5cd517e3805a85befa6795cc
|
[
"Apache-2.0"
] | null | null | null | 32.56 | 498 | 0.584299 |
[
[
[
"# Mixup / Label smoothing",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n\n%matplotlib inline",
"_____no_output_____"
],
[
"#export\nfrom exp.nb_10 import *",
"_____no_output_____"
],
[
"path = datasets.untar_data(datasets.URLs.IMAGENETTE_160)",
"_____no_output_____"
],
[
"tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]\nbs = 64\n\nil = ImageList.from_files(path, tfms=tfms)\nsd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))\nll = label_by_func(sd, parent_labeler, proc_y=CategoryProcessor())\ndata = ll.to_databunch(bs, c_in=3, c_out=10, num_workers=4)",
"_____no_output_____"
]
],
[
[
"## Mixup",
"_____no_output_____"
],
[
"### What is mixup?\n\nAs the name kind of suggests, the authors of the [mixup article](https://arxiv.org/abs/1710.09412) propose to train the model on a mix of the pictures of the training set. Let's say we're on CIFAR10 for instance, then instead of feeding the model the raw images, we take two (which could be in the same class or not) and do a linear combination of them: in terms of tensor it's\n``` python\nnew_image = t * image1 + (1-t) * image2\n```\nwhere t is a float between 0 and 1. Then the target we assign to that image is the same combination of the original targets:\n``` python\nnew_target = t * target1 + (1-t) * target2\n```\nassuming your targets are one-hot encoded (which isn't the case in pytorch usually). And that's as simple as this.",
"_____no_output_____"
]
],
[
[
"img1 = PIL.Image.open(ll.train.x.items[0])\nimg1",
"_____no_output_____"
],
[
"img2 = PIL.Image.open(ll.train.x.items[4000])\nimg2",
"_____no_output_____"
],
[
"mixed_up = ll.train.x[0] * 0.3 + ll.train.x[4000] * 0.7\nplt.imshow(mixed_up.permute(1,2,0));",
"_____no_output_____"
]
],
[
[
"French horn or tench? The right answer is 70% french horn and 30% tench ;)",
"_____no_output_____"
],
[
"### Implementation",
"_____no_output_____"
],
[
"The implementation relies on something called the *beta distribution* which in turns uses something which Jeremy still finds mildly terrifying called the *gamma function*. To get over his fears, Jeremy reminds himself that *gamma* is just a factorial function that (kinda) interpolates nice and smoothly to non-integers too. How it does that exactly isn't important...",
"_____no_output_____"
]
],
[
[
"# PyTorch has a log-gamma but not a gamma, so we'll create one\nΓ = lambda x: x.lgamma().exp()",
"_____no_output_____"
]
],
[
[
"NB: If you see math symbols you don't know you can google them like this: [Γ function](https://www.google.com/search?q=Γ+function).\n\nIf you're not used to typing unicode symbols, on Mac type <kbd>ctrl</kbd>-<kbd>cmd</kbd>-<kbd>space</kbd> to bring up a searchable emoji box. On Linux you can use the [compose key](https://help.ubuntu.com/community/ComposeKey). On Windows you can also use a compose key, but you first need to install [WinCompose](https://github.com/samhocevar/wincompose). By default the <kbd>compose</kbd> key is the right-hand <kbd>Alt</kbd> key.\n\nYou can search for symbol names in WinCompose. The greek letters are generally <kbd>compose</kbd>-<kbd>\\*</kbd>-<kbd>letter</kbd> (where *letter* is, for instance, <kbd>a</kbd> to get greek α alpha).",
"_____no_output_____"
]
],
[
[
"facts = [math.factorial(i) for i in range(7)]",
"_____no_output_____"
],
[
"plt.plot(range(7), facts, 'ro')\nplt.plot(torch.linspace(0,6), Γ(torch.linspace(0,6)+1))\nplt.legend(['factorial','Γ']);",
"_____no_output_____"
],
[
"torch.linspace(0,0.9,10)",
"_____no_output_____"
]
],
[
[
"In the original article, the authors suggested three things:\n 1. Create two separate dataloaders and draw a batch from each at every iteration to mix them up\n 2. Draw a t value following a beta distribution with a parameter α (0.4 is suggested in their article)\n 3. Mix up the two batches with the same value t.\n 4. Use one-hot encoded targets\n\nWhy the beta distribution with the same parameters α? Well it looks like this:",
"_____no_output_____"
]
],
[
[
"_,axs = plt.subplots(1,2, figsize=(12,4))\nx = torch.linspace(0,1, 100)\nfor α,ax in zip([0.1,0.8], axs):\n α = tensor(α)\n# y = (x.pow(α-1) * (1-x).pow(α-1)) / (gamma_func(α ** 2) / gamma_func(α))\n y = (x**(α-1) * (1-x)**(α-1)) / (Γ(α)**2 / Γ(2*α))\n ax.plot(x,y)\n ax.set_title(f\"α={α:.1}\")",
"_____no_output_____"
]
],
[
[
"With a low `α`, we pick values close to 0. and 1. with a high probability, and the values in the middle all have the same kind of probability. With a greater `α`, 0. and 1. get a lower probability .",
"_____no_output_____"
],
[
"While the approach above works very well, it's not the fastest way we can do this. The main point that slows down this process is wanting two different batches at every iteration (which means loading twice the amount of images and applying to them the other data augmentation function). To avoid this slow down, we can be a little smarter and mixup a batch with a shuffled version of itself (this way the images mixed up are still different). This was a trick suggested in the MixUp paper.\n\nThen pytorch was very careful to avoid one-hot encoding targets when it could, so it seems a bit of a drag to undo this. Fortunately for us, if the loss is a classic cross-entropy, we have\n```python\nloss(output, new_target) = t * loss(output, target1) + (1-t) * loss(output, target2)\n```\nso we won't one-hot encode anything and just compute those two losses then do the linear combination.\n\nUsing the same parameter t for the whole batch also seemed a bit inefficient. In our experiments, we noticed that the model can train faster if we draw a different t for every image in the batch (both options get to the same result in terms of accuracy, it's just that one arrives there more slowly).\nThe last trick we have to apply with this is that there can be some duplicates with this strategy: let's say or shuffle say to mix image0 with image1 then image1 with image0, and that we draw t=0.1 for the first, and t=0.9 for the second. Then\n```python\nimage0 * 0.1 + shuffle0 * (1-0.1) = image0 * 0.1 + image1 * 0.9\nimage1 * 0.9 + shuffle1 * (1-0.9) = image1 * 0.9 + image0 * 0.1\n```\nwill be the same. Of course, we have to be a bit unlucky but in practice, we saw there was a drop in accuracy by using this without removing those near-duplicates. To avoid them, the tricks is to replace the vector of parameters we drew by\n``` python\nt = max(t, 1-t)\n```\nThe beta distribution with the two parameters equal is symmetric in any case, and this way we insure that the biggest coefficient is always near the first image (the non-shuffled batch).\n",
"_____no_output_____"
],
[
"In `Mixup` we have handle loss functions that have an attribute `reduction` (like `nn.CrossEntropy()`). To deal with the `reduction=None` with various types of loss function without modifying the actual loss function outside of the scope we need to perform those operations with no reduction, we create a context manager:",
"_____no_output_____"
]
],
[
[
"#export\nclass NoneReduce():\n def __init__(self, loss_func): \n self.loss_func,self.old_red = loss_func,None\n \n def __enter__(self):\n if hasattr(self.loss_func, 'reduction'):\n self.old_red = getattr(self.loss_func, 'reduction')\n setattr(self.loss_func, 'reduction', 'none')\n return self.loss_func\n else: return partial(self.loss_func, reduction='none')\n \n def __exit__(self, type, value, traceback):\n if self.old_red is not None: setattr(self.loss_func, 'reduction', self.old_red) ",
"_____no_output_____"
]
],
[
[
"Then we can use it in `MixUp`:",
"_____no_output_____"
]
],
[
[
"#export\nfrom torch.distributions.beta import Beta\n\ndef unsqueeze(input, dims):\n for dim in listify(dims): input = torch.unsqueeze(input, dim)\n return input\n\ndef reduce_loss(loss, reduction='mean'):\n return loss.mean() if reduction=='mean' else loss.sum() if reduction=='sum' else loss ",
"_____no_output_____"
],
[
"#export\nclass MixUp(Callback):\n _order = 90 #Runs after normalization and cuda\n def __init__(self, α:float=0.4): self.distrib = Beta(tensor([α]), tensor([α]))\n \n def begin_fit(self): self.old_loss_func,self.run.loss_func = self.run.loss_func,self.loss_func\n \n def begin_batch(self):\n if not self.in_train: return #Only mixup things during training\n λ = self.distrib.sample((self.yb.size(0),)).squeeze().to(self.xb.device)\n λ = torch.stack([λ, 1-λ], 1)\n self.λ = unsqueeze(λ.max(1)[0], (1,2,3))\n shuffle = torch.randperm(self.yb.size(0)).to(self.xb.device)\n xb1,self.yb1 = self.xb[shuffle],self.yb[shuffle]\n self.run.xb = lin_comb(self.xb, xb1, self.λ)\n \n def after_fit(self): self.run.loss_func = self.old_loss_func\n \n def loss_func(self, pred, yb):\n if not self.in_train: return self.old_loss_func(pred, yb)\n with NoneReduce(self.old_loss_func) as loss_func:\n loss1 = loss_func(pred, yb)\n loss2 = loss_func(pred, self.yb1)\n loss = lin_comb(loss1, loss2, self.λ)\n return reduce_loss(loss, getattr(self.old_loss_func, 'reduction', 'mean'))",
"_____no_output_____"
],
[
"nfs = [32,64,128,256,512]",
"_____no_output_____"
],
[
"def get_learner(nfs, data, lr, layer, loss_func=F.cross_entropy,\n cb_funcs=None, opt_func=optim.SGD, **kwargs):\n model = get_cnn_model(data, nfs, layer, **kwargs)\n init_cnn(model)\n return Learner(model, data, loss_func, lr=lr, cb_funcs=cb_funcs, opt_func=opt_func)",
"_____no_output_____"
],
[
"cbfs = [partial(AvgStatsCallback,accuracy),\n CudaCallback, \n ProgressCallback,\n partial(BatchTransformXCallback, norm_imagenette),\n MixUp]",
"_____no_output_____"
],
[
"learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs)",
"_____no_output_____"
],
[
"learn.fit(1)",
"_____no_output_____"
]
],
[
[
"Questions: How does softmax interact with all this? Should we jump straight from mixup to inference?",
"_____no_output_____"
],
[
"## Label smoothing",
"_____no_output_____"
],
[
"Another regularization technique that's often used is label smoothing. It's designed to make the model a little bit less certain of it's decision by changing a little bit its target: instead of wanting to predict 1 for the correct class and 0 for all the others, we ask it to predict `1-ε` for the correct class and `ε` for all the others, with `ε` a (small) positive number and N the number of classes. This can be written as:\n\n$$loss = (1-ε) ce(i) + ε \\sum ce(j) / N$$\n\nwhere `ce(x)` is cross-entropy of `x` (i.e. $-\\log(p_{x})$), and `i` is the correct class. This can be coded in a loss function:",
"_____no_output_____"
]
],
[
[
"#export\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self, ε:float=0.1, reduction='mean'):\n super().__init__()\n self.ε,self.reduction = ε,reduction\n \n def forward(self, output, target):\n c = output.size()[-1]\n log_preds = F.log_softmax(output, dim=-1)\n loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)\n nll = F.nll_loss(log_preds, target, reduction=self.reduction)\n return lin_comb(loss/c, nll, self.ε)",
"_____no_output_____"
]
],
[
[
"Note: we implement the various reduction attributes so that it plays nicely with MixUp after.",
"_____no_output_____"
]
],
[
[
"cbfs = [partial(AvgStatsCallback,accuracy),\n CudaCallback,\n ProgressCallback,\n partial(BatchTransformXCallback, norm_imagenette)]",
"_____no_output_____"
],
[
"learn = get_learner(nfs, data, 0.4, conv_layer, cb_funcs=cbfs, loss_func=LabelSmoothingCrossEntropy())",
"_____no_output_____"
],
[
"learn.fit(1)",
"_____no_output_____"
]
],
[
[
"And we can check our loss function `reduction` attribute hasn't changed outside of the training loop:",
"_____no_output_____"
]
],
[
[
"assert learn.loss_func.reduction == 'mean'",
"_____no_output_____"
]
],
[
[
"## Export",
"_____no_output_____"
]
],
[
[
"!./notebook2script.py 10b_mixup_label_smoothing.ipynb",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbb3e39aa2f96b45bf52517193ea6884a439466
| 20,616 |
ipynb
|
Jupyter Notebook
|
notebooks/Demo Data.ipynb
|
realnoobs/django-efilling
|
c105671d122d2f9b35320f12d9bb616f23ccd735
|
[
"MIT"
] | 1 |
2021-09-15T17:12:13.000Z
|
2021-09-15T17:12:13.000Z
|
notebooks/Demo Data.ipynb
|
realnoobs/django-efilling
|
c105671d122d2f9b35320f12d9bb616f23ccd735
|
[
"MIT"
] | null | null | null |
notebooks/Demo Data.ipynb
|
realnoobs/django-efilling
|
c105671d122d2f9b35320f12d9bb616f23ccd735
|
[
"MIT"
] | 1 |
2021-09-12T14:08:05.000Z
|
2021-09-12T14:08:05.000Z
| 45.111597 | 132 | 0.551756 |
[
[
[
"import os\nimport django\nfrom django.db import transaction\nimport random\nfrom django_efilling.models import Instrument, InstrumentQuestion, InstrumentQuestionChoice\nfrom django_efilling.models import (ESSAY, SINGLE_CHOICE, MULTIPLE_CHOICE, IMAGE_CHOICE, Respondent)\nos.environ[\"DJANGO_ALLOW_ASYNC_UNSAFE\"] = \"true\"\ndjango.setup()",
"_____no_output_____"
]
],
[
[
"# Create Users\nwith transaction.atomic():\n user1 = User.objects.create_user('john', '[email protected]', 'johnpassword')\n user2 = User.objects.create_user('albert', '[email protected]', 'albertpassword')\n user3 = User.objects.create_user('sonia', '[email protected]', 'soniapassword')\n user4 = User.objects.create_user('james', '[email protected]', 'jamespassword')\n user5 = User.objects.create_user('chika', '[email protected]', 'chikapassword')\n user6 = User.objects.create_user('robert', '[email protected]', 'robertpassword')\n user7 = User.objects.create_user('hendra', '[email protected]', 'hendrapassword')\n user8 = User.objects.create_user('yoshi', '[email protected]', 'yoshipassword')\n user9 = User.objects.create_user('bianca', '[email protected]', 'biancapassword')\n user10 = User.objects.create_user('devia', '[email protected]', 'deviapassword')\n user11 = User.objects.create_user('yudi', '[email protected]', 'yudipassword')\n user12 = User.objects.create_user('meriam', '[email protected]', 'meriampassword')\n user13 = User.objects.create_user('hania', '[email protected]', 'haniapassword')\n user14 = User.objects.create_user('bimo', '[email protected]', 'bimopassword')\n user15 = User.objects.create_user('gino', '[email protected]', 'ginopassword')\n user16 = User.objects.create_user('juju', '[email protected]', 'jujupassword')\n user17 = User.objects.create_user('romeo', '[email protected]', 'romeopassword')\n user18 = User.objects.create_user('hansen', '[email protected]', 'hansenpassword')\n user19 = User.objects.create_user('romi', '[email protected]', 'romipassword')\n user20 = User.objects.create_user('gina', '[email protected]', 'ginapassword')\n user21 = User.objects.create_user('huria', '[email protected]', 'huriapassword')\n user22 = User.objects.create_user('jaka', '[email protected]', 'jakapassword')\n user23 = User.objects.create_user('rendra', '[email protected]', 'rendrapassword')\n user24 = User.objects.create_user('kiki', '[email protected]', 'kikipassword')\n user25 = User.objects.create_user('rama', '[email protected]', 'ramapassword')\n user26 = User.objects.create_user('habibie', '[email protected]', 'habibiepassword')\n user27 = User.objects.create_user('iankasela', '[email protected]', 'iankaselapassword')\n user28 = User.objects.create_user('koko', '[email protected]', 'kokopassword')\n user29 = User.objects.create_user('joko', '[email protected]', 'jokopassword')\n user30 = User.objects.create_user('momo', '[email protected]', 'momopassword')",
"_____no_output_____"
]
],
[
[
"# Creating Questions\n\nuser1 = User.objects.get(username='rizkisasri')\n\nwith transaction.atomic():\n# def create_instrumet(number)\n # Create Instrument\n instrument1 = Instrument(\n name=\"Owesome survey #3, Test All Question Type.\",\n creator=user1,\n )\n instrument1.save()\n instrument1.tags.add(\"tag1\")\n instrument1.tags.add(\"tag2\")\n \n # Add Question to Instrument\n q1 = InstrumentQuestion(\n instrument=instrument1,\n order=1,\n question_type=ESSAY,\n text=\"Essay question example, is it works?\",\n help_text=\"Describe something ..\",\n scoring=True,\n answer=1\n )\n q1.save()\n\n q2 = InstrumentQuestion(\n order=2,\n instrument=instrument1,\n question_type=SINGLE_CHOICE,\n text=\"Single choice question example, is it works?\",\n help_text=\"Choose one ..\",\n scoring=True,\n answer=1\n )\n q2.save()\n\n q2c1 = InstrumentQuestionChoice(question=q2, order=1, label=\"Choice 1\", value=1)\n q2c2 = InstrumentQuestionChoice(question=q2, order=2, label=\"Choice 2\", value=2)\n q2c3 = InstrumentQuestionChoice(question=q2, order=3, label=\"Choice 3\", value=3)\n q2c4 = InstrumentQuestionChoice(question=q2, order=4, label=\"Choice 4\", value=4)\n InstrumentQuestionChoice.objects.bulk_create([q2c1, q2c2, q2c3, q2c4])\n \n q3 = InstrumentQuestion(\n order=3,\n instrument=instrument1,\n question_type=MULTIPLE_CHOICE,\n text=\"Multiple choice question example, is it works?\",\n help_text=\"Choose one or more ..\",\n scoring=True,\n answer=\"1, 2\"\n )\n q3.save()\n\n q3c1 = InstrumentQuestionChoice(question=q3, order=1, label=\"Choice 1\", value=1)\n q3c2 = InstrumentQuestionChoice(question=q3, order=2, label=\"Choice 2\", value=2)\n q3c3 = InstrumentQuestionChoice(question=q3, order=3, label=\"Choice 3\", value=3)\n q3c4 = InstrumentQuestionChoice(question=q3, order=4, label=\"Choice 4\", value=4)\n InstrumentQuestionChoice.objects.bulk_create([q3c1, q3c2, q3c3, q3c4])\n \n q4 = InstrumentQuestion(\n order=4,\n instrument=instrument1,\n question_type=IMAGE_CHOICE,\n text=\"Image choice question example, is it works?\",\n help_text=\"Choose one ..\",\n scoring=True,\n answer=\"1, 2\"\n )\n q4.save()\n\n q4c1 = InstrumentQuestionChoice(question=q4, order=1, label=\"Image 1\", value=1)\n q4c2 = InstrumentQuestionChoice(question=q4, order=2, label=\"Image 2\", value=2)\n q4c3 = InstrumentQuestionChoice(question=q4, order=3, label=\"Image 3\", value=3)\n q4c4 = InstrumentQuestionChoice(question=q4, order=4, label=\"Image 4\", value=4)\n InstrumentQuestionChoice.objects.bulk_create([q4c1, q4c2, q4c3, q4c4])\n \n q5 = InstrumentQuestion(\n order=3,\n instrument=instrument1,\n question_type=MULTIPLE_CHOICE,\n text=\"Multiple choice 2 question example, is it works?\",\n help_text=\"Choose one or more ..\",\n scoring=True,\n answer=\"1\"\n )\n q5.save()\n\n q5c1 = InstrumentQuestionChoice(question=q5, order=1, label=\"Choice 1\", value=1)\n q5c2 = InstrumentQuestionChoice(question=q5, order=2, label=\"Choice 2\", value=2)\n q5c3 = InstrumentQuestionChoice(question=q5, order=3, label=\"Choice 3\", value=3)\n q5c4 = InstrumentQuestionChoice(question=q5, order=4, label=\"Choice 4\", value=4)\n q5c5 = InstrumentQuestionChoice(question=q5, order=5, label=\"Choice 5\", value=5)\n InstrumentQuestionChoice.objects.bulk_create([q5c1, q5c2, q5c3, q5c4, q5c5])\n",
"_____no_output_____"
],
[
"user1 = User.objects.get(username='rizkisasri')\n\nwith transaction.atomic():\n respondents = []\n users = User.objects.all()\n ip = '127.0.0.{}'\n for i in range(users.count()):\n respondents.append(\n Respondent(\n ip_address=ip.format(i),\n respondent=users[i],\n instrument=instrument1,\n response = json.dumps({\n q1.id: \"Text {}\".format(random.choice([\"one\",\"two\",\"tree\",\"four\"])),\n q2.id: random.choice([1,2,3,4]),\n q3.id: random.choice([1,2,3,4]),\n q4.id: random.choice([1,2,3,4]),\n q5.id: random.choice([1,2,3,4,5]),\n })\n )\n )\n Respondent.objects.bulk_create(respondents)",
"_____no_output_____"
],
[
"from numpy import int64\nimport pandas as pd\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_efilling.models import InstrumentQuestionChoice, ESSAY, SINGLE_CHOICE, MULTIPLE_CHOICE, IMAGE_CHOICE\n\n\nclass QuestionAnalizer:\n def __init__(self, question, instrument_analizer):\n self.questions = instrument_analizer.questions\n self.question = question\n self.responses = instrument_analizer.responses\n self.choices = instrument_analizer.choices\n\n def get_responses_df(self):\n df = pd.DataFrame(self.responses)\n if df.empty:\n df = pd.DataFrame(columns=[str(id) for id in self.get_questions_id()])\n return df[str(self.question[\"id\"])]\n\n def get_questions_id(self):\n return [ question['id'] for question in self.questions ]\n \n def get_dataframe(self):\n raise NotImplementedError(\"%s class should implement get_dataframe() method.\" % self.__class__.__name__)\n\n def get_report(self):\n return self.get_dataframe().to_dict()\n\n def get_report_transpose(self):\n return self.get_dataframe().transpose().to_dict()\n\n\nclass EssayQuestionAnalizer(QuestionAnalizer):\n def get_dataframe(self):\n response_df = self.get_responses_df().value_counts().to_frame(\"count\").sort_index()\n response_df[\"value\"] = response_df.index\n response_df[\"label\"] = response_df[\"value\"]\n response_df.index = [x for x in range(len(response_df.index))]\n response_df[\"answer\"] = response_df[\"count\"].sum()\n response_df[\"percent\"] = response_df[\"count\"] / response_df[\"answer\"] * 100\n return response_df\n\n\nclass SingleChoiceQuestionAnalizer(QuestionAnalizer):\n def get_choices_dataframe(self):\n choices_df = pd.DataFrame(self.choices)\n return choices_df[choices_df[\"question\"] == self.question[\"id\"]]\n\n def get_dataframe(self):\n response_df = self.get_responses_df().value_counts().to_frame(\"count\")\n response_df[\"value\"] = response_df.index.astype(int64)\n response_df[\"answer\"] = response_df[\"count\"].sum()\n response_df[\"percent\"] = response_df[\"count\"] / response_df[\"answer\"] * 100\n\n # Merge with choice to get label\n choice_dataframe = self.get_choices_dataframe()\n choice_dataframe = choice_dataframe[[\"label\", \"value\"]]\n choice_dataframe['value'] = choice_dataframe['value'].astype(int64)\n\n results = pd.merge(response_df, choice_dataframe, on=\"value\", how=\"left\")\n return results\n\n\nclass MultipleChoiceQuestionAnalizer(QuestionAnalizer):\n def get_choices_dataframe(self):\n choices_df = pd.DataFrame(self.choices)\n return choices_df[choices_df[\"question\"] == self.question[\"id\"]]\n\n def get_dataframe(self):\n response_df = self.get_responses_df()\n new_values = []\n for val in response_df.values:\n if isinstance(val, (list, tuple)):\n for item in val:\n new_values.append(item)\n else:\n new_values.append(val)\n response_df = pd.Series(new_values)\n response_df = response_df.value_counts().to_frame(\"count\")\n response_df[\"value\"] = response_df.index.astype(int64)\n response_df[\"answer\"] = response_df[\"count\"].sum()\n response_df[\"percent\"] = response_df[\"count\"] / response_df[\"answer\"] * 100\n\n # Merge with choice to get label\n choice_dataframe = self.get_choices_dataframe()\n choice_dataframe = choice_dataframe[[\"label\", \"value\"]]\n choice_dataframe['value'] = choice_dataframe['value'].astype(int64)\n\n results = pd.merge(response_df, choice_dataframe, on=\"value\", how=\"left\")\n return results\n\n\nclass ImageChoiceQuestionAnalizer(SingleChoiceQuestionAnalizer):\n pass\n\n\nQUESTION_ANALIZERS = {\n ESSAY: 
EssayQuestionAnalizer,\n SINGLE_CHOICE: SingleChoiceQuestionAnalizer,\n MULTIPLE_CHOICE: MultipleChoiceQuestionAnalizer,\n IMAGE_CHOICE: ImageChoiceQuestionAnalizer,\n}\n\n\nclass InstrumentAnalizer:\n @property\n def total_responses(self):\n return len(self.respondents)\n\n @property\n def total_questions(self):\n return len(self.questions)\n\n def __init__(self, instrument):\n self.instrument = instrument\n self.questions = self.get_questions()\n self.choices = self.get_choices()\n self.respondents = self.get_respondents()\n self.responses = self.get_responses()\n\n def get_questions(self):\n questions = self.instrument.questions.filter(scoring=True)\n return questions.values(\"id\", \"order\", \"text\", \"help_text\", \"question_type\")\n\n def get_choices(self):\n question_ids = [question[\"id\"] for question in self.questions]\n choices = InstrumentQuestionChoice.objects.filter(question__in=question_ids)\n return choices.values(\"id\", \"question\", \"label\", \"value\")\n\n def get_respondents(self):\n qs = self.instrument.respondents.select_related(\"respondent\", \"instrument\")\n respondents = qs.values(\"response\", \"created_at\", \"ip_address\", \"respondent__id\", \"respondent__username\")\n return respondents\n\n def get_responses(self):\n responses = [respondent[\"response\"] for respondent in self.respondents]\n return responses\n\n def get_table_header(self):\n # Build Response Table Header for Tabulator\n headers = [\n {\"title\": _(\"Question {}\").format(question[\"order\"]), \"field\": str(question[\"id\"])}\n for question in self.questions\n ]\n return headers\n\n def get_question_analizer(self, question):\n analizer_class = QUESTION_ANALIZERS[question[\"question_type\"]]\n return analizer_class(question, self)\n\n def get_questions_report(self):\n questions_reports = list()\n for question in self.questions:\n question_analizer = self.get_question_analizer(question)\n question_report = question_analizer.get_report()\n question_report_transpose = question_analizer.get_report_transpose()\n questions_reports.append(\n {\n \"id\": question[\"id\"],\n \"order\": question[\"order\"],\n \"text\": question[\"text\"],\n \"help_text\": question[\"help_text\"],\n \"question_type\": question[\"question_type\"],\n \"results\": question_report,\n \"results_transpose\": question_report_transpose,\n }\n )\n return questions_reports\n\n def get_report(self):\n report = {\n \"id\": self.instrument.id,\n \"name\": self.instrument.name,\n \"created_at\": self.instrument.created_at,\n \"expired_at\": self.instrument.expired_at,\n \"description\": self.instrument.description,\n \"unique\": self.instrument.unique,\n \"public\": self.instrument.public,\n \"max_respondent\": self.instrument.max_respondent,\n \"total_response\": self.total_responses,\n \"total_questions\": self.total_questions,\n \"completeness\": (self.total_responses / self.instrument.max_respondent) * 100,\n \"response_headers\": self.get_table_header(),\n \"response_list\": self.responses,\n \"questions_reports\": self.get_questions_report(),\n }\n return report\n\n\ninstrument = Instrument.objects.get(pk='a86f5c2d-2fe5-4423-8f8c-0ee6c67cdb09')\nanalizer = InstrumentAnalizer(instrument)\nanalizer.get_report()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbbb4c27bc72dab8c2c0905a4447a0202b8a7ceb
| 253,246 |
ipynb
|
Jupyter Notebook
|
Housing Prediction/housing EDA by Daye.ipynb
|
daye-oa/Data-Science-Projects
|
605fe78f78561725657295a7c0ba1600d8750019
|
[
"MIT"
] | null | null | null |
Housing Prediction/housing EDA by Daye.ipynb
|
daye-oa/Data-Science-Projects
|
605fe78f78561725657295a7c0ba1600d8750019
|
[
"MIT"
] | null | null | null |
Housing Prediction/housing EDA by Daye.ipynb
|
daye-oa/Data-Science-Projects
|
605fe78f78561725657295a7c0ba1600d8750019
|
[
"MIT"
] | 2 |
2020-09-30T13:40:01.000Z
|
2021-06-21T21:00:19.000Z
| 152.649789 | 202,080 | 0.861297 |
[
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"hs_d = pd.read_csv('Housing_data.csv')",
"_____no_output_____"
],
[
"hs_d.head()",
"_____no_output_____"
]
],
[
[
"# Exploratory Data Analysis",
"_____no_output_____"
]
],
[
[
"hs_d.isnull().sum()",
"_____no_output_____"
],
[
"hs_d.nunique()",
"_____no_output_____"
]
],
[
[
"# The Cateforical features for the Dataset",
"_____no_output_____"
]
],
[
[
"for i in hs_d.columns:\n if hs_d[i].nunique() <= 10:\n print ( 'count value for each category in each feature: {}'.format(hs_d.groupby(i)[i].count()), '\\n')\n\nprint ('The number of Categorical feautures: {}'.format(len(hs_d.columns)))",
"count value for each category in each feature: bedrooms\n1.0 855\n2.0 941\n3.0 1444\n4.0 872\n5.0 265\nName: bedrooms, dtype: int64 \n\ncount value for each category in each feature: bathrooms\n0.0 457\n1.0 745\n2.0 803\n3.0 1252\n4.0 792\n5.0 300\n6.0 28\nName: bathrooms, dtype: int64 \n\ncount value for each category in each feature: toilets\n0.0 465\n1.0 622\n2.0 477\n3.0 868\n4.0 1075\n5.0 662\n6.0 199\n7.0 9\nName: toilets, dtype: int64 \n\ncount value for each category in each feature: estate_flag\n0.0 1802\n1.0 2575\nName: estate_flag, dtype: int64 \n\ncount value for each category in each feature: terrace_flag\n0.0 3049\n1.0 1328\nName: terrace_flag, dtype: int64 \n\ncount value for each category in each feature: new_flag\n0.0 2038\n1.0 2339\nName: new_flag, dtype: int64 \n\ncount value for each category in each feature: serviced_flag\n0.0 2602\n1.0 1775\nName: serviced_flag, dtype: int64 \n\ncount value for each category in each feature: exec_flag\n1.0 1781\n2.0 1110\n3.0 707\n4.0 779\nName: exec_flag, dtype: int64 \n\ncount value for each category in each feature: location\najah 584\ngbagada 300\nikeja 332\nikorodu 138\nikoyi 212\niyanaipaja 25\nlekki 2098\nogba 193\nsurulere 193\nyaba 302\nName: location, dtype: int64 \n\nThe number of Categorical feautures: 17\n"
],
[
"hs_d.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4377 entries, 0 to 4376\nData columns (total 17 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 price 4377 non-null float64\n 1 bedrooms 4377 non-null float64\n 2 bathrooms 4377 non-null float64\n 3 toilets 4377 non-null float64\n 4 estate_flag 4377 non-null float64\n 5 terrace_flag 4377 non-null float64\n 6 new_flag 4377 non-null float64\n 7 serviced_flag 4377 non-null float64\n 8 estate_price 4377 non-null float64\n 9 serviced_price 4377 non-null float64\n 10 location_rank 4377 non-null float64\n 11 new_price 4377 non-null float64\n 12 exec_flag 4377 non-null float64\n 13 locationbed 4377 non-null object \n 14 location 4377 non-null object \n 15 spec_location 4377 non-null object \n 16 description 4377 non-null object \ndtypes: float64(13), object(4)\nmemory usage: 581.4+ KB\n"
]
],
[
[
"# Correlation \n\nexamining the dpendent relationship with the numerical features of the dataset",
"_____no_output_____"
]
],
[
[
"corr_hsd = hs_d.corr()",
"_____no_output_____"
],
[
"corr_hsd\n#.groupby('price').sum()",
"_____no_output_____"
],
[
"sns.pairplot(corr_hsd)",
"_____no_output_____"
]
],
[
[
"# Using the correlated plot and values \n\n1. Finding out the features that have a strong positive correlation with each other\n2. ...............................................a strong negative correlation with each other \n\nFrom observation \n\nThe feature columns (estate price, serviced price, location rank, new price) have a high correlation with each other.\n\nThe feature columns (bedrooms, bathrooms, toilets) have a high correlation with each other.\n",
"_____no_output_____"
],
[
"Further realtions can be gottern individually to get a better understanding of the Data set ",
"_____no_output_____"
],
[
"# Possible models and tagert vairables to predict\n\nTarget variables\n1. Price of an Apartment (Regression)\n2. Location of an Apartment (Classfication)\n3. Using the Description column to gauge NLP\n\nFeature Columns that should be dropped due to lack of correlation or irrelevance for Regression and Classification \n\n1. Description\n2. Specific Location\n3. Location Bed\n4. Serviced price or New price",
"_____no_output_____"
],
[
"# Preprocessing \n\n",
"_____no_output_____"
]
],
[
[
"hs_d.drop(['locationbed', 'spec_location', 'description', 'serviced_price'], axis = 1, inplace = True)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import LabelEncoder\n\nlbl = LabelEncoder()\n\nhs_d.location = lbl.fit_transform(hs_d.location)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler\n\nscl = StandardScaler()\n\ns_hsd = pd.DataFrame( scl.fit_transform(hs_d), columns = hs_d.columns)",
"_____no_output_____"
]
],
[
[
"\n# Prediction Model ",
"_____no_output_____"
]
],
[
[
"X = s_hsd.drop(['price'], axis = 1)\ny = s_hsd['price']\n#y = np.array(s_hsd['price']).reshape(-1,1)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 100)",
"_____no_output_____"
],
[
"print (x_train.shape, x_test.shape, y_train.shape, y_test.shape)",
"(3063, 12) (1314, 12) (3063,) (1314,)\n"
]
],
[
[
"# Models to use\n1. ElasticNET\n2. Decisions Tree\n3. RandomForest ",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import metrics\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn import model_selection\nfrom sklearn.pipeline import Pipeline",
"_____no_output_____"
],
[
"elas = ElasticNet()\npipe = Pipeline\nrandf = RandomForestRegressor()\ndcr = DecisionTreeRegressor()\ngrd = GridSearchCV",
"_____no_output_____"
],
[
"elas_param = {'alpha' : np.arange(0.01, 0.1, 0.01)}\n\nelas_grd = grd(elas, param_grid = elas_param, cv = 10, n_jobs = -1, verbose = True, scoring = 'r2')",
"_____no_output_____"
],
[
"elas_grd.fit(x_train, y_train)",
"Fitting 10 folds for each of 9 candidates, totalling 90 fits\n"
],
[
"randf_param = {'max_depth' : np.arange(100, 500, 50), 'min_samples_leaf': np.arange(10,50,5)}\nrandf_grd = grd(randf, randf_param, cv = 5, n_jobs = -1,verbose = True, scoring = 'r2')",
"_____no_output_____"
],
[
"randf_grd.fit(x_train, y_train)",
"Fitting 5 folds for each of 64 candidates, totalling 320 fits\n"
],
[
"dcr_param = {'min_samples_split' : np.arange(1,10,1),'min_samples_leaf': np.arange(10,50,5), 'max_depth' : np.arange(10,50,10)}\ndcr_grd = grd(dcr, dcr_param, cv = 5, n_jobs = -1,verbose = True, scoring = 'r2')\n",
"_____no_output_____"
],
[
"dcr_grd.fit(x_train, y_train)",
"Fitting 5 folds for each of 288 candidates, totalling 1440 fits\n"
],
[
"models = [('rand forest', randf_grd.best_estimator_), ('elastice net', elas_grd.best_estimator_), ('decision trees', dcr_grd.best_estimator_)]\noutcome = []\nmodel_names = []\n\n\nfor model_name, model in models:\n k_fold = model_selection.KFold (n_splits = 10, shuffle = True, random_state = 100)\n results = model_selection.cross_val_score(model, x_train, y_train, cv=k_fold, scoring = 'r2')\n outcome.append(results)\n model_names.append(model_name)\n output_message = \"%s| Mean=%f STD=%f\" % (model_name, results.mean(), results.std())\n print (output_message)\n",
"rand forest| Mean=0.921948 STD=0.013495\nelastice net| Mean=0.816061 STD=0.018882\ndecision trees| Mean=0.917857 STD=0.013715\n"
]
],
[
[
"# Classification Model",
"_____no_output_____"
],
[
"# Preprocessing ",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import OneHotEncoder",
"_____no_output_____"
],
[
"#lbl.inverse_transform(s_hsd.location)",
"_____no_output_____"
],
[
"x1= s_hsd.drop('location', axis = 1)\ny1= hs_d['location']",
"_____no_output_____"
]
],
[
[
"# Models to use\n\n1. RandomForestClasssfier\n2. K Nearest Nieghbours\n3. Decison trees Classsifer\n4. Extra tree classifier \n",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, classification_report",
"_____no_output_____"
],
[
"dcc = DecisionTreeClassifier(random_state = 100)\nextr = ExtraTreeClassifier(random_state = 100)\nknn = KNeighborsClassifier()\nranfc = RandomForestClassifier(random_state = 100)",
"_____no_output_____"
],
[
"x_tr, x_te, y_tr, y_te = train_test_split(x1, y1, test_size = 0.3, random_state = 100)",
"_____no_output_____"
],
[
"import sklearn\nclass_outcome = []\nclass_model_names= []\n\nclass_models = [('Decison Tree Classifier', dcc), ('Extra Tree Classifier', extr), ('K Nearest', knn), ('Random Forest Classifier', ranfc)]\n\n\nfor model_name, model in class_models:\n k_fold = sklearn.model_selection.KFold (n_splits = 10, shuffle = True , random_state = 100)\n results = sklearn.model_selection.cross_val_score(model, x1, y1, cv = k_fold, scoring = 'f1_macro')\n class_outcome.append(results)\n class_model_names.append(model_name)\n \n output_message = ' {} {}'.format(model_name, results.mean())\n print(output_message)",
" Decison Tree Classifier 0.9586769073082886\n Extra Tree Classifier 0.934637356142425\n K Nearest 0.6855793743392173\n Random Forest Classifier 0.9531455841638886\n"
],
[
"dcc_param = {'min_samples_split' : np.arange(0,10,2),'min_samples_leaf': np.arange(0,10,1), 'max_depth' : np.arange(5,15,1) }\ndcc_grd = grd(dcc, dcc_param, cv = 5, n_jobs = -1, verbose = True, scoring = 'f1_macro')\n\ndcc_grd.fit(x1, y1)",
"Fitting 5 folds for each of 500 candidates, totalling 2500 fits\n"
],
[
"dcc_grd.best_estimator_",
"_____no_output_____"
],
[
"extr_params = {'min_samples_split' : np.arange(0,6,1),'min_samples_leaf': np.arange(0,10,1), 'max_depth' : np.arange(16,26,2)}\nextr_grd = grd(extr, extr_params, cv = 5, n_jobs = -1, verbose = True, scoring = 'f1_macro')\n\nextr_grd.fit(x1, y1)",
"Fitting 5 folds for each of 300 candidates, totalling 1500 fits\n"
],
[
"extr_grd.best_estimator_",
"_____no_output_____"
],
[
"knn_params = {'n_neighbors' : np.arange(1,10,1), 'weights' : ['uniform', 'distance']}\nknn_grd = grd(knn, knn_params, cv = 5, n_jobs = -1, verbose = True, scoring = 'f1_macro')\n\nknn_grd.fit(x1, y1)",
"Fitting 5 folds for each of 18 candidates, totalling 90 fits\n"
],
[
"y_pred = knn_grd.predict(x_te)\n\nprint(classification_report(y_te, y_pred, digits = 4, target_names= list(lbl.inverse_transform(list(np.arange(0,10,1))))))",
" precision recall f1-score support\n\n ajah 0.8314 0.8563 0.8437 167\n gbagada 0.8140 0.8537 0.8333 82\n ikeja 0.8941 0.6909 0.7795 110\n ikorodu 0.9118 0.8378 0.8732 37\n ikoyi 0.9206 0.9355 0.9280 62\n iyanaipaja 0.5000 0.1250 0.2000 8\n lekki 0.9468 0.9858 0.9659 632\n ogba 0.8070 0.9200 0.8598 50\n surulere 0.7708 0.5781 0.6607 64\n yaba 0.7339 0.7843 0.7583 102\n\n accuracy 0.8866 1314\n macro avg 0.8130 0.7567 0.7702 1314\nweighted avg 0.8841 0.8866 0.8824 1314\n\n"
],
[
"knn_grd.best_estimator_",
"_____no_output_____"
],
[
"ranfc_params = {'min_samples_split' : np.arange(0,10,1),'min_samples_leaf': np.arange(0,10,1), 'max_depth' : np.arange(20,40,10), 'n_estimators' : np.arange(30,70,10)}\nranfc_grd =grd(ranfc, ranfc_params, cv = 2, n_jobs = 3 , verbose = True, scoring = 'f1_macro')\n\nranfc_grd.fit(x1, y1)",
"Fitting 2 folds for each of 800 candidates, totalling 1600 fits\n"
],
[
"ranfc_grd.best_estimator_",
"_____no_output_____"
],
[
"class_outcome_one = []\nclass_model_names_one = []\n\nclass_models = [('Decison Tree Classifier', dcc_grd.best_estimator_),\n ('Extra Tree Classifier', extr_grd.best_estimator_),\n ('K Nearest', knn_grd.best_estimator_),\n ('Random Forest Classifier', ranfc_grd.best_estimator_)]\n\n\nfor model_name, model in class_models:\n k_fold = sklearn.model_selection.KFold (n_splits = 10, shuffle = True , random_state = 100)\n results = sklearn.model_selection.cross_val_score(model, x1, y1, cv = k_fold, scoring = 'f1_macro')\n class_outcome_one.append(results)\n class_model_names_one.append(model_name)\n \n output_message = ' {} {}'.format(model_name, results.mean())\n print(output_message)",
" Decison Tree Classifier 0.9625395453238372\n Extra Tree Classifier 0.9379588686624192\n K Nearest 0.8032979726023909\n Random Forest Classifier 0.9537808125779526\n"
]
],
[
[
"# Working with Piplines",
"_____no_output_____"
]
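,
[
"A minimal, hedged sketch of how the imported `Pipeline` could be used with the steps above; the notebook stops at this heading, so the exact pipeline (scaler plus random forest) is an assumption for illustration only.",
"_____no_output_____"
],
[
"# Illustrative sketch only (assumed pipeline): chain scaling and one of the models above into a single estimator\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\n\npipe = Pipeline([('scaler', StandardScaler()), ('model', RandomForestRegressor(random_state=100))])\npipe.fit(x_train, y_train)\nprint(pipe.score(x_test, y_test))",
"_____no_output_____"
]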
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
cbbb53351f3fbce8c309e3dd68d4769159c2a481
| 9,914 |
ipynb
|
Jupyter Notebook
|
VC_Investment.ipynb
|
csd-oss/vc-investmemt
|
158b1788102ecf0c7a56647b4e21c0bccf9b6d6e
|
[
"MIT"
] | 1 |
2020-04-08T15:15:50.000Z
|
2020-04-08T15:15:50.000Z
|
VC_Investment.ipynb
|
csd-oss/vc-investmemt
|
158b1788102ecf0c7a56647b4e21c0bccf9b6d6e
|
[
"MIT"
] | 6 |
2020-04-06T19:25:07.000Z
|
2020-04-09T21:16:46.000Z
|
VC_Investment.ipynb
|
csd-oss/vc-investmemt
|
158b1788102ecf0c7a56647b4e21c0bccf9b6d6e
|
[
"MIT"
] | null | null | null | 27.538889 | 233 | 0.468428 |
[
[
[
"<a href=\"https://colab.research.google.com/github/csd-oss/vc-investmemt/blob/master/VC_Investment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# General preparation and GDrive conection",
"_____no_output_____"
]
],
[
[
"import pandas as pd \nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Uploiding info from [OECD](https://stats.oecd.org/Index.aspx?DataSetCode=VC_INVEST#)",
"_____no_output_____"
]
],
[
[
"vc_path = \"https://raw.githubusercontent.com/csd-oss/vc-investmemt/master/VC_INVEST_06042020205501847.csv\"\ndf = pd.read_csv(vc_path)\ndf",
"_____no_output_____"
]
],
[
[
"## Droping all not needed info",
"_____no_output_____"
]
],
[
[
"df = df.drop(columns=[\"Reference Period Code\",\"Reference Period\",\"Flag Codes\",\"Flags\",\"SUBJECT\",\"Measure\",\"Unit\",\"Year\",\"Subject\",\"SUBJECT\",\"Development stages\"])\ndf",
"_____no_output_____"
]
],
[
[
"## Deviding data into 2 dataframes",
"_____no_output_____"
]
],
[
[
"df_usd = df.query('MEASURE == \"USD_V\"')\ndf_gdp = df.query('MEASURE == \"SH_GDP\"')",
"_____no_output_____"
]
],
[
[
"# Playing with USD data",
"_____no_output_____"
],
[
"## Creating filters",
"_____no_output_____"
]
],
[
[
"filt_total_us = (df_usd['STAGES'] == \"VC_T\") & (df_usd['LOCATION']== \"USA\")\nfilt_seed_us = (df_usd['STAGES'] == \"SEED\") & (df_usd['LOCATION']== \"USA\")\nfilt_start_us = (df_usd['STAGES'] == \"START\") & (df_usd['LOCATION']== \"USA\")\nfilt_later_us = (df_usd['STAGES'] == \"LATER\") & (df_usd['LOCATION']== \"USA\")",
"_____no_output_____"
]
],
[
[
"## Ploting US VC data ",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nplt.style.use(\"ggplot\")\nax.plot(df_usd.loc[filt_total_us].TIME, df_usd.loc[filt_total_us].Value, label = \"Total\")\nax.plot(df_usd.loc[filt_seed_us].TIME, df_usd.loc[filt_seed_us].Value, label = \"Seed\")\nax.plot(df_usd.loc[filt_start_us].TIME, df_usd.loc[filt_start_us].Value, label = \"Series A\")\nax.plot(df_usd.loc[filt_later_us].TIME, df_usd.loc[filt_later_us].Value, label = \"Later Stages\")\n\nax.set_xlabel(\"Years\")\nax.set_label(\"Millions US$\")\nax.set_title(\"USA VC investment\")\nax.grid(True)\nax.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Playing with GDP data",
"_____no_output_____"
]
],
[
[
"filt_total_us = (df_gdp['STAGES'] == \"VC_T\") & (df_gdp['LOCATION']== \"USA\")\nfilt_seed_us = (df_gdp['STAGES'] == \"SEED\") & (df_gdp['LOCATION']== \"USA\")\nfilt_start_us = (df_gdp['STAGES'] == \"START\") & (df_gdp['LOCATION']== \"USA\")\nfilt_later_us = (df_gdp['STAGES'] == \"LATER\") & (df_gdp['LOCATION']== \"USA\")",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nplt.style.use(\"ggplot\")\nax.plot(df_gdp.loc[filt_total_us].TIME, df_gdp.loc[filt_total_us].Value, label = \"Total\")\nax.plot(df_gdp.loc[filt_seed_us].TIME, df_gdp.loc[filt_seed_us].Value, label = \"Seed\")\nax.plot(df_gdp.loc[filt_start_us].TIME, df_gdp.loc[filt_start_us].Value, label = \"Series A\")\nax.plot(df_gdp.loc[filt_later_us].TIME, df_gdp.loc[filt_later_us].Value, label = \"Later Stages\")\n\nax.set_xlabel(\"Years\")\nax.set_label(\"%GDP\")\nax.set_title(\"USA VC investment\")\nax.grid(True)\nax.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Countries Yearly Sum",
"_____no_output_____"
]
],
[
[
"filt_total = (df_usd['STAGES'] == \"VC_T\") & (df_usd['TIME'] >= 2007) #not enoght data till 2007\ndf_usd[filt_total].groupby(['TIME'])['Value'].sum().plot()\nplt.title('Total investment')\nplt.ylabel('Millions USD')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Countries Yearly mean GDP Share",
"_____no_output_____"
]
],
[
[
"filt_total = (df_gdp['STAGES'] == \"VC_T\") & (df_gdp['TIME'] >= 2007) #not enoght data till 2007\ndf_gdp[filt_total].groupby(['TIME'])['Value'].mean().plot()\nplt.title('Total investment')\nplt.ylabel('% GDP')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 2018 Pie Chart creation",
"_____no_output_____"
]
],
[
[
"filt_total_2018 = (df_usd['TIME']==2018)&(df_usd['STAGES']=='VC_T') \nfilt_other = df_usd['Value'] > 2185.094678\npie_2018 = df_usd[filt_total_2018 & filt_other]\npie_2018.drop(columns=['STAGES','MEASURE','TIME','Unit Code','PowerCode Code','PowerCode'], inplace=True)\npie_2018",
"_____no_output_____"
],
[
"pie_2018.loc[1]=['OTH', 'Other', df_usd[filt_total_2018 & ~filt_other]['Value'].sum()]\npie_2018",
"_____no_output_____"
],
[
"expl = [0,0.1,0]\nplt.figure(figsize=(40,10))\nplt.pie(pie_2018['Value'], explode=expl)\nplt.legend(pie_2018['Country'],fontsize='11',loc='best')\n# plt.style.use('qqplot')\nplt.title('2018 Total Investment',fontdict={'fontsize':'20'},loc='left')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbbb85a3628548a69dad37c71cfedba1658f4f18
| 15,091 |
ipynb
|
Jupyter Notebook
|
someFunctionsAndListComprehensions.ipynb
|
ba-13/PythonBasics
|
ff3f024bdf660b7578e449f508ea8b8d42090263
|
[
"Apache-2.0"
] | null | null | null |
someFunctionsAndListComprehensions.ipynb
|
ba-13/PythonBasics
|
ff3f024bdf660b7578e449f508ea8b8d42090263
|
[
"Apache-2.0"
] | null | null | null |
someFunctionsAndListComprehensions.ipynb
|
ba-13/PythonBasics
|
ff3f024bdf660b7578e449f508ea8b8d42090263
|
[
"Apache-2.0"
] | null | null | null | 18.7699 | 383 | 0.446955 |
[
[
[
"mylist = [1,2,3,4]",
"_____no_output_____"
],
[
"for n in range(5):\n print(n);",
"0\n1\n2\n3\n4\n"
],
[
"for n in range(3,15):\n print(n);",
"3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n"
],
[
"for n in range(2,15,3):\n print(n);",
"2\n5\n8\n11\n14\n"
],
[
"range(7,21,6) #is a generator",
"_____no_output_____"
],
[
"list(range(7,21,6))",
"_____no_output_____"
],
[
"index_count = 0;\n# for letter in 'abcde':\n# print(f'At index {index_count} the letter is {letter}')\n# index_count += index_count+1\nfor letter in 'abcde':\n print('At index {}, the letter is {}'.format(index_count,letter))\n index_count += 1\n#we have a counter built-in in python, for enumeration.",
"At index 0, the letter is a\nAt index 1, the letter is b\nAt index 2, the letter is c\nAt index 3, the letter is d\nAt index 4, the letter is e\n"
],
[
"word = 'Focus';\nfor item in enumerate(word):\n print(item)",
"(0, 'F')\n(1, 'o')\n(2, 'c')\n(3, 'u')\n(4, 's')\n"
],
[
"word = 'Focus';\nfor index,letter in enumerate(word):\n print(index, '\\t', letter, '\\n')",
"0 \t F \n\n1 \t o \n\n2 \t c \n\n3 \t u \n\n4 \t s \n\n"
],
[
"#zip function will combine different lists into an n-tuple.\nlist1 = [0, 1, 2, 3, 4];\nlist2 = ['a', 'b', 'c', 'd', 'e'];\nlist3 = ['a', 'e', 'i', 'o', 'u', 'j']; #the extra terms aren't included in the zipped list.\ncombinedList = zip(list1, list2, list3);\nfor item in combinedList:\n print(item)",
"(0, 'a', 'a')\n(1, 'b', 'e')\n(2, 'c', 'i')\n(3, 'd', 'o')\n(4, 'e', 'u')\n"
],
[
"list(zip(list1,list2))",
"_____no_output_____"
],
[
"'bete' in ['mauj', 'krdi'] #in operator is used to check if an element is in a list or strings or dictionaries",
"_____no_output_____"
],
[
"dict1 = {1:'MTH', 2:'PHY'}",
"_____no_output_____"
],
[
"dict1.items()",
"_____no_output_____"
],
[
"dict1.values()",
"_____no_output_____"
],
[
"list(dict1.values())",
"_____no_output_____"
],
[
"print('MTH' in dict1.values())\nprint(3 in dict1)",
"True\nFalse\n"
],
[
"mylist = [1,2,3,4,5,100]",
"_____no_output_____"
],
[
"print(min(mylist))\nprint(max(mylist))",
"1\n100\n"
],
[
"from random import shuffle",
"_____no_output_____"
],
[
"mylist2 = [1,2,3,4,5,6,7]",
"_____no_output_____"
],
[
"shuffle(mylist2) #doesn't return a value, but directly modifies the list, inplace function",
"_____no_output_____"
],
[
"mylist2",
"_____no_output_____"
],
[
"from random import randint",
"_____no_output_____"
],
[
"mini = 1\nmaxi = 10\nmyRandInt = randint(mini, maxi) #returning a value",
"_____no_output_____"
],
[
"myRandInt",
"_____no_output_____"
],
[
"InputNumber = input('Enter the number you want: ') #saved as string type",
"Enter the number you want: 13\n"
],
[
"type(InputNumber)",
"_____no_output_____"
],
[
"# float(InputNumber)\nint(InputNumber)",
"_____no_output_____"
],
[
"numberByConversion = int(input('Enter the number that would be converted to an int directly: '))",
"Enter the number that would be converted to an int directly: 20\n"
],
[
"type(numberByConversion)",
"_____no_output_____"
]
],
[
[
"# LIST COMPREHENSION",
"_____no_output_____"
]
],
[
[
"mystring = 'HelloWorld'\nmylist = []\n\nfor letter in mystring:\n mylist.append(letter) #we used this for loop to make a list out of an iterable object.\n \nprint(mylist)",
"['H', 'e', 'l', 'l', 'o', 'W', 'o', 'r', 'l', 'd']\n"
],
[
"mylist2 = [gfgdfdg for gfgdfdg in mystring] #flattened out for loop\nprint(mylist2)",
"['H', 'e', 'l', 'l', 'o', 'W', 'o', 'r', 'l', 'd']\n"
],
[
"n = 2\nmylist3 = [n for letter in mystring] #just putting the element, the same number of times as the length of the iterable object.\nprint(mylist3)",
"[2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n"
],
[
"mylist4 = [x**3 for x in range(1,10)]\nmylist4",
"_____no_output_____"
],
[
"mylist5 = [x%15 for x in range(1,30) if x%4 == 0]\nprint(mylist5)",
"[4, 8, 12, 1, 5, 9, 13]\n"
],
[
"celcius = [0,10,38.5,64]\nfahrenheit = [((9/5)*temp + 32) for temp in celcius if temp < 60] #one-liners should be used uptil the point readability is ok.\nfahrenheit",
"_____no_output_____"
],
[
"mylist6 = [x/2.5 if x%2==0 else 'ODD' for x in range(15)]\nprint(mylist6)",
"[0.0, 'ODD', 0.8, 'ODD', 1.6, 'ODD', 2.4, 'ODD', 3.2, 'ODD', 4.0, 'ODD', 4.8, 'ODD', 5.6]\n"
],
[
"mylist = []\nfor x in [2,3,4]:\n for y in [1,10,100]:\n mylist.append(x*y);\n# print(mylist)\nprint(mylist)",
"[2, 20, 200, 3, 30, 300, 4, 40, 400]\n"
],
[
"mylistxy = [x*y for x in [2,4,6] for y in [1,10,100]]\nprint(mylistxy)",
"[2, 20, 200, 4, 40, 400, 6, 60, 600]\n"
],
[
"mylist7 = [x if x>2 for x in range(1,10,2) ] #note how this doesn't work. Compare with the above if as well as if-else.\nprint(mylist7)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbb9229803ad345d08712ee2f5d74a6e15983dd
| 26,292 |
ipynb
|
Jupyter Notebook
|
Ranger-Mish-ImageWoof-Training.ipynb
|
lgvaz/Ranger-Mish-ImageWoof-5
|
b0aa73508870de072329d058f0add165da462d6d
|
[
"Apache-2.0"
] | 119 |
2019-08-29T07:38:55.000Z
|
2021-11-11T13:24:24.000Z
|
Ranger-Mish-ImageWoof-Training.ipynb
|
lgvaz/Ranger-Mish-ImageWoof-5
|
b0aa73508870de072329d058f0add165da462d6d
|
[
"Apache-2.0"
] | 7 |
2019-08-29T04:42:40.000Z
|
2020-10-23T10:37:35.000Z
|
Ranger-Mish-ImageWoof-Training.ipynb
|
lgvaz/Ranger-Mish-ImageWoof-5
|
b0aa73508870de072329d058f0add165da462d6d
|
[
"Apache-2.0"
] | 28 |
2019-08-29T04:30:40.000Z
|
2021-08-07T04:09:56.000Z
| 28.2103 | 136 | 0.366119 |
[
[
[
"#Below is the training settings used for ImageWoof 5 epoch, 128px. You can kick off a run in the very next cell.\n#There are some other sample runs showing how some of the settings were adjusted...if you find better settings please let us know!",
"_____no_output_____"
],
[
"#Run this to reproduce the results of our entry:\n%run train.py --run 5 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 5 --lr 4e-3 --gpu 0 \\\n --opt ranger --mom .95 --sched_type flat_and_anneal --ann_start 0.72",
"lr: 0.004; eff_lr: 0.004; size: 128; alpha: 0.99; mom: 0.95; eps: 1e-06\n/home/ubuntu/.fastai/data/imagewoof\n698 annealing start\nUsing Ranger Optimizer...\nUsing Ranger Optimizer...\n"
],
[
"#Sample run below...",
"_____no_output_____"
],
[
"%run train.py --run 5 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 5 --lr 4e-3 --gpu 0 \\\n --opt ranger --mom .95 --sched_type flat_and_anneal --ann_start 0.72",
"lr: 0.004; eff_lr: 0.004; size: 128; alpha: 0.99; mom: 0.95; eps: 1e-06\n/home/ubuntu/.fastai/data/imagewoof\n698 annealing start\nUsing Ranger Optimizer...\nUsing Ranger Optimizer...\n"
],
[
"#possible ideas to test.....\n%run train.py --run 5 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 20 --lr 3e-3 --gpu 0 \\\n --opt ranger --sched_type flat_and_anneal --ann_start 0.7",
"_____no_output_____"
],
[
"%run train.py --run 5 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 20 --lr 5e-3 --gpu 0 \\\n --opt ranger --sched_type flat_and_anneal --ann_start 0.7",
"_____no_output_____"
],
[
"%run train.py --run 5 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 20 --lr 8e-3 --gpu 0 \\\n --opt ranger --sched_type flat_and_anneal --ann_start 0.7",
"_____no_output_____"
],
[
"%run train.py --run 2 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 5 --lr 2e-3 --gpu 0 \\\n --opt adam --mom .95 --alpha .98 --sched_type flat_and_anneal --ann_start 0.5",
"_____no_output_____"
],
[
"%run train.py --run 2 --woof 1 --size 128 --bs 64 --mixup 0 --sa 0 --epoch 80 --lr 8e-2 --gpu 0 \\\n --opt Adams --mom .95 --alpha .98 --sched_type flat_and_anneal --ann_start 0.3",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbba5464e6c95a41def3cc0f305c37438051961
| 33,205 |
ipynb
|
Jupyter Notebook
|
Classification/Decision_Tree.ipynb
|
mahmoudmuawad/scikit-learn_Common_Practice
|
72c8b606c36e98452fbdfd5796fc00c8778d1148
|
[
"MIT"
] | 1 |
2022-02-20T14:40:51.000Z
|
2022-02-20T14:40:51.000Z
|
Classification/Decision_Tree.ipynb
|
mahmoudmuawad/scikit-learn_Common_Practice
|
72c8b606c36e98452fbdfd5796fc00c8778d1148
|
[
"MIT"
] | null | null | null |
Classification/Decision_Tree.ipynb
|
mahmoudmuawad/scikit-learn_Common_Practice
|
72c8b606c36e98452fbdfd5796fc00c8778d1148
|
[
"MIT"
] | null | null | null | 99.416168 | 24,236 | 0.829574 |
[
[
[
"# Decision Tree\nis a classifiers which splits data into optimal splits to classify the points",
"_____no_output_____"
],
[
"#### Preprocessing",
"_____no_output_____"
]
],
[
[
"# importing libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"dataset = pd.read_csv('../datasets/Social_Network_Ads.csv')\nx = dataset.iloc[:,2:-1].values\ny = dataset.iloc[:,-1].values\ndataset.head()",
"_____no_output_____"
],
[
"# standrize the values\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nx = scaler.fit_transform(x)\n\n# split into train and test\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size =0.2, random_state=0)",
"/home/mahmoud/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:590: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler.\n warnings.warn(msg, DataConversionWarning)\n/home/mahmoud/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:590: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler.\n warnings.warn(msg, DataConversionWarning)\n"
]
],
[
[
"* Decision Tree model",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\ndecision_TreeModel = DecisionTreeClassifier(random_state=0)\ndecision_TreeModel.fit(x_train, y_train)",
"_____no_output_____"
]
],
[
[
"* confusion matrix ",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, decision_TreeModel.predict(x_test))\nprint(decision_TreeModel.score(x_test, y_test))\npd.DataFrame(cm)",
"0.9\n"
]
],
[
[
"* Visualization",
"_____no_output_____"
]
],
[
[
"# Visualising the Training set results\nfrom matplotlib.colors import ListedColormap\nx_set, y_set = x_train, y_train\nx1, x2= np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(x1, x2, decision_TreeModel.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape))\nplt.xlim(x1.min(), x1.max())\nplt.ylim(x2.min(), x2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1], label = j)\nplt.title('Decision Tree Classifxcation (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbbb261392529476864b9325db894ec148c51ec
| 28,126 |
ipynb
|
Jupyter Notebook
|
examples/notebooks/01_Load_Pointcloud_no_colors.ipynb
|
victorjoos/pypcd
|
307cb7f4e6c32e8aaa2fa3f63f38802e58456be8
|
[
"BSD-3-Clause"
] | null | null | null |
examples/notebooks/01_Load_Pointcloud_no_colors.ipynb
|
victorjoos/pypcd
|
307cb7f4e6c32e8aaa2fa3f63f38802e58456be8
|
[
"BSD-3-Clause"
] | null | null | null |
examples/notebooks/01_Load_Pointcloud_no_colors.ipynb
|
victorjoos/pypcd
|
307cb7f4e6c32e8aaa2fa3f63f38802e58456be8
|
[
"BSD-3-Clause"
] | null | null | null | 76.222222 | 16,724 | 0.792541 |
[
[
[
"%matplotlib inline\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Install pypcd from this repository\nimport notebook_helper\n!{notebook_helper.get_install_cmd(quiet=True)}",
"_____no_output_____"
],
[
"import pypcd\nprint(pypcd.__version__)",
"0.1.1\n"
],
[
"# Intentionally pasting the example point cloud into this cell\n# so the reader can inspect the ascii file format\n#\n# Source: http://pointclouds.org/documentation/tutorials/pcd_file_format.php\n\npcd_string = \"\"\"# .PCD v.7 - Point Cloud Data file format\nVERSION .7\nFIELDS x y z rgb\nSIZE 4 4 4 4\nTYPE F F F F\nCOUNT 1 1 1 1\nWIDTH 213\nHEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0\nPOINTS 213\nDATA ascii\n0.93773 0.33763 0 4.2108e+06\n0.90805 0.35641 0 4.2108e+06\n0.81915 0.32 0 4.2108e+06\n0.97192 0.278 0 4.2108e+06\n0.944 0.29474 0 4.2108e+06\n0.98111 0.24247 0 4.2108e+06\n0.93655 0.26143 0 4.2108e+06\n0.91631 0.27442 0 4.2108e+06\n0.81921 0.29315 0 4.2108e+06\n0.90701 0.24109 0 4.2108e+06\n0.83239 0.23398 0 4.2108e+06\n0.99185 0.2116 0 4.2108e+06\n0.89264 0.21174 0 4.2108e+06\n0.85082 0.21212 0 4.2108e+06\n0.81044 0.32222 0 4.2108e+06\n0.74459 0.32192 0 4.2108e+06\n0.69927 0.32278 0 4.2108e+06\n0.8102 0.29315 0 4.2108e+06\n0.75504 0.29765 0 4.2108e+06\n0.8102 0.24399 0 4.2108e+06\n0.74995 0.24723 0 4.2108e+06\n0.68049 0.29768 0 4.2108e+06\n0.66509 0.29002 0 4.2108e+06\n0.69441 0.2526 0 4.2108e+06\n0.62807 0.22187 0 4.2108e+06\n0.58706 0.32199 0 4.2108e+06\n0.52125 0.31955 0 4.2108e+06\n0.49351 0.32282 0 4.2108e+06\n0.44313 0.32169 0 4.2108e+06\n0.58678 0.2929 0 4.2108e+06\n0.53436 0.29164 0 4.2108e+06\n0.59308 0.24134 0 4.2108e+06\n0.5357 0.2444 0 4.2108e+06\n0.50043 0.31235 0 4.2108e+06\n0.44107 0.29711 0 4.2108e+06\n0.50727 0.22193 0 4.2108e+06\n0.43957 0.23976 0 4.2108e+06\n0.8105 0.21112 0 4.2108e+06\n0.73555 0.2114 0 4.2108e+06\n0.69907 0.21082 0 4.2108e+06\n0.63327 0.21154 0 4.2108e+06\n0.59165 0.21201 0 4.2108e+06\n0.52477 0.21491 0 4.2108e+06\n0.49375 0.21006 0 4.2108e+06\n0.4384 0.19632 0 4.2108e+06\n0.43425 0.16052 0 4.2108e+06\n0.3787 0.32173 0 4.2108e+06\n0.33444 0.3216 0 4.2108e+06\n0.23815 0.32199 0 4.808e+06\n0.3788 0.29315 0 4.2108e+06\n0.33058 0.31073 0 4.2108e+06\n0.3788 0.24399 0 4.2108e+06\n0.30249 0.29189 0 4.2108e+06\n0.23492 0.29446 0 4.808e+06\n0.29465 0.24399 0 4.2108e+06\n0.23514 0.24172 0 4.808e+06\n0.18836 0.32277 0 4.808e+06\n0.15992 0.32176 0 4.808e+06\n0.08642 0.32181 0 4.808e+06\n0.039994 0.32283 0 4.808e+06\n0.20039 0.31211 0 4.808e+06\n0.1417 0.29506 0 4.808e+06\n0.20921 0.22332 0 4.808e+06\n0.13884 0.24227 0 4.808e+06\n0.085123 0.29441 0 4.808e+06\n0.048446 0.31279 0 4.808e+06\n0.086957 0.24399 0 4.808e+06\n0.3788 0.21189 0 4.2108e+06\n0.29465 0.19323 0 4.2108e+06\n0.23755 0.19348 0 4.808e+06\n0.29463 0.16054 0 4.2108e+06\n0.23776 0.16054 0 4.808e+06\n0.19016 0.21038 0 4.808e+06\n0.15704 0.21245 0 4.808e+06\n0.08678 0.21169 0 4.808e+06\n0.012746 0.32168 0 4.808e+06\n-0.075715 0.32095 0 4.808e+06\n-0.10622 0.32304 0 4.808e+06\n-0.16391 0.32118 0 4.808e+06\n0.00088411 0.29487 0 4.808e+06\n-0.057568 0.29457 0 4.808e+06\n-0.0034333 0.24399 0 4.808e+06\n-0.055185 0.24185 0 4.808e+06\n-0.10983 0.31352 0 4.808e+06\n-0.15082 0.29453 0 4.808e+06\n-0.11534 0.22049 0 4.808e+06\n-0.15155 0.24381 0 4.808e+06\n-0.1912 0.32173 0 4.808e+06\n-0.281 0.3185 0 4.808e+06\n-0.30791 0.32307 0 4.808e+06\n-0.33854 0.32148 0 4.808e+06\n-0.21248 0.29805 0 4.808e+06\n-0.26372 0.29905 0 4.808e+06\n-0.22562 0.24399 0 4.808e+06\n-0.25035 0.2371 0 4.808e+06\n-0.29941 0.31191 0 4.808e+06\n-0.35845 0.2954 0 4.808e+06\n-0.29231 0.22236 0 4.808e+06\n-0.36101 0.24172 0 4.808e+06\n-0.0034393 0.21129 0 4.808e+06\n-0.07306 0.21304 0 4.808e+06\n-0.10579 0.2099 0 4.808e+06\n-0.13642 0.21411 0 4.808e+06\n-0.22562 0.19323 0 4.808e+06\n-0.24439 0.19799 0 4.808e+06\n-0.22591 0.16041 0 
4.808e+06\n-0.23466 0.16082 0 4.808e+06\n-0.3077 0.20998 0 4.808e+06\n-0.3413 0.21239 0 4.808e+06\n-0.40551 0.32178 0 4.2108e+06\n-0.50568 0.3218 0 4.2108e+06\n-0.41732 0.30844 0 4.2108e+06\n-0.44237 0.28859 0 4.2108e+06\n-0.41591 0.22004 0 4.2108e+06\n-0.44803 0.24236 0 4.2108e+06\n-0.50623 0.29315 0 4.2108e+06\n-0.50916 0.24296 0 4.2108e+06\n-0.57019 0.22334 0 4.2108e+06\n-0.59611 0.32199 0 4.2108e+06\n-0.65104 0.32199 0 4.2108e+06\n-0.72566 0.32129 0 4.2108e+06\n-0.75538 0.32301 0 4.2108e+06\n-0.59653 0.29315 0 4.2108e+06\n-0.65063 0.29315 0 4.2108e+06\n-0.59478 0.24245 0 4.2108e+06\n-0.65063 0.24399 0 4.2108e+06\n-0.70618 0.29525 0 4.2108e+06\n-0.76203 0.31284 0 4.2108e+06\n-0.70302 0.24183 0 4.2108e+06\n-0.77062 0.22133 0 4.2108e+06\n-0.41545 0.21099 0 4.2108e+06\n-0.45004 0.19812 0 4.2108e+06\n-0.4475 0.1673 0 4.2108e+06\n-0.52031 0.21236 0 4.2108e+06\n-0.55182 0.21045 0 4.2108e+06\n-0.5965 0.21131 0 4.2108e+06\n-0.65064 0.2113 0 4.2108e+06\n-0.72216 0.21286 0 4.2108e+06\n-0.7556 0.20987 0 4.2108e+06\n-0.78343 0.31973 0 4.2108e+06\n-0.87572 0.32111 0 4.2108e+06\n-0.90519 0.32263 0 4.2108e+06\n-0.95526 0.34127 0 4.2108e+06\n-0.79774 0.29271 0 4.2108e+06\n-0.85618 0.29497 0 4.2108e+06\n-0.79975 0.24326 0 4.2108e+06\n-0.8521 0.24246 0 4.2108e+06\n-0.91157 0.31224 0 4.2108e+06\n-0.95031 0.29572 0 4.2108e+06\n-0.92223 0.2213 0 4.2108e+06\n-0.94979 0.24354 0 4.2108e+06\n-0.78641 0.21505 0 4.2108e+06\n-0.87094 0.21237 0 4.2108e+06\n-0.90637 0.20934 0 4.2108e+06\n-0.93777 0.21481 0 4.2108e+06\n0.22244 -0.0296 0 4.808e+06\n0.2704 -0.078167 0 4.808e+06\n0.24416 -0.056883 0 4.808e+06\n0.27311 -0.10653 0 4.808e+06\n0.26172 -0.10653 0 4.808e+06\n0.2704 -0.1349 0 4.808e+06\n0.24428 -0.15599 0 4.808e+06\n0.19017 -0.025297 0 4.808e+06\n0.14248 -0.02428 0 4.808e+06\n0.19815 -0.037432 0 4.808e+06\n0.14248 -0.03515 0 4.808e+06\n0.093313 -0.02428 0 4.808e+06\n0.044144 -0.02428 0 4.808e+06\n0.093313 -0.03515 0 4.808e+06\n0.044144 -0.03515 0 4.808e+06\n0.21156 -0.17357 0 4.808e+06\n0.029114 -0.12594 0 4.2108e+06\n0.036583 -0.15619 0 4.2108e+06\n0.22446 -0.20514 0 4.808e+06\n0.2208 -0.2369 0 4.808e+06\n0.2129 -0.208 0 4.808e+06\n0.19316 -0.25672 0 4.808e+06\n0.14497 -0.27484 0 4.808e+06\n0.030167 -0.18748 0 4.2108e+06\n0.1021 -0.27453 0 4.808e+06\n0.1689 -0.2831 0 4.808e+06\n0.13875 -0.28647 0 4.808e+06\n0.086993 -0.29568 0 4.808e+06\n0.044924 -0.3154 0 4.808e+06\n-0.0066125 -0.02428 0 4.808e+06\n-0.057362 -0.02428 0 4.808e+06\n-0.0066125 -0.03515 0 4.808e+06\n-0.057362 -0.03515 0 4.808e+06\n-0.10653 -0.02428 0 4.808e+06\n-0.15266 -0.025282 0 4.808e+06\n-0.10653 -0.03515 0 4.808e+06\n-0.16036 -0.037257 0 4.808e+06\n0.0083286 -0.1259 0 4.2108e+06\n0.0007442 -0.15603 0 4.2108e+06\n-0.1741 -0.17381 0 4.808e+06\n-0.18502 -0.02954 0 4.808e+06\n-0.20707 -0.056403 0 4.808e+06\n-0.23348 -0.07764 0 4.808e+06\n-0.2244 -0.10653 0 4.808e+06\n-0.23604 -0.10652 0 4.808e+06\n-0.20734 -0.15641 0 4.808e+06\n-0.23348 -0.13542 0 4.808e+06\n0.0061083 -0.18729 0 4.2108e+06\n-0.066235 -0.27472 0 4.808e+06\n-0.17577 -0.20789 0 4.808e+06\n-0.10861 -0.27494 0 4.808e+06\n-0.15584 -0.25716 0 4.808e+06\n-0.0075775 -0.31546 0 4.808e+06\n-0.050817 -0.29595 0 4.808e+06\n-0.10306 -0.28653 0 4.808e+06\n-0.1319 -0.2831 0 4.808e+06\n-0.18716 -0.20571 0 4.808e+06\n-0.18369 -0.23729 0 4.808e+06\"\"\"",
"_____no_output_____"
],
[
"# load cloud.pcd for visualization\ncloud = pypcd.PointCloud.from_buffer(pcd_string)\n\n# set the size of pyplot charts\nplt.rcParams['figure.figsize'] = (14, 6)\n\n# plot the points of the columns x and y\nplt.scatter(cloud.pc_data['x'], -cloud.pc_data['y'])\n\n# scale the axis equally \nplt.axis('scaled');",
"_____no_output_____"
],
[
"cloud.get_metadata()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbbc0c2deea23c6b606cb3b9536f0faa24ab493
| 2,902 |
ipynb
|
Jupyter Notebook
|
Code/RunCode.ipynb
|
SuihongSong/-GeoModeling_Unconditional_ProGAN
|
697f1e7ca0e6d5831145326ba920a2d7a7bf9701
|
[
"MIT"
] | 5 |
2020-06-21T05:54:24.000Z
|
2022-03-17T07:14:12.000Z
|
Code/RunCode.ipynb
|
SuihongSong/-GeoModeling_Unconditional_ProGAN
|
697f1e7ca0e6d5831145326ba920a2d7a7bf9701
|
[
"MIT"
] | null | null | null |
Code/RunCode.ipynb
|
SuihongSong/-GeoModeling_Unconditional_ProGAN
|
697f1e7ca0e6d5831145326ba920a2d7a7bf9701
|
[
"MIT"
] | 5 |
2020-09-08T18:12:49.000Z
|
2021-06-06T14:03:52.000Z
| 29.917526 | 181 | 0.620262 |
[
[
[
"%cd /home/users/suihong/1-Unconditional_Upload/",
"/home/users/suihong/1-Unconditional_Upload\n"
],
[
"% run train.py",
"Initializing TensorFlow...\nRunning util_scripts.evaluate_metrics_swd_distributions_training_trad_prog()...\nLogging output to /scratch/users/suihong/ProGAN_MultiChannel_Reusults_ConditionedtoMultiConditions_TF/881-Unconditional_trad/metric-swd_distri_training_trad_prog-40.txt\nStreaming data using dataset.TFRecordDataset...\nDataset shape = [1, 64, 64]\nDynamic range = [0, 255]\nLabel size = 0\nInitializing metrics.swd_distributions_training_trad_prog.API...\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
cbbbc7299cec9e3770640e58361d751d4d0ad25f
| 15,992 |
ipynb
|
Jupyter Notebook
|
aip2-dgs/other/reference.ipynb
|
nurseiit/comm-unist
|
e7a122c910bf12eddf5c0ffc2c666995b4989408
|
[
"MIT"
] | 4 |
2019-07-03T00:57:01.000Z
|
2020-12-11T23:06:11.000Z
|
aip2-dgs/other/reference.ipynb
|
nurseiit/comm-unist
|
e7a122c910bf12eddf5c0ffc2c666995b4989408
|
[
"MIT"
] | 1 |
2019-10-19T17:42:42.000Z
|
2019-10-19T17:42:42.000Z
|
aip2-dgs/other/reference.ipynb
|
nurseiit/comm-unist
|
e7a122c910bf12eddf5c0ffc2c666995b4989408
|
[
"MIT"
] | 1 |
2019-11-05T04:14:08.000Z
|
2019-11-05T04:14:08.000Z
| 32.048096 | 737 | 0.606116 |
[
[
[
"## Review Calculus using by Python ",
"_____no_output_____"
],
[
"Consider a sequence of n numbers $x_0, x_1, \\cdots x_{n-1}$. We will start our index at 0, to remain in accordance with Python/Numpy's index system. $x_0$ is the first number in the sequence, $x_1$ is the second number in the sequence, and so forth, so $x_j$ is the general $j+1$ number in the sequence. We will utilize this $j$ index in our summation notation. Suppose we were to calculate the sum of all $n$ of these numbers in the sequence. We write that this sum is equivalent to ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Let's parse this equation. $\\Sigma$ is the summation sign, indicating that we are summing a sequence. $j$ is the index of summation that is being iterated over; $j$ is being used to subscript $x$. Then, we start our summation at 0 because the lower bound on our sum is set as $j=0$. The upper bound for our sum, or our stopping point is written as $n-1$, which is equivalent to writing $j=n-1$ on top of the $\\Sigma$ symbol. Then, $x_j$ indicates the quantitiy that we are summing. One way to think about this is in terms of a for-loop. This summation concept is equivalent to a for loop for all integers in the range from the lower bound to the upper bound, indexed into the sequence $x$. Then, the code for our previous sum is:",
"_____no_output_____"
],
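[
"# A minimal sketch of the sum written above (added for illustration).\n# x stands for any sequence x_0, ..., x_(n-1); the values below are an arbitrary example.\nx = [3, 1, 4, 1, 5]\ntotal = 0\nfor j in range(len(x)):  # j runs from the lower bound 0 up to the upper bound n-1\n    total += x[j]\nprint(total)",
"_____no_output_____"
],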
[
"Then, you can run through a few more examples of summation notation.\n\nExample1:",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math1.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Example2:",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math2.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Example3:",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math3.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Then, we can also sum over several different sequences. Consider the sequences A and B, where A consists of m values $a_0, a_1, \\cdots, a_{m-1}$ and B contains n values $b_0, b_1, \\cdots b_{n-1}$. Then, we can calculate the sum of the product of each value in A with each value in B (a sum over $m\\times n$ products). Because the $i$ index only appears in association with A, and the $j$ index with B, we can group these summations. ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math4.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"A = np.random.rand(5)",
"_____no_output_____"
],
[
"B = np.random.rand(7)",
"_____no_output_____"
]
],
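[
[
"# A hedged sketch of the grouped double sum described above; it assumes A and B are the\n# arrays defined in the previous cells.\nnested = sum(a * b for a in A for b in B)      # sum over all m*n products\ngrouped = np.sum(A) * np.sum(B)                # (sum over A) * (sum over B)\nprint(np.isclose(nested, grouped))",
"_____no_output_____"
]
],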
[
[
"Note that the following does not hold ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math6.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"because this effectively treats the index $i$ in the first term independently from the $i$ in the second term of the product. Notice that in the right side of the equation, we could have interchanged the index $i$ in the second summation with $j$, without changing any of the mathematics. The sum on the left represents the summation of $m$ terms of $a_{i}^2$, whereas the sum on the right represents the summation of $m \\times m$ terms - products between all possible pairs of A's terms.",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
],
[
"A = np.random.rand(5)",
"_____no_output_____"
]
],
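[
[
"A short numerical check of the statement above (a sketch using the `A` just defined): the sum of squares and the product of the two sums are generally different.",
"_____no_output_____"
]
],
[
[
"sum_of_squares = np.sum(A**2)            # m terms: a_i * a_i\nproduct_of_sums = np.sum(A) * np.sum(A)  # m*m terms: all pairs a_i * a_j\nsum_of_squares, product_of_sums",
"_____no_output_____"
]
],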
[
[
"Note that typically when someone writes $\\sum_{i} x_i$, this is just the sum of all values in the sequence X, and is the same as writing $\\sum_{i=0}^{n-1} x_i$, where you'll usually know the value of n so you can still compute the sum.\n",
"_____no_output_____"
],
[
"## Matrices \n",
"_____no_output_____"
],
[
"Then suppose we have a m by n matrix that contains all products of the values in both sequence A and sequence B such that the matrix value of $M$ at index $(i,j)$ is $a_i \\cdot b_j$.",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math5.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"You can form this matrix in numpy via an \"outer product\": ",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
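[
[
"A minimal sketch of that outer product (it assumes the `A` and `B` arrays from the earlier cells are still defined):",
"_____no_output_____"
]
],
[
[
"M = np.outer(A, B)   # M[i, j] == A[i] * B[j]\nM.shape",
"_____no_output_____"
]
],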
[
[
"You can use this matrix to visualize several possible uses of summations. For example, suppose $m=n$. Then, if we express the sequences A and B as column vectors, the dot product of the two vectors would be the sum of the diagonal of M, or $\\sum_{k=0}^{n-1} a_k \\cdot b_k$. Furthermore, we can take the sum of a row using the sum $\\sum_{i=0}^{n-1} a_k \\cdot b_i$ for the $k^{th}$ row of the matrix, and similarly take the sum of a column with $\\sum_{i=0}^{n-1} a_i \\cdot b_k$ for the $k^{th}$ column of the matrix.",
"_____no_output_____"
]
],
[
[
"k = 2",
"_____no_output_____"
],
[
"# sum over the kth column:",
"_____no_output_____"
],
[
"# sum over the kth row:",
"_____no_output_____"
]
],
[
[
"## Kronecker Delta",
"_____no_output_____"
],
[
"To make notation for working with summations (particularly in considering the matrix format) even simpler, we can use the Kronecker delta function, named after Prussian mathematician Leonard Kronecker. We use $\\delta_{i,j}$ to denote the Kronecker delta function, defined as: \\begin{equation} \\begin{cases} \\delta{i, j}= 0 & \\text{if } i \\neq j\\\n\\delta{i,j}=1 & \\text{if } i=j\n\\end{cases} \\end{equation}\n\nSee that a Kronecker-delta can \"collapse\" a sum. Let $j$ be an integer between 0 and $n-1$: ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math7.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"See also that the identity matrix, $I$, can be written as $I_{i,j}=\\delta_{i,j}$.",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math8.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"As an exercise, write a python function that behaves as a kronecker delta, and include it in a for-loop that is computing a sum. Verify that it does indeed collapse the sum.",
"_____no_output_____"
],
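[
"One possible sketch (certainly not the only solution) is shown below; it collapses the sum $\\sum_i x_i \\, \\delta_{i,j}$ down to $x_j$:\n\n```python\ndef kronecker_delta(i, j):\n    return 1 if i == j else 0\n\nx = [3, 1, 4, 1, 5]\nj = 2\ntotal = 0\nfor i in range(len(x)):\n    total += x[i] * kronecker_delta(i, j)\n\nprint(total == x[j])  # the sum collapses to x[j]\n```",
"_____no_output_____"
],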
[
"## Partial Derivatives",
"_____no_output_____"
],
[
"Partial derivatives are used in multivariable functions, in which we essentially derive with respect to one of these variables and treat the remaining variables as constants. For example: ",
"_____no_output_____"
],
[
"$$ f(x,y) = 6 x^2 y^3 $$ $$ \\frac{\\partial}{\\partial x} f(x,y) = 12x y^3 $$ $$ \\frac{\\partial}{\\partial y} f(x,y) = 18 x^2 y^2 $$\n\nSo, what if we want to take a partial derivative of a sum?\n\nSuppose that we have two vectors containing variables:",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math9.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math10.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Then, in our previous section we saw that $\\vec{x} \\cdot \\vec{y} = \\sum_{i=0}^{n-1} x_i \\cdot y_i$. What happens when we take the partial derivative of this sum with respect to the variable $x_{j}$? Let $f = \\vec{x} \\cdot \\vec{y}$, then ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math11.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"It is critical to see that, because $x_i$ and $x_j$ are distinct variables (unless $i = j$), \\begin{equation} \\begin{cases} \\frac{\\partial x_i}{\\partial x_j} = 0 & \\text{if } i \\neq j\\\n\\frac{\\partial x_i}{\\partial x_j} = 1 & \\text{if } i=j\n\\end{cases} \\end{equation} and thus $\\frac{\\partial x_i}{\\partial x_j} = \\delta_{i,j}$.\n\nThus we can simplify our sum by collapsing it via the kronecker-delta: ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math12.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Defining $\\frac{\\partial f}{\\partial \\vec{x}} = [\\frac{\\partial f}{\\partial x_0}, \\cdots , \\frac{\\partial f}{\\partial x_j}, \\cdots, \\frac{\\partial f}{\\partial x_{n-1}}]$, we can see from our above result that ",
"_____no_output_____"
],
[
"<script src=\"https://npmcdn.com/jupyter-js-widgets@~1.2.0/dist/embed.js\"></script><img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math13.svg'><script type=\"application/vnd.jupyter-embedded-widgets\">{}</script>",
"_____no_output_____"
],
[
"Take a little bit of time to think through the above expression, and make sure you understand why it is true, writing out matrices to help your understanding. This is just one example of how we can apply some of the calculus we already know to vectors, leading to vector calculus, a pillar for linear algebra! This is especially important for deriving expressions used in back-propagation in machine learning.",
"_____no_output_____"
],
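[
"A quick numerical sketch of this result (assuming NumPy is available): perturbing one entry of $\\vec{x}$ changes the dot product at a rate equal to the corresponding entry of $\\vec{y}$.\n\n```python\nimport numpy as np\n\nx = np.random.rand(4)\ny = np.random.rand(4)\nj, eps = 1, 1e-6\n\nf = lambda v: v @ y                      # f = x . y\nnumeric = (f(x + eps*np.eye(4)[j]) - f(x)) / eps\nprint(numeric, y[j])                     # the two values should be close\n```",
"_____no_output_____"
],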
[
"The end of document",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
cbbbc9fdd18a1eeb6fabfbeee840442d3e7c4e9f
| 3,806 |
ipynb
|
Jupyter Notebook
|
HW4/Q5/Q5.ipynb
|
markblitz/RU_573_HW
|
b6b9799b1af2f1d5e65362e7fb841cd30bbc951f
|
[
"MIT"
] | null | null | null |
HW4/Q5/Q5.ipynb
|
markblitz/RU_573_HW
|
b6b9799b1af2f1d5e65362e7fb841cd30bbc951f
|
[
"MIT"
] | null | null | null |
HW4/Q5/Q5.ipynb
|
markblitz/RU_573_HW
|
b6b9799b1af2f1d5e65362e7fb841cd30bbc951f
|
[
"MIT"
] | null | null | null | 31.454545 | 87 | 0.488965 |
[
[
[
"class Graph:\n def __init__(self, input_vertex_number):\n self.vertex_number = input_vertex_number\n self.edge_number = 0\n self.Adjacency_list = [[] for i in range(self.vertex_number)]\n #print(self.Adjacency_list)\n \n def Add_Path(self, start_vertex, end_vertex, weight=1):\n self.Adjacency_list[start_vertex].append([end_vertex, weight])\n self.edge_number += 1\n \n def Print_List(self):\n print(self.edge_number, \"edge(s)\")\n for i in range(0, len(self.Adjacency_list)):\n print(\"Vertex: \", i, \" Linked with: \", self.Adjacency_list[i])\n \n def BFS(self):\n vertex_queue = []\n vertex_queue.append(0)\n visited_vertex = set()\n visited_vertex.add(0)\n while (len(vertex_queue) > 0):\n vertex = vertex_queue.pop(0)\n nodes = []\n for i in range(0, len(self.Adjacency_list[vertex])):\n nodes.append(self.Adjacency_list[vertex][i][0])\n for i in nodes:\n if i not in visited_vertex:\n vertex_queue.append(i)\n visited_vertex.add(i)\n if len(visited_vertex) == self.vertex_number:\n print('all vertex visited')\n \n def DFS(self):\n vertex_stack = []\n vertex_stack.append(0)\n visited_vertex = set()\n visited_vertex.add(0)\n while (len(vertex_stack) > 0):\n vertex = vertex_stack.pop()\n nodes = []\n for i in range(0, len(self.Adjacency_list[vertex])):\n nodes.append(self.Adjacency_list[vertex][i][0])\n for i in nodes:\n if i not in visited_vertex:\n vertex_stack.append(i)\n visited_vertex.add(i)\n if len(visited_vertex) == self.vertex_number:\n print('all vertex visited')",
"_____no_output_____"
],
[
"input_data = []\nfile = open('./data/NYC.txt')\ninput_vertex_number = int(file.readline())\ninput_edge_number = int(file.readline())\ngraph = Graph(input_vertex_number)\nfor line in file.readlines():\n start_vertex = int(line.split()[0])\n end_vertex = int(line.split()[1])\n weight = float(line.split()[2])\n graph.Add_Path(start_vertex, end_vertex, weight)\nfile.close()\n#graph.Print_List()\ngraph.BFS()\ngraph.DFS()",
"all vertex visited\nall vertex visited\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
cbbbcb25a70156210f2aa89c2d23b095d5596833
| 22,529 |
ipynb
|
Jupyter Notebook
|
docs/notebooks/introduction.ipynb
|
IFV/creme
|
a7393b534489422ba156f2d2e83fb777afbd2efb
|
[
"BSD-3-Clause"
] | null | null | null |
docs/notebooks/introduction.ipynb
|
IFV/creme
|
a7393b534489422ba156f2d2e83fb777afbd2efb
|
[
"BSD-3-Clause"
] | 1 |
2022-02-10T06:24:42.000Z
|
2022-02-10T06:24:42.000Z
|
docs/notebooks/introduction.ipynb
|
igorol/creme
|
60977c4accfdca08cfd76a162095ff738ef87281
|
[
"BSD-3-Clause"
] | 1 |
2021-04-16T08:27:14.000Z
|
2021-04-16T08:27:14.000Z
| 48.345494 | 1,228 | 0.649785 |
[
[
[
"# Introduction",
"_____no_output_____"
],
[
"## A quick overview of batch learning\n\nIf you've already delved into machine learning, then you shouldn't have any difficulty in getting to use incremental learning. If you are somewhat new to machine learning, then do not worry! The point of this notebook in particular is to introduce simple notions. We'll also start to show how `creme` fits in and explain how to use it.\n\nThe whole point of machine learning is to *learn from data*. In *supervised learning* you want to learn how to predict a target $y$ given a set of features $X$. Meanwhile in an unsupervised learning there is no target, and the goal is rather to identify patterns and trends in the features $X$. At this point most people tend to imagine $X$ as a somewhat big table where each row is an observation and each column is a feature, and they would be quite right. Learning from tabular data is part of what's called *batch learning*, which basically that all of the data is available to our learning algorithm at once. A lot of libraries have been created to handle the batch learning regime, with one of the most prominent being Python's [scikit-learn](https://scikit-learn.org/stable/). \n\nAs a simple example of batch learning let's say we want to learn to predict if a women has breast cancer or not. We'll use the [breast cancer dataset available with scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer().html). We'll learn to map a set of features to a binary decision using a [logistic regression](https://www.wikiwand.com/en/Logistic_regression). Like many other models based on numerical weights, logisitc regression is sensitive to the scale of the features. Rescaling the data so that each feature has mean 0 and variance 1 is generally considered good practice. We can apply the rescaling and fit the logistic regression sequentially in an elegant manner using a [`Pipeline`](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html). To measure the performance of the model we'll evaluate the average [ROC AUC score](https://www.wikiwand.com/en/Receiver_operating_characteristic) using a 5 fold [cross-validation](https://www.wikiwand.com/en/Cross-validation_(statistics)). ",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom sklearn import pipeline\nfrom sklearn import preprocessing\n\n\n# Load the data\ndataset = datasets.load_breast_cancer()\nX, y = dataset.data, dataset.target\n\n# Define the steps of the model\nmodel = pipeline.Pipeline([\n ('scale', preprocessing.StandardScaler()),\n ('lin_reg', linear_model.LogisticRegression(solver='lbfgs'))\n])\n\n# Define a determistic cross-validation procedure\ncv = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)\n\n# Compute the MSE values\nscorer = metrics.make_scorer(metrics.roc_auc_score)\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and it's standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (± {scores.std():.3f})')",
"ROC AUC: 0.975 (± 0.011)\n"
]
],
[
[
"This might be a lot to take in if you're not accustomed to scikit-learn, but it probably isn't if you are. Batch learning basically boils down to:\n\n1. Loading the data\n2. Fitting a model to the data\n3. Computing the performance of the model on unseen data\n\nThis is pretty standard and is maybe how most people imagine a machine learning pipeline. However this way of proceding has certain downsides. First of all your laptop would crash if the `load_boston` function returned a dataset who's size exceeds your available amount of RAM. Sometimes you can use some tricks to get around this. For example by optimizing the data types and by using sparse representations when applicable you can potentially save precious gigabytes of RAM. However like many tricks this only goes so far. If your dataset weighs hundreds of gigabytes then you won't go far without some special hardware. One solution is to do out-of-core learning; that is, algorithms that can learning by being presented the data in chunks. If you want to go down this road then take a look at [Dask](https://examples.dask.org/machine-learning.html) and [Spark's MLlib](https://spark.apache.org/mllib/).\n\nAnother issue with the batch learning regime is that can't elegantly learn from new data. Indeed if new data is made available, then the model has to learn from scratch with a new dataset composed of the old data and the new data. This is particularly annoying in a real situation where you might have new incoming data every week, day, hour, minute, or even setting. For example if you're building a recommendation engine for an e-commerce app, then you're probably training your model from 0 every week or so. As your app grows in popularity, so does the dataset you're training on. This will lead to longer and longer training times and might require a hardware upgrade.\n\nA final downside that isn't very easy to grasp concerns the manner in which features are extracted. Everytime you want to train your model you first have to extract features. The trick is that some features might not be accessible at the particular point in time you are at. For example maybe that some attributes in your data warehouse get overwritten with time. In other words maybe that all the features pertaining to a particular observations are not available, whereas they were a week ago. This happens more often than not in real scenarios, and apart if you have a sophisticated data engineering pipeline then you will encounter these issues at some point. ",
"_____no_output_____"
],
[
"## A hands-on introduction to incremental learning\n\nIncremental learning is also often called *online learning*, but if you [google online learning](https://www.google.com/search?q=online+learning) a lot of the results will point to educational websites. Hence we prefer the name \"incremental learning\", from which `creme` derives it's name. The point of incremental learning is to fit a model to a stream of data. In other words, the data isn't available in it's entirety, but rather the observations are provided one by one. As an example let's stream through the dataset used previously.",
"_____no_output_____"
]
],
[
[
"for xi, yi in zip(X, y):\n # This where the model learns\n pass",
"_____no_output_____"
]
],
[
[
"In this case we're iterating over a dataset that is already in memory, but we could just as well stream from a CSV file, a Kafka stream, an SQL query, etc. If we look at `x` we can notice that it is a `numpy.ndarray`.",
"_____no_output_____"
]
],
[
[
"xi",
"_____no_output_____"
]
],
[
[
"`creme` on the other hand works with `dict`s. We believe that `dict`s are more enjoyable to program with than `numpy.ndarray`s, at least for when single observations are concerned. `dict`'s bring the added benefit that each feature can be accessed by name rather than by position.",
"_____no_output_____"
]
],
[
[
"for xi, yi in zip(X, y):\n xi = dict(zip(dataset.feature_names, xi))\n pass\n\nxi",
"_____no_output_____"
]
],
[
[
"`creme`'s `stream` module has an `iter_sklearn_dataset` convenience function that we can use instead.",
"_____no_output_____"
]
],
[
[
"from creme import stream\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n pass",
"_____no_output_____"
]
],
[
[
"The simple fact that we are getting the data in a stream means that we can't do a lot of things the same way as in a batch setting. For example let's say we want to scale the data so that it has mean 0 and variance 1, as we did earlier. To do so we simply have to subtract the mean of each feature to each value and then divide the result by the standard deviation of the feature. The problem is that we can't possible known the values of the mean and the standard deviation before actually going through all the data! One way to procede would be to do a first pass over the data to compute the necessary values and then scale the values during a second pass. The problem is that defeats our purpose, which is to learn by only looking at the data once. Although this might seem rather restrictive, it reaps sizable benefits down the road.\n\nThe way we do feature scaling in `creme` involves computing *running statistics*. The idea is that we use a data structure that estimates the mean and updates itself when it is provided with a value. The same goes for the variance (and thus the standard deviation). For example, if we denote $\\mu_t$ the mean and $n_t$ the count at any moment $t$, then updating the mean can be done as so:\n\n$$\n\\begin{cases}\nn_{t+1} = n_t + 1 \\\\\n\\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}}\n\\end{cases}\n$$\n\nLikewhise a running variance can be computed as so:\n\n$$\n\\begin{cases}\nn_{t+1} = n_t + 1 \\\\\n\\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}} \\\\\ns_{t+1} = s_t + (x - \\mu_t) \\times (x - \\mu_{t+1}) \\\\\n\\sigma_{t+1} = \\frac{s_{t+1}}{n_{t+1}}\n\\end{cases}\n$$\n\nwhere $s_t$ is a running sum of squares and $\\sigma_t$ is the running variance at time $t$. This might seem a tad more involved than the batch algorithms you learn in school, but it is rather elegant. Implementing this in Python is not too difficult. For example let's compute the running mean and variance of the `'mean area'` variable.",
"_____no_output_____"
]
],
[
[
"n, mean, sum_of_squares, variance = 0, 0, 0, 0\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n n += 1\n old_mean = mean\n mean += (xi['mean area'] - mean) / n\n sum_of_squares += (xi['mean area'] - old_mean) * (xi['mean area'] - mean)\n variance = sum_of_squares / n\n \nprint(f'Running mean: {mean:.3f}')\nprint(f'Running variance: {variance:.3f}')",
"Running mean: 654.889\nRunning variance: 123625.903\n"
]
],
[
[
"Let's compare this with `numpy`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ni = list(dataset.feature_names).index('mean area')\nprint(f'True mean: {np.mean(X[:, i]):.3f}')\nprint(f'True variance: {np.var(X[:, i]):.3f}')",
"True mean: 654.889\nTrue variance: 123625.903\n"
]
],
[
[
"The results seem to be exactly the same! The twist is that the running statistics won't be very accurate for the first few observations. In general though this doesn't matter too much. Some would even go as far as to say that this descrepancy is beneficial and acts as some sort of regularization...\n\nNow the idea is that we can compute the running statistics of each feature and scale them as they come along. The way to do this with `creme` is to use the `StandardScaler` class from the `preprocessing` module, as so:",
"_____no_output_____"
]
],
[
[
"from creme import preprocessing\n\nscaler = preprocessing.StandardScaler()\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n xi = scaler.fit_one(xi, yi)",
"_____no_output_____"
]
],
[
[
"This is quite terse but let's break it down nonetheless. Every class in `creme` has a `fit_one(x, y)` method where all the magic happens. Now the important thing to notice is that the `fit_one` actually returns the output for the given input. This is one of the nice properties of online learning: inference can be done immediatly. In `creme` each call to a `Transformer`'s `fit_one` will return the transformed output. Meanwhile calling `fit_one` with a `Classifier` or a `Regressor` will return the predicted target for the given set of features. The twist is that the prediction is made *before* looking at the true target `y`. This means that we get a free hold-out prediction every time we call `fit_one`. This can be used to monitor the performance of the model as it trains, which is obviously nice to have.\n\nNow that we are scaling the data, we can start doing some actual machine learning. We're going to implement an online linear regression. Because all the data isn't available at once, we are obliged to do what is called *stochastic gradient descent*, which is a popular research topic and has a lot of variants. SGD is commonly used to train neural networks. The idea is that at each step we compute the loss between the target prediction and the truth. We then calculate the gradient, which is simply a set of derivatives with respect to each weight from the linear regression. Once we have obtained the gradient, we can update the weights by moving them in the opposite direction of the gradient. The amount by which the weights are moved typically depends on a *learning rate*, which is typically set by the user. Different optimizers have different ways of managing the weight update, and some handle the learning rate implicitely. Online linear regression can be done in `creme` with the `LinearRegression` class from the `linear_model` module. We'll be using plain and simple SGD using the `SGD` optimizer from the `optim` module. During training we'll measure the squared error between the truth and the predictions.",
"_____no_output_____"
]
],
[
[
"from creme import linear_model\nfrom creme import optim\n\nscaler = preprocessing.StandardScaler()\noptimizer = optim.SGD(lr=0.01)\nlog_reg = linear_model.LogisticRegression(optimizer)\n\ny_true = []\ny_pred = []\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer(), shuffle=True, random_state=42):\n \n # Scale the features\n xi_scaled = scaler.fit_one(xi).transform_one(xi)\n \n # Fit the linear regression\n yi_pred = log_reg.predict_proba_one(xi_scaled)\n log_reg.fit_one(xi_scaled, yi)\n \n # Store the truth and the prediction\n y_true.append(yi)\n y_pred.append(yi_pred[True])\n \nprint(f'ROC AUC: {metrics.roc_auc_score(y_true, y_pred):.3f}')",
"ROC AUC: 0.989\n"
]
],
[
[
"The ROC AUC is significantly better than the one obtained from the cross-validation of scikit-learn's logisitic regression. However to make things really comparable it would be nice to compare with the same cross-validation procedure. `creme` has a `compat` module that contains utilities for making `creme` compatible with other Python libraries. Because we're doing regression we'll be using the `SKLRegressorWrapper`. We'll also be using `Pipeline` to encapsulate the logic of the `StandardScaler` and the `LogisticRegression` in one single object.",
"_____no_output_____"
]
],
[
[
"from creme import compat\nfrom creme import compose\n\n# We define a Pipeline, exactly like we did earlier for sklearn \nmodel = compose.Pipeline([\n ('scale', preprocessing.StandardScaler()),\n ('log_reg', linear_model.LogisticRegression())\n])\n\n# We make the Pipeline compatible with sklearn\nmodel = compat.convert_creme_to_sklearn(model)\n\n# We compute the CV scores using the same CV scheme and the same scoring\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and it's standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (± {scores.std():.3f})')",
"ROC AUC: 0.963 (± 0.010)\n"
]
],
[
[
"This time the ROC AUC score is lower, which is what we would expect. Indeed online learning isn't as accurate as batch learning. However it all depends in what you're interested in. If you're only interested in predicting the next observation then the online learning regime would be better. That's why it's a bit hard to compare both approaches: they're both suited to different scenarios.",
"_____no_output_____"
],
[
"## Going further",
"_____no_output_____"
],
[
"There's a lot more to learn, and it all depends on what kind on your use case. Feel free to have a look at the [documentation](https://creme-ml.github.io/) to know what `creme` has available, and have a look the [example notebook](https://github.com/creme-ml/notebooks).\n\nHere a few resources if you want to do some reading:\n\n- [Online learning -- Wikipedia](https://www.wikiwand.com/en/Online_machine_learning)\n- [What is online machine learning? -- Max Pagels](https://medium.com/value-stream-design/online-machine-learning-515556ff72c5)\n- [Introduction to Online Learning -- USC course](http://www-bcf.usc.edu/~haipengl/courses/CSCI699/)\n- [Online Methods in Machine Learning -- MIT course](http://www.mit.edu/~rakhlin/6.883/)\n- [Online Learning: A Comprehensive Survey](https://arxiv.org/pdf/1802.02871.pdf)\n- [Streaming 101: The world beyond batch](https://www.oreilly.com/ideas/the-world-beyond-batch-streaming-101)\n- [Machine learning for data streams](https://www.cms.waikato.ac.nz/~abifet/book/contents.html)\n- [Data Stream Mining: A Practical Approach](https://www.cs.waikato.ac.nz/~abifet/MOA/StreamMining.pdf)\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
cbbbd03f7a23b493b9a76af24e3c053d2736295b
| 107,722 |
ipynb
|
Jupyter Notebook
|
examples/PolynomialLinearRegression.ipynb
|
shotahorii/ml-from-scratch
|
10fe8c9d5811bfcb9ee303aba2087524574681e6
|
[
"MIT"
] | 3 |
2021-03-21T21:16:42.000Z
|
2021-06-27T03:20:04.000Z
|
examples/PolynomialLinearRegression.ipynb
|
shotahorii/ml-from-scratch
|
10fe8c9d5811bfcb9ee303aba2087524574681e6
|
[
"MIT"
] | null | null | null |
examples/PolynomialLinearRegression.ipynb
|
shotahorii/ml-from-scratch
|
10fe8c9d5811bfcb9ee303aba2087524574681e6
|
[
"MIT"
] | null | null | null | 608.59887 | 75,776 | 0.950771 |
[
[
[
"import numpy as np\nfrom matplotlib import pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"from bareml.machinelearning.supervised import LinearRegression\nfrom bareml.machinelearning.utils.preprocessing import PolynomialFeatures",
"_____no_output_____"
],
[
"x = np.arange(0,2*np.pi,0.2)\ny = np.sin(x) + 0.2*np.random.randn(len(x))\n\n# for drawing\nx_true = np.arange(0,2*np.pi,0.05)\ny_true = np.sin(x_true)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1,1,figsize=(12,8))\nplt.scatter(x,y)\nplt.plot(x_true, y_true)",
"_____no_output_____"
],
[
"reg = LinearRegression()",
"_____no_output_____"
],
[
"degrees = [1,3,9]\npreds = []\n\nfor degree in degrees:\n pol = PolynomialFeatures(degree, include_bias=False)\n X = pol.fit_transform(x[:,None])\n X_true = pol.transform(x_true[:,None])\n reg.fit(X,y)\n y_pred = reg.predict(X_true)\n preds.append(y_pred)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1,1,figsize=(12,8))\nfig.suptitle('Polynomial Linear Regression with different degrees', y=0.08, x=0.5, fontsize=18)\nax.scatter(x,y,label='observed data')\nax.plot(x_true, y_true, '--', label='true function')\nfor i, y_pred in enumerate(preds):\n d = degrees[i]\n if d == 1:\n label = '1st degree'\n elif d == 2:\n label = '2nd degree'\n elif d == 3:\n label = '3rd degree'\n else:\n label = str(degrees[i]) + 'th degree'\n ax.plot(x_true, y_pred, label=label)\n \nax.legend()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbbec053653ae82b625c4df68f4bd15debd0576
| 2,227 |
ipynb
|
Jupyter Notebook
|
content/python/basics/repr_vs_str.ipynb
|
vedraiyani/notes-1
|
85b86787e5bdb9c5b4160438c026391c7c8c3a48
|
[
"CC0-1.0"
] | 1 |
2019-06-17T19:46:34.000Z
|
2019-06-17T19:46:34.000Z
|
content/python/basics/repr_vs_str.ipynb
|
vedraiyani/notes-1
|
85b86787e5bdb9c5b4160438c026391c7c8c3a48
|
[
"CC0-1.0"
] | null | null | null |
content/python/basics/repr_vs_str.ipynb
|
vedraiyani/notes-1
|
85b86787e5bdb9c5b4160438c026391c7c8c3a48
|
[
"CC0-1.0"
] | null | null | null | 19.535088 | 129 | 0.500674 |
[
[
[
"---\ntitle: \"repr vs. str\"\nauthor: \"Chris Albon\"\ndate: 2017-12-20T11:53:49-07:00\ndescription: \"repr vs. str in Python.\"\ntype: technical_note\ndraft: false\n---",
"_____no_output_____"
]
],
[
[
"## Preliminaries",
"_____no_output_____"
]
],
[
[
"import datetime",
"_____no_output_____"
]
],
[
[
"## Create A Simple Object",
"_____no_output_____"
]
],
[
[
"class Regiment(object):\n\n def __init__(self, date=datetime.datetime.now()):\n self.date = date\n\n def __repr__(self):\n return self.date\n\n def __str__(self):\n return str(self.date)",
"_____no_output_____"
],
[
"reg=Regiment()\nreg.__repr__()",
"_____no_output_____"
]
],
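[
[
"To see the difference in action, here is a quick comparison (a sketch that assumes the cells above have been run):",
"_____no_output_____"
]
],
[
[
"# print()/str() use __str__, the end-user view;\n# repr() uses __repr__, the developer-facing view.\nprint(str(reg))\nprint(repr(reg))",
"_____no_output_____"
]
],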
[
[
"`__repr__` is for the developer. It is string representation of the object and the code needed to reproduce the object. \n\n`__str__` is the output for the end user. It prints what the user wants to see.",
"_____no_output_____"
]
]
] |
[
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
cbbbef4d9fd7f66db039f90b1a6423f973890f20
| 477,107 |
ipynb
|
Jupyter Notebook
|
tutorial/source/gmm.ipynb
|
themrzmaster/pyro
|
10f2912a107e3bd32a67ccaef2bbbab425d23da7
|
[
"Apache-2.0"
] | 2 |
2020-06-05T20:40:50.000Z
|
2020-09-05T15:39:48.000Z
|
tutorial/source/gmm.ipynb
|
pawni/pyro
|
dd6b52859cd8eda776b75f2b5ee757a76f17f145
|
[
"Apache-2.0"
] | null | null | null |
tutorial/source/gmm.ipynb
|
pawni/pyro
|
dd6b52859cd8eda776b75f2b5ee757a76f17f145
|
[
"Apache-2.0"
] | 1 |
2020-06-04T18:25:38.000Z
|
2020-06-04T18:25:38.000Z
| 651.785519 | 101,628 | 0.947157 |
[
[
[
"# Gaussian Mixture Model\n\nThis is tutorial demonstrates how to marginalize out discrete latent variables in Pyro through the motivating example of a mixture model. We'll focus on the mechanics of parallel enumeration, keeping the model simple by training a trivial 1-D Gaussian model on a tiny 5-point dataset. See also the [enumeration tutorial](http://pyro.ai/examples/enumeration.html) for a broader introduction to parallel enumeration.\n\n#### Table of contents\n\n- [Overview](#Overview)\n- [Training a MAP estimator](#Training-a-MAP-estimator)\n- [Serving the model: predicting membership](#Serving-the-model:-predicting-membership)\n - [Predicting membership using discrete inference](#Predicting-membership-using-discrete-inference)\n - [Predicting membership by enumerating in the guide](#Predicting-membership-by-enumerating-in-the-guide)\n- [MCMC](#MCMC)",
"_____no_output_____"
]
],
[
[
"import os\nfrom collections import defaultdict\nimport torch\nimport numpy as np\nimport scipy.stats\nfrom torch.distributions import constraints\nfrom matplotlib import pyplot\n%matplotlib inline\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro import poutine\nfrom pyro.infer.autoguide import AutoDelta\nfrom pyro.optim import Adam\nfrom pyro.infer import SVI, TraceEnum_ELBO, config_enumerate, infer_discrete\n\nsmoke_test = ('CI' in os.environ)\nassert pyro.__version__.startswith('1.3.1')\npyro.enable_validation(True)",
"_____no_output_____"
]
],
[
[
"## Overview\n\nPyro's [TraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.traceenum_elbo.TraceEnum_ELBO) can automatically marginalize out variables in both the guide and the model. When enumerating guide variables, Pyro can either enumerate sequentially (which is useful if the variables determine downstream control flow), or enumerate in parallel by allocating a new tensor dimension and using nonstandard evaluation to create a tensor of possible values at the variable's sample site. These nonstandard values are then replayed in the model. When enumerating variables in the model, the variables must be enumerated in parallel and must not appear in the guide. Mathematically, guide-side enumeration simply reduces variance in a stochastic ELBO by enumerating all values, whereas model-side enumeration avoids an application of Jensen's inequality by exactly marginalizing out a variable.\n\nHere is our tiny dataset. It has five points.",
"_____no_output_____"
]
],
[
[
"data = torch.tensor([0., 1., 10., 11., 12.])",
"_____no_output_____"
]
],
[
[
"## Training a MAP estimator\n\nLet's start by learning model parameters `weights`, `locs`, and `scale` given priors and data. We will learn point estimates of these using an [AutoDelta](http://docs.pyro.ai/en/dev/infer.autoguide.html#autodelta) guide (named after its delta distributions). Our model will learn global mixture weights, the location of each mixture component, and a shared scale that is common to both components. During inference, [TraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.traceenum_elbo.TraceEnum_ELBO) will marginalize out the assignments of datapoints to clusters.",
"_____no_output_____"
]
],
[
[
"K = 2 # Fixed number of components.\n\n@config_enumerate\ndef model(data):\n # Global variables.\n weights = pyro.sample('weights', dist.Dirichlet(0.5 * torch.ones(K)))\n scale = pyro.sample('scale', dist.LogNormal(0., 2.))\n with pyro.plate('components', K):\n locs = pyro.sample('locs', dist.Normal(0., 10.))\n\n with pyro.plate('data', len(data)):\n # Local variables.\n assignment = pyro.sample('assignment', dist.Categorical(weights))\n pyro.sample('obs', dist.Normal(locs[assignment], scale), obs=data)",
"_____no_output_____"
]
],
[
[
"To run inference with this `(model,guide)` pair, we use Pyro's [config_enumerate()](http://docs.pyro.ai/en/dev/poutine.html#pyro.infer.enum.config_enumerate) handler to enumerate over all assignments in each iteration. Since we've wrapped the batched Categorical assignments in a [pyro.plate](http://docs.pyro.ai/en/dev/primitives.html#pyro.plate) indepencence context, this enumeration can happen in parallel: we enumerate only 2 possibilites, rather than `2**len(data) = 32`. Finally, to use the parallel version of enumeration, we inform Pyro that we're only using a single [plate](http://docs.pyro.ai/en/dev/primitives.html#pyro.plate) via `max_plate_nesting=1`; this lets Pyro know that we're using the rightmost dimension [plate](http://docs.pyro.ai/en/dev/primitives.html#pyro.plate) and that Pyro can use any other dimension for parallelization.",
"_____no_output_____"
]
],
[
[
"optim = pyro.optim.Adam({'lr': 0.1, 'betas': [0.8, 0.99]})\nelbo = TraceEnum_ELBO(max_plate_nesting=1)",
"_____no_output_____"
]
],
[
[
"Before inference we'll initialize to plausible values. Mixture models are very succeptible to local modes. A common approach is choose the best among many randomly initializations, where the cluster means are initialized from random subsamples of the data. Since we're using an [AutoDelta](http://docs.pyro.ai/en/dev/infer.autoguide.html#autodelta) guide, we can initialize by defining a custom ``init_loc_fn()``.",
"_____no_output_____"
]
],
[
[
"def init_loc_fn(site):\n if site[\"name\"] == \"weights\":\n # Initialize weights to uniform.\n return torch.ones(K) / K\n if site[\"name\"] == \"scale\":\n return (data.var() / 2).sqrt()\n if site[\"name\"] == \"locs\":\n return data[torch.multinomial(torch.ones(len(data)) / len(data), K)]\n raise ValueError(site[\"name\"])\n\ndef initialize(seed):\n global global_guide, svi\n pyro.set_rng_seed(seed)\n pyro.clear_param_store()\n global_guide = AutoDelta(poutine.block(model, expose=['weights', 'locs', 'scale']),\n init_loc_fn=init_loc_fn)\n svi = SVI(model, global_guide, optim, loss=elbo)\n return svi.loss(model, global_guide, data)\n\n# Choose the best among 100 random initializations.\nloss, seed = min((initialize(seed), seed) for seed in range(100))\ninitialize(seed)\nprint('seed = {}, initial_loss = {}'.format(seed, loss))",
"seed = 7, initial_loss = 25.665584564208984\n"
]
],
[
[
"During training, we'll collect both losses and gradient norms to monitor convergence. We can do this using PyTorch's `.register_hook()` method.",
"_____no_output_____"
]
],
[
[
"# Register hooks to monitor gradient norms.\ngradient_norms = defaultdict(list)\nfor name, value in pyro.get_param_store().named_parameters():\n value.register_hook(lambda g, name=name: gradient_norms[name].append(g.norm().item()))\n\nlosses = []\nfor i in range(200 if not smoke_test else 2):\n loss = svi.step(data)\n losses.append(loss)\n print('.' if i % 100 else '\\n', end='')",
"\n...................................................................................................\n..................................................................................................."
],
[
"pyplot.figure(figsize=(10,3), dpi=100).set_facecolor('white')\npyplot.plot(losses)\npyplot.xlabel('iters')\npyplot.ylabel('loss')\npyplot.yscale('log')\npyplot.title('Convergence of SVI');",
"_____no_output_____"
],
[
"pyplot.figure(figsize=(10,4), dpi=100).set_facecolor('white')\nfor name, grad_norms in gradient_norms.items():\n pyplot.plot(grad_norms, label=name)\npyplot.xlabel('iters')\npyplot.ylabel('gradient norm')\npyplot.yscale('log')\npyplot.legend(loc='best')\npyplot.title('Gradient norms during SVI');",
"_____no_output_____"
]
],
[
[
"Here are the learned parameters:",
"_____no_output_____"
]
],
[
[
"map_estimates = global_guide(data)\nweights = map_estimates['weights']\nlocs = map_estimates['locs']\nscale = map_estimates['scale']\nprint('weights = {}'.format(weights.data.numpy()))\nprint('locs = {}'.format(locs.data.numpy()))\nprint('scale = {}'.format(scale.data.numpy()))",
"weights = [0.375 0.625]\nlocs = [ 0.49887404 10.984463 ]\nscale = 0.6514337062835693\n"
]
],
[
[
"The model's `weights` are as expected, with about 2/5 of the data in the first component and 3/5 in the second component. Next let's visualize the mixture model.",
"_____no_output_____"
]
],
[
[
"X = np.arange(-3,15,0.1)\nY1 = weights[0].item() * scipy.stats.norm.pdf((X - locs[0].item()) / scale.item())\nY2 = weights[1].item() * scipy.stats.norm.pdf((X - locs[1].item()) / scale.item())\n\npyplot.figure(figsize=(10, 4), dpi=100).set_facecolor('white')\npyplot.plot(X, Y1, 'r-')\npyplot.plot(X, Y2, 'b-')\npyplot.plot(X, Y1 + Y2, 'k--')\npyplot.plot(data.data.numpy(), np.zeros(len(data)), 'k*')\npyplot.title('Density of two-component mixture model')\npyplot.ylabel('probability density');",
"_____no_output_____"
]
],
[
[
"Finally note that optimization with mixture models is non-convex and can often get stuck in local optima. For example in this tutorial, we observed that the mixture model gets stuck in an everthing-in-one-cluster hypothesis if `scale` is initialized to be too large.\n\n## Serving the model: predicting membership\n\nNow that we've trained a mixture model, we might want to use the model as a classifier. \nDuring training we marginalized out the assignment variables in the model. While this provides fast convergence, it prevents us from reading the cluster assignments from the guide. We'll discuss two options for treating the model as a classifier: first using [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.discrete.infer_discrete) (much faster) and second by training a secondary guide using enumeration inside SVI (slower but more general).\n\n### Predicting membership using discrete inference\n\nThe fastest way to predict membership is to use the [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.discrete.infer_discrete) handler, together with `trace` and `replay`. Let's start out with a MAP classifier, setting `infer_discrete`'s temperature parameter to zero. For a deeper look at effect handlers like `trace`, `replay`, and `infer_discrete`, see the [effect handler tutorial](http://pyro.ai/examples/effect_handlers.html).",
"_____no_output_____"
]
],
[
[
"guide_trace = poutine.trace(global_guide).get_trace(data) # record the globals\ntrained_model = poutine.replay(model, trace=guide_trace) # replay the globals\n \ndef classifier(data, temperature=0):\n inferred_model = infer_discrete(trained_model, temperature=temperature,\n first_available_dim=-2) # avoid conflict with data plate\n trace = poutine.trace(inferred_model).get_trace(data)\n return trace.nodes[\"assignment\"][\"value\"]\n\nprint(classifier(data))",
"tensor([0, 0, 1, 1, 1])\n"
]
],
[
[
"Indeed we can run this classifer on new data",
"_____no_output_____"
]
],
[
[
"new_data = torch.arange(-3, 15, 0.1)\nassignment = classifier(new_data)\npyplot.figure(figsize=(8, 2), dpi=100).set_facecolor('white')\npyplot.plot(new_data.numpy(), assignment.numpy())\npyplot.title('MAP assignment')\npyplot.xlabel('data value')\npyplot.ylabel('class assignment');",
"_____no_output_____"
]
],
[
[
"To generate random posterior assignments rather than MAP assignments, we could set `temperature=1`.",
"_____no_output_____"
]
],
[
[
"print(classifier(data, temperature=1))",
"tensor([0, 0, 1, 1, 1])\n"
]
],
[
[
"Since the classes are very well separated, we zoom in to the boundary between classes, around 5.75.",
"_____no_output_____"
]
],
[
[
"new_data = torch.arange(5.5, 6.0, 0.005)\nassignment = classifier(new_data, temperature=1)\npyplot.figure(figsize=(8, 2), dpi=100).set_facecolor('white')\npyplot.plot(new_data.numpy(), assignment.numpy(), 'bx', color='C0')\npyplot.title('Random posterior assignment')\npyplot.xlabel('data value')\npyplot.ylabel('class assignment');",
"_____no_output_____"
]
],
[
[
"### Predicting membership by enumerating in the guide\n\nA second way to predict class membership is to enumerate in the guide. This doesn't work well for serving classifier models, since we need to run stochastic optimization for each new input data batch, but it is more general in that it can be embedded in larger variational models.\n\nTo read cluster assignments from the guide, we'll define a new `full_guide` that fits both global parameters (as above) and local parameters (which were previously marginalized out). Since we've already learned good values for the global variables, we will block SVI from updating those by using [poutine.block](http://docs.pyro.ai/en/dev/poutine.html#pyro.poutine.block).",
"_____no_output_____"
]
],
[
[
"@config_enumerate\ndef full_guide(data):\n # Global variables.\n with poutine.block(hide_types=[\"param\"]): # Keep our learned values of global parameters.\n global_guide(data)\n\n # Local variables.\n with pyro.plate('data', len(data)):\n assignment_probs = pyro.param('assignment_probs', torch.ones(len(data), K) / K,\n constraint=constraints.unit_interval)\n pyro.sample('assignment', dist.Categorical(assignment_probs))",
"_____no_output_____"
],
[
"optim = pyro.optim.Adam({'lr': 0.2, 'betas': [0.8, 0.99]})\nelbo = TraceEnum_ELBO(max_plate_nesting=1)\nsvi = SVI(model, full_guide, optim, loss=elbo)\n\n# Register hooks to monitor gradient norms.\ngradient_norms = defaultdict(list)\nsvi.loss(model, full_guide, data) # Initializes param store.\nfor name, value in pyro.get_param_store().named_parameters():\n value.register_hook(lambda g, name=name: gradient_norms[name].append(g.norm().item()))\n\nlosses = []\nfor i in range(200 if not smoke_test else 2):\n loss = svi.step(data)\n losses.append(loss)\n print('.' if i % 100 else '\\n', end='')",
"\n...................................................................................................\n..................................................................................................."
],
[
"pyplot.figure(figsize=(10,3), dpi=100).set_facecolor('white')\npyplot.plot(losses)\npyplot.xlabel('iters')\npyplot.ylabel('loss')\npyplot.yscale('log')\npyplot.title('Convergence of SVI');",
"_____no_output_____"
],
[
"pyplot.figure(figsize=(10,4), dpi=100).set_facecolor('white')\nfor name, grad_norms in gradient_norms.items():\n pyplot.plot(grad_norms, label=name)\npyplot.xlabel('iters')\npyplot.ylabel('gradient norm')\npyplot.yscale('log')\npyplot.legend(loc='best')\npyplot.title('Gradient norms during SVI');",
"_____no_output_____"
]
],
[
[
"We can now examine the guide's local `assignment_probs` variable.",
"_____no_output_____"
]
],
[
[
"assignment_probs = pyro.param('assignment_probs')\npyplot.figure(figsize=(8, 3), dpi=100).set_facecolor('white')\npyplot.plot(data.data.numpy(), assignment_probs.data.numpy()[:, 0], 'ro',\n label='component with mean {:0.2g}'.format(locs[0]))\npyplot.plot(data.data.numpy(), assignment_probs.data.numpy()[:, 1], 'bo',\n label='component with mean {:0.2g}'.format(locs[1]))\npyplot.title('Mixture assignment probabilities')\npyplot.xlabel('data value')\npyplot.ylabel('assignment probability')\npyplot.legend(loc='center');",
"_____no_output_____"
]
],
[
[
"## MCMC\n\nNext we'll explore the full posterior over component parameters using collapsed NUTS, i.e. we'll use NUTS and marginalize out all discrete latent variables.",
"_____no_output_____"
]
],
[
[
"from pyro.infer.mcmc.api import MCMC\nfrom pyro.infer.mcmc import NUTS\npyro.set_rng_seed(2)\nkernel = NUTS(model)\nmcmc = MCMC(kernel, num_samples=250, warmup_steps=50)\nmcmc.run(data)\nposterior_samples = mcmc.get_samples()",
"Sample: 100%|██████████| 300/300 [00:30<00:00, 9.99it/s, step size=1.69e-01, acc. rate=0.587]\n"
],
[
"X, Y = posterior_samples[\"locs\"].t()",
"_____no_output_____"
],
[
"pyplot.figure(figsize=(8, 8), dpi=100).set_facecolor('white')\nh, xs, ys, image = pyplot.hist2d(X.numpy(), Y.numpy(), bins=[20, 20])\npyplot.contour(np.log(h + 3).T, extent=[xs.min(), xs.max(), ys.min(), ys.max()],\n colors='white', alpha=0.8)\npyplot.title('Posterior density as estimated by collapsed NUTS')\npyplot.xlabel('loc of component 0')\npyplot.ylabel('loc of component 1')\npyplot.tight_layout()",
"_____no_output_____"
]
],
[
[
"Note that due to nonidentifiability of the mixture components the likelihood landscape has two equally likely modes, near `(11,0.5)` and `(0.5,11)`. NUTS has difficulty switching between the two modes.",
"_____no_output_____"
]
],
[
[
"pyplot.figure(figsize=(8, 3), dpi=100).set_facecolor('white')\npyplot.plot(X.numpy(), color='red')\npyplot.plot(Y.numpy(), color='blue')\npyplot.xlabel('NUTS step')\npyplot.ylabel('loc')\npyplot.title('Trace plot of loc parameter during NUTS inference')\npyplot.tight_layout()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbc08dbf92dfd585aa3cb70eb7a5feacbcfa368
| 49,469 |
ipynb
|
Jupyter Notebook
|
Notebooks/wfml-ejercicio-07-regresi-n-lineal.ipynb
|
jovenluk/WFML
|
52d46ef7a3687b9ce455db006fd7a5d7122f5980
|
[
"Apache-2.0"
] | null | null | null |
Notebooks/wfml-ejercicio-07-regresi-n-lineal.ipynb
|
jovenluk/WFML
|
52d46ef7a3687b9ce455db006fd7a5d7122f5980
|
[
"Apache-2.0"
] | null | null | null |
Notebooks/wfml-ejercicio-07-regresi-n-lineal.ipynb
|
jovenluk/WFML
|
52d46ef7a3687b9ce455db006fd7a5d7122f5980
|
[
"Apache-2.0"
] | null | null | null | 49,469 | 49,469 | 0.956296 |
[
[
[
"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"taxable_value: es un nodo para hacer un modelo de regresion lineal - el modelo permite hacer boosting y bagging (los llama aumento y agregación autododimante) abriendo el nodo en Opciones de creación\n\nel segundo nodo (con puntitos) ni mirarlo porque es lo mismo, pero antíguo (deprecated)\n\nEl tercer nodo es un GenLin (tiene como objetivo tratar el error no como ruido blanco - Mínimos cuadrados generalizados - Darwin y Watson...)\n\nEl PCA/Factor es un nodo (modelo) hace un PCA\n\nEl GenLin mejorará al resto y se verá que este método es mejor que el resto\n\nPARA MEJORAR EL MODELO - SELECCIONAR LOS COMPONENTES PRINCIPALES (PROBABLEMENTE 3) Y HACER LA REGRESION CON TODAS LAS COLUMNAS, CON LOS COMPONENTES Y CON LAS VARIABLES QUE MAS PESAN EN ESOS COMPONENTES (HAY QUE GRAFICAR)\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown"
]
] |
cbbc2b8bb5916d9d53dae8844c2fc61a1faa4efa
| 4,166 |
ipynb
|
Jupyter Notebook
|
examples/Marks/Object Model/HeatMap.ipynb
|
meeseeksmachine/bqplot
|
d8fae93274422e72b7ecf1f464d8d8197103a28d
|
[
"Apache-2.0"
] | 4 |
2019-04-10T17:25:40.000Z
|
2021-05-20T09:56:07.000Z
|
examples/Marks/Object Model/HeatMap.ipynb
|
meeseeksmachine/bqplot
|
d8fae93274422e72b7ecf1f464d8d8197103a28d
|
[
"Apache-2.0"
] | 4 |
2021-06-15T20:52:49.000Z
|
2022-03-02T10:41:23.000Z
|
examples/Marks/Object Model/HeatMap.ipynb
|
meeseeksmachine/bqplot
|
d8fae93274422e72b7ecf1f464d8d8197103a28d
|
[
"Apache-2.0"
] | 4 |
2019-05-08T00:52:01.000Z
|
2019-12-23T16:28:25.000Z
| 27.959732 | 287 | 0.553289 |
[
[
[
"# Heatmap",
"_____no_output_____"
],
[
"The `HeatMap` mark represents a 2d matrix of values as a color image. It can be used to visualize a 2d function, or a grayscale image for instance.\n\n\n`HeatMap` is very similar to the `GridHeatMap`, but should be preferred for a greater number of points (starting at around 100x100), to avoid overloading the browser. `GridHeatMap` offers more control (interactions, selections), and is better suited for a smaller number of points.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom bqplot import (Figure, LinearScale,ColorScale, \n Color, Axis, HeatMap, ColorAxis)\nfrom ipywidgets import Layout",
"_____no_output_____"
]
],
[
[
"### Data Input\n\n- `x` is a 1d array, corresponding to the abscissas of the points (size N)\n- `y` is a 1d array, corresponding to the ordinates of the points (size M)\n- `color` is a 2d array, $\\text{color}_{ij}$ is the intensity of the point $(x_i, y_j)$ (size (N, M))\n\nScales must be defined for each attribute:\n- a `LinearScale`, `LogScale` or `OrdinalScale` for `x` and `y`\n- a `ColorScale` for `color`",
"_____no_output_____"
]
],
[
[
"x = np.linspace(-5, 5, 200)\ny = np.linspace(-5, 5, 200)\nX, Y = np.meshgrid(x, y)\ncolor = np.cos(X**2 + Y**2)",
"_____no_output_____"
]
],
[
[
"## Plotting a 2-dimensional function \n\nThis is a visualization of the function $f(x, y) = \\text{cos}(x^2+y^2)$",
"_____no_output_____"
]
],
[
[
"x_sc, y_sc, col_sc = LinearScale(), LinearScale(), ColorScale(scheme='RdYlBu')\nheat = HeatMap(x=x, y=y, color=color,\n scales={'x': x_sc, 'y': y_sc, 'color': col_sc})\nax_x = Axis(scale=x_sc)\nax_y = Axis(scale=y_sc, orientation='vertical')\nax_c = ColorAxis(scale=col_sc)\nfig = Figure(marks=[heat], axes=[ax_x, ax_y, ax_c],\n title='Cosine',\n layout=Layout(width='650px', height='650px'),\n min_aspect_ratio=1, max_aspect_ratio=1, padding_y=0)\nfig",
"_____no_output_____"
]
],
[
[
"## Displaying an image\n\nThe `HeatMap` can be used as is to display a 2d grayscale image, by feeding the matrix of pixel intensities to the `color` attribute",
"_____no_output_____"
]
],
[
[
"from scipy.misc import ascent\nZ = ascent()\nZ = Z[::-1, :] \naspect_ratio = Z.shape[1]/Z.shape[0]",
"_____no_output_____"
],
[
"col_sc = ColorScale(scheme='Greys', reverse=True)\nscales = {'color': col_sc};\nascent = HeatMap(color=Z, scales=scales)\nimg = Figure(title='Ascent', marks=[ascent],\n layout=Layout(width='650px', height='650px'),\n min_aspect_ratio=aspect_ratio, max_aspect_ratio=aspect_ratio, padding_y=0)\nimg",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
cbbc3ed9b7671160a1d7de044d39fb8348eb34c9
| 207,348 |
ipynb
|
Jupyter Notebook
|
exercises/L14-exercises-solutions.ipynb
|
drewyoungren/mvc
|
f5217ae7888050d722c66de95756586f662841d2
|
[
"MIT"
] | null | null | null |
exercises/L14-exercises-solutions.ipynb
|
drewyoungren/mvc
|
f5217ae7888050d722c66de95756586f662841d2
|
[
"MIT"
] | null | null | null |
exercises/L14-exercises-solutions.ipynb
|
drewyoungren/mvc
|
f5217ae7888050d722c66de95756586f662841d2
|
[
"MIT"
] | null | null | null | 310.866567 | 116,996 | 0.932828 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, HTML, IFrame\nfrom ipywidgets import interact,fixed\nfrom mpl_toolkits import mplot3d\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom matplotlib.patches import Rectangle\n\nfrom numpy.linalg import norm\nfrom numpy import cos,sin,tan,arctan,exp,log,pi,sqrt,arange,linspace,meshgrid,array\n\nfrom scipy.integrate import quad, dblquad, tplquad\n\nfrom ipykernel.pylab.backend_inline import flush_figures\n\n%matplotlib inline\n\nplt.rcParams.update({\n \"figure.figsize\": (6,6),\n \"text.usetex\": True,\n# # \"font.family\": \"serif\",\n})\n\n# Uncomment the one that corresponds to your Jupyter theme\nplt.style.use('default')\n# plt.style.use('dark_background')\n# plt.style.use('fivethirtyeight')\n# plt.style.use('Solarize_Light2')",
"_____no_output_____"
]
],
[
[
"### Example 1\n\nLet $\\mathcal R = [1,5]\\times[2,3]$. \n\n a. Estimate $$\\iint_\\mathcal R (xy - y^2/6)dA$$ as a Riemann sum using 16 squares.",
"_____no_output_____"
]
],
[
[
"x = linspace(0,6)\ny = linspace(0,4)\nplt.figure(figsize=(9,6))\nx,y = meshgrid(x,y)\nplt.contour(x,y,x*y - y**2/6,levels=20);\nplt.fill([1,5,5,1],[2,2,3,3],alpha = .4);\n\n# Draw grid\nM,N = (8,2)\ndx = (5-1)/M\ndy = (3-2)/N\n\n[plt.plot([1 + i*dx,1 + i*dx],[2,3],'k') for i in range(M+1)];\n[plt.plot([1,5],[2 + j*dy,2 + j*dy],'k') for j in range(N+1)];\n",
"_____no_output_____"
]
],
[
[
"For $\\Delta x = (5 - 1)/M$ and $\\Delta y = (3-2)/N$, the Riemann sum is \n\n$$\\sum_{i=0}^{M-1} \\sum_{j=0}^{N-1} \\left( (1+i\\Delta x)(2+j\\Delta y) - (j+\\Delta y)^2/6\\right)\\Delta x\\,\\Delta y$$",
"_____no_output_____"
]
],
[
[
"M,N = (800,200)\n\ndx = (5-1)/M\ndy = (3-2)/N\n\ndef f(x,y):\n return x*y - y**2/6 \n\nsum([f(1 + (i)*dx,2+(j)*dy) for i in range(M) for j in range(N)])*dx*dy",
"_____no_output_____"
]
],
[
[
" b. Evaluate the definite integral in part a. ",
"_____no_output_____"
]
],
[
[
"dblquad(lambda y,x: x*y - y**2/6,1,5,2,3)[0]",
"_____no_output_____"
]
],
[
[
"<p style=\"padding-bottom:40%;\"> </p>",
"_____no_output_____"
],
[
"### Example 2 \n\nSwitch the order of integration of the following iterated integrals.\n\na. \n$$\\int_0^1 \\int_0^x f(x,y)\\,dy\\,dx $$",
"_____no_output_____"
]
],
[
[
"x = linspace(0,1,10) # this divides the interval from 0 to 1 into 10 pieces\nplt.figure(figsize=(6,6))\nplt.plot(x,x);\nplt.fill_between(x,x,alpha=.4);",
"_____no_output_____"
]
],
[
[
"Pick an arbitrary integrand, here $x \\cos(y)$, to test the 2 orders for integration. (Do mind the peculiarities of `dblquad` when dealing with orders of integration.)",
"_____no_output_____"
],
[
"$y$ on the inside: \n\n$$\\int_0^1 \\int_{0}^x x\\cos(y)\\, dy\\,dx $$",
"_____no_output_____"
]
],
[
[
"dblquad(lambda y,x: x*cos(y),0,1,0,lambda x:x)",
"_____no_output_____"
]
],
[
[
"$x$ on the inside: \n\n$$\\int_0^1 \\int_{y}^1 x\\cos(y)\\, dx\\,dy $$",
"_____no_output_____"
]
],
[
[
"dblquad(lambda x,y: x*cos(y),0,1,lambda y:y,1)",
"_____no_output_____"
]
],
[
[
"<p style=\"padding-bottom:40%;\"> </p>",
"_____no_output_____"
],
[
"b.\n$$\\int_0^1 \\int_0^{e^y} g(x,y)\\,dx\\,dy $$",
"_____no_output_____"
]
],
[
[
"y = linspace(0,1,25)\nplt.figure(figsize=(6,6))\nplt.plot(exp(y),y)\nplt.fill_betweenx(y,exp(y),alpha=.4);",
"_____no_output_____"
]
],
[
[
"$y$ on the inside (split region in 2): \n\n$$\\int_0^1 \\int_{0}^1 x\\cos(y)\\, dy\\,dx + \\int_1^e \\int_{\\ln x}^1 x\\cos(y)\\, dy\\,dx $$",
"_____no_output_____"
]
],
[
[
"dblquad(lambda y,x: x*cos(y),0,1,0,1)[0] + dblquad(lambda y,x: x*cos(y),1,exp(1),log,1)[0]",
"_____no_output_____"
]
],
[
[
"$x$ on the inside: \n\n$$\\int_0^1 \\int_{y}^1 x\\cos(y)\\, dx\\,dy $$",
"_____no_output_____"
]
],
[
[
"dblquad(lambda x,y: x*cos(y),0,1,0,exp)",
"_____no_output_____"
]
],
[
[
"<p style=\"padding-bottom:40%;\"> </p>",
"_____no_output_____"
],
[
"### Exercise\n\nLet $\\mathcal D$ be the region in the plane bounded by the curves $y=x+|x|$ and $y=x+1$. Compute $$ \\iint_\\mathcal D y\\,dA$$ as an iterated integral. ",
"_____no_output_____"
]
],
[
[
"x = linspace(-1,1,50)\nplt.figure(figsize=(6,6))\nplt.plot(x,x+1,x,abs(x)+x);\nplt.fill_between(x,x+1,abs(x)+x,alpha=.5);",
"_____no_output_____"
]
],
[
[
"$$\\int_{-1}^0\\int_{0}^{x+1} y \\,dy\\,dx + \\int_{0}^1\\int_{2x}^{x+1} y \\,dy\\,dx $$",
"_____no_output_____"
]
],
[
[
"dblquad(lambda y,x: y, -1,1, lambda x:x + abs(x),lambda x:x+1)",
"_____no_output_____"
]
],
[
[
"or $$\\int_0^2\\int_{y-1}^{y/2} y \\,dx\\,dy $$",
"_____no_output_____"
]
],
[
[
"dblquad(lambda x,y: y, 0,2, lambda y:y-1,lambda y:y/2)",
"_____no_output_____"
]
],
[
[
"<p style=\"padding-bottom:40%;\"> </p>",
"_____no_output_____"
],
[
"### Example\n\nSet up an iterated integral to find the mass of the region between the surface $z=x^2 + y ^2$ and $z = 2x + 1$ if the density is given by $\\rho(x,y,z) = 2z$. ",
"_____no_output_____"
]
],
[
[
"@interact(angle=(-90,90,6),vangle=(0,90,6))\ndef _(angle=24,vangle=30):\n fig = plt.figure(figsize=(10,10))\n ax= fig.add_subplot(111,projection='3d')\n r = np.linspace(0,1,50)\n th = np.linspace(0,2*pi,80)\n r,th = np.meshgrid(r,th)\n X = sqrt(2)*r*cos(th)+1\n Y = sqrt(2)*r*sin(th)\n Z = X**2 + Y**2\n ax.plot_wireframe(X,Y,Z,rcount=20,ccount=20)\n ax.plot_wireframe(X,Y,2*X + 1,rcount=20,ccount=20)\n ax.plot_surface(X,Y,0*r,rcount=20,ccount=20,color='gray',alpha=.4)\n ax.view_init(vangle,angle)\n# axes\n ax.plot([-.5,2.5],[0,0],[0,0],'k')\n ax.plot([0,0],[-1.5,1.5],[0,0],'k')\n ax.plot([0,0],[0,0],[0,5],'k')\n for c in 'xyz':\n# getattr(ax,f\"set_{c}lim\")([-1,1]); \n getattr(ax,f\"set_{c}label\")(f\"${c}$\",size=16)\n flush_figures();",
"_____no_output_____"
]
],
[
[
"<p style=\"padding-bottom:40%;\"> </p>",
"_____no_output_____"
],
[
"Find the boundary of the region (i.e., where the surfaces intersect).\n\n$$z = x^2 + y^2 = 2x+1$$",
"_____no_output_____"
],
[
"$$ (x-1)^2 + y^2 = 2$$",
"_____no_output_____"
],
[
"$$ y = \\pm \\sqrt{2-(x-1)^2}$$",
"_____no_output_____"
],
[
"$$ \\int_{1-\\sqrt{2}}^{1+\\sqrt{2}}\\int_{-\\sqrt{2-(x-1)^2}}^{\\sqrt{2-(x-1)^2}}\\int_{x^2 + y^2}^{2x+1} 2z \\,dzdydx$$",
"_____no_output_____"
]
],
[
[
"from scipy.integrate import tplquad\n# help(tplquad)",
"_____no_output_____"
]
],
[
[
"$$ \\int_{1-\\sqrt{2}}^{1+\\sqrt{2}}\\int_{-\\sqrt{2-(x-1)^2}}^{\\sqrt{2-(x-1)^2}}\\int_{x^2 + y^2}^{2x+1} 2z \\,dzdydx$$",
"_____no_output_____"
]
],
[
[
"tplquad(lambda z,y,x: 2*z,1-sqrt(2),1+sqrt(2),\n lambda x: -sqrt(2 - (x-1)**2),lambda x: sqrt(2 - (x-1)**2),\n lambda x,y: x**2 + y**2,lambda x,y: 2*x + 1)",
"_____no_output_____"
]
],
[
[
"<p style=\"padding-bottom:40%;\"> </p>",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cbbc43cbc8d3478f622d577a7025c79efc625770
| 20,253 |
ipynb
|
Jupyter Notebook
|
recommenderSystemWithPysparck.ipynb
|
AlainKuiete/DATA612ASSINGMENTS
|
c2ba9dd9d0ad4a2dbb4247958104a25d108f5e26
|
[
"BSD-4-Clause-UC"
] | null | null | null |
recommenderSystemWithPysparck.ipynb
|
AlainKuiete/DATA612ASSINGMENTS
|
c2ba9dd9d0ad4a2dbb4247958104a25d108f5e26
|
[
"BSD-4-Clause-UC"
] | null | null | null |
recommenderSystemWithPysparck.ipynb
|
AlainKuiete/DATA612ASSINGMENTS
|
c2ba9dd9d0ad4a2dbb4247958104a25d108f5e26
|
[
"BSD-4-Clause-UC"
] | null | null | null | 24.819853 | 425 | 0.454747 |
[
[
[
"## Recommender System With Pyspark ",
"_____no_output_____"
],
[
"### User Ratings Using Alternative Least Square ",
"_____no_output_____"
],
[
"Import libraries",
"_____no_output_____"
]
],
[
[
"from pyspark.sql import SparkSession",
"_____no_output_____"
],
[
"from pyspark.ml.recommendation import ALS\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.ml.tuning import TrainValidationSplit, ParamGridBuilder\n#import findspark\n#findspark.init()\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"Import SparkSession from pyspark.sql",
"_____no_output_____"
],
[
"Create an instance of SparkSession",
"_____no_output_____"
]
],
[
[
"spark = SparkSession.builder.appName('recommender').getOrCreate()",
"_____no_output_____"
]
],
[
[
"Print the tables in the catalog",
"_____no_output_____"
]
],
[
[
"print(spark.catalog.listTables())",
"[]\n"
]
],
[
[
"Load the file",
"_____no_output_____"
]
],
[
[
"file_path = \"C:\\\\DATA612\\\\DATA612ASSINGMENTS-master\\\\ml-20m\\\\ratings.csv\"",
"_____no_output_____"
],
[
"file_path_movies = \"C:\\\\DATA612\\\\DATA612ASSINGMENTS-master\\\\ml-20m\\\\movies.csv\"",
"_____no_output_____"
]
],
[
[
"Read the rating data",
"_____no_output_____"
]
],
[
[
"ratings = spark.read.csv(file_path, header=True)",
"_____no_output_____"
],
[
"movies = spark.read.csv(file_path_movies, header=True)",
"_____no_output_____"
]
],
[
[
"Show the ratings data",
"_____no_output_____"
]
],
[
[
"ratings.show()",
"+------+-------+------+----------+\n|userId|movieId|rating| timestamp|\n+------+-------+------+----------+\n| 1| 2| 3.5|1112486027|\n| 1| 29| 3.5|1112484676|\n| 1| 32| 3.5|1112484819|\n| 1| 47| 3.5|1112484727|\n| 1| 50| 3.5|1112484580|\n| 1| 112| 3.5|1094785740|\n| 1| 151| 4.0|1094785734|\n| 1| 223| 4.0|1112485573|\n| 1| 253| 4.0|1112484940|\n| 1| 260| 4.0|1112484826|\n| 1| 293| 4.0|1112484703|\n| 1| 296| 4.0|1112484767|\n| 1| 318| 4.0|1112484798|\n| 1| 337| 3.5|1094785709|\n| 1| 367| 3.5|1112485980|\n| 1| 541| 4.0|1112484603|\n| 1| 589| 3.5|1112485557|\n| 1| 593| 3.5|1112484661|\n| 1| 653| 3.0|1094785691|\n| 1| 919| 3.5|1094785621|\n+------+-------+------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"Add ratings to catalog",
"_____no_output_____"
]
],
[
[
"ratings.createOrReplaceTempView(\"ratings\")",
"_____no_output_____"
]
],
[
[
"Look at the type of each column",
"_____no_output_____"
]
],
[
[
"ratings.printSchema()",
"root\n |-- userId: string (nullable = true)\n |-- movieId: string (nullable = true)\n |-- rating: string (nullable = true)\n |-- timestamp: string (nullable = true)\n\n"
]
],
[
[
"Cast the columns to integers",
"_____no_output_____"
]
],
[
[
"ratings = ratings.withColumn(\"userId\", ratings.userId.cast(\"integer\"))\nratings = ratings.withColumn(\"movieId\", ratings.movieId.cast(\"integer\"))\nratings = ratings.withColumn(\"rating\", ratings.rating.cast(\"float\"))",
"_____no_output_____"
],
[
"movies = movies.withColumn(\"movieId\", movies.movieId.cast(\"integer\"))",
"_____no_output_____"
],
[
"ratings.printSchema()",
"root\n |-- userId: integer (nullable = true)\n |-- movieId: integer (nullable = true)\n |-- rating: float (nullable = true)\n |-- timestamp: string (nullable = true)\n\n"
],
[
"movies.printSchema()",
"root\n |-- movieId: integer (nullable = true)\n |-- title: string (nullable = true)\n |-- genres: string (nullable = true)\n\n"
]
],
[
[
"Eliminate the timestamp column in ratings dataframe",
"_____no_output_____"
]
],
[
[
"ratings = ratings.select(['userId', 'movieId', 'rating'])",
"_____no_output_____"
]
],
[
[
"Summirized Statistics on the data ratings",
"_____no_output_____"
]
],
[
[
"ratings.describe().show()",
"+-------+-----------------+------------------+------------------+\n|summary| userId| movieId| rating|\n+-------+-----------------+------------------+------------------+\n| count| 20000263| 20000263| 20000263|\n| mean|69045.87258292554| 9041.567330339605|3.5255285642993797|\n| stddev|40038.62665316201|19789.477445413086| 1.051988919294227|\n| min| 1| 1| 0.5|\n| max| 138493| 131262| 5.0|\n+-------+-----------------+------------------+------------------+\n\n"
]
],
[
[
"Splitting the data into train and test sets",
"_____no_output_____"
]
],
[
[
"training, test = ratings.randomSplit([0.8,0.2])",
"_____no_output_____"
]
],
[
[
"Create ALS model",
"_____no_output_____"
]
],
[
[
"# Build the recommendation model using ALS on the training data\nals = ALS(maxIter=5, regParam=0.01, userCol=\"userId\", itemCol=\"movieId\", ratingCol=\"rating\", coldStartStrategy=\"drop\")\nmodel = als.fit(training)",
"_____no_output_____"
]
],
[
[
"RMSE evaluator",
"_____no_output_____"
]
],
[
[
"evaluator = RegressionEvaluator(metricName='rmse', labelCol='rating', predictionCol=\"prediction\")",
"_____no_output_____"
]
],
[
[
"Bulid the cross validation",
"_____no_output_____"
]
],
[
[
"tvs = TrainValidationSplit(estimator=als, evaluator=evaluator)",
"_____no_output_____"
]
],
[
[
"Fit ALS to the training data",
"_____no_output_____"
]
],
[
[
"model = als.fit(training)",
"_____no_output_____"
]
],
[
[
"Summary statistic of predictions\n",
"_____no_output_____"
],
[
"Generate predictions",
"_____no_output_____"
]
],
[
[
"predictions = model.transform(test)\npredictions.show()",
"+------+-------+------+----------+\n|userId|movieId|rating|prediction|\n+------+-------+------+----------+\n| 53338| 148| 1.0| 2.415784|\n| 19067| 148| 2.0| 1.5705694|\n| 87301| 148| 2.0| 2.8955607|\n| 88527| 148| 2.0| 1.6705151|\n| 92852| 148| 3.0| 2.5834496|\n| 81218| 148| 1.0| 3.7903538|\n| 91782| 148| 3.0| 3.6888664|\n| 60081| 148| 2.0| 2.6538968|\n| 94994| 148| 4.0| 3.2066813|\n| 46380| 148| 4.0| 2.8373895|\n|109121| 148| 4.0| 3.891463|\n| 28361| 148| 4.0| 4.963693|\n| 35498| 148| 3.0| 2.9753017|\n| 61815| 148| 3.0| 4.476904|\n| 4914| 148| 2.0| 2.8983908|\n| 10434| 148| 3.0| 2.9129286|\n| 44926| 148| 2.0| 2.6564178|\n| 3439| 148| 1.0| 4.118425|\n| 18797| 148| 2.0| 2.3620577|\n| 55876| 148| 4.0| 4.3065305|\n+------+-------+------+----------+\nonly showing top 20 rows\n\n"
]
],
[
[
"Evaluate with RMSE",
"_____no_output_____"
]
],
[
[
"rmse = evaluator.evaluate(predictions)",
"_____no_output_____"
]
],
[
[
"Evaluation metric and model parametres",
"_____no_output_____"
]
],
[
[
"evaluator = RegressionEvaluator(metricName='rmse', labelCol='rating')\nrmse = evaluator.evaluate(predictions)\nrmse",
"_____no_output_____"
],
[
"user_recs = model.recommendForAllUsers(10)",
"_____no_output_____"
],
[
"user_recs.show()",
"+------+--------------------+\n|userId| recommendations|\n+------+--------------------+\n| 148|[[84829, 12.42627...|\n| 463|[[66579, 9.799184...|\n| 471|[[98126, 9.141088...|\n| 496|[[73529, 9.036393...|\n| 833|[[66289, 14.06172...|\n| 1088|[[116951, 12.0657...|\n| 1238|[[116951, 7.81076...|\n| 1342|[[59680, 11.36778...|\n| 1580|[[88674, 17.24096...|\n| 1591|[[34466, 13.62567...|\n| 1645|[[84829, 9.214763...|\n| 1829|[[123571, 8.62417...|\n| 1959|[[56779, 9.660917...|\n| 2122|[[116489, 11.5092...|\n| 2142|[[115921, 10.8225...|\n| 2366|[[116951, 14.4172...|\n| 2659|[[128366, 10.2077...|\n| 2866|[[70495, 8.592925...|\n| 3175|[[108768, 11.5466...|\n| 3749|[[70806, 13.35640...|\n+------+--------------------+\nonly showing top 20 rows\n\n"
],
[
"def get_recs_for_user(recs):\n recs = recs.select(\"recommendations.movieId\", \"recommendations.rating\")\n movies = recs.select(\"movieId\").toPandas().iloc[0,0]\n ratings = recs.select(\"rating\").toPandas().iloc[0,0]\n ratings_matrix = pd.DataFrame(movies, columns = [\"movieId\"])\n ratings_matrix[\"ratings\"] = ratings\n ratings_matrix_ps = spark.createDataFrame(ratings_matrix)\n return ratings_matrix_ps",
"_____no_output_____"
]
],
[
[
"Recommendation for user 148",
"_____no_output_____"
]
],
[
[
"user148_recs = get_recs_for_user(user_recs)",
"_____no_output_____"
],
[
"movies = movies.select('movieId', 'title')",
"_____no_output_____"
],
[
"ru = user148_recs.toPandas()\nmr = movies .toPandas()\npd.merge(mr, ru, on = 'movieId').sort_values(by=\"ratings\", ascending=False)\n",
"_____no_output_____"
]
],
[
[
"## Conlusion",
"_____no_output_____"
],
[
"Comparison to other models I developed before. Spark allow me to use all the 20 million ratings instead of the little subset of data i have been manipulated. We alaways say the more the data the better the alagorithm. The RMSE with spark is far aways better than what I have in previous models. But it still is better for impovement, to consider AWS or Microsft Asure to gain skills on using clusters with large data.",
"_____no_output_____"
],
[
"#### References:\n- Spark Documentation https://spark.apache.org/docs/2.1.0/index.html.\n- Jamen Long: Recommendation Engines Using ALS in PySpark (MovieLens Dataset)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
cbbc4d425c96bde8b3d4018c43a32db388a8480d
| 604,886 |
ipynb
|
Jupyter Notebook
|
pennylane/2_Graph_optimization_with_QAOA.ipynb
|
wkcwells/amazon-braket-examples
|
19a11641d77951e6619d7941cd2488242b18e937
|
[
"Apache-2.0"
] | null | null | null |
pennylane/2_Graph_optimization_with_QAOA.ipynb
|
wkcwells/amazon-braket-examples
|
19a11641d77951e6619d7941cd2488242b18e937
|
[
"Apache-2.0"
] | null | null | null |
pennylane/2_Graph_optimization_with_QAOA.ipynb
|
wkcwells/amazon-braket-examples
|
19a11641d77951e6619d7941cd2488242b18e937
|
[
"Apache-2.0"
] | null | null | null | 740.374541 | 144,136 | 0.952618 |
[
[
[
"# Graph optimization with QAOA",
"_____no_output_____"
],
[
"One application area where near-term quantum hardware is expected to shine is in graph optimization. Graph-based problems are interesting to explore because they have both strong links to practical use-cases (such as logistics and social networks) and are also often hard to solve.",
"_____no_output_____"
],
[
"\n\nGraphs are composed of a collection of interconnected nodes. For example, here is a six-node graph:",
"_____no_output_____"
]
],
[
[
"import networkx as nx\n\nn_nodes = 6\np = 0.5 # probability of an edge\nseed = 1967\n\ng = nx.erdos_renyi_graph(n_nodes, p=p, seed=seed)\npositions = nx.spring_layout(g, seed=seed)\n\nnx.draw(g, with_labels=True, pos=positions, node_size=600)",
"_____no_output_____"
]
],
[
[
"Many practical use-cases can be mapped to a graph structure. In a social network, the nodes of a graph can represent users and the edges can represent connections between the users.\n\nWe often need to solve optimization problems to identify important properties of the graph. These problems can include:\n\n- finding large clusters of fully connected nodes (known as [maximum clique](https://en.wikipedia.org/wiki/Clique_problem))\n- finding a minimum number of nodes that connect to every edge in the graph (known as [minimum vertex cover](https://en.wikipedia.org/wiki/Vertex_cover))\n- finding a partition of the nodes into two subsets so that the greatest number of edges are intersected (known as [maximum cut](https://en.wikipedia.org/wiki/Maximum_cut))\n\nThis tutorial shows how a quantum algorithm called QAOA can be run using PennyLane and Braket to solve graph-based optimization problems. We begin with a small 6-node graph and then push the limits to run a 20-node graph using parallel executions on SV1.",
"_____no_output_____"
],
[
"## QAOA",
"_____no_output_____"
],
[
"The quantum approximate optimization algorithm (QAOA) is an algorithm designed for near-term hardware. It can find approximate solutions to combinatorial optimization problems such as graph-based problems.\n\nQAOA is covered in more depth in the [QAOA_braket](../hybrid_quantum_algorithms/QAOA/QAOA_braket.ipynb) notebook as well as in PennyLane [tutorials](https://pennylane.ai/qml/demos/tutorial_qaoa_intro.html). The following is a short summary to refresh the key concepts.\n\n\nQAOA begins by associating the optimization problem with a cost Hamiltonian $H_C$ and choosing a mixer Hamiltonian $H_{M}$. It proceeds by repetitively applying multiple layers of the unitaries $\\exp{(-i \\gamma_i H_C)}$ and $\\exp{(-i \\alpha_i H_M)}$ with controllable parameters $\\gamma_i$ and $\\alpha_i$, as shown in the diagram below.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"The algorithm then measures the cost Hamiltonian $H_C$. By varying the controllable parameters $\\gamma_i$ and $\\alpha_i$, the expectation value of the cost Hamiltonian is minimized. Applying the optimized unitaries prepares a quantum state that contains information about the optimal configuration for the problem. Sampling from the state will give a candidate solution.\n\n<div class=\"alert alert-block alert-info\">\n<b>Summary</b> If you are less familiar with QAOA and quantum algorithms, the key takeaway message is that the algorithm involves an optimization of the controllable parameters $\\gamma_i$ and $\\alpha_i$ that the quantum circuit depends on. This can be tackled naturally using the PennyLane/Braket pipeline.\n</div>",
"_____no_output_____"
],
[
"## Fixing the problem",
"_____no_output_____"
],
[
"Let's consider the graph above and aim to find the maximum clique, i.e., the largest set of nodes that are fully connected.\n\nTo solve this using QAOA in PennyLane and Braket, we first calculate the cost Hamiltonian $H_C$ and corresponding mixer Hamiltonian $H_M$",
"_____no_output_____"
]
],
[
[
"import pennylane as qml\nfrom pennylane import numpy as np\n\nqml.enable_tape() # unlocks the latest features in PennyLane\n\ncost_h, mixer_h = qml.qaoa.max_clique(g, constrained=False)\n# constrained=True results in greater circuit depth but potentially better solutions\n\nprint(\"Cost Hamiltonian:\\n\", cost_h)\nprint(\"Mixer Hamiltonian:\\n\", mixer_h)",
"Cost Hamiltonian:\n (0.75) [Z0 Z1]\n+ (0.25) [Z0]\n+ (-0.5) [Z1]\n+ (0.75) [Z1 Z4]\n+ (0.25) [Z4]\n+ (0.75) [Z2 Z5]\n+ (0.25) [Z2]\n+ (-0.5) [Z5]\n+ (0.75) [Z3 Z5]\n+ (0.25) [Z3]\nMixer Hamiltonian:\n (1) [X0]\n+ (1) [X1]\n+ (1) [X2]\n+ (1) [X3]\n+ (1) [X4]\n+ (1) [X5]\n"
]
],
[
[
"## Setting up the algorithm",
"_____no_output_____"
],
[
"We begin by setting up a single QAOA layer",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"This layer contains the controllable parameters $\\gamma_i$ and $\\alpha_i$.",
"_____no_output_____"
]
],
[
[
"def qaoa_layer(gamma, alpha):\n qml.qaoa.cost_layer(gamma, cost_h)\n qml.qaoa.mixer_layer(alpha, mixer_h)",
"_____no_output_____"
]
],
[
[
"The full QAOA circuit is then given by:",
"_____no_output_____"
]
],
[
[
"n_layers = 4\nwires = n_nodes\n\ndef circuit(params, **kwargs):\n \n for i in range(wires): # Prepare an equal superposition over all qubits\n qml.Hadamard(wires=i)\n \n qml.layer(qaoa_layer, n_layers, params[0], params[1])",
"_____no_output_____"
]
],
[
[
"<div class=\"alert alert-block alert-info\">\n<b>Note</b> We have chosen to use 4 QAOA layers. The choice of depth is a tradeoff between improved solutions (for greater depth) and increasing runtime.\n</div>",
"_____no_output_____"
],
[
"There are overall eight controllable parameters: the first four are for $\\gamma_i$ of the cost Hamiltonian and the second four are for $\\alpha_i$ of the mixer Hamiltonian:",
"_____no_output_____"
]
],
[
[
"np.random.seed(1967)\nparams = np.random.uniform(size=[2, n_layers])\nparams",
"_____no_output_____"
]
],
[
[
"For this part of the tutorial, we will use the local Braket simulator (see the [introduction tutorial](./0_Getting_started.ipynb) for further details):",
"_____no_output_____"
]
],
[
[
"dev = qml.device(\"braket.local.qubit\", wires=wires)",
"_____no_output_____"
]
],
[
[
"The final step is to define the cost function. In QAOA, the output cost function is given by the expectation value of the cost Hamiltonian $H_C$, i.e.,",
"_____no_output_____"
]
],
[
[
"cost_function = qml.ExpvalCost(circuit, cost_h, dev, optimize=True)",
"_____no_output_____"
]
],
[
[
"## Running the algorithm",
"_____no_output_____"
],
[
"Now that we have set up the cost function, we just need to pick an optimizer and run the standard optimization loop.",
"_____no_output_____"
]
],
[
[
"optimizer = qml.GradientDescentOptimizer()",
"_____no_output_____"
],
[
"print(\"Initial cost:\", cost_function(params))\n\nfor i in range(10):\n params = optimizer.step(cost_function, params)\n cost_eval = cost_function(params)\n print(f\"Completed iteration {i + 1}, cost function:\", cost_eval)",
"Initial cost: 0.6516478992954958\nCompleted iteration 1, cost function: -0.668741012754724\nCompleted iteration 2, cost function: -1.715907278390606\nCompleted iteration 3, cost function: -1.950115458213475\nCompleted iteration 4, cost function: -2.0330929514098757\nCompleted iteration 5, cost function: -2.1057100865780667\nCompleted iteration 6, cost function: -2.174831755527574\nCompleted iteration 7, cost function: -2.242888962573279\nCompleted iteration 8, cost function: -2.3112830459267624\nCompleted iteration 9, cost function: -2.3807887017553826\nCompleted iteration 10, cost function: -2.4517855696487607\n"
]
],
[
[
"## Investigating the result",
"_____no_output_____"
],
[
"How do we know how well the algorithm has performed? To do this, we can sample from the circuit using the optimized parameters. This will give us binary samples that allow us to select which nodes of the graph to use as part of our clique, e.g., either by simply selecting the most common sample or selecting the sample with the lowest corresponding energy.\n\nLet's take some samples and see which ones occur most frequently. To start, we'll create a QNode designed for sampling.",
"_____no_output_____"
]
],
[
[
"shots = 100000\ndev = qml.device(\"braket.local.qubit\", wires=wires, shots=shots)\n\[email protected](dev)\ndef samples(params):\n circuit(params)\n return [qml.sample(qml.PauliZ(i)) for i in range(wires)]",
"_____no_output_____"
]
],
[
[
"Samples can now be generated and converted into probabilities:",
"_____no_output_____"
]
],
[
[
"from collections import Counter\n\ns = samples(params).T\ns = (1 - s.numpy()) / 2\ns = map(tuple, s)\n\ncounts = Counter(s)\nindx = np.ndindex(*[2] * wires)\n\nprobs = {p: counts.get(p, 0) / shots for p in indx}",
"_____no_output_____"
]
],
[
[
"We can now plot the probability distribution over all possible samples:",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\nplt.style.use(\"seaborn\")\nlabels = [\"{0:{fill}6b}\".format(i, fill='0') for i in range(len(probs))]\n\nplt.bar(range(2 ** wires), probs.values())\nplt.xticks([i for i in range(len(probs))], labels, rotation='vertical', size=12)\nplt.yticks(size=12)\n\nplt.xlabel(\"Sample\", size=20)\nplt.ylabel(\"Probability\", size=20)\n\nfig = plt.gcf()\nfig.set_size_inches(16, 8)\nplt.show()",
"_____no_output_____"
]
],
[
[
"From the plot, it is clear that the sample ``101110`` has the greatest probability. Since each qubit corresponds to a node, this sample selects the nodes ``[0, 2, 3, 4]`` to form a subgraph. Let's check if this is a clique, i.e., if all of the nodes are connected:",
"_____no_output_____"
]
],
[
[
"sub = g.subgraph([0, 2, 3, 4])\nnx.draw(g, pos=positions, with_labels=True)\nnx.draw(sub, pos=positions, node_color=\"r\", edge_color=\"r\")",
"_____no_output_____"
]
],
[
[
"Great, this is a clique! Moreover, it is the *largest* clique in this six-node graph. QAOA, using PennyLane and Braket, has helped us to solve the maximum clique problem!",
"_____no_output_____"
],
[
"## Scaling-up QAOA for larger graphs",
"_____no_output_____"
],
[
"We have seen how we can use PennyLane on Braket to solve graph optimization problems with QAOA. However, we have so far restricted to a simple six-node graph and used the local Braket device. Let's now be more ambitious and try to solve an optimization problem on a twenty-node graph!",
"_____no_output_____"
]
],
[
[
"import networkx as nx\n\nnodes = wires = 20\nedges = 60\nseed = 1967\n\ng = nx.gnm_random_graph(nodes, edges, seed=seed)\npositions = nx.spring_layout(g, seed=seed)\n\nnx.draw(g, with_labels=True, pos=positions)",
"_____no_output_____"
]
],
[
[
"A twenty-node graph (which maps to the same number of qubits) definitely puts us in a regime where the local simulator will be slow to execute. As we have discussed in the [parallelization tutorial](./1_Parallelized_optimization_of_quantum_circuits.ipynb), this slowness will be compounded when it comes to training the circuit, with each optimization step resulting in multiple device executions due to calculation of the gradient. Thankfully, the remote SV1 simulator is highly suited to speeding up gradient calculations through paralellization. We now show that this makes training the circuit for QAOA solvable within a reasonable time.\n\nLet's first load a new device:",
"_____no_output_____"
]
],
[
[
"# Please enter the S3 bucket you created during onboarding\n# (or any other S3 bucket starting with 'amazon-braket-' in your account) in the code below\n\nmy_bucket = f\"amazon-braket-Your-Bucket-Name\" # the name of the bucket\nmy_prefix = \"Your-Folder-Name\" # the name of the folder in the bucket\ns3_folder = (my_bucket, my_prefix)\n\ndevice_arn = \"arn:aws:braket:::device/quantum-simulator/amazon/sv1\"",
"_____no_output_____"
],
[
"dev = qml.device(\n \"braket.aws.qubit\",\n device_arn=device_arn,\n wires=wires,\n s3_destination_folder=s3_folder,\n parallel=True,\n max_parallel=20,\n poll_timeout_seconds=30,\n)",
"_____no_output_____"
]
],
[
[
"Note the specification of ``max_parallel=20``. This means that up to ``20`` circuits will be executed in parallel on SV1 (the default value is ``10``).\n\n<div class=\"alert alert-block alert-warning\">\n<b>Caution:</b> Increasing the maximum number of parallel executions can result in a greater rate of spending on simulation fees. The value must also be set bearing in mind your\n service <a href=\"https://docs.aws.amazon.com/braket/latest/developerguide/braket-quotas.html\">quota</a>, which can be found <a href=\"https://console.aws.amazon.com/servicequotas/home\">here</a>.\n</div>\n\nWe now just need to set up the QAOA circuit and optimization problem in the same way as before. However, we will switch to a new optimization problem to keep things interesting: aiming to solve maximum cut, with the objective of partitioning the graph's nodes into two groups so that the greatest number of edges are shared between the groups (see the image below). This problem is NP-hard, so we expect it to be tough as we increase the number of graph nodes.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"cost_h, mixer_h = qml.qaoa.maxcut(g)",
"_____no_output_____"
],
[
"def qaoa_layer(gamma, alpha):\n qml.qaoa.cost_layer(gamma, cost_h)\n qml.qaoa.mixer_layer(alpha, mixer_h)",
"_____no_output_____"
],
[
"n_layers = 2\n\ndef circuit(params, **kwargs):\n \n for i in range(wires): # Prepare an equal superposition over all qubits\n qml.Hadamard(wires=i)\n \n qml.layer(qaoa_layer, n_layers, params[0], params[1])",
"_____no_output_____"
],
[
"np.random.seed(1967)\nparams = 0.01 * np.random.uniform(size=[2, n_layers])",
"_____no_output_____"
],
[
"cost_function = qml.ExpvalCost(circuit, cost_h, dev, optimize=True)",
"_____no_output_____"
]
],
[
[
"A variety of [optimizers](https://pennylane.readthedocs.io/en/stable/introduction/optimizers.html) are available in PennyLane. Let's choose ``AdagradOptimizer``:",
"_____no_output_____"
]
],
[
[
"optimizer = qml.AdagradOptimizer(stepsize=0.1)",
"_____no_output_____"
]
],
[
[
"We're now set up to train the circuit! Note, if you are training this circuit yourself, you may want to increase the number of iterations in the optimization loop and also investigate changing the number of QAOA layers.\n\n<div class=\"alert alert-block alert-warning\">\n<b>Caution:</b> Running the following cell will take a long time and will result in <a href=\"https://aws.amazon.com/braket/pricing/\">usage fees</a> charged to your AWS account. Only uncomment the cell if you are comfortable with the potential wait-time and costs. We recommend monitoring the Billing & Cost Management Dashboard on the AWS console and being aware that jobs involving a large number of qubits can be costly.\n</div>",
"_____no_output_____"
]
],
[
[
"# import time\n\n# iterations = 10\n\n# for i in range(iterations): \n# t0 = time.time()\n \n# params, cost_before = optimizer.step_and_cost(cost_function, params) \n\n# t1 = time.time()\n \n# if i == 0:\n# print(\"Initial cost:\", cost_before)\n# else:\n# print(f\"Cost at step {i}:\", cost_before)\n\n# print(f\"Completed iteration {i + 1}\")\n# print(f\"Time to complete iteration: {t1 - t0} seconds\")\n\n# print(f\"Cost at step {iterations}:\", cost_function(params))\n\n# np.save(\"params.npy\", params)\n# print(\"Parameters saved to params.npy\")",
"_____no_output_____"
]
],
[
[
"<code>\nInitial cost: -29.98570234095951\nCompleted iteration 1\nTime to complete iteration: 93.96246099472046 seconds\nCost at step 1: -27.154071768632154\nCompleted iteration 2\nTime to complete iteration: 84.80994844436646 seconds\nCost at step 2: -29.98726230006233\nCompleted iteration 3\nTime to complete iteration: 83.13504934310913 seconds\nCost at step 3: -29.999163153600062\nCompleted iteration 4\nTime to complete iteration: 85.61391234397888 seconds\nCost at step 4: -30.002158646044307\nCompleted iteration 5\nTime to complete iteration: 86.70688223838806 seconds\nCost at step 5: -30.012058444011906\nCompleted iteration 6\nTime to complete iteration: 83.26341080665588 seconds\nCost at step 6: -30.063709712612443\nCompleted iteration 7\nTime to complete iteration: 85.25566911697388 seconds\nCost at step 7: -30.32522304705352\nCompleted iteration 8\nTime to complete iteration: 83.55433392524719 seconds\nCost at step 8: -31.411030331978186\nCompleted iteration 9\nTime to complete iteration: 84.08745908737183 seconds\nCost at step 9: -33.87153965616938\nCompleted iteration 10\nTime to complete iteration: 87.4032838344574 seconds\nCost at step 10: -36.05424874438809\nParameters saved to params.npy\n</code>",
"_____no_output_____"
],
[
"This example shows us that a 20-qubit QAOA problem can be trained within around 1-2 minutes per iteration by using parallel executions on the Amazon Braket SV1 device to speed up gradient calculations. If this problem were run on the local Braket simulator without parallelization, we would expect for training to take much longer.\n\nPre-optimized parameters for the above 2-layer QAOA circuit after 30 iterations can be loaded with:",
"_____no_output_____"
]
],
[
[
"params_30 = np.load(\"params_30.npy\")",
"_____no_output_____"
]
],
[
[
"<div class=\"alert alert-block alert-info\">\n<b>What's next?</b> See if you can analyze the trained QAOA circuit for the 20-node graph by adapting the earlier analysis. Also, check out the followup tutorial on quantum chemistry.\n</div>",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cbbc5b2cbe169d96dae1a5b11f27751b70f0262a
| 617,713 |
ipynb
|
Jupyter Notebook
|
DSPNP_practical3/DSPNP_practical3.ipynb
|
Davidobot/cl-datasci-pnp-2021
|
99a3180f59d92e00732c13ae3a14386c06dc6bac
|
[
"Apache-2.0"
] | null | null | null |
DSPNP_practical3/DSPNP_practical3.ipynb
|
Davidobot/cl-datasci-pnp-2021
|
99a3180f59d92e00732c13ae3a14386c06dc6bac
|
[
"Apache-2.0"
] | null | null | null |
DSPNP_practical3/DSPNP_practical3.ipynb
|
Davidobot/cl-datasci-pnp-2021
|
99a3180f59d92e00732c13ae3a14386c06dc6bac
|
[
"Apache-2.0"
] | null | null | null | 213.151484 | 112,192 | 0.892537 |
[
[
[
"# Practical Session 3: Ensemble Learning Techniques\n\n*Notebook by Ekaterina Kochmar*",
"_____no_output_____"
],
[
"This practical will address the use of ensemble-based learning techniques. You will be working with the otherwise familiar settings of classification and regression tasks. In this practical, you will use the familiar datasets and will also learn how to generate artificial datasets.\n\n## Learning objectives\n\nIn this practical you will learn about:\n- simple voting classifiers using hard and soft voting strategies\n- bagging and pasting ensembles\n- boosting and early stopping\n- popular ensemble-based learning algorithms including `RandomForests` and `AdaBoost`\n\nIn addition, you will get more practice with `sklearn's` machine learning routines, and you will learn how to implement ensembles both from scratch and using `sklearn's` implementation.\n\n**Bibliography**: Aurelien Geron, *Hands-On Machine Learning with Scikit-Learn and TensorFlow*.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"First of all, let's import a few common modules, ensure `matplotlib` plots are inline and define the parameters for the figures. Feel free to add your own settings for the notebook here:",
"_____no_output_____"
]
],
[
[
"# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12",
"_____no_output_____"
]
],
[
[
"## Simple voting classifiers: hard and soft voting strategies\n\n[The wisdom of the crowd](https://en.wikipedia.org/wiki/Wisdom_of_the_crowd) is the collective opinion of a group of individuals rather than that of a single expert. It has been shown that if you ask a complex question to thousands of random people and then aggregate their answers, in many cases you are likely to get an answer that is more accurate than an expert's prediction. This effect is nowadays widely used in practice and can be said to provide the basis for crowdsourcing approaches.\n\nBy analogy, if you combine decisions of multiple diverse classifiers, the resulting vote will often be better than a vote of any single predictor. Such combination of classifiers is called _ensemble_. Perhaps somewhat counterintuitively, the aggregated vote of an ensemble often achieves a higher accuracy than the best classifier in the ensemble. This works even if each predictor in the ensemble is a _weak learner_ **(!)** (meaning that it works only slightly better than random guessing). The ensemble can still be a _strong learner_ achieving high accuracy, provided that there are:\n- a sufficient number of weak learners, and\n- their decisions are sufficiently diverse.\n\nHow is this possible? Here is a statistical analogy: suppose you have a slightly biased coin that has a $51\\%$ chance of coming up heads and a $49\\%$ chance of coming up tails. If you toss it $1000$ times, you will get around $510$ heads and $490$ tails, and hence a majority of heads. If you do the math or run simulations, you will find out that the probability of obtaining the majority of heads after $1000$ tosses of this coin approaches $73\\%$. After $10K$ tosses, the probability of having a majority of heads is about $97\\%$. This is explained by the [_law of large numbers_](https://en.wikipedia.org/wiki/Law_of_large_numbers): over a large number of tosses the ratio of heads gets closer to the probability of heads ($51\\%$).",
"_____no_output_____"
]
],
[
[
"import scipy, math\nimport scipy.special\n\ndef probability(p, n, x):\n binom = scipy.special.comb(n, x, exact=True) * math.pow(p, x) * math.pow((1-p), n-x)\n return binom\n\nprob_maj = 0.0\nfor x in range(501, 1001):\n prob_maj += probability(0.51, 1000, x)\n \nprint(prob_maj)",
"0.7260985557305037\n"
]
],
[
[
"The code below simulates $10$ independent series of coin tosses, and then plots the results. You can see in the figure below that as the number of tosses increases, all $10$ series end up close to $51\\%$ and thus are consistently above $50\\%$.",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\nheads_proba = 0.51\ncoin_tosses = (np.random.rand(10000, 10) < heads_proba).astype(np.int32)\ncumulative_heads_ratio = np.cumsum(coin_tosses, axis=0) / np.arange(1, 10001).reshape(-1, 1)\n\nplt.figure(figsize=(8,3.5))\nplt.plot(cumulative_heads_ratio)\nplt.plot([0, 10000], [0.51, 0.51], \"k--\", linewidth=2, label=\"51%\")\nplt.plot([0, 10000], [0.5, 0.5], \"k-\", label=\"50%\")\nplt.xlabel(\"Number of coin tosses\")\nplt.ylabel(\"Heads ratio\")\nplt.legend(loc=\"lower right\")\nplt.axis([0, 10000, 0.42, 0.58])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Similarly, if you combine $1000$ classifiers, each of which is individually correct only $51\\%$ of the time, and then predict the majority voted class, you can get up to $73\\%$ accuracy.\n\nFor this to work, the classifiers need to be independent and sufficiently diverse, i.e. they need to make **independent, uncorrelated errors**. In practice, this is hard to achieve because the classifiers are usually all trained on the same data.\n\nLet's apply voting strategy to some data: this time, we'll be working with a synthetic (i.e., artificially generated) [`moons` dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html), that includes two interleaving half circles and provides a good 'toy' example for testing out classification strategies.\n\nLet's first generate the data points and visualise them on a plot:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_moons\nX, y = make_moons(n_samples=500, noise=0.30, random_state=42)\n\ndef plot_dataset(X, y, axes):\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"bs\")\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"g^\")\n plt.axis(axes)\n plt.grid(True, which='both')\n plt.xlabel(r\"$x_1$\", fontsize=20)\n plt.ylabel(r\"$x_2$\", fontsize=20, rotation=0)\n\nplot_dataset(X, y, [-1.5, 2.5, -1, 1.5])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Next, let's split the dataset into training and test sets and train some classifiers on the training data:",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)",
"_____no_output_____"
]
],
[
[
"The code below shows how to apply several classifiers to this data, with a couple of classifiers that we haven't used before in this course: [`RandomForestClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) and [`Support Vector Machines`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) (`SVC`) classifier. For the moment, don't worry about the particular details of the classifiers' implementation (if you are interested in learning more about them, check out `sklearn's` documentation; we'll discuss `RandomForestClassifier` in this practical; `Support Vector Machines` is covered in detail in the Part II [Machine Learning and Bayesian Inference](https://www.cl.cam.ac.uk/teaching/2021/MLBayInfer/) course). The point here is that we are combining *diverse* classifiers in an ensemble: in essence, `RandomForestClassifier` runs recursive partitioning on the data, while `Support Vector Machines` try to construct a linear boundary between the classes (as discussed in Lecture 3). Feel free to experiment with your own selection of classifiers from the [`sklearn's`](https://scikit-learn.org/stable/supervised_learning.html#supervised-learning) suite.",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import VotingClassifier\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\n\nlog_clf = LogisticRegression(random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(random_state=42)\n\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='hard')\nvoting_clf.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"Let's look into how correlated the predictions of the classifiers are: first, get the predictions on the test data, next store them as a `pandas` DataFrame, and finally apply `.corr()` function:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ndef get_predictions(clf):\n clf.fit(X_train, y_train)\n return clf.predict(X_test)\n\n\npreds = {'lr': get_predictions(log_clf), \n 'rf': get_predictions(rnd_clf), \n 'svc': get_predictions(svm_clf)}\ndf = pd.DataFrame(data=preds)\ndf[:100]",
"_____no_output_____"
],
[
"df.corr()",
"_____no_output_____"
]
],
[
[
"Will this correlation in the individual classifiers' predictions be sufficient for an ensemble? Let's check this out by combining the votes with a *hard voting strategy*: the ensemble classifier will simply choose the majority class predicted by the three classifiers.\n\nThe code below prints out individual classifiers' accuracy scores along with the ensemble's accuracy score:",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\n\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))",
"LogisticRegression 0.864\nRandomForestClassifier 0.896\nSVC 0.896\nVotingClassifier 0.912\n"
]
],
[
[
"As you can see, the voting classifier slightly outperforms each of the three individual classifiers, including the most accurate one(s). In addition, if individual classifiers can estimate class probabilities (i.e., if they have `predict_proba()` method), you can use _soft voting_ strategy, that is, estimate highest class probability averaged over individual classifiers. In comparison to the hard voting strategy, soft voting gives more weight to highly confident votes.\n\n`SVC` classifier doesn't estimate class probabilities by default, so you need to set the `probability` hyperparameter to `True`.",
"_____no_output_____"
]
],
[
[
"log_clf = LogisticRegression(random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(probability=True, random_state=42)\n\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='soft')\nvoting_clf.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"Not let's estimate the accuracy of the voting classifier in this mode:",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\n\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))",
"LogisticRegression 0.864\nRandomForestClassifier 0.896\nSVC 0.896\nVotingClassifier 0.92\n"
]
],
[
[
"## Bagging ensembles: bagging and pasting\n\nOne way to ensure that the classifiers are sufficiently diverse in their predicitons is to select different training algorithms. As we said before, often it is hard to ensure that the classifiers' errors are not highly correlated because the classifiers are ultimately trained on the same data.\n\nSo, another way to make sure you end up with diverse classifiers is to train them on different random subsets of the training set. There are two ways to do that:\n- sampling _with replacement_ is called [_bagging_](https://link.springer.com/article/10.1023/A:1018054314350) – short for _bootstrap aggregating_\n- sampling _without replacement_ is called [_pasting_](https://link.springer.com/article/10.1023/A:1007563306331)\n\nBoth bagging and pasting allow training instances to be sampled several times across multiple predictors, but only bagging allows training instances to be sampled several times for the same predictor.\n\nWhen applied to a new, test instance the ensemble classifier aggregates the predictions of all predictors and estimates the statistical mode (i.e., most frequent prediction).\n\n**Question**: Is this similar to the hard or the soft voting strategy?\n\n**Ans**: This is similar to the _hard_ voting strategy, as it just picks the most frequest predicition.\n\nLet's use `sklearn's BaggingClassifier` to train an ensemble of $500$ [`Decision Trees`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html), each trained on $100$ training instances randomly selected from the training set with replacement (for bagging, but you can change the strategy to pasting by setting `bootstrap` to `False`). In addition, both bagging and pasting strategies are good for parallelisation: predictors can all be trained in parallel on different CPU cores or even different servers. Setting `n_jobs` parameter to $-1$ tells `sklearn` to use all available CPU cores:",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import BaggingClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n#use bootstrap=False for pasting\nbag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n max_samples=100, bootstrap=True, n_jobs=-1, random_state=42)\n#n_jobs = use all of the available CPU cores\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)",
"_____no_output_____"
]
],
[
[
"**Question**: [`BaggingClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html) automatically performs soft voting when the base classifier can estimate class probabilities (i.e., has a `predict_proba()` method). Which strategy – hard or soft voting – does `BaggingClassifier` follow with the `DecisionTreeClassifier` as the base classifier?\n\n**Ans**: The `DecisionTreeClassifier` has a `predict_proba()` with default settings. Hence, the `BaggingClassifier` will use soft voting.\n\nLet's calculate the accuracy score for the bagging classifier that combines $500$ Decision Tree estimators with a prediction of a single Decision Tree trained on the same data:",
"_____no_output_____"
]
],
[
[
"print(accuracy_score(y_test, y_pred))",
"0.904\n"
],
[
"tree_clf = DecisionTreeClassifier(random_state=42)\ntree_clf.fit(X_train, y_train)\ny_pred_tree = tree_clf.predict(X_test)\nprint(accuracy_score(y_test, y_pred_tree))",
"0.856\n"
]
],
[
[
"To get more insight into the results, let's also plot the decision boundary for the two classifiers:",
"_____no_output_____"
]
],
[
[
"from matplotlib.colors import ListedColormap\n\ndef plot_decision_boundary(clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.5, contour=True):\n x1s = np.linspace(axes[0], axes[1], 100)\n x2s = np.linspace(axes[2], axes[3], 100)\n x1, x2 = np.meshgrid(x1s, x2s)\n X_new = np.c_[x1.ravel(), x2.ravel()]\n y_pred = clf.predict(X_new).reshape(x1.shape)\n custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])\n plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)\n if contour:\n custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])\n plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n plt.plot(X[:, 0][y==0], X[:, 1][y==0], \"yo\", alpha=alpha)\n plt.plot(X[:, 0][y==1], X[:, 1][y==1], \"bs\", alpha=alpha)\n plt.axis(axes)\n plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\n \nplt.figure(figsize=(11,4))\nplt.subplot(121)\nplot_decision_boundary(tree_clf, X, y)\nplt.title(\"Decision Tree\", fontsize=14)\nplt.subplot(122)\nplot_decision_boundary(bag_clf, X, y)\nplt.title(\"Decision Trees with Bagging\", fontsize=14)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Even though the ensemble makes a comparable number of errors on the training set as a single Decision Tree, its decision boundary is less irregular. This suggests that ensemble's predictions will likely generalise better than the single `Decision Tree`'s predictions\nwhen applied to the new data.",
"_____no_output_____"
],
[
"### Out-of-Bag evaluation\n\nWith bagging, some of the instances get sampled several times for any given predictor, while others may never be selected at all. By default, `BaggingClassifier` samples $m$ training instances with replacement, where $m$ is the size of the training set. As $m$ grows, the ratio of the training instances that are sampled on average for each predictor approaches $1 - exp(-1)$, i.e. around $63\\%$. The remaining $37\\%$ of the training instances that are never used by the predictors are called *out-of-bag* (*oob*) instances **(!)**.\n\nMathematical proof of the above: With $m$ training instances in the set, the probability of not picking an instance in a random draw is $\\frac{m-1}{m}$. For sampling with replacement, the probability of not picking an instance in $m$ random draws is $(\\frac{m-1}{m})^m$. In the limit of a large number of $m$, this becomes:\n\n\\begin{equation}\n\\lim_{m\\to\\infty} (1 - \\frac{1}{m})^m = e^{-1} = 0.368 \\approx 37\\%\n\\end{equation}\n\nSince a predictor never sees these instances during training, they can be used for evaluation without the need for an additional validation set or cross-validation experiments. You can evaluate the ensemble itself by averaging each predictor's performance on the oob instances.\n\nThe code below shows you how to do that: if you set `oob_score` to `True`, the bagging classifier will be evaluated on the oob instances after training, and you can output the score as `oob_score_`.",
"_____no_output_____"
]
],
[
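[
"# A quick numerical check of the ~63% claim above (an added illustration, not part of the original practical):\n# draw m indices with replacement and measure the fraction of distinct instances that get sampled.\nm = 10000\nsampled = np.random.randint(0, m, m)\nprint(len(np.unique(sampled)) / m, 'vs', 1 - np.exp(-1))",
"_____no_output_____"
],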
[
"bag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n bootstrap=True, n_jobs=-1, oob_score=True, random_state=40)\nbag_clf.fit(X_train, y_train)\nbag_clf.oob_score_",
"_____no_output_____"
]
],
[
[
"This means that the bagging classifier is likely to achieve around $90\\%$ accuracy on the test data, too. Let's check the results:",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import accuracy_score\ny_pred = bag_clf.predict(X_test)\naccuracy_score(y_test, y_pred)",
"_____no_output_____"
]
],
[
[
"Close enough! You can also take a look into the decision process by printing out the predictions of the classifier on the training instances. Since the base estimator has a `predict_proba()` method, what you see here are the class probabilities assigned by the classifier to negative and positive classes for each instance:",
"_____no_output_____"
]
],
[
[
"bag_clf.oob_decision_function_",
"_____no_output_____"
]
],
[
[
"## Random Forests\n\n[`RandomForestClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) is an ensemble of Decision Trees typically trained via the bagging method. The number of training instances (`max_samples`) is usually set to the total size of the training set. So, in fact, `RandomForestClassifier` will be roughly equivalent to the `BaggingClassifier` that takes Decision Trees as base estimators with the following parameters:",
"_____no_output_____"
]
],
[
[
"bag_clf = BaggingClassifier(\n DecisionTreeClassifier(splitter=\"random\", max_leaf_nodes=16, random_state=42),\n n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1, random_state=42)\n\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)",
"_____no_output_____"
]
],
[
[
"The key difference is that, rather than using `BaggingClassifier` and passing it a `DecisionTreeClassifier`, you can rely on the `sklearn's RandomForestClassifier` implementation, which is more convenient and optimised for Decision Trees:",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\n\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, \n n_jobs=-1, random_state=42)\nrnd_clf.fit(X_train, y_train)\n\ny_pred_rf = rnd_clf.predict(X_test)",
"_____no_output_____"
]
],
[
[
"Let's estimate the difference between the two classifiers' predictions:",
"_____no_output_____"
]
],
[
[
"np.sum(y_pred == y_pred_rf) / len(y_pred) # see to what extent predictions are identical",
"_____no_output_____"
]
],
[
[
"I.e., the two classifiers are almost identical in their predictions. The Random Forest algorithm introduces extra randomness when growing trees: instead of searching for the very best feature when splitting a node, it searches for the best feature among a random subset of features. As a result, the model is more diverse and in general yields better results.\n\nLet's visualise the decision boundary for a random set of $15$ Decision Trees to get a better idea of where the diversity comes from:",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(6, 4))\n\nfor i in range(15):\n tree_clf = DecisionTreeClassifier(max_leaf_nodes=16, random_state=42 + i)\n indices_with_replacement = np.random.randint(0, len(X_train), len(X_train))\n tree_clf.fit(X[indices_with_replacement], y[indices_with_replacement])\n plot_decision_boundary(tree_clf, X, y, axes=[-1.5, 2.5, -1, 1.5], \n alpha=0.02, contour=False)\n\nplt.show()",
"_____no_output_____"
]
],
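[
[
"The size of the random feature subset considered at each split is controlled by the `max_features` hyperparameter. Below is a minimal added sketch; the setting shown is only an illustration on the moons split used above, not a tuned choice.",
"_____no_output_____"
]
],
[
[
"# Added sketch: restricting the number of features searched at each split.\n# Assumes X_train, y_train, X_test, y_test are the moons split used above.\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\nrnd_clf_subset = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16,\n                                        max_features=\"sqrt\",  # random subset of ~sqrt(n_features) features per split\n                                        n_jobs=-1, random_state=42)\nrnd_clf_subset.fit(X_train, y_train)\naccuracy_score(y_test, rnd_clf_subset.predict(X_test))",
"_____no_output_____"
]
],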
[
[
"**Extra material**: One further way to introduce randomness into the training process is to not only select a random subset of features when splitting a node, but also use a random threshold for each feature rather than searching for the best possible thresholds as traditional Decision Trees do. Since estimating best possible threshold for each feature at every node is also one of the most time-consuming tasks, this modification to the algorithm makes it more efficient. The algorithm that exploits this idea is called *Extremely Randomised Trees ensemble*, or *Extra-Trees*, and you can use it via [`sklearn's ExtraTreeClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.ExtraTreeClassifier.html) implementation.",
"_____no_output_____"
],
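[
"A minimal added sketch of the ensemble counterpart: `ExtraTreesClassifier` from `sklearn.ensemble` bags Extra-Trees and exposes the same hyperparameters as `RandomForestClassifier` (the settings below simply mirror the forest trained earlier).\n\n```python\nfrom sklearn.ensemble import ExtraTreesClassifier\n\next_clf = ExtraTreesClassifier(n_estimators=500, max_leaf_nodes=16,\n                               n_jobs=-1, random_state=42)\next_clf.fit(X_train, y_train)\ny_pred_ext = ext_clf.predict(X_test)\n```",
"_____no_output_____"
],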
[
"### Feature importance\n\nAnother useful property of the Random Forests classifier is that it can help you measure the relative importance of each feature by looking at how much the tree nodes that use this particular feature reduce impurity of the nodes on average (i.e., across all trees in the forest). The results are scaled so that the sum of all feature importances is equal to $1$.\n\nBecause features used in the *iris* and *digits* datasets are highly interpretable, let's estimate feature importances on these two datasets: ",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_iris\niris = load_iris()\nrnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=42)\nrnd_clf.fit(iris[\"data\"], iris[\"target\"])\nfor name, score in zip(iris[\"feature_names\"], rnd_clf.feature_importances_):\n print(name, score)",
"sepal length (cm) 0.11249225099876375\nsepal width (cm) 0.02311928828251033\npetal length (cm) 0.4410304643639577\npetal width (cm) 0.4233579963547682\n"
],
[
"rnd_clf.feature_importances_",
"_____no_output_____"
]
],
[
[
"**Question**: What do these results suggest? Are they similar to your observations from the previous experiments on this dataset?\n\n**Ans**: These results suggest that petal length and width are more important to distinguishing between different types of irises than sepal proportions. This is backed up by results from the second practical which showed that petal length and width are easily linearly separable but versicolor and virginica sepal dimensions are pretty mixed-together.\n\n*Digits* dataset provides further interpretability of the results if we plot them. What does the figure below suggest?",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\ndigits = datasets.load_digits()\n\nrnd_clf = RandomForestClassifier(random_state=42)\nrnd_clf.fit(digits[\"data\"], digits[\"target\"])",
"_____no_output_____"
],
[
"def plot_digit(data):\n image = data.reshape(8, 8)\n plt.imshow(image, cmap = matplotlib.cm.hot,\n interpolation=\"nearest\")\n plt.axis(\"off\")\n \nplot_digit(rnd_clf.feature_importances_)\n\ncbar = plt.colorbar(ticks=[rnd_clf.feature_importances_.min(), rnd_clf.feature_importances_.max()])\ncbar.ax.set_yticklabels(['Not important', 'Very important'])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## AdaBoost\n\n*Boosting*, or *hypothesis boosting* is an ensemble method that can combine several weak learners into a strong learner. The general idea is to train predictors sequentially in such a way that each next predictor tries to correct its predecessor. The most popular among boosting methods are *AdaBoost* (which stands for *Adaptive Boosting*) and *Gradient Boosting*. Let's apply *AdaBoost* first.\n\nThe idea behind *AdaBoost* is as follows: in order to correct the predecessor's mistakes, the algorithm assigns higher weights to the training instances that the predecessor underfitted. I.e., the next step pays more attention to such instances and the algorithm focuses more on the hard cases.\n\nHere is the step-by-step strategy:\n- start with the first classifier (i.e., this can be a Decision Trees classifier)\n- train it and use it to make predictions on the training set\n- increase the relative weight of misclassified training instances\n- train a second classifier with these new updated weights and make new predictions\n- update weights using the new predictions\n- continue until stopping criteria are satisfied.\n\nFor instance, suppose each instance's original weight $w^{(i)}$ is set to $\\frac{1}{m}$, where $m$ is the number of instances. The first classifier is applied, and its error rate $r_1$ is computed on the training set using the equation below:\n\n\\begin{equation}\nr_j = \\frac{\\sum_{\\hat{y}^{(i)}_j \\neq y^{(i)}} w^{(i)}}{\\sum_{i=1}^m w^{(i)}}\n\\end{equation}\n\nwhere $\\hat{y}^{(i)}_j$ is the prediction of the $j$-th classifier on the $i$-th instance. The predictor's weight $\\alpha_j$ is then estimated using:\n\n\\begin{equation}\n\\alpha_j = \\eta log \\frac{1-r_j}{r_j}\n\\end{equation}\n\nwhere $\\eta$ is the learning rate, a hyperparameter that defaults to $1$. The more accurate the predictor is, the higher its weight will be; if a predictor is guessing randomly, its weight will be close to $0$; and if it performs worse than random guessing it will get a high negative weight.\n\nNext, the weights are updated and the misclassified instances are boosted as follows:\n\n\\begin{equation}\n w^{(i)}=\\begin{cases}\n w^{(i)}, & \\text{if $\\hat y_j^{(i)} = y_j^{(i)}$}\\\\\n w^{(i)} exp(\\alpha_j), & \\text{if $\\hat y_j^{(i)} \\neq y_j^{(i)}$}\n \\end{cases}\n\\end{equation}\n\nThen all the instances weights are normalised (i.e., divided by $\\sum_{i=1}^m w^{(i)}$). The new predictor is trained on the updated training instances, applied to the training set, its weight is computed, weights are updated again, and so on. The algorithm stops when either the predefined number of predictors is reached or a perfect predictor is found.\n\n(This process should remind you about other sequential learning techniques we've discussed, e.g. Gradient Descent. The difference is, instead of tweaking a single predictor's parameters to minimise a cost function, AdaBoost adds predictors to the ensemble gradually making the prediction better.)\n\n**Question**: We said earlier than one of the advantages of bagging and pasting strategies is that predictors can all be trained in parallel. Is the same applicable to AdaBoost?\n\n**Ans**: No, the predictors need to be trained sequentially.\n\nAt prediction time, AdaBoost computes the predictions of all the predictors and weighs them according to $\\alpha_j$. 
The predicted class is then the one that receives the majority of weighted votes, i.e.:\n\n\\begin{equation}\n \\hat y(x) = argmax_{k} \\sum_{j=1; \\hat y_j(x)=k} ^{N} \\alpha_j\n\\end{equation}\n\nwith $N$ being the number of predictors.\n\n`sklearn's` implementation of AdaBoost uses *Stagewise Additive Modeling using a Multiclass Exponential loss function* (or *SAMME* for short) multiclass version of the algorithm, which in a binary case simply defaults to AdaBoost. In addition, if the base estimators can estimate class probabilities (have `predict_proba()` method), `sklearn's` algorithm relies on class probabilities rather than predictions, which generally performs better. This version is called *SAMME.R*, where *R* stands for \"Real\".\n\nLet's train [`sklearn's AdaBoostClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html), for example using $200$ *Decision Stumps* (trees composed of a single decision node and two leaf nodes, `max_depth=1`):",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import AdaBoostClassifier\n\nada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=200,\n algorithm=\"SAMME.R\", learning_rate=0.5, random_state=42)\nada_clf.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"And let's plot the decision boundary of the algorithm:",
"_____no_output_____"
]
],
[
[
"plot_decision_boundary(ada_clf, X, y)",
"_____no_output_____"
]
],
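[
[
"To make the equations above concrete, here is a single boosting round written out by hand (an added sketch; it reuses the moons training split and a depth-1 stump and is not part of the original practical):",
"_____no_output_____"
]
],
[
[
"# Added sketch: one AdaBoost round by hand, mirroring the r_j / alpha_j / weight-update\n# equations above. Assumes X_train, y_train are the moons training split used earlier.\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\n\nm = len(X_train)\nw = np.ones(m) / m                    # initial instance weights w_i = 1/m\n\nstump = DecisionTreeClassifier(max_depth=1, random_state=42)\nstump.fit(X_train, y_train, sample_weight=w)\ny_pred = stump.predict(X_train)\n\nmisclassified = (y_pred != y_train)\nr = w[misclassified].sum() / w.sum()  # weighted error rate r_j\neta = 1.0                             # learning rate\nalpha = eta * np.log((1 - r) / r)     # predictor weight alpha_j\n\nw[misclassified] *= np.exp(alpha)     # boost the misclassified instances\nw /= w.sum()                          # normalise the weights\n\nprint(r, alpha)",
"_____no_output_____"
]
],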
[
[
"To get a bit more insight into how the decision boundaries change from one step to another and with a different learning rate, let's plot decision boundaries for $5$ consecutive predictors, in this case using `SVC` as the estimator. Note how the first classifiers usually get many instances wrong, while the following predictors are gradually getting better. The plot on the right presents the very same $5$ consecutive classifiers, but assigns half the learning rate (i.e., the misclassified instances weights are only boosted half as much at every iteration):",
"_____no_output_____"
]
],
[
[
"m = len(X_train)\n\nplt.figure(figsize=(11, 4))\nfor subplot, learning_rate in ((121, 1), (122, 0.5)):\n sample_weights = np.ones(m)\n plt.subplot(subplot)\n for i in range(5):\n svm_clf = SVC(kernel=\"rbf\", C=0.05, gamma=\"auto\", random_state=42)\n svm_clf.fit(X_train, y_train, sample_weight=sample_weights)\n y_pred = svm_clf.predict(X_train)\n sample_weights[y_pred != y_train] *= (1 + learning_rate)\n plot_decision_boundary(svm_clf, X, y, alpha=0.2)\n plt.title(\"learning_rate = {}\".format(learning_rate), fontsize=16)\n if subplot == 121:\n plt.text(1.70, -0.90, \"1\", fontsize=14)\n plt.text(-0.40, -0.35, \"2\", fontsize=14)\n plt.text(-0.55, -0.05, \"3\", fontsize=14)\n plt.text(-0.70, 0.20, \"4\", fontsize=14)\n plt.text(-0.85, 0.45, \"5\", fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"In addition, here is the full list of parameters and attributes of the algorithm:",
"_____no_output_____"
]
],
[
[
"list(m for m in dir(ada_clf) if not m.startswith(\"_\") and m.endswith(\"_\"))",
"_____no_output_____"
]
],
[
[
"E.g., you can check classification errors for each estimator in the boosted ensemble as follows:",
"_____no_output_____"
]
],
[
[
"ada_clf.estimator_errors_",
"_____no_output_____"
]
],
[
[
"## Gradient Boosting\n\nGradient Boosting is another popular boosting algorithm. It also works sequentially adding new predictors to the ensemble, each one correcting the errors from its predecessor. Unlike AdaBoost, it's trying to fit each new predictor to the *residual errors* made by the previous predictor. \n\nLet's this time apply the boosting algorithm to a regression task. First, generate a synthetic noisy quadratic training set:",
"_____no_output_____"
]
],
[
[
"np.random.seed(42)\nX = np.random.rand(100, 1) - 0.5\ny = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)",
"_____no_output_____"
]
],
[
[
"Next, let's fit a single [`DecisionTreeRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html):",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeRegressor\n\ntree_reg1 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg1.fit(X, y)",
"_____no_output_____"
]
],
[
[
"Now train a second `DecisionTreeRegressor` on the residual errors made by the first predictor:",
"_____no_output_____"
]
],
[
[
"y2 = y - tree_reg1.predict(X)\ntree_reg2 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg2.fit(X, y2)",
"_____no_output_____"
]
],
[
[
"Finally, let's train a third regressor on the residual errors made by the second predictor:",
"_____no_output_____"
]
],
[
[
"y3 = y2 - tree_reg2.predict(X)\ntree_reg3 = DecisionTreeRegressor(max_depth=2, random_state=42)\ntree_reg3.fit(X, y3)",
"_____no_output_____"
]
],
[
[
"That's it – you've just built an ensemble containing three trees. It can now be applied to new instances by making predictions based on **adding up the predictions** from all three trees:",
"_____no_output_____"
]
],
[
[
"X_new = np.array([[0.8]])\n\ny_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))\n\ny_pred",
"_____no_output_____"
]
],
[
[
"Let's visualise the results by plotting the predictions from each tree on the left and the ensemble on the right. E.g., in the first row the ensemble contains only one tree, so its predictions are exactly the same as the first tree's predictions. In the second row, the second tree is trained on the residual errors from the first tree, and the ensemble's prediction is based on the sum of the predictions of the two trees. You can see how the predictions of the ensemble are gradually getting better:",
"_____no_output_____"
]
],
[
[
"def plot_predictions(regressors, X, y, axes, label=None, style=\"r-\", data_style=\"b.\", data_label=None):\n x1 = np.linspace(axes[0], axes[1], 500)\n y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)\n plt.plot(X[:, 0], y, data_style, label=data_label)\n plt.plot(x1, y_pred, style, linewidth=2, label=label)\n if label or data_label:\n plt.legend(loc=\"upper center\", fontsize=16)\n plt.axis(axes)\n\nplt.figure(figsize=(11,11))\n\nplt.subplot(321)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], \n label=\"$h_1(x_1)$\", style=\"g-\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Residuals and tree predictions\", fontsize=16)\n\nplt.subplot(322)\nplot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], \n label=\"$h(x_1) = h_1(x_1)$\", data_label=\"Training set\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\nplt.title(\"Ensemble predictions\", fontsize=16)\n\nplt.subplot(323)\nplot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], \n label=\"$h_2(x_1)$\", style=\"g-\", data_style=\"k+\", data_label=\"Residuals\")\nplt.ylabel(\"$y - h_1(x_1)$\", fontsize=16)\n\nplt.subplot(324)\nplot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], \n label=\"$h(x_1) = h_1(x_1) + h_2(x_1)$\")\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\n\nplt.subplot(325)\nplot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], \n label=\"$h_3(x_1)$\", style=\"g-\", data_style=\"k+\")\nplt.ylabel(\"$y - h_1(x_1) - h_2(x_1)$\", fontsize=16)\nplt.xlabel(\"$x_1$\", fontsize=16)\n\nplt.subplot(326)\nplot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], \n label=\"$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$\")\nplt.xlabel(\"$x_1$\", fontsize=16)\nplt.ylabel(\"$y$\", fontsize=16, rotation=0)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"A simpler way to train ensembles of *Gradient Boosted Regression Trees* is to use [`sklearn's GradientBoostingRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) implementation. In addition to the typical hyperparameters that control the growth of the trees, it also includes hyperparameters that control the ensemble training, e.g. `n_estimators`:",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import GradientBoostingRegressor\n\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0, random_state=42)\ngbrt.fit(X, y)",
"_____no_output_____"
]
],
[
[
"**Question**: Another hyperparameter is `learning_rate`. What is it responsible for and what effect on the resulting ensemble does it have? \n\n**Ans**: *learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.*\n\nFeel free to check `sklearn's` documentation, as well as look into the comparison of two ensembles: the one with a lower number of estimators and a higher learning rate, and a 'slow learner' with a lower learning rate but a higher number of estimators:",
"_____no_output_____"
]
],
[
[
"gbrt_slow = GradientBoostingRegressor(max_depth=2, n_estimators=200, learning_rate=0.1, random_state=42)\ngbrt_slow.fit(X, y)",
"_____no_output_____"
],
[
"plt.figure(figsize=(11,4))\n\nplt.subplot(121)\nplot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label=\"Ensemble predictions\")\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt.learning_rate, gbrt.n_estimators), fontsize=14)\n\nplt.subplot(122)\nplot_predictions([gbrt_slow], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"learning_rate={}, n_estimators={}\".format(gbrt_slow.learning_rate, gbrt_slow.n_estimators), fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
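[
[
"The role of the learning rate can also be seen in a bare-bones boosting loop (an added sketch; it reuses the noisy quadratic `X`, `y` generated above): each new tree is fit to the current residuals, and its contribution is shrunk by the learning rate before being added to the running prediction.",
"_____no_output_____"
]
],
[
[
"# Added sketch: a minimal gradient-boosting loop that makes the shrinkage explicit.\n# Assumes X, y are the noisy quadratic training set generated above.\nimport numpy as np\nfrom sklearn.tree import DecisionTreeRegressor\n\ndef simple_gbrt_predict(X, y, n_estimators, learning_rate, X_new):\n    y_pred_train = np.zeros(len(y))    # running prediction on the training set\n    y_pred_new = np.zeros(len(X_new))  # running prediction on the query points\n    for _ in range(n_estimators):\n        residuals = y - y_pred_train\n        tree = DecisionTreeRegressor(max_depth=2, random_state=42)\n        tree.fit(X, residuals)\n        y_pred_train += learning_rate * tree.predict(X)\n        y_pred_new += learning_rate * tree.predict(X_new)\n    return y_pred_new\n\nX_new = np.array([[0.8]])\nprint(simple_gbrt_predict(X, y, 3, 1.0, X_new))    # identical to the manual three-tree ensemble above\nprint(simple_gbrt_predict(X, y, 200, 0.1, X_new))  # slower learner with many more trees",
"_____no_output_____"
]
],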
[
[
"**Question**: What generalisation behaviour can you expect from each of these ensembles?\n\n**Ans**: since the slow learner is more wriggly, I would bet that the higher learning rate estimator would fare better. 200 estimators is enough to fit to nearly all points exactly. It heavily depends on the kind of data we're working with though, i.e. the intrinsic properties of the data\n\n\n### Gradient Boosting with Early Stopping\n\nThe examples above demonstrate ensembles with a different number of estimators. Which one is better? How can you quantitatively estimate the optimal number of predictors?\n\nOne technique that you can use is called *early stopping* – it allows your algorithm to stop as soon as the validation error reaches a minimum. An easy way to implement this with `sklearn` is to use `staged_predict()` method as the code below demonstrates:",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nX_train, X_val, y_train, y_val = train_test_split(X, y, random_state=100)\n\ngbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120, random_state=42)\ngbrt.fit(X_train, y_train)\n\nerrors = [mean_squared_error(y_val, y_pred)\n for y_pred in gbrt.staged_predict(X_val)]\nbst_n_estimators = np.argmin(errors)\n\ngbrt_best = GradientBoostingRegressor(max_depth=2,n_estimators=bst_n_estimators, random_state=42)\ngbrt_best.fit(X_train, y_train)",
"_____no_output_____"
]
],
[
[
"Let's visualise the validaton errors and the optimal number of estimators:",
"_____no_output_____"
]
],
[
[
"min_error = np.min(errors)\n\nplt.figure(figsize=(11, 4))\n\nplt.subplot(121)\nplt.plot(errors, \"b.-\")\nplt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], \"k--\")\nplt.plot([0, 120], [min_error, min_error], \"k--\")\nplt.plot(bst_n_estimators, min_error, \"ko\")\nplt.text(bst_n_estimators, min_error*1.2, \"Minimum\", ha=\"center\", fontsize=14)\nplt.axis([0, 120, 0, 0.01])\nplt.xlabel(\"Number of trees\")\nplt.title(\"Validation error\", fontsize=14)\n\nplt.subplot(122)\nplot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])\nplt.title(\"Best model (%d trees)\" % bst_n_estimators, fontsize=14)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Finally, an alternative to training a large number of estimators and then looking back in order to find the optimal number of those is to allow the algorithm to stop training when the validation error does not improve over a number of consecutive iterations (e.g., $5$ in the code below). For that to work, set the `warm_start` to `True`.\n\n`warm_start`: _When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution._",
"_____no_output_____"
]
],
[
[
"gbrt = GradientBoostingRegressor(max_depth=2, warm_start=True, random_state=42)\n\nmin_val_error = float(\"inf\")\nerror_going_up = 0\nfor n_estimators in range(1, 120):\n gbrt.n_estimators = n_estimators\n gbrt.fit(X_train, y_train)\n y_pred = gbrt.predict(X_val)\n val_error = mean_squared_error(y_val, y_pred)\n if val_error < min_val_error:\n min_val_error = val_error\n error_going_up = 0\n else:\n error_going_up += 1\n if error_going_up == 5:\n break # early stopping\n\nprint(gbrt.n_estimators)",
"74\n"
]
],
[
[
"# Your tasks\n\n1. Run the code in the notebook. During the practical session, be prepared to discuss the methods and answer the questions from this notebook:\n\n * When applied to a new, test instance bagging classifier aggregates the predictions of all predictors and estimates the statistical mode. Is this similar to the hard or the soft voting strategy?\n * **Ans**: This is similar to the _hard_ voting strategy, as it just picks the most frequest predicition.\n * [`BaggingClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html) automatically performs soft voting when the base classifier can estimate class probabilities (i.e., has a `predict_proba()` method). Which strategy – hard or soft voting – does `BaggingClassifier` follow with the `DecisionTreeClassifier` as the base classifier?\n * **Ans**: The `DecisionTreeClassifier` has a `predict_proba()` with default settings. Hence, the `BaggingClassifier` will use soft voting.\n * What do the feature importance weights obtained on the *iris* dataset suggest? Are they similar to your observations from the previous experiments on this dataset?\n * **Ans**: These results suggest that petal length and width are more important to distinguishing between different types of irises than sepal proportions. This is backed up by results from the second practical which showed that petal length and width are easily linearly separable but versicolor and virginica sepal dimensions are pretty mixed-together.\n * We said earlier than one of the advantages of bagging and pasting strategies is that predictors can all be trained in parallel. Is the same applicable to AdaBoost?\n * **Ans**: No, the predictors need to be trained sequentially.\n * One of the hyperparameters of the boosting algorithms is learning_rate. What is it responsible for and what effect on the resulting ensemble does it have?\n * **Ans**: *learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.*\n * What generalisation behaviour can you expect from each of the tree regressor ensembles (using different learning rates and different number of estimators – see above)?\n * **Ans**: since the slow learner is more wriggly, I would bet that the higher learning rate estimator would fare better. 200 estimators is enough to fit to nearly all points exactly. It heavily depends on the kind of data we're working with though, i.e. the intrinsic properties of the data",
"_____no_output_____"
],
[
"***",
"_____no_output_____"
],
[
"2. Apply ensemble techniques of your choice to one of the datasets you've worked on during the previous practicals and report your findings.",
"_____no_output_____"
],
[
"### Digits - Ensemble Techniques",
"_____no_output_____"
]
],
[
[
"# Common imports\nimport numpy as np\nimport os\nimport scipy, math\nimport scipy.special\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import ExtraTreeClassifier\n\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingRegressor\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12",
"_____no_output_____"
],
[
"from sklearn import datasets\ndigits = datasets.load_digits()\nX, y = digits[\"data\"], digits[\"target\"]",
"_____no_output_____"
],
[
"split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nsplit.get_n_splits(X, y)\nprint(split) \n\nfor train_index, test_index in split.split(X, y):\n print(\"TRAIN:\", len(train_index), \"TEST:\", len(test_index))\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\nprint(X_train.shape, y_train.shape, X_test.shape, y_test.shape)",
"StratifiedShuffleSplit(n_splits=1, random_state=42, test_size=0.2,\n train_size=None)\nTRAIN: 1437 TEST: 360\n(1437, 64) (1437,) (360, 64) (360,)\n"
],
[
"def digit_importance(model):\n def plot_digit(data):\n image = data.reshape(8, 8)\n plt.imshow(image, cmap = matplotlib.cm.hot,\n interpolation=\"nearest\")\n plt.axis(\"off\")\n\n try:\n feat_imp = model.feature_importances_\n except:\n feat_imp = np.mean([\n tree.feature_importances_ for tree in model.estimators_\n ], axis=0)\n plot_digit(feat_imp)\n\n cbar = plt.colorbar(ticks=[feat_imp.min(), feat_imp.max()])\n cbar.ax.set_yticklabels(['Not important', 'Very important'])\n\n plt.show()",
"_____no_output_____"
]
],
[
[
"### Evaluation",
"_____no_output_____"
],
[
"#### Voting Classifiers",
"_____no_output_____"
]
],
[
[
"log_clf = LogisticRegression(max_iter=10000, random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(random_state=42)\n\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='hard')\nvoting_clf.fit(X_train, y_train)\n\nprint(\"== Hard Voting ==\")\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))",
"== Hard Voting ==\nLogisticRegression 0.9611111111111111\nRandomForestClassifier 0.9611111111111111\nSVC 0.9916666666666667\nVotingClassifier 0.9861111111111112\n"
],
[
"log_clf = LogisticRegression(max_iter=10000, random_state=42)\nrnd_clf = RandomForestClassifier(random_state=42)\nsvm_clf = SVC(probability=True, random_state=42)\n\nvoting_clf = VotingClassifier(\n estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],\n voting='soft')\nvoting_clf.fit(X_train, y_train)\n\nprint(\"== Soft Voting ==\")\nfor clf in (log_clf, rnd_clf, svm_clf, voting_clf):\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(clf.__class__.__name__, accuracy_score(y_test, y_pred))",
"== Soft Voting ==\nLogisticRegression 0.9611111111111111\nRandomForestClassifier 0.9611111111111111\nSVC 0.9916666666666667\nVotingClassifier 0.9833333333333333\n"
]
],
[
[
"#### Bagging Ensembles",
"_____no_output_____"
]
],
[
[
"bag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n max_samples=100, bootstrap=True, n_jobs=-1, oob_score=True, random_state=42)\n\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nprint(f\"500 DecisionTrees Bagging out-of-bag eval: {bag_clf.oob_score_}\")\nprint(f\"500 DecisionTrees Bagging eval on test set: {accuracy_score(y_test, y_pred)}\")\n\ndigit_importance(bag_clf)",
"500 DecisionTrees Bagging out-of-bag eval: 0.9345859429366736\n500 DecisionTrees Bagging eval on test set: 0.9222222222222223\n"
],
[
"bag_clf = BaggingClassifier(\n ExtraTreeClassifier(random_state=42), n_estimators=200,\n bootstrap=True, n_jobs=-1, oob_score=True, random_state=42)\n\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nprint(f\"200 ExtraTrees Bagging out-of-bag eval: {bag_clf.oob_score_}\")\nprint(f\"200 ExtraTrees Bagging eval on test set: {accuracy_score(y_test, y_pred)}\")\n\ndigit_importance(bag_clf)",
"200 ExtraTrees Bagging out-of-bag eval: 0.9784272790535838\n200 ExtraTrees Bagging eval on test set: 0.9805555555555555\n"
],
[
"bag_clf = BaggingClassifier(\n DecisionTreeClassifier(random_state=42), n_estimators=500,\n max_samples=100, bootstrap=False, n_jobs=-1, random_state=42)\n\nbag_clf.fit(X_train, y_train)\ny_pred = bag_clf.predict(X_test)\nprint(f\"500 DecisionTrees Pasting eval on test set: {accuracy_score(y_test, y_pred)}\")\n\ndigit_importance(bag_clf)",
"500 DecisionTrees Pasting eval on test set: 0.9166666666666666\n"
],
[
"from sklearn.ensemble import RandomForestClassifier\n\nrnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, \n n_jobs=-1, random_state=42)\nrnd_clf.fit(X_train, y_train)\ny_pred = rnd_clf.predict(X_test)\nprint(f\"RandomForest (n_est = 500) eval on test set: {accuracy_score(y_test, y_pred)}\")\n\ndigit_importance(rnd_clf)",
"RandomForest (n_est = 500) eval on test set: 0.9083333333333333\n"
]
],
[
[
"#### AdaBoost",
"_____no_output_____"
]
],
[
[
"ada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=200,\n algorithm=\"SAMME.R\", learning_rate=0.5, random_state=42)\nada_clf.fit(X_train, y_train)\n\ny_pred = ada_clf.predict(X_test)\nprint(f\"AdaBoostClassifier (200 decision stumps; class prob) eval on test set: {accuracy_score(y_test, y_pred)}\")\n\ndigit_importance(ada_clf)",
"AdaBoostClassifier (200 decision stumps; class prob) eval on test set: 0.8416666666666667\n"
],
[
"ada_clf = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=1), n_estimators=200,\n algorithm=\"SAMME\", learning_rate=0.5, random_state=42)\nada_clf.fit(X_train, y_train)\n\ny_pred = ada_clf.predict(X_test)\nprint(f\"AdaBoostClassifier (200 decision stumps; predictions) eval on test set: {accuracy_score(y_test, y_pred)}\")\n\ndigit_importance(ada_clf)",
"AdaBoostClassifier (200 decision stumps; predictions) eval on test set: 0.8333333333333334\n"
]
],
[
[
"#### Gradient Boosting",
"_____no_output_____"
]
],
[
[
"gbrt = GradientBoostingRegressor(max_depth=16, warm_start=True, n_estimators=50, learning_rate=1.0, random_state=42)\n\nmin_val_error = float(\"inf\")\nerror_going_up = 0\nfor n_estimators in range(1, 120):\n gbrt.n_estimators = n_estimators\n gbrt.fit(X_train, y_train)\n y_pred = gbrt.predict(X_test)\n val_error = mean_squared_error(y_test, y_pred)\n if val_error < min_val_error:\n min_val_error = val_error\n error_going_up = 0\n else:\n error_going_up += 1\n if error_going_up == 5:\n break # early stopping\n\ny_pred = np.round(gbrt.predict(X_test), 0)\nprint(f\"Gradient Boosting (max_d=16, n_est={gbrt.n_estimators}, lr=1.0) eval on test set: {accuracy_score(y_test, y_pred)}\")",
"Gradient Boosting (max_d=16, n_est=7, lr=1.0) eval on test set: 0.7861111111111111\n"
],
[
"gbrt = GradientBoostingRegressor(max_depth=8, warm_start=True, n_estimators=100, learning_rate=0.1, random_state=42)\n\nmin_val_error = float(\"inf\")\nerror_going_up = 0\nfor n_estimators in range(1, 120):\n gbrt.n_estimators = n_estimators\n gbrt.fit(X_train, y_train)\n y_pred = gbrt.predict(X_test)\n val_error = mean_squared_error(y_test, y_pred)\n if val_error < min_val_error:\n min_val_error = val_error\n error_going_up = 0\n else:\n error_going_up += 1\n if error_going_up == 5:\n break # early stopping\n\ny_pred = np.round(gbrt.predict(X_test), 0)\nprint(f\"Gradient Boosting (max_d=8, n_est={gbrt.n_estimators}, lr=0.1) eval on test set: {accuracy_score(y_test, y_pred)}\")",
"Gradient Boosting (max_d=8, n_est=119, lr=0.1) eval on test set: 0.6611111111111111\n"
]
],
[
[
"#### Conclusions\nSVC perfoms best on the digits dataset with an accuracy of `0.992`.\n\nUsing a bagging classifier with bagging (ie bootstrap) with 200 `ExtraTreeClassifier` performs respectibly with an accuracy of `0.981`.",
"_____no_output_____"
],
[
"***",
"_____no_output_____"
],
[
"3. **Optional**: If you want more practice with these techniques, try implementing a [*stacking algorithm*](https://en.wikipedia.org/wiki/Ensemble_learning#Stacking). There is no available implementation of this approach in `sklearn` so it needs to be implemented from scratch. The idea is as follows:\n * Split a dataset of your choice into three subsets – training, validation and test.\n * Train a number of predictors on the training data and apply them to the validation data.\n * Treat the predictions of those predictors on the validation data to generate new training set: you can use the predictions of the estimators as new features (i.e., each training instance will have as many features as the number of predictors you originally used), and the validation data targets as the new training instances targets. \n * Now train a *blender* – a classifier of your choice – on the new training data created this way. Together with the original classifiers, the blender forms a stacking ensemble.\n * Finally, apply your original classifiers to the test set, feed their predictions to the blender, and use blender's output as the prediction on the test set.",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\ndigits = datasets.load_digits()",
"_____no_output_____"
],
[
"def stratified_test_val_train_split(X, y, seed=None):\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=seed) \n\n for train_index, test_index in split.split(X, y):\n print(\"TEST:\", len(test_index))\n X_rem, X_test = X[train_index], X[test_index]\n y_rem, y_test = y[train_index], y[test_index]\n \n split = StratifiedShuffleSplit(n_splits=1, test_size=0.25, random_state=seed) \n for train_index, test_index in split.split(X_rem, y_rem):\n print(\"VAL:\", len(test_index))\n print(\"TRAIN:\", len(train_index))\n X_train, X_val = X[train_index], X[test_index]\n y_train, y_val = y[train_index], y[test_index]\n \n return X_train, y_train, X_val, y_val, X_test, y_test\n\nX_train, y_train, X_val, y_val, X_test, y_test = stratified_test_val_train_split(digits[\"data\"], digits[\"target\"], 42)",
"TEST: 360\nVAL: 360\nTRAIN: 1077\n"
],
[
"from sklearn.naive_bayes import GaussianNB, MultinomialNB\nfrom sklearn.linear_model import SGDClassifier, LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.multiclass import OneVsOneClassifier\n\ndef get_many_predictors(X, y, seed=None):\n np.random.seed(seed)\n ovo_log = OneVsOneClassifier(LogisticRegression(max_iter=10000, random_state=seed))\n ovo_rnd = OneVsOneClassifier(RandomForestClassifier(random_state=seed))\n ovo_svm = OneVsOneClassifier(SVC(random_state=seed))\n ovo_sgd = OneVsOneClassifier(SGDClassifier(max_iter=10000, random_state=seed, loss=\"perceptron\", \n eta0=1, learning_rate=\"constant\", penalty=None))\n ovo_mnb = OneVsOneClassifier(MultinomialNB())\n ovo_gnb = OneVsOneClassifier(GaussianNB())\n \n predictors = [ovo_log, ovo_rnd, ovo_svm, ovo_sgd, ovo_mnb, ovo_gnb]\n for i, clf in enumerate(predictors):\n clf.fit(X, y)\n \n return predictors\n\ndef get_blender_set(X, predictors):\n train = np.empty((len(X), len(predictors)))\n for i, clf in enumerate(predictors):\n train[:,i] = clf.predict(X)\n return train\n\ndef get_blender_clf(X, y, predictors, seed=None):\n train = get_blender_set(X, predictors)\n \n blender = OneVsOneClassifier(SVC(random_state=seed))\n blender.fit(train, y)\n \n return blender\n\npredictors = get_many_predictors(X_train, y_train, 42)\nblender = get_blender_clf(X_val, y_val, predictors, 42)\nX_blender_test = get_blender_set(X_test, predictors)\ny_pred = blender.predict(X_blender_test)\nprint(f\"Blender eval on test set: {accuracy_score(y_test, y_pred)}\")",
"Blender eval on test set: 0.9555555555555556\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbbc6f39f31caad8177ba3e089e8433f8eb64045
| 26,132 |
ipynb
|
Jupyter Notebook
|
docs/guide/serializing.ipynb
|
Qingtian-Zou/cti-python-stix2
|
17445a085cb84734900603eb8009bcc856892762
|
[
"BSD-3-Clause"
] | 277 |
2017-02-15T17:54:37.000Z
|
2022-03-11T09:04:33.000Z
|
docs/guide/serializing.ipynb
|
Qingtian-Zou/cti-python-stix2
|
17445a085cb84734900603eb8009bcc856892762
|
[
"BSD-3-Clause"
] | 503 |
2017-02-21T15:36:58.000Z
|
2022-03-11T02:15:49.000Z
|
docs/guide/serializing.ipynb
|
Qingtian-Zou/cti-python-stix2
|
17445a085cb84734900603eb8009bcc856892762
|
[
"BSD-3-Clause"
] | 92 |
2017-02-15T18:07:49.000Z
|
2022-01-31T09:29:23.000Z
| 62.516746 | 1,774 | 0.569264 |
[
[
[
"# Delete this cell to re-enable tracebacks\nimport sys\nipython = get_ipython()\n\ndef hide_traceback(exc_tuple=None, filename=None, tb_offset=None,\n exception_only=False, running_compiled_code=False):\n etype, value, tb = sys.exc_info()\n value.__cause__ = None # suppress chained exceptions\n return ipython._showtraceback(etype, value, ipython.InteractiveTB.get_exception_only(etype, value))\n\nipython.showtraceback = hide_traceback",
"_____no_output_____"
],
[
"# JSON output syntax highlighting\nfrom __future__ import print_function\nfrom pygments import highlight\nfrom pygments.lexers import JsonLexer, TextLexer\nfrom pygments.formatters import HtmlFormatter\nfrom IPython.display import display, HTML\nfrom IPython.core.interactiveshell import InteractiveShell\n\nInteractiveShell.ast_node_interactivity = \"all\"\n\ndef json_print(inpt):\n string = str(inpt)\n formatter = HtmlFormatter()\n if string[0] == '{':\n lexer = JsonLexer()\n else:\n lexer = TextLexer()\n return HTML('<style type=\"text/css\">{}</style>{}'.format(\n formatter.get_style_defs('.highlight'),\n highlight(string, lexer, formatter)))\n\nglobals()['print'] = json_print",
"_____no_output_____"
]
],
[
[
"## Serializing STIX Objects",
"_____no_output_____"
],
[
"The string representation of all STIX classes is a valid STIX JSON object.",
"_____no_output_____"
]
],
[
[
"from stix2 import Indicator\n\nindicator = Indicator(name=\"File hash for malware variant\",\n pattern_type=\"stix\",\n pattern=\"[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']\")\n\nprint(indicator.serialize(pretty=True))",
"_____no_output_____"
]
],
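[
[
"Because the serialized form is plain JSON, it can be handed to any JSON tooling. A minimal added sketch, assuming only the `indicator` object created above and the standard-library `json` module:",
"_____no_output_____"
]
],
[
[
"import json\n\n# Added sketch: round-trip the serialized output through the standard json module.\nparsed = json.loads(indicator.serialize())\nparsed[\"type\"], parsed[\"name\"]",
"_____no_output_____"
]
],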
[
[
"---\n**New in 3.0.0:** \n\nCalling `str()` on a STIX object will call `serialize()` without any formatting options. The change was made to address the performance penalty induced by unknowingly calling with the pretty formatted option. As shown above, to get the same effect as `str()` had in past versions of the library, use the method directly and pass in the pretty argument `serialize(pretty=True)`.\n\n---\n\nHowever, the pretty formatted string representation can be slow, as it sorts properties to be in a more readable order. If you need performance and don't care about the human-readability of the output, use the object's `serialize()` function to pass in any arguments `json.dump()` would understand:",
"_____no_output_____"
]
],
[
[
"print(indicator.serialize())",
"_____no_output_____"
]
],
[
[
"If you need performance but also need human-readable output, you can pass the `indent` keyword argument to `serialize()`:",
"_____no_output_____"
]
],
[
[
"print(indicator.serialize(indent=4))",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbc8a85d6576aec95264da171bd1b9b3c6087bf
| 3,739 |
ipynb
|
Jupyter Notebook
|
scikit-learn-official-examples/exercises/plot_cv_diabetes.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
scikit-learn-official-examples/exercises/plot_cv_diabetes.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
scikit-learn-official-examples/exercises/plot_cv_diabetes.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
| 69.240741 | 2,511 | 0.652581 |
[
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# Cross-validation on diabetes Dataset Exercise\n\n\nA tutorial exercise which uses cross-validation with linear models.\n\nThis exercise is used in the `cv_estimators_tut` part of the\n`model_selection_tut` section of the `stat_learn_tut_index`.\n\n",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.linear_model import Lasso\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\n\ndiabetes = datasets.load_diabetes()\nX = diabetes.data[:150]\ny = diabetes.target[:150]\n\nlasso = Lasso(random_state=0)\nalphas = np.logspace(-4, -0.5, 30)\n\ntuned_parameters = [{'alpha': alphas}]\nn_folds = 3\n\nclf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)\nclf.fit(X, y)\nscores = clf.cv_results_['mean_test_score']\nscores_std = clf.cv_results_['std_test_score']\nplt.figure().set_size_inches(8, 6)\nplt.semilogx(alphas, scores)\n\n# plot error lines showing +/- std. errors of the scores\nstd_error = scores_std / np.sqrt(n_folds)\n\nplt.semilogx(alphas, scores + std_error, 'b--')\nplt.semilogx(alphas, scores - std_error, 'b--')\n\n# alpha=0.2 controls the translucency of the fill color\nplt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)\n\nplt.ylabel('CV score +/- std error')\nplt.xlabel('alpha')\nplt.axhline(np.max(scores), linestyle='--', color='.5')\nplt.xlim([alphas[0], alphas[-1]])\n\n# #############################################################################\n# Bonus: how much can you trust the selection of alpha?\n\n# To answer this question we use the LassoCV object that sets its alpha\n# parameter automatically from the data by internal cross-validation (i.e. it\n# performs cross-validation on the training data it receives).\n# We use external cross-validation to see how much the automatically obtained\n# alphas differ across different cross-validation folds.\nlasso_cv = LassoCV(alphas=alphas, random_state=0)\nk_fold = KFold(3)\n\nprint(\"Answer to the bonus question:\",\n \"how much can you trust the selection of alpha?\")\nprint()\nprint(\"Alpha parameters maximising the generalization score on different\")\nprint(\"subsets of the data:\")\nfor k, (train, test) in enumerate(k_fold.split(X, y)):\n lasso_cv.fit(X[train], y[train])\n print(\"[fold {0}] alpha: {1:.5f}, score: {2:.5f}\".\n format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))\nprint()\nprint(\"Answer: Not very much since we obtained different alphas for different\")\nprint(\"subsets of the data and moreover, the scores for these alphas differ\")\nprint(\"quite substantially.\")\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbc8edb6ff321426be77e836269a5b12b504593
| 3,721 |
ipynb
|
Jupyter Notebook
|
Hw05_Nikiforov.ipynb
|
DmitryN10/SBT_ML
|
65460d50ea0c3550f5780e951b6989207cf5d02a
|
[
"MIT"
] | null | null | null |
Hw05_Nikiforov.ipynb
|
DmitryN10/SBT_ML
|
65460d50ea0c3550f5780e951b6989207cf5d02a
|
[
"MIT"
] | null | null | null |
Hw05_Nikiforov.ipynb
|
DmitryN10/SBT_ML
|
65460d50ea0c3550f5780e951b6989207cf5d02a
|
[
"MIT"
] | null | null | null | 26.204225 | 245 | 0.574845 |
[
[
[
"### Домашнее задание 5",
"_____no_output_____"
],
[
"Напишите свою функцию определения качества модели по следующей метрике: максимальный precision, при условии, что $precision < 1.5 * recall$ и $recall > 0.5$ и определите наилучшее её значение, перебирая гиперпараметры по предложенной сетке",
"_____no_output_____"
],
[
"Ответом на эту задачу является максимальное значение качества по предложенной метрике, округлённое до 4го знака",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import make_scorer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import load_breast_cancer\n\nparam_grid = {\n 'n_estimators': [10, 20, 30, 40, 50],\n 'max_depth': [None, 5, 10, 15, 20],\n 'criterion': ['entropy', 'gini']\n}\n\nX_data, y_data = load_breast_cancer(return_X_y=True)\n\nestimator = RandomForestClassifier(random_state=42)\n\nprint('Accuracy best params and score')\nresult = GridSearchCV(estimator, param_grid, cv=3, scoring='accuracy').fit(X_data, y_data)\nprint('\\tParams:', result.best_params_)\nprint('\\tScore:', result.best_score_)",
"Accuracy best params and score\n\tParams: {'criterion': 'entropy', 'max_depth': None, 'n_estimators': 50}\n\tScore: 0.9648506151142355\n"
],
[
"from sklearn.metrics import precision_score, recall_score, make_scorer\n\ndef my_score_func(y, y_pred, **kargs):\n recall = recall_score(y, y_pred)\n precision = precision_score(y, y_pred)\n if precision >= 1.5 * recall or recall <= 0.5: return 0\n return precision",
"_____no_output_____"
],
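[
"# Added sanity check on hypothetical toy labels: the metric returns the precision\n# only when precision < 1.5 * recall and recall > 0.5, and 0 otherwise.\nprint(my_score_func([1, 1, 1, 0], [1, 1, 0, 0]))  # recall 2/3, precision 1.0 -> 0 (precision >= 1.5 * recall)\nprint(my_score_func([1, 1, 0, 0], [1, 1, 1, 0]))  # recall 1.0, precision 2/3 -> returns the precision",
"_____no_output_____"
],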
[
"from sklearn.metrics import make_scorer, precision_score, recall_score\n\nscorer = make_scorer(my_score_func)\n\nprint('Custom loss best params and score')\nresult = GridSearchCV(estimator, param_grid, cv=3, scoring=scorer).fit(X_data, y_data)\nprint('\\tParams:', result.best_params_)\nprint('\\tScore:', result.best_score_)",
"Custom loss best params and score\n\tParams: {'criterion': 'gini', 'max_depth': None, 'n_estimators': 10}\n\tScore: 0.9688196110664571\n"
],
[
"print(round(result.best_score_, 4))",
"0.9688\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
cbbc90d59468e0853c2ddc78e57554ccb85f7856
| 18,909 |
ipynb
|
Jupyter Notebook
|
notebooks/DecisionTreeClassifier_final.ipynb
|
andronikmk/decision-tree-learning
|
85cb666bd29654dcacd1655ce4add624ef1b4839
|
[
"MIT"
] | null | null | null |
notebooks/DecisionTreeClassifier_final.ipynb
|
andronikmk/decision-tree-learning
|
85cb666bd29654dcacd1655ce4add624ef1b4839
|
[
"MIT"
] | null | null | null |
notebooks/DecisionTreeClassifier_final.ipynb
|
andronikmk/decision-tree-learning
|
85cb666bd29654dcacd1655ce4add624ef1b4839
|
[
"MIT"
] | null | null | null | 29.591549 | 601 | 0.483897 |
[
[
[
"# training dataset\ntraining_data = [\n ['Yes', 'No','No','Yes','Some','$$$','No','Yes','French','0-10','Yes'],\n ['Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', 'No'],\n ['No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', 'Yes'],\n ['Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '10-30', 'Yes'],\n ['Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', 'No'],\n ['No','Yes','No','Yes','Some','$$','Yes','Yes','Italian','0-10','Yes'],\n ['No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', 'No'],\n ['No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', 'Yes'],\n ['No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', 'No'],\n ['Yes','Yes','Yes','Yes','Full','$$$','No','Yes','Italian','10-30','No'],\n ['No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', 'No'],\n ['Yes','Yes','Yes','Yes','Full','$','No','No','Burger','30-60','Yes']\n]",
"_____no_output_____"
],
[
"# column labels\nheader = [\"Alternate\", \"Bar\", \"Fri/Sat\", \"Hungry\", \"Patrons\", \"Price\", \"Raining\", \"Reservation\" , \"Type\", \"WaitEstimate\", \"WillWait\"]",
"_____no_output_____"
],
[
"def unique_vals(rows, col):\n \"\"\" This return a set of unique values in a particular column \"\"\"\n return set([row[col] for row in rows])\n\nunique_vals(training_data, 8)",
"_____no_output_____"
],
[
"def class_counts(rows):\n counts = {} # a dictionary of labels.\n for row in rows: # for every row in rows \n label = row[-1] # assuming labels are the last column in data frame\n if label not in counts: # label is not in counts\n counts[label] = 0\n counts[label] += 1\n return counts",
"_____no_output_____"
],
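[
"# Added check (not in the original notebook): class_counts on the toy dataset\n# should report 6 'Yes' and 6 'No' labels.\nclass_counts(training_data)",
"_____no_output_____"
],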
[
"def is_numeric(value):\n \"\"\" Testing whether the values are numeric \"\"\"\n return isinstance(value, int) or isinstance(value, float)",
"_____no_output_____"
],
[
"# # Checking categorical and numeric values.\n# for i in range(len(training_data)):\n# print(\"Is\", training_data[i][0], \"numeric?\" , is_numeric(training_data[i][0]))",
"_____no_output_____"
],
[
"class Question:\n \n def __init__(self, column, value):\n self.column = column\n self.value = value\n \n def match(self, example):\n val = example[self.column]\n if is_numeric(val):\n return val >= self.value\n else:\n return val == self.value\n \n def __repr__(self):\n \n \"\"\" Helper method to that the question can be printed in \n a readable format.\"\"\"\n \n condition = \"==\"\n if is_numeric(self.value):\n condition = \">=\"\n return \"Is %s %s %s\" % (\n header[self.column], condition, str(self.value))",
"_____no_output_____"
],
[
"q = Question(0, \"Yes\")\nq",
"_____no_output_____"
],
[
"example1 = training_data[0]\nexample1",
"_____no_output_____"
],
[
"q.match(example1) # is the first example is green. this will be true",
"_____no_output_____"
],
[
"q1 = Question(2, \"Yes\")\nexample2 = training_data[0]\nq1.match(example2) # is False because the string instance is 'Apple'.",
"_____no_output_____"
],
[
"def partition(rows, question):\n \n true_rows, false_rows = [], []\n \n for row in rows:\n if question.match(row):\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows",
"_____no_output_____"
],
[
"true_rows, false_rows = partition(training_data, Question(0, 'Yes'))\ntrue_rows # get back rows were the first element is equivalen to 'Red'",
"_____no_output_____"
],
[
"def gini(rows):\n \n \"\"\" Gini Impurity is a measurement of the likelihood of \n an incorrect classification of a new instance of a \n random variable, if that new instance were randomly \n classified according to the distribution of class \n labels from the data set. Source: https://bambielli.com\n /til/2017-10-29-gini-impurity \"\"\"\n \n counts = class_counts(rows)\n impurity = 1\n for lbl in counts: # for labels in counts\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity",
"_____no_output_____"
],
[
"def info_gain(left, right, current_uncertainty):\n \n \"\"\" Information gain is the reduction in entropy \n or surprise by transforming a dataset and is\n often used in training decision trees. \n Information gain is calculated by comparing\n the entropy of the dataset before and after \n a transformation. \"\"\"\n \n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)",
"_____no_output_____"
],
[
"current_uncertainty = gini(training_data)\ncurrent_uncertainty",
"_____no_output_____"
],
[
"true_rows, false_rows = partition(training_data, Question(3, 'Yes'))\ninfo_gain(true_rows, false_rows, current_uncertainty)",
"_____no_output_____"
],
[
"def find_best_split(rows):\n \n \"\"\" Find the best questions to ask by iterating over every\n feature/value and calculating the information gain. \"\"\"\n \n best_gain = 0 # keeping track of best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n \n for col in range(n_features): # for every feature\n \n values = set([row[col] for row in rows]) # value that is unique\n \n for val in values: # for every value\n \n question = Question(col, val)\n \n true_rows, false_rows = partition(rows, question) # attempt to split data set\n \n if len(true_rows) == 0 or len(false_rows) == 0: # if the data set is not divisible skip the split\n continue\n \n # information gain should be calculated after the split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n \n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question",
"_____no_output_____"
],
[
"# find the best question to ask our toy dataset\nbest_gain, best_question = find_best_split(training_data)\nbest_question",
"_____no_output_____"
],
[
"class Leaf:\n \"\"\"\n A lead node is need to classify data.\n \n This will hold a dictionary of class (\"Apple\", \"Grape\", etc...) -> this will\n show the amount of times the class appears in the rows of the training dataset \n when it gets to this leaf.\n \"\"\" \n def __init__(self, rows):\n self.predictions = class_counts(rows)",
"_____no_output_____"
],
[
"class Decision_Node:\n \"\"\"\n This is a Decision Node and it asks a question.\n \n A reference to the question will be held here. In addition to the two child nodes\n that will branch off from this point.\n \"\"\"\n def __init__(self, \n question,\n true_branch,\n false_branch):\n self.question = question\n self.true_branch = true_branch\n self.false_branch = false_branch",
"_____no_output_____"
],
[
"def build_tree(rows):\n \"\"\"\n This builds our tree\n \n Rules for recursion: \n 1. Start from checking from the base case (this implies no further info. gain)\n 2. There will be a big stack trace.\n \"\"\"\n \n # attempt to partition the dataset on each of the attributes that are unique.\n # information gain needs to be calculated.\n # a lead is returned.\n gain, question = find_best_split(rows)\n \n # Base case:\n # There will be no more information gain.\n # Since no further questions are asked we will return a leaf\n if gain == 0:\n return Leaf(rows)\n \n # Assuming that we have reached this part of the code.\n # We are at a point where there is usefull features/values to split on.\n true_rows, false_rows = partition(rows, question)\n \n # Build the true branch using recursion\n true_branch = build_tree(true_rows)\n \n # Build the false branch using recursion\n false_branch = build_tree(false_rows)\n \n \n # The question node is returned\n # This is give you a record of the best feature/value to ask.\n # in addition, the branches to follow which depends on the answer.\n return Decision_Node(question, true_branch, false_branch)",
"_____no_output_____"
],
[
"def fit(node, spacing=\"\"):\n \n # base base: this is a way to say that we have reached a leaf\n if isinstance(node, Leaf):\n print(spacing + \"Predict\", node.predictions)\n return\n \n # The question at this node can be printed\n print(spacing + str(node.question))\n \n # This function needs to be called recursivly on the branch that is true\n print(spacing + \"---> True: \")\n print_tree(node.true_branch, spacing + \" \")\n \n # The false branch needs to be called recursively as well\n print(spacing + \"---> False: \")\n print_tree(node.false_branch, spacing + \" \")",
"_____no_output_____"
],
[
"my_tree = build_tree(training_data)",
"_____no_output_____"
],
[
"fit(my_tree)",
"Is Patrons == Some\n---> True: \n Predict {'Yes': 4}\n---> False: \n Is Hungry == No\n ---> True: \n Predict {'No': 4}\n ---> False: \n Is Type == Italian\n ---> True: \n Predict {'No': 1}\n ---> False: \n Is Fri/Sat == No\n ---> True: \n Predict {'No': 1}\n ---> False: \n Predict {'Yes': 2}\n"
],
[
"def predict(row, node):\n \"\"\" See rules of recursion that is shown above. \"\"\"\n \n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n return node.predictions\n \n # A decision on whether to follow the true or flase branch\n if node.question.match(row):\n return predict(row, node.true_branch)\n else:\n return predict(row, node.false_branch)",
"_____no_output_____"
],
[
"def print_leaf(counts):\n \n \"\"\" A nicer way to print the predictions at a leaf. \"\"\"\n \n total = sum(counts.values()) * 1.0\n probs = {}\n for lbl in counts.keys():\n probs[lbl] = str(int(counts[lbl] / total*100)) + \"%\"\n return probs",
"_____no_output_____"
],
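[
"# A tiny illustrative call (hand-made counts, not produced by the tree): a mixed leaf prints as percentages.\nprint_leaf({'Yes': 1, 'No': 1})",
"_____no_output_____"
],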
[
"print_leaf(predict(training_data[0], my_tree))",
"_____no_output_____"
],
[
"print_leaf(predict(training_data[1], my_tree))",
"_____no_output_____"
],
[
"testing_data = [\n ['Yes', 'No','No','Yes','Some','$$$','No','Yes','French','0-10','Yes'],\n ['Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', 'No'],\n ['No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', 'Yes'],\n ['Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '10-30', 'Yes'],\n]",
"_____no_output_____"
],
[
"for row in testing_data:\n print(\"Actual: %s Predicted: %s\" % (row[-1], print_leaf(predict(row, my_tree))))",
"Actual: Yes Predicted: {'Yes': '100%'}\n"
]
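,
[
"# A brief follow-up sketch (not part of the original lesson): overall accuracy on the toy test set,\n# reusing the predict() function and the my_tree built above.\ncorrect = 0\nfor row in testing_data:\n    counts = predict(row, my_tree)\n    predicted_label = max(counts, key=counts.get)\n    if predicted_label == row[-1]:\n        correct += 1\nprint(\"Accuracy: %.2f\" % (correct / len(testing_data)))",
"_____no_output_____"
]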
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbc9a381b46e7038249e5be35d63739def033a0
| 21,299 |
ipynb
|
Jupyter Notebook
|
notebooks/GRU_LSTM_pred_plots_12h.ipynb
|
harryli18/hybrid-rnn-models
|
9baae52985cf21635b5c2e75b785ee6c2eac85d4
|
[
"MIT"
] | 1 |
2021-03-11T03:45:06.000Z
|
2021-03-11T03:45:06.000Z
|
notebooks/GRU_LSTM_pred_plots_12h.ipynb
|
harryli18/hybrid-rnn-models
|
9baae52985cf21635b5c2e75b785ee6c2eac85d4
|
[
"MIT"
] | null | null | null |
notebooks/GRU_LSTM_pred_plots_12h.ipynb
|
harryli18/hybrid-rnn-models
|
9baae52985cf21635b5c2e75b785ee6c2eac85d4
|
[
"MIT"
] | null | null | null | 50.233491 | 1,660 | 0.636603 |
[
[
[
"import plaidml.keras\nplaidml.keras.install_backend()\nimport os\nos.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"",
"_____no_output_____"
],
[
"# Importing useful libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional, Conv1D, Flatten, MaxPooling1D\nfrom keras.optimizers import SGD\nimport math\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom keras import optimizers\n\nimport time ",
"_____no_output_____"
]
],
[
[
"### Data Processing",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('../data/num_data.csv')",
"_____no_output_____"
],
[
"dataset = df",
"_____no_output_____"
],
[
"dataset.shape",
"_____no_output_____"
],
[
"def return_rmse(test,predicted):\n rmse = math.sqrt(mean_squared_error(test, predicted))\n return rmse",
"_____no_output_____"
],
[
"data_size = dataset.shape[0]\ntrain_size=int(data_size * 0.6)\ntest_size = 100\nvalid_size = data_size - train_size - test_size\n\ntest_next_day = [12, 24, 48]",
"_____no_output_____"
],
[
"training_set = dataset[:train_size].iloc[:,4:16].values\nvalid_set = dataset[train_size:train_size+valid_size].iloc[:,4:16].values\ntest_set = dataset[data_size-test_size:].iloc[:,4:16].values",
"_____no_output_____"
],
[
"y = dataset.iloc[:,4].values\ny = y.reshape(-1,1)\nn_feature = training_set.shape[1]\ny.shape",
"_____no_output_____"
],
[
"# Scaling the dataset\nsc = MinMaxScaler(feature_range=(0,1))\ntraining_set_scaled = sc.fit_transform(training_set)\nvalid_set_scaled = sc.fit_transform(valid_set)\ntest_set_scaled = sc.fit_transform(test_set)\n\nsc_y = MinMaxScaler(feature_range=(0,1))\ny_scaled = sc_y.fit_transform(y)",
"_____no_output_____"
],
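[
"# An alternative scaling sketch (not what the results above used): fit the scaler on the\n# training data only, then apply the same transform to the validation and test sets so their\n# statistics do not leak into the scaling parameters.\nsc_alt = MinMaxScaler(feature_range=(0, 1))\ntraining_set_scaled_alt = sc_alt.fit_transform(training_set)\nvalid_set_scaled_alt = sc_alt.transform(valid_set)\ntest_set_scaled_alt = sc_alt.transform(test_set)",
"_____no_output_____"
],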
[
"# split a multivariate sequence into samples\nposition_of_target = 4\ndef split_sequences(sequences, n_steps_in, n_steps_out):\n X_, y_ = list(), list()\n for i in range(len(sequences)):\n # find the end of this pattern\n end_ix = i + n_steps_in\n out_end_ix = end_ix + n_steps_out-1\n # check if we are beyond the dataset\n if out_end_ix > len(sequences):\n break\n # gather input and output parts of the pattern\n seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix-1:out_end_ix, position_of_target]\n X_.append(seq_x)\n y_.append(seq_y)\n return np.array(X_), np.array(y_)",
"_____no_output_____"
],
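[
"# A quick illustrative check (toy input, not part of the model pipeline): windowing a small\n# array shows the shapes split_sequences() produces for n_steps_in=3 and n_steps_out=2.\ntoy = np.arange(40, dtype=float).reshape(8, 5)  # 8 time steps, 5 features (target column index 4)\ntoy_X, toy_y = split_sequences(toy, 3, 2)\nprint(toy_X.shape, toy_y.shape)  # expected: (5, 3, 5) (5, 2)",
"_____no_output_____"
],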
[
"n_steps_in = 12\nn_steps_out = 12\nX_train, y_train = split_sequences(training_set_scaled, n_steps_in, n_steps_out)\nX_valid, y_valid = split_sequences(valid_set_scaled, n_steps_in, n_steps_out)\nX_test, y_test = split_sequences(test_set_scaled, n_steps_in, n_steps_out)",
"_____no_output_____"
],
[
"GRU_LSTM_reg = Sequential()\nGRU_LSTM_reg.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1],n_feature), activation='tanh'))\nGRU_LSTM_reg.add(LSTM(units=50, activation='tanh'))\nGRU_LSTM_reg.add(Dense(units=n_steps_out))\n\nDFS_2LSTM = Sequential()\nDFS_2LSTM.add(Conv1D(filters=64, kernel_size=6, activation='tanh', input_shape=(X_train.shape[1],n_feature)))\nDFS_2LSTM.add(MaxPooling1D(pool_size=4))\nDFS_2LSTM.add(Dropout(0.2)) \nDFS_2LSTM.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1],n_feature), activation='tanh'))\nDFS_2LSTM.add(LSTM(units=50, activation='tanh'))\nDFS_2LSTM.add(Dropout(0.190 + 0.0025 * n_steps_in))\nDFS_2LSTM.add(Dense(units=n_steps_out))\n\n# Compiling the RNNs\nadam = optimizers.Adam(lr=0.01)\nGRU_LSTM_reg.compile(optimizer=adam,loss='mean_squared_error')\nDFS_2LSTM.compile(optimizer=adam,loss='mean_squared_error')\n",
"INFO:plaidml:Opening device \"llvm_cpu.0\"\n"
],
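[
"# Optional sanity check (illustrative only): print the layer and parameter summary of the\n# compiled hybrid model before training.\nGRU_LSTM_reg.summary()",
"_____no_output_____"
],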
[
"RnnModelDict = {'GRU_LSTM': GRU_LSTM_reg}\n\nrmse_df = pd.DataFrame(columns=['Model', 'train_rmse', 'valid_rmse', 'train_time'])\n\n# RnnModelDict = {'LSTM_GRU': LSTM_GRU_reg}",
"_____no_output_____"
],
[
"for model in RnnModelDict:\n regressor = RnnModelDict[model]\n \n print('training start for', model) \n start = time.process_time()\n regressor.fit(X_train,y_train,epochs=50,batch_size=1024)\n train_time = round(time.process_time() - start, 2)\n \n print('results for training set')\n y_train_pred = regressor.predict(X_train)\n# plot_predictions(y_train,y_train_pred)\n train_rmse = return_rmse(y_train,y_train_pred)\n \n print('results for valid set')\n y_valid_pred = regressor.predict(X_valid)\n# plot_predictions(y_valid,y_valid_pred)\n valid_rmse = return_rmse(y_valid,y_valid_pred)\n \n \n# print('results for test set - 24 hours')\n# y_test_pred24 = regressor.predict(X_test_24)\n# plot_predictions(y_test_24,y_test_pred24)\n# test24_rmse = return_rmse(y_test_24,y_test_pred24)\n \n \n one_df = pd.DataFrame([[model, train_rmse, valid_rmse, train_time]], \n columns=['Model', 'train_rmse', 'valid_rmse', 'train_time'])\n rmse_df = pd.concat([rmse_df, one_df])\n\n# save the rmse results \n# rmse_df.to_csv('../rmse_24h_plus_time.csv')\n",
"training start for GRU_LSTM\nEpoch 1/50\n252438/252438 [==============================] - 329s 1ms/step - loss: 0.0057\nEpoch 2/50\n124928/252438 [=============>................] - ETA: 49s - loss: 0.0047"
],
[
"history = regressor.fit(X_train, y_train, epochs=50, batch_size=1024, validation_data=(X_valid, y_valid),\n verbose=2, shuffle=False)\n# plot history\n\nplt.figure(figsize=(30, 15))\nplt.plot(history.history['loss'], label='Training')\nplt.plot(history.history['val_loss'], label='Validation')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# Transform back and plot\ny_train_origin = y[:train_size-46]\ny_valid_origin = y[train_size:train_size+valid_size]\n\ny_train_pred = regressor.predict(X_train)\ny_train_pred_origin = sc_y.inverse_transform(y_train_pred)\n\ny_valid_pred = regressor.predict(X_valid)\ny_valid_pred_origin = sc_y.inverse_transform(y_valid_pred)\n\n_y_train_pred_origin = y_train_pred_origin[:, 0:1]\n_y_valid_pred_origin = y_valid_pred_origin[:, 0:1]\n\n",
"_____no_output_____"
],
[
"plt.figure(figsize=(20, 8));\nplt.plot(pd.to_datetime(valid_original.index), valid_original, \n alpha=0.5, color='red', label='Actual PM2.5 Concentration',)\nplt.plot(pd.to_datetime(valid_original.index), y_valid_pred_origin[:,0:1], \n alpha=0.5, color='blue', label='Predicted PM2.5 Concentation')\nplt.title('PM2.5 Concentration Prediction')\nplt.xlabel('Time')\nplt.ylabel('PM2.5 Concentration')\nplt.legend()\nplt.show()\n",
"_____no_output_____"
],
[
"sample = 500\nplt.figure(figsize=(20, 8));\nplt.plot(pd.to_datetime(valid_original.index[-500:]), valid_original[-500:], \n alpha=0.5, color='red', label='Actual PM2.5 Concentration',)\nplt.plot(pd.to_datetime(valid_original.index[-500:]), y_valid_pred_origin[:,11:12][-500:], \n alpha=0.5, color='blue', label='Predicted PM2.5 Concentation')\nplt.title('PM2.5 Concentration Prediction')\nplt.xlabel('Time')\nplt.ylabel('PM2.5 Concentration')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbca409a5a2c10dbd0da41a78f81a4631968782
| 384,326 |
ipynb
|
Jupyter Notebook
|
2 - Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week2/.ipynb_checkpoints/Optimization+methods-checkpoint.ipynb
|
mashuai191/coursera_deeplearning_assignment
|
d90f69e9ac73c2ece08cea1be81d33e3fdfacec2
|
[
"MIT"
] | 1 |
2018-06-22T01:54:51.000Z
|
2018-06-22T01:54:51.000Z
|
2 - Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week2/.ipynb_checkpoints/Optimization+methods-checkpoint.ipynb
|
mashuai191/coursera_deeplearning_assignment
|
d90f69e9ac73c2ece08cea1be81d33e3fdfacec2
|
[
"MIT"
] | null | null | null |
2 - Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week2/.ipynb_checkpoints/Optimization+methods-checkpoint.ipynb
|
mashuai191/coursera_deeplearning_assignment
|
d90f69e9ac73c2ece08cea1be81d33e3fdfacec2
|
[
"MIT"
] | 1 |
2018-08-09T08:37:07.000Z
|
2018-08-09T08:37:07.000Z
| 232.080918 | 62,492 | 0.879574 |
[
[
[
"# Optimization Methods\n\nUntil now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. \n\nGradient descent goes \"downhill\" on a cost function $J$. Think of it as trying to do this: \n<img src=\"images/cost.jpg\" style=\"width:650px;height:300px;\">\n<caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>\n\n**Notations**: As usual, $\\frac{\\partial J}{\\partial a } = $ `da` for any variable `a`.\n\nTo get started, run the following code to import the libraries you will need.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nimport math\nimport sklearn\nimport sklearn.datasets\n\nfrom opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation\nfrom opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'",
"/notebooks/2 - Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week2/opt_utils.py:76: SyntaxWarning: assertion is always true, perhaps remove parentheses?\n assert(parameters['W' + str(l)].shape == layer_dims[l], layer_dims[l-1])\n/notebooks/2 - Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week2/opt_utils.py:77: SyntaxWarning: assertion is always true, perhaps remove parentheses?\n assert(parameters['W' + str(l)].shape == layer_dims[l], 1)\n/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n"
]
],
[
[
"## 1 - Gradient Descent\n\nA simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. \n\n**Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$: \n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{1}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{2}$$\n\nwhere L is the number of layers and $\\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: update_parameters_with_gd\n\ndef update_parameters_with_gd(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using one step of gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters to be updated:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients to update each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n learning_rate -- the learning rate, scalar.\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] -= learning_rate * grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] -= learning_rate * grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters",
"_____no_output_____"
],
[
"parameters, grads, learning_rate = update_parameters_with_gd_test_case()\n\nparameters = update_parameters_with_gd(parameters, grads, learning_rate)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))",
"W1 = [[ 1.63535156 -0.62320365 -0.53718766]\n [-1.07799357 0.85639907 -2.29470142]]\nb1 = [[ 1.74604067]\n [-0.75184921]]\nW2 = [[ 0.32171798 -0.25467393 1.46902454]\n [-2.05617317 -0.31554548 -0.3756023 ]\n [ 1.1404819 -1.09976462 -0.1612551 ]]\nb2 = [[-0.88020257]\n [ 0.02561572]\n [ 0.57539477]]\n"
]
],
[
[
"**Expected Output**:\n\n<table> \n <tr>\n <td > **W1** </td> \n <td > [[ 1.63535156 -0.62320365 -0.53718766]\n [-1.07799357 0.85639907 -2.29470142]] </td> \n </tr> \n \n <tr>\n <td > **b1** </td> \n <td > [[ 1.74604067]\n [-0.75184921]] </td> \n </tr> \n \n <tr>\n <td > **W2** </td> \n <td > [[ 0.32171798 -0.25467393 1.46902454]\n [-2.05617317 -0.31554548 -0.3756023 ]\n [ 1.1404819 -1.09976462 -0.1612551 ]] </td> \n </tr> \n \n <tr>\n <td > **b2** </td> \n <td > [[-0.88020257]\n [ 0.02561572]\n [ 0.57539477]] </td> \n </tr> \n</table>\n",
"_____no_output_____"
],
[
"A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent. \n\n- **(Batch) Gradient Descent**:\n\n``` python\nX = data_input\nY = labels\nparameters = initialize_parameters(layers_dims)\nfor i in range(0, num_iterations):\n # Forward propagation\n a, caches = forward_propagation(X, parameters)\n # Compute cost.\n cost = compute_cost(a, Y)\n # Backward propagation.\n grads = backward_propagation(a, caches, parameters)\n # Update parameters.\n parameters = update_parameters(parameters, grads)\n \n```\n\n- **Stochastic Gradient Descent**:\n\n```python\nX = data_input\nY = labels\nparameters = initialize_parameters(layers_dims)\nfor i in range(0, num_iterations):\n for j in range(0, m):\n # Forward propagation\n a, caches = forward_propagation(X[:,j], parameters)\n # Compute cost\n cost = compute_cost(a, Y[:,j])\n # Backward propagation\n grads = backward_propagation(a, caches, parameters)\n # Update parameters.\n parameters = update_parameters(parameters, grads)\n```\n",
"_____no_output_____"
],
[
"In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will \"oscillate\" toward the minimum rather than converge smoothly. Here is an illustration of this: \n\n<img src=\"images/kiank_sgd.png\" style=\"width:750px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> \"+\" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>\n\n**Note** also that implementing SGD requires 3 for-loops in total:\n1. Over the number of iterations\n2. Over the $m$ training examples\n3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)\n\nIn practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.\n\n<img src=\"images/kiank_minibatch.png\" style=\"width:750px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> \"+\" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>\n\n<font color='blue'>\n**What you should remember**:\n- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.\n- You have to tune a learning rate hyperparameter $\\alpha$.\n- With a well-turned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).",
"_____no_output_____"
],
[
"## 2 - Mini-Batch Gradient descent\n\nLet's learn how to build mini-batches from the training set (X, Y).\n\nThere are two steps:\n- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches. \n\n<img src=\"images/kiank_shuffle.png\" style=\"width:550px;height:300px;\">\n\n- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this: \n\n<img src=\"images/kiank_partition.png\" style=\"width:550px;height:300px;\">\n\n**Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:\n```python\nfirst_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]\nsecond_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]\n...\n```\n\nNote that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\\lfloor s \\rfloor$ represents $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\\lfloor \\frac{m}{mini\\_batch\\_size}\\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m-mini_\\_batch_\\_size \\times \\lfloor \\frac{m}{mini\\_batch\\_size}\\rfloor$). ",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: random_mini_batches\n\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n mini_batch_size -- size of the mini-batches, integer\n \n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n \n np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n m = X.shape[1] # number of training examples\n mini_batches = []\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: ]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: ]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches",
"_____no_output_____"
],
[
"X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()\nmini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)\n\nprint (\"shape of the 1st mini_batch_X: \" + str(mini_batches[0][0].shape))\nprint (\"shape of the 2nd mini_batch_X: \" + str(mini_batches[1][0].shape))\nprint (\"shape of the 3rd mini_batch_X: \" + str(mini_batches[2][0].shape))\nprint (\"shape of the 1st mini_batch_Y: \" + str(mini_batches[0][1].shape))\nprint (\"shape of the 2nd mini_batch_Y: \" + str(mini_batches[1][1].shape)) \nprint (\"shape of the 3rd mini_batch_Y: \" + str(mini_batches[2][1].shape))\nprint (\"mini batch sanity check: \" + str(mini_batches[0][0][0][0:3]))",
"shape of the 1st mini_batch_X: (12288, 64)\nshape of the 2nd mini_batch_X: (12288, 64)\nshape of the 3rd mini_batch_X: (12288, 20)\nshape of the 1st mini_batch_Y: (1, 64)\nshape of the 2nd mini_batch_Y: (1, 64)\nshape of the 3rd mini_batch_Y: (1, 20)\nmini batch sanity check: [ 0.90085595 -0.7612069 0.2344157 ]\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:50%\"> \n <tr>\n <td > **shape of the 1st mini_batch_X** </td> \n <td > (12288, 64) </td> \n </tr> \n \n <tr>\n <td > **shape of the 2nd mini_batch_X** </td> \n <td > (12288, 64) </td> \n </tr> \n \n <tr>\n <td > **shape of the 3rd mini_batch_X** </td> \n <td > (12288, 20) </td> \n </tr>\n <tr>\n <td > **shape of the 1st mini_batch_Y** </td> \n <td > (1, 64) </td> \n </tr> \n <tr>\n <td > **shape of the 2nd mini_batch_Y** </td> \n <td > (1, 64) </td> \n </tr> \n <tr>\n <td > **shape of the 3rd mini_batch_Y** </td> \n <td > (1, 20) </td> \n </tr> \n <tr>\n <td > **mini batch sanity check** </td> \n <td > [ 0.90085595 -0.7612069 0.2344157 ] </td> \n </tr>\n \n</table>",
"_____no_output_____"
],
[
"<font color='blue'>\n**What you should remember**:\n- Shuffling and Partitioning are the two steps required to build mini-batches\n- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.",
"_____no_output_____"
],
[
"## 3 - Momentum\n\nBecause mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will \"oscillate\" toward convergence. Using momentum can reduce these oscillations. \n\nMomentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the \"velocity\" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill. \n\n<img src=\"images/opt_momentum.png\" style=\"width:400px;height:250px;\">\n<caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>\n\n\n**Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:\nfor $l =1,...,L$:\n```python\nv[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\nv[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n```\n**Note** that the iterator l starts at 0 in the for loop while the first parameters are v[\"dW1\"] and v[\"db1\"] (that's a \"one\" on the superscript). This is why we are shifting l to l+1 in the `for` loop.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: initialize_velocity\n\ndef initialize_velocity(parameters):\n \"\"\"\n Initializes the velocity as a python dictionary with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n \n Returns:\n v -- python dictionary containing the current velocity.\n v['dW' + str(l)] = velocity of dWl\n v['db' + str(l)] = velocity of dbl\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n \n # Initialize velocity\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = np.zeros((parameters['W' + str(l+1)].shape[0], parameters['W' + str(l+1)].shape[1]))\n v[\"db\" + str(l+1)] = np.zeros((parameters['b' + str(l+1)].shape[0], parameters['b' + str(l+1)].shape[1]))\n ### END CODE HERE ###\n \n return v",
"_____no_output_____"
],
[
"parameters = initialize_velocity_test_case()\n\nv = initialize_velocity(parameters)\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))",
"v[\"dW1\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db1\"] = [[ 0.]\n [ 0.]]\nv[\"dW2\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db2\"] = [[ 0.]\n [ 0.]\n [ 0.]]\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:40%\"> \n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[ 0.]\n [ 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr> \n</table>\n",
"_____no_output_____"
],
[
"**Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$: \n\n$$ \\begin{cases}\nv_{dW^{[l]}} = \\beta v_{dW^{[l]}} + (1 - \\beta) dW^{[l]} \\\\\nW^{[l]} = W^{[l]} - \\alpha v_{dW^{[l]}}\n\\end{cases}\\tag{3}$$\n\n$$\\begin{cases}\nv_{db^{[l]}} = \\beta v_{db^{[l]}} + (1 - \\beta) db^{[l]} \\\\\nb^{[l]} = b^{[l]} - \\alpha v_{db^{[l]}} \n\\end{cases}\\tag{4}$$\n\nwhere L is the number of layers, $\\beta$ is the momentum and $\\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a \"one\" on the superscript). So you will need to shift `l` to `l+1` when coding.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: update_parameters_with_momentum\n\ndef update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n \"\"\"\n Update parameters using Momentum\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- python dictionary containing the current velocity:\n v['dW' + str(l)] = ...\n v['db' + str(l)] = ...\n beta -- the momentum hyperparameter, scalar\n learning_rate -- the learning rate, scalar\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- python dictionary containing your updated velocities\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n \n # Momentum update for each parameter\n for l in range(L):\n \n ### START CODE HERE ### (approx. 4 lines)\n # compute velocities\n v[\"dW\" + str(l+1)] = beta * v['dW' + str(l+1)] + (1 - beta) * grads['dW' + str(l+1)]\n v[\"db\" + str(l+1)] = beta * v['db' + str(l+1)] + (1 - beta) * grads['db' + str(l+1)]\n # update parameters\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * v[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * v[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters, v",
"_____no_output_____"
],
[
"parameters, grads, v = update_parameters_with_momentum_test_case()\n\nparameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))",
"W1 = [[ 1.62544598 -0.61290114 -0.52907334]\n [-1.07347112 0.86450677 -2.30085497]]\nb1 = [[ 1.74493465]\n [-0.76027113]]\nW2 = [[ 0.31930698 -0.24990073 1.4627996 ]\n [-2.05974396 -0.32173003 -0.38320915]\n [ 1.13444069 -1.0998786 -0.1713109 ]]\nb2 = [[-0.87809283]\n [ 0.04055394]\n [ 0.58207317]]\nv[\"dW1\"] = [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]]\nv[\"db1\"] = [[-0.01228902]\n [-0.09357694]]\nv[\"dW2\"] = [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]]\nv[\"db2\"] = [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:90%\"> \n <tr>\n <td > **W1** </td> \n <td > [[ 1.62544598 -0.61290114 -0.52907334]\n [-1.07347112 0.86450677 -2.30085497]] </td> \n </tr> \n \n <tr>\n <td > **b1** </td> \n <td > [[ 1.74493465]\n [-0.76027113]] </td> \n </tr> \n \n <tr>\n <td > **W2** </td> \n <td > [[ 0.31930698 -0.24990073 1.4627996 ]\n [-2.05974396 -0.32173003 -0.38320915]\n [ 1.13444069 -1.0998786 -0.1713109 ]] </td> \n </tr> \n \n <tr>\n <td > **b2** </td> \n <td > [[-0.87809283]\n [ 0.04055394]\n [ 0.58207317]] </td> \n </tr> \n\n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[-0.01228902]\n [-0.09357694]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]</td> \n </tr> \n</table>\n\n",
"_____no_output_____"
],
[
"**Note** that:\n- The velocity is initialized with zeros. So the algorithm will take a few iterations to \"build up\" velocity and start to take bigger steps.\n- If $\\beta = 0$, then this just becomes standard gradient descent without momentum. \n\n**How do you choose $\\beta$?**\n\n- The larger the momentum $\\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\\beta$ is too big, it could also smooth out the updates too much. \n- Common values for $\\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\\beta = 0.9$ is often a reasonable default. \n- Tuning the optimal $\\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$. ",
"_____no_output_____"
],
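[
"For intuition, here is a small standalone sketch (independent of the graded functions above) showing how $\\beta$ smooths a noisy sequence of gradients:\n```python\nimport numpy as np\nnp.random.seed(0)\nnoisy_grads = np.random.randn(100)        # a noisy 1-D stand-in for minibatch gradients\nfor beta in (0.0, 0.9, 0.99):\n    v = 0.0\n    smoothed = []\n    for g in noisy_grads:\n        v = beta * v + (1 - beta) * g     # exponentially weighted average\n        smoothed.append(v)\n    print(beta, round(float(np.std(smoothed)), 3))   # larger beta -> less fluctuation\n```\nThe larger $\\beta$ is, the more past gradients are averaged in, so the update direction fluctuates less from step to step.",
"_____no_output_____"
],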
[
"<font color='blue'>\n**What you should remember**:\n- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.\n- You have to tune a momentum hyperparameter $\\beta$ and a learning rate $\\alpha$.",
"_____no_output_____"
],
[
"## 4 - Adam\n\nAdam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum. \n\n**How does Adam work?**\n1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction). \n2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction). \n3. It updates parameters in a direction based on combining information from \"1\" and \"2\".\n\nThe update rule is, for $l = 1, ..., L$: \n\n$$\\begin{cases}\nv_{dW^{[l]}} = \\beta_1 v_{dW^{[l]}} + (1 - \\beta_1) \\frac{\\partial \\mathcal{J} }{ \\partial W^{[l]} } \\\\\nv^{corrected}_{dW^{[l]}} = \\frac{v_{dW^{[l]}}}{1 - (\\beta_1)^t} \\\\\ns_{dW^{[l]}} = \\beta_2 s_{dW^{[l]}} + (1 - \\beta_2) (\\frac{\\partial \\mathcal{J} }{\\partial W^{[l]} })^2 \\\\\ns^{corrected}_{dW^{[l]}} = \\frac{s_{dW^{[l]}}}{1 - (\\beta_1)^t} \\\\\nW^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}_{dW^{[l]}}}{\\sqrt{s^{corrected}_{dW^{[l]}}} + \\varepsilon}\n\\end{cases}$$\nwhere:\n- t counts the number of steps taken of Adam \n- L is the number of layers\n- $\\beta_1$ and $\\beta_2$ are hyperparameters that control the two exponentially weighted averages. \n- $\\alpha$ is the learning rate\n- $\\varepsilon$ is a very small number to avoid dividing by zero\n\nAs usual, we will store all parameters in the `parameters` dictionary ",
"_____no_output_____"
],
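[
"A quick numeric check (illustrative only) of why the bias correction matters on the first step: with $v$ initialized to zero, $v_1 = (1-\\beta_1)\\, dW$ badly underestimates the gradient, and dividing by $1 - \\beta_1^t$ restores its scale.\n```python\nbeta1, t, dW = 0.9, 1, 2.0\nv = (1 - beta1) * dW              # 0.2 -- much smaller than the true gradient\nv_corrected = v / (1 - beta1 ** t)\nprint(v, v_corrected)             # 0.2 2.0\n```",
"_____no_output_____"
],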
[
"**Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.\n\n**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:\nfor $l = 1, ..., L$:\n```python\nv[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\nv[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\ns[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\ns[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n\n```",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: initialize_adam\n\ndef initialize_adam(parameters) :\n \"\"\"\n Initializes v and s as two python dictionaries with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n \n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters[\"W\" + str(l)] = Wl\n parameters[\"b\" + str(l)] = bl\n \n Returns: \n v -- python dictionary that will contain the exponentially weighted average of the gradient.\n v[\"dW\" + str(l)] = ...\n v[\"db\" + str(l)] = ...\n s -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n s[\"dW\" + str(l)] = ...\n s[\"db\" + str(l)] = ...\n\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n s = {}\n \n # Initialize v, s. Input: \"parameters\". Outputs: \"v, s\".\n for l in range(L):\n ### START CODE HERE ### (approx. 4 lines)\n v[\"dW\" + str(l+1)] = np.zeros((parameters[\"W\" + str(l+1)].shape[0], parameters[\"W\" + str(l+1)].shape[1]))\n v[\"db\" + str(l+1)] = np.zeros((parameters[\"b\" + str(l+1)].shape[0], parameters[\"b\" + str(l+1)].shape[1]))\n s[\"dW\" + str(l+1)] = np.zeros((parameters[\"W\" + str(l+1)].shape[0], parameters[\"W\" + str(l+1)].shape[1]))\n s[\"db\" + str(l+1)] = np.zeros((parameters[\"b\" + str(l+1)].shape[0], parameters[\"b\" + str(l+1)].shape[1]))\n ### END CODE HERE ###\n \n return v, s",
"_____no_output_____"
],
[
"parameters = initialize_adam_test_case()\n\nv, s = initialize_adam(parameters)\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \" + str(s[\"db2\"]))\n",
"v[\"dW1\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db1\"] = [[ 0.]\n [ 0.]]\nv[\"dW2\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db2\"] = [[ 0.]\n [ 0.]\n [ 0.]]\ns[\"dW1\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]]\ns[\"db1\"] = [[ 0.]\n [ 0.]]\ns[\"dW2\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\ns[\"db2\"] = [[ 0.]\n [ 0.]\n [ 0.]]\n"
]
],
[
[
"**Expected Output**:\n\n<table style=\"width:40%\"> \n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[ 0.]\n [ 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr> \n <tr>\n <td > **s[\"dW1\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db1\"]** </td> \n <td > [[ 0.]\n [ 0.]] </td> \n </tr> \n \n <tr>\n <td > **s[\"dW2\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db2\"]** </td> \n <td > [[ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr>\n\n</table>\n",
"_____no_output_____"
],
[
"**Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$: \n\n$$\\begin{cases}\nv_{W^{[l]}} = \\beta_1 v_{W^{[l]}} + (1 - \\beta_1) \\frac{\\partial J }{ \\partial W^{[l]} } \\\\\nv^{corrected}_{W^{[l]}} = \\frac{v_{W^{[l]}}}{1 - (\\beta_1)^t} \\\\\ns_{W^{[l]}} = \\beta_2 s_{W^{[l]}} + (1 - \\beta_2) (\\frac{\\partial J }{\\partial W^{[l]} })^2 \\\\\ns^{corrected}_{W^{[l]}} = \\frac{s_{W^{[l]}}}{1 - (\\beta_2)^t} \\\\\nW^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}_{W^{[l]}}}{\\sqrt{s^{corrected}_{W^{[l]}}}+\\varepsilon}\n\\end{cases}$$\n\n\n**Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.",
"_____no_output_____"
]
],
[
[
"# GRADED FUNCTION: update_parameters_with_adam\n\ndef update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n \"\"\"\n Update parameters using Adam\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n learning_rate -- the learning rate, scalar.\n beta1 -- Exponential decay hyperparameter for the first moment estimates \n beta2 -- Exponential decay hyperparameter for the second moment estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n \n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = beta1 * v[\"dW\" + str(l+1)] + (1 - beta1) * grads['dW' + str(l+1)]\n v[\"db\" + str(l+1)] = beta1 * v[\"db\" + str(l+1)] + (1 - beta1) * grads['db' + str(l+1)]\n ### END CODE HERE ###\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)] / (1 - beta1 ** t)\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)] / (1 - beta1 ** t)\n ### END CODE HERE ###\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n ### START CODE HERE ### (approx. 2 lines)\n s[\"dW\" + str(l+1)] = beta2 * s[\"dW\" + str(l+1)] + (1 - beta2) * (grads['dW' + str(l+1)] ** 2)\n s[\"db\" + str(l+1)] = beta2 * s[\"db\" + str(l+1)] + (1 - beta2) * (grads['db' + str(l+1)] ** 2)\n ### END CODE HERE ###\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)] / (1 - beta2 ** t)\n s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)] / (1 - beta2 ** t)\n ### END CODE HERE ###\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * v_corrected[\"dW\" + str(l+1)] / (np.sqrt(s_corrected[\"dW\" + str(l+1)]) + epsilon)\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * v_corrected[\"db\" + str(l+1)] / (np.sqrt(s_corrected[\"db\" + str(l+1)]) + epsilon)\n ### END CODE HERE ###\n\n return parameters, v, s",
"_____no_output_____"
],
[
"parameters, grads, v, s = update_parameters_with_adam_test_case()\nparameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \" + str(s[\"db2\"]))",
"W1 = [[ 1.63178673 -0.61919778 -0.53561312]\n [-1.08040999 0.85796626 -2.29409733]]\nb1 = [[ 1.75225313]\n [-0.75376553]]\nW2 = [[ 0.32648046 -0.25681174 1.46954931]\n [-2.05269934 -0.31497584 -0.37661299]\n [ 1.14121081 -1.09244991 -0.16498684]]\nb2 = [[-0.88529979]\n [ 0.03477238]\n [ 0.57537385]]\nv[\"dW1\"] = [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]]\nv[\"db1\"] = [[-0.01228902]\n [-0.09357694]]\nv[\"dW2\"] = [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]]\nv[\"db2\"] = [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]\ns[\"dW1\"] = [[ 0.00121136 0.00131039 0.00081287]\n [ 0.0002525 0.00081154 0.00046748]]\ns[\"db1\"] = [[ 1.51020075e-05]\n [ 8.75664434e-04]]\ns[\"dW2\"] = [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]\n [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]\n [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]\ns[\"db2\"] = [[ 5.49507194e-05]\n [ 2.75494327e-03]\n [ 5.50629536e-04]]\n"
]
],
[
[
"**Expected Output**:\n\n<table> \n <tr>\n <td > **W1** </td> \n <td > [[ 1.63178673 -0.61919778 -0.53561312]\n [-1.08040999 0.85796626 -2.29409733]] </td> \n </tr> \n \n <tr>\n <td > **b1** </td> \n <td > [[ 1.75225313]\n [-0.75376553]] </td> \n </tr> \n \n <tr>\n <td > **W2** </td> \n <td > [[ 0.32648046 -0.25681174 1.46954931]\n [-2.05269934 -0.31497584 -0.37661299]\n [ 1.14121081 -1.09245036 -0.16498684]] </td> \n </tr> \n \n <tr>\n <td > **b2** </td> \n <td > [[-0.88529978]\n [ 0.03477238]\n [ 0.57537385]] </td> \n </tr> \n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[-0.01228902]\n [-0.09357694]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]] </td> \n </tr> \n <tr>\n <td > **s[\"dW1\"]** </td> \n <td > [[ 0.00121136 0.00131039 0.00081287]\n [ 0.0002525 0.00081154 0.00046748]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db1\"]** </td> \n <td > [[ 1.51020075e-05]\n [ 8.75664434e-04]] </td> \n </tr> \n \n <tr>\n <td > **s[\"dW2\"]** </td> \n <td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]\n [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]\n [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db2\"]** </td> \n <td > [[ 5.49507194e-05]\n [ 2.75494327e-03]\n [ 5.50629536e-04]] </td> \n </tr>\n</table>\n",
"_____no_output_____"
],
[
"You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.",
"_____no_output_____"
],
[
"## 5 - Model with different optimization algorithms\n\nLets use the following \"moons\" dataset to test the different optimization methods. (The dataset is named \"moons\" because the data from each of the two classes looks a bit like a crescent-shaped moon.) ",
"_____no_output_____"
]
],
[
[
"train_X, train_Y = load_dataset()",
"_____no_output_____"
]
],
[
[
"We have already implemented a 3-layer neural network. You will train it with: \n- Mini-batch **Gradient Descent**: it will call your function:\n - `update_parameters_with_gd()`\n- Mini-batch **Momentum**: it will call your functions:\n - `initialize_velocity()` and `update_parameters_with_momentum()`\n- Mini-batch **Adam**: it will call your functions:\n - `initialize_adam()` and `update_parameters_with_adam()`",
"_____no_output_____"
]
],
[
[
"def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):\n \"\"\"\n 3-layer neural network model which can be run in different optimizer modes.\n \n Arguments:\n X -- input data, of shape (2, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n layers_dims -- python list, containing the size of each layer\n learning_rate -- the learning rate, scalar.\n mini_batch_size -- the size of a mini batch\n beta -- Momentum hyperparameter\n beta1 -- Exponential decay hyperparameter for the past gradients estimates \n beta2 -- Exponential decay hyperparameter for the past squared gradients estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n num_epochs -- number of epochs\n print_cost -- True to print the cost every 1000 epochs\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(layers_dims) # number of layers in the neural networks\n costs = [] # to keep track of the cost\n t = 0 # initializing the counter required for Adam update\n seed = 10 # For grading purposes, so that your \"random\" minibatches are the same as ours\n \n # Initialize parameters\n parameters = initialize_parameters(layers_dims)\n\n # Initialize the optimizer\n if optimizer == \"gd\":\n pass # no initialization required for gradient descent\n elif optimizer == \"momentum\":\n v = initialize_velocity(parameters)\n elif optimizer == \"adam\":\n v, s = initialize_adam(parameters)\n \n # Optimization loop\n for i in range(num_epochs):\n \n # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch\n seed = seed + 1\n minibatches = random_mini_batches(X, Y, mini_batch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n\n # Forward propagation\n a3, caches = forward_propagation(minibatch_X, parameters)\n\n # Compute cost\n cost = compute_cost(a3, minibatch_Y)\n\n # Backward propagation\n grads = backward_propagation(minibatch_X, minibatch_Y, caches)\n\n # Update parameters\n if optimizer == \"gd\":\n parameters = update_parameters_with_gd(parameters, grads, learning_rate)\n elif optimizer == \"momentum\":\n parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)\n elif optimizer == \"adam\":\n t = t + 1 # Adam counter\n parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,\n t, learning_rate, beta1, beta2, epsilon)\n \n # Print the cost every 1000 epoch\n if print_cost and i % 1000 == 0:\n print (\"Cost after epoch %i: %f\" %(i, cost))\n if print_cost and i % 100 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('epochs (per 100)')\n plt.title(\"Learning rate = \" + str(learning_rate))\n plt.show()\n\n return parameters",
"_____no_output_____"
]
],
[
[
"You will now run this 3 layer neural network with each of the 3 optimization methods.\n\n### 5.1 - Mini-batch Gradient descent\n\nRun the following code to see how the model does with mini-batch gradient descent.",
"_____no_output_____"
]
],
[
[
"# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"gd\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Gradient Descent optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"Cost after epoch 0: 0.690736\nCost after epoch 1000: 0.685273\nCost after epoch 2000: 0.647072\nCost after epoch 3000: 0.619525\nCost after epoch 4000: 0.576584\nCost after epoch 5000: 0.607243\nCost after epoch 6000: 0.529403\nCost after epoch 7000: 0.460768\nCost after epoch 8000: 0.465586\nCost after epoch 9000: 0.464518\n"
]
],
[
[
"### 5.2 - Mini-batch gradient descent with momentum\n\nRun the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momemtum are small; but for more complex problems you might see bigger gains.",
"_____no_output_____"
]
],
[
[
"# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = \"momentum\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Momentum optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"Cost after epoch 0: 0.690741\nCost after epoch 1000: 0.685341\nCost after epoch 2000: 0.647145\nCost after epoch 3000: 0.619594\nCost after epoch 4000: 0.576665\nCost after epoch 5000: 0.607324\nCost after epoch 6000: 0.529476\nCost after epoch 7000: 0.460936\nCost after epoch 8000: 0.465780\nCost after epoch 9000: 0.464740\n"
]
],
[
[
"### 5.3 - Mini-batch with Adam mode\n\nRun the following code to see how the model does with Adam.",
"_____no_output_____"
]
],
[
[
"# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"adam\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Adam optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)",
"Cost after epoch 0: 0.690552\nCost after epoch 1000: 0.185567\nCost after epoch 2000: 0.150852\nCost after epoch 3000: 0.074454\nCost after epoch 4000: 0.125936\nCost after epoch 5000: 0.104235\nCost after epoch 6000: 0.100552\nCost after epoch 7000: 0.031601\nCost after epoch 8000: 0.111709\nCost after epoch 9000: 0.197648\n"
]
],
[
[
"### 5.4 - Summary\n\n<table> \n <tr>\n <td>\n **optimization method**\n </td>\n <td>\n **accuracy**\n </td>\n <td>\n **cost shape**\n </td>\n\n </tr>\n <td>\n Gradient descent\n </td>\n <td>\n 79.7%\n </td>\n <td>\n oscillations\n </td>\n <tr>\n <td>\n Momentum\n </td>\n <td>\n 79.7%\n </td>\n <td>\n oscillations\n </td>\n </tr>\n <tr>\n <td>\n Adam\n </td>\n <td>\n 94%\n </td>\n <td>\n smoother\n </td>\n </tr>\n</table> \n\nMomentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligeable. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult thans others for the optimization algorithm.\n\nAdam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.\n\nSome advantages of Adam include:\n- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum) \n- Usually works well even with little tuning of hyperparameters (except $\\alpha$)",
"_____no_output_____"
],
[
"**References**:\n\n- Adam paper: https://arxiv.org/pdf/1412.6980.pdf",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
cbbcaed62b192881d007ac582cff3911bb8faaf4
| 8,591 |
ipynb
|
Jupyter Notebook
|
wandb/run-20210917_183955-1qrf2j0o/tmp/code/_session_history.ipynb
|
Programmer-RD-AI/Intel-Image-Classification-TL
|
640177c2b8864a768ea49730bc634b3f980ede51
|
[
"Apache-2.0"
] | null | null | null |
wandb/run-20210917_183955-1qrf2j0o/tmp/code/_session_history.ipynb
|
Programmer-RD-AI/Intel-Image-Classification-TL
|
640177c2b8864a768ea49730bc634b3f980ede51
|
[
"Apache-2.0"
] | null | null | null |
wandb/run-20210917_183955-1qrf2j0o/tmp/code/_session_history.ipynb
|
Programmer-RD-AI/Intel-Image-Classification-TL
|
640177c2b8864a768ea49730bc634b3f980ede51
|
[
"Apache-2.0"
] | null | null | null | 34.641129 | 269 | 0.539052 |
[
[
[
"from torchvision.models import *\nimport wandb\nfrom sklearn.model_selection import train_test_split\nimport os,cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.nn import *\nimport torch,torchvision\nfrom tqdm import tqdm\ndevice = 'cuda'\nPROJECT_NAME = 'Intel-Image-Classification-TL'",
"_____no_output_____"
],
[
"def load_data():\n data = []\n labels = {}\n idx = 0\n labels_r = {}\n for folder in os.listdir('./data/'):\n idx += 1\n labels[folder] = idx\n labels_r[idx] = folder\n for folder in tqdm(os.listdir('./data/')):\n for file in os.listdir(f'./data/{folder}')[:1000]:\n img = cv2.imread(f'./data/{folder}/{file}')\n img = cv2.resize(img,(56,56))\n img = img / 255.0\n data.append([img,np.eye(labels[folder]+1,len(labels))[labels[folder]]])\n X = []\n y = []\n for d in data:\n X.append(d[0])\n y.append(d[1])\n X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,shuffle=False)\n X_train = torch.from_numpy(np.array(X_train)).to(device).view(-1,3,56,56).float()\n y_train = torch.from_numpy(np.array(y_train)).to(device).float()\n X_test = torch.from_numpy(np.array(X_test)).to(device).view(-1,3,56,56).float()\n y_test = torch.from_numpy(np.array(y_test)).to(device).float()\n return X,y,X_train,X_test,y_train,y_test,labels,labels_r,idx,data",
"_____no_output_____"
],
[
"X,y,X_train,X_test,y_train,y_test,labels,labels_r,idx,data = load_data()",
"_____no_output_____"
],
[
"torch.save(X_train,'X_train.pt')\ntorch.save(y_train,'y_train.pt')\ntorch.save(X_test,'X_test.pt')\ntorch.save(y_test,'y_test.pt')\ntorch.save(labels_r,'labels_r.pt')\ntorch.save(labels,'labels.pt')\ntorch.save(X_train,'X_train.pth')\ntorch.save(y_train,'y_train.pth')\ntorch.save(X_test,'X_test.pth')\ntorch.save(y_test,'y_test.pth')\ntorch.save(labels_r,'labels_r.pth')\ntorch.save(labels,'labels.pth')",
"_____no_output_____"
],
[
"def get_loss(model,X,y,criterion):\n preds = model(X)\n loss = criterion(preds,y)\n return loss.item()",
"_____no_output_____"
],
[
"def get_accuracy(model,X,y):\n preds = model(X)\n correct = 0\n total = 0\n for pred,yb in zip(preds,y):\n pred = int(torch.argmax(pred))\n yb = int(torch.argmax(yb))\n if pred == yb:\n correct += 1\n total += 1\n acc = round(correct / total,3) * 100\n return acc",
"_____no_output_____"
],
[
"class Model(Module):\n def __init__(self):\n super().__init__()\n self.activation = ReLU()\n self.dropout = Dropout()\n self.max_pool2d = MaxPool2d((2,2),(2,2))\n self.conv1 = Conv2d(3,7,(5,5))\n self.conv1bn = BatchNorm2d(7)\n self.conv2 = Conv2d(7,14,(5,5))\n self.conv2bn = BatchNorm2d(14)\n self.conv3 = Conv2d(14,21,(5,5))\n self.conv3bn = BatchNorm2d(21)\n self.linear1 = Linear(21*3*3,256)\n self.linear1bn = BatchNorm1d(256)\n self.linear2 = Linear(256,512)\n self.linear2bn = BatchNorm1d(512)\n self.linear3 = Linear(512,256)\n self.linear3bn = BatchNorm1d(256)\n self.output = Linear(256,len(labels))\n \n def forward(self,X):\n preds = self.activation(self.dropout(self.max_pool2d(self.conv1bn(self.conv1(X)))))\n preds = self.activation(self.dropout(self.max_pool2d(self.conv2bn(self.conv2(preds)))))\n preds = self.activation(self.dropout(self.max_pool2d(self.conv3bn(self.conv3(preds)))))\n# print(preds.shape)\n preds = preds.view(-1,21*3*3)\n preds = self.activation(self.dropout(self.linear1bn(self.linear1(preds))))\n preds = self.activation(self.dropout(self.linear2bn(self.linear2(preds))))\n preds = self.activation(self.dropout(self.linear3bn(self.linear3(preds))))\n preds = self.output(preds)\n return preds",
"_____no_output_____"
],
[
"model = Model().to(device)\ncriterion = MSELoss()\noptimizer = torch.optim.Adam(model.parameters(),lr=0.001)\nbatch_size = 32\nepochs = 100",
"_____no_output_____"
],
[
"wandb.init(project=PROJECT_NAME,name='baseline-CNN')\nfor _ in tqdm(range(epochs)):\n for i in range(0,len(X_train),batch_size):\n X_batch = X_train[i:i+batch_size]\n y_batch = y_train[i:i+batch_size]\n preds = model(X_batch)\n loss = criterion(preds,y_batch)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n torch.cuda.empty_cache()\n wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})\n torch.cuda.empty_cache()\n wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})\n torch.cuda.empty_cache()\n wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})\n torch.cuda.empty_cache()\n wandb.log({'Val ACC':get_accuracy(model,X_test,y_test)})\n torch.cuda.empty_cache()\nwandb.finish()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbcb3c67690de5f7d4cd76a62260043153a6b76
| 36,100 |
ipynb
|
Jupyter Notebook
|
display.ipynb
|
RyanSamman/BlogWordCloud
|
45d88e5ece7027cebb76bd2bd592702e50fe052a
|
[
"Unlicense"
] | null | null | null |
display.ipynb
|
RyanSamman/BlogWordCloud
|
45d88e5ece7027cebb76bd2bd592702e50fe052a
|
[
"Unlicense"
] | null | null | null |
display.ipynb
|
RyanSamman/BlogWordCloud
|
45d88e5ece7027cebb76bd2bd592702e50fe052a
|
[
"Unlicense"
] | null | null | null | 353.921569 | 17,318 | 0.918366 |
[
[
[
"import json # Library for parsing JSON files\nfrom os import path\nfrom PIL import Image # Python Image Library\n\nimport numpy as np # Faster data structures \nimport matplotlib.pyplot as plt # Used to display charts\nfrom wordcloud import WordCloud, ImageColorGenerator\n\n# Load Word/Frequency Data\nwith open(\"processedData.json\", \"r\") as file:\n wordAndFrequencyData = json.load(file)\n\n# Load Image which text will be superimposed onto\nimageMask = np.array(Image.open(path.join(\"images\", \"FCITLogo.jpg\")))\n\n# Generate colors from Image\nimageColors = ImageColorGenerator(imageMask)\n\nprint(wordAndFrequencyData)",
"{'some': 3, 'is': 2, 'it': 2, 'words': 2, 'hello': 1, 'this': 1, 'test': 1, 'data': 1, \"isn't\": 1, 'yes': 1, 'repetitive': 1}\n"
],
[
"# Create WordCloud object with image\nwordCloudObject = WordCloud(scale=10, background_color=\"white\", mask=imageMask) \n\n# Pass in Word: Frequency to be displayed\nwordCloudObject.generate_from_frequencies(wordAndFrequencyData)\n\n# Add the Color data to the Word Cloud\nwordCloudObject.recolor(color_func=imageColors)\n\n# Remove Axis markings\nplt.axis(\"off\")\n\n# Display as Matplotlib Chart\nplt.imshow(wordCloudObject, interpolation=\"bilinear\")\nplt.show()\n",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
cbbcb923ab0bf39c34b37567c82267ca4ced0bef
| 330,365 |
ipynb
|
Jupyter Notebook
|
inspector/plottools/Simple Beacons Time Series - single selected targets.ipynb
|
m-lab/analysis
|
cfdcb3475c042f0d2d6fa96ef29c57f8e29c17d9
|
[
"Apache-2.0"
] | 4 |
2020-04-11T20:06:59.000Z
|
2021-06-30T08:06:31.000Z
|
inspector/plottools/Simple Beacons Time Series - single selected targets.ipynb
|
m-lab/analysis
|
cfdcb3475c042f0d2d6fa96ef29c57f8e29c17d9
|
[
"Apache-2.0"
] | 11 |
2019-01-19T02:19:20.000Z
|
2021-08-30T22:04:21.000Z
|
inspector/plottools/Simple Beacons Time Series - single selected targets.ipynb
|
m-lab/analysis
|
cfdcb3475c042f0d2d6fa96ef29c57f8e29c17d9
|
[
"Apache-2.0"
] | 1 |
2020-06-25T09:48:12.000Z
|
2020-06-25T09:48:12.000Z
| 338.48873 | 290,344 | 0.909788 |
[
[
[
"# Beacon Time Series, across the transition\n\nEdit selector= below\n\nLook at the beacons with the largest normalized spread.\n\n( Steal plotMultiBeacons() from here.)",
"_____no_output_____"
]
],
[
[
"import math\nimport numpy as np\nimport pandas as pd\nimport BQhelper as bq\n\n%matplotlib nbagg\nimport matplotlib.pyplot as plt\n\nbq.project = \"mlab-sandbox\"\n# bq.dataset = 'mattmathis'\n# bq.UnitTestRunQuery()\n# bq.UnitTestWriteQuery()\nUnitTest=False",
"_____no_output_____"
],
[
"# Plot simple timeseries for a list of beacons\n# UnitTest=True\n\n# Modified start date\n\nquery=\"\"\"\nSELECT\n a.TestTime,\n client.IP,\n a.MeanThroughputMbps,\n node._instruments\n# FROM `mlab-sandbox.mm_unified_testing.unified_{selector}`\nFROM `measurement-lab.ndt.unified_{selector}`\nWHERE client.IP in ( {clientIP} )\nAND test_date >= '2018-01-01'\nORDER BY TestTime\n\n\"\"\"\nglobal StashData\n\n \ndef plotMultiBeacons(clients, columns=1, width=10, data=None, selector='downloads'):\n if data is None:\n clist = '\"'+'\", \"'.join(clients)+'\"'\n data=bq.QueryTimestampTimeseries(query, clientIP=clist, selector=selector)\n global StashData # Skip slow queries when debugging\n StashData = data\n rows = math.ceil(len(clients) / float(columns))\n figLen = width/float(columns)*rows # assume square subplots\n print('Size', figLen, width)\n plt.rcParams['figure.figsize'] = [ width, figLen]\n \n fig, axs = plt.subplots(nrows=rows, ncols=columns, squeeze=False, sharex='all')\n for ax, client in zip([i for j in axs for i in j], clients):\n print ('Beacon: '+client)\n ax.set_title('Beacon: '+client)\n cdata = data[data['IP'] == client]\n ax.plot(cdata['MeanThroughputMbps'][cdata[\"_instruments\"] == 'web100'], 'b.',\n cdata['MeanThroughputMbps'][cdata[\"_instruments\"] == 'tcpinfo'], 'r.')\n fig.autofmt_xdate()\n fig.show()\n\nif UnitTest:\n # %matplotlib nbagg\n\n try:\n TestData = StashData\n print('Using StashData')\n except:\n pass\n\n try:\n TestData\n except:\n print('Genereating test data')\n clients = [\n '69.68.23.44', # Max deltaMean\n '96.229.66.58', # Max deltaMax\n '73.210.92.196',\n ]\n \n clist = '\"'+'\", \"'.join(clients)+'\"'\n TestData=bq.QueryTimestampTimeseries(query, clientIP=clist, selector='downloads')\n # plt.ion()\n clients=list(TestData['IP'].unique())\n print (clients)\n plotMultiBeacons(clients, data=TestData, columns=2, width=10)\n print ('Done')\n",
"_____no_output_____"
],
[
"MIfastAfter=[\n '24.127.189.188',\n '68.61.90.228',\n '68.32.195.88',\n '98.209.182.228',\n '68.40.138.115',\n '23.116.227.182',\n '98.209.29.89',\n '68.36.121.102',\n '2601:40d:200:a802::2',\n '68.188.190.134']\n \n \n \nplotMultiBeacons(MIfastAfter, columns=2, width=10)\n",
"Size 25.0 10\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
cbbccbeef5331aa6b21b7a0ded2abd4b7f525fab
| 46,415 |
ipynb
|
Jupyter Notebook
|
artificial-intelligence-with-python-ja-master/Chapter 2/confusion_matrix.ipynb
|
tryoutlab/python-ai-oreilly
|
111a0db4a9d5bf7ec4c07b1e9e357ed4fa225f28
|
[
"Unlicense"
] | null | null | null |
artificial-intelligence-with-python-ja-master/Chapter 2/confusion_matrix.ipynb
|
tryoutlab/python-ai-oreilly
|
111a0db4a9d5bf7ec4c07b1e9e357ed4fa225f28
|
[
"Unlicense"
] | null | null | null |
artificial-intelligence-with-python-ja-master/Chapter 2/confusion_matrix.ipynb
|
tryoutlab/python-ai-oreilly
|
111a0db4a9d5bf7ec4c07b1e9e357ed4fa225f28
|
[
"Unlicense"
] | null | null | null | 573.024691 | 33,576 | 0.7152 |
[
[
[
"import numpy as np \nimport matplotlib.pyplot as plt \n%matplotlib inline\nfrom sklearn.metrics import confusion_matrix \nfrom sklearn.metrics import classification_report\n\ntrue_labels = [2, 0, 0, 2, 4, 4, 1, 0, 3, 3, 3] \npred_labels = [2, 1, 0, 2, 4, 3, 1, 0, 1, 3, 3] \n\nconfusion_mat = confusion_matrix(true_labels, pred_labels) \nprint(confusion_mat)\n\nplt.imshow(confusion_mat, interpolation='nearest', cmap=plt.cm.gray) \nplt.title('Confusion matrix') \nplt.colorbar() \nticks = np.arange(5) \nplt.xticks(ticks, ticks) \nplt.yticks(ticks, ticks) \nplt.ylabel('True labels') \nplt.xlabel('Predicted labels') \nplt.show()",
"[[2 1 0 0 0]\n [0 1 0 0 0]\n [0 0 2 0 0]\n [0 1 0 2 0]\n [0 0 0 1 1]]\n"
],
[
"targets = ['Class-0', 'Class-1', 'Class-2', 'Class-3', 'Class-4'] \nprint('\\n', classification_report(true_labels, pred_labels, target_names=targets)) ",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
cbbcd528f3b392bb2c504e11bd9924a06f105d04
| 814,827 |
ipynb
|
Jupyter Notebook
|
Chapter04/Chapter4.ipynb
|
AdityaAnuragMishra/Geospatial-Data-Science-Quick-Start-Guide
|
e304e4d6b1ddeea23c6006152750c572b7661e2a
|
[
"MIT"
] | 27 |
2019-03-04T10:45:31.000Z
|
2021-12-03T08:19:28.000Z
|
Chapter04/Chapter4.ipynb
|
AdityaAnuragMishra/Geospatial-Data-Science-Quick-Start-Guide
|
e304e4d6b1ddeea23c6006152750c572b7661e2a
|
[
"MIT"
] | null | null | null |
Chapter04/Chapter4.ipynb
|
AdityaAnuragMishra/Geospatial-Data-Science-Quick-Start-Guide
|
e304e4d6b1ddeea23c6006152750c572b7661e2a
|
[
"MIT"
] | 30 |
2018-10-19T22:49:37.000Z
|
2022-03-10T23:04:50.000Z
| 264.554221 | 137,962 | 0.879743 |
[
[
[
"<a href=\"https://colab.research.google.com/github/shakasom/MapsDataScience/blob/master/Chapter4.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Making sense of humongous location datasets",
"_____no_output_____"
],
[
"## Installations\n\nThe geospatial libraries are not pre installed in google colab as standard python library, therefore we need to install some libraries to use. Luckily this is an easy process. You can use either apt install or pip install. You can also create anaconda environment, but that is a bit complex so pip and apt are enough in our case to get the libraries we need. These are the libraries we need to install in this tutorial:\n\nGdal Geopandas Folium\n\nThe installation might take 1 minute.",
"_____no_output_____"
]
],
[
[
"%%time \n!apt update --quiet\n!apt upgrade --quiet\n# GDAL Important library for many geopython libraries\n!apt install gdal-bin python-gdal python3-gdal --quiet\n# Install rtree - Geopandas requirment\n!apt install python3-rtree --quiet\n# Install Geopandas\n!pip install git+git://github.com/geopandas/geopandas.git --quiet\n# Install descartes - Geopandas requirment\n!pip install descartes --quiet\n# Install Folium for Geographic data visualization\n!pip install folium --quiet\n# Install Pysal\n!pip install pysal --quiet\n# Install splot --> pysal\n!pip install splot --quiet\n# Install mapclassify\n!pip install mapclassify --quiet",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nimport geopandas as gpd\nfrom shapely.geometry import Point\nfrom pysal.explore import esda\nfrom pysal.lib import weights\n#import libysal as lps\nfrom pysal.viz.splot.esda import plot_moran, plot_local_autocorrelation, lisa_cluster\nimport matplotlib\nimport matplotlib.pyplot as plt \nimport folium\nimport os\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import dbscan\n\nfrom libpysal.weights.contiguity import Queen\nfrom esda.moran import Moran\nfrom splot.esda import moran_scatterplot\nfrom esda.moran import Moran_Local\nfrom splot.esda import lisa_cluster\n\nimport pysal as ps\nps.__version__",
"_____no_output_____"
]
],
[
[
"## Data \n\nThe dataset for this chapter is stored in the dropbox link. It is a valuable skill to learn how to access data on the web, so we will use WGET. WGET is great utility in accessing files from the web and supports different protocols. \n",
"_____no_output_____"
]
],
[
[
"# Get the data from dropbox link\n!wget https://www.dropbox.com/s/xvs0ybc402mkrn8/2019-02-avon-and-somerset-street.zip --quiet",
"_____no_output_____"
],
[
"# see the folders available\nimport os\nos.listdir(os.getcwd())",
"_____no_output_____"
],
[
"# We have zipped data so let us unzip it\n!unzip 2019-02-avon-and-somerset-street.zip ",
"Archive: 2019-02-avon-and-somerset-street.zip\n inflating: 2019-02-avon-and-somerset-street.csv \n"
],
[
"crime_somerset = pd.read_csv(\"2019-02-avon-and-somerset-street.csv\")\ncrime_somerset.head()",
"_____no_output_____"
],
[
"crime_somerset.shape",
"_____no_output_____"
],
[
"crime_somerset.isnull().sum()",
"_____no_output_____"
],
[
"# Drop columns with high missing values\ncrime_somerset.drop(['Last outcome category','Context', 'Crime ID' ], axis=1, inplace=True)",
"_____no_output_____"
],
[
"crime_somerset.head()",
"_____no_output_____"
],
[
"crime_somerset.isnull().sum()",
"_____no_output_____"
],
[
"# Drop rows with missin values\ncrime_somerset.dropna(axis=0,inplace=True)",
"_____no_output_____"
],
[
"crime_somerset.isnull().sum()",
"_____no_output_____"
],
[
"crime_somerset.shape\n",
"_____no_output_____"
],
[
"crime_somerset.head()",
"_____no_output_____"
]
],
[
[
"### Convert to GeoDataFrame",
"_____no_output_____"
]
],
[
[
"# Function to create a Geodataframe\ndef create_gdf(df, lat, lon):\n \"\"\" Convert pandas dataframe into a Geopandas GeoDataFrame\"\"\"\n crs = {'init': 'epsg:4326'}\n geometry = [Point(xy) for xy in zip(df[lon], df[lat])]\n gdf = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)\n return gdf",
"_____no_output_____"
],
[
"crime_somerset_gdf = create_gdf(crime_somerset, 'Latitude', 'Longitude')\ncrime_somerset_gdf.head()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12,10))\ncrime_somerset_gdf.plot(markersize=20, ax=ax);\nplt.savefig('crime_somerset_map.png', bbox_inches='tight')",
"_____no_output_____"
]
],
[
[
"## KMeans Clustering Location Data",
"_____no_output_____"
]
],
[
[
"crime_somerset_gdf.head()",
"_____no_output_____"
]
],
[
[
"* Split training and test dataset",
"_____no_output_____"
]
],
[
[
"train = crime_somerset_gdf.sample(frac=0.7, random_state=14)\ntest = crime_somerset_gdf.drop(train.index)\ntrain.shape, test.shape",
"_____no_output_____"
],
[
"# Get coordinates for the train and test dataset\ntrain_coords = train[['Latitude', 'Longitude']].values\ntest_coords = test[['Latitude', 'Longitude']].values",
"_____no_output_____"
],
[
"# Fit Kmeans clustering on training dataset\nkmeans = KMeans(n_clusters=5)\nkmeans.fit(train_coords)",
"_____no_output_____"
],
[
"# Predict on the test dataset by clustering \npreds = kmeans.predict(test_coords)\n\n# Get centers of the clusters\ncenters = kmeans.cluster_centers_",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12,10))\nplt.scatter(test_coords[:, 0], test_coords[:, 1], c=preds, s=30, cmap='viridis')\nplt.scatter(centers[:,0], centers[:,1], c='Red', marker=\"s\", s=50);",
"_____no_output_____"
]
],
[
[
"## DBSCAN ",
"_____no_output_____"
],
[
"### Detecting Outliers/Noise",
"_____no_output_____"
]
],
[
[
"coords = crime_somerset_gdf[['Latitude', 'Longitude']]\ncoords[:5]",
"_____no_output_____"
],
[
"# Get labels of each cluster \n_, labels = dbscan(crime_somerset_gdf[['Latitude', 'Longitude']], eps=0.1, min_samples=10)\n",
"_____no_output_____"
],
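[
"# Editor's sketch (not part of the original notebook): a tiny, self-contained example of how\n# sklearn's dbscan labels points -- clusters are numbered 0, 1, 2, ... and noise points get -1.\n# The toy coordinates below are made up purely for illustration.\nimport numpy as np\nfrom sklearn.cluster import dbscan\n\ntoy = np.array([[0.0, 0.0], [0.0, 0.1], [0.1, 0.0], [5.0, 5.0]])\n_, toy_labels = dbscan(toy, eps=0.5, min_samples=2)\nprint(toy_labels)  # expected: [ 0  0  0 -1] -> three clustered points and one noise point",
"_____no_output_____"
],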
[
"# Create a labels dataframe with the index of the dataset\nlabels_df = pd.DataFrame(labels, index=crime_somerset_gdf.index, columns=['cluster'])\nlabels_df.head()",
"_____no_output_____"
],
[
"# Groupby Labels\nlabels_df.groupby('cluster').size()",
"_____no_output_____"
],
[
"# Plot the groupedby labels\nsns.countplot(labels_df.cluster);\nplt.show()",
"_____no_output_____"
],
[
"# Get Noise (Outliers) with label -1 \nnoise = crime_somerset_gdf.loc[labels_df['cluster']==-1, ['Latitude', 'Longitude']]\n\n# Get core with labels 0\ncore = crime_somerset_gdf.loc[labels_df['cluster']== 0, ['Latitude', 'Longitude']]",
"_____no_output_____"
],
[
"# Display scatter plot with noises as stars and core as circle points\n\nfig, ax = plt.subplots(figsize=(12,10))\nax.scatter(noise['Latitude'], noise['Longitude'],marker= '*', s=40, c='blue' )\nax.scatter(core['Latitude'], core['Longitude'], marker= 'o', s=20, c='red')\nplt.savefig('outliers.png');\nplt.show();\n",
"_____no_output_____"
],
[
"noise",
"_____no_output_____"
]
],
[
[
"### Detecting Clusters",
"_____no_output_____"
]
],
[
[
"_, labels = dbscan(crime_somerset_gdf[['Latitude', 'Longitude']], eps=0.01, min_samples=300)\nlabels_df = pd.DataFrame(labels, index=crime_somerset_gdf.index, columns=['cluster'])\nlabels_df.groupby('cluster').size()",
"_____no_output_____"
],
[
"noise = crime_somerset_gdf.loc[labels_df['cluster']==-1, ['Latitude', 'Longitude']]\ncore = crime_somerset_gdf.loc[labels_df['cluster']== 0, ['Latitude', 'Longitude']]\nbp1 = crime_somerset_gdf.loc[labels_df['cluster']== 1, ['Latitude', 'Longitude']]\nbp2 = crime_somerset_gdf.loc[labels_df['cluster']== 2, ['Latitude', 'Longitude']]\nbp3 = crime_somerset_gdf.loc[labels_df['cluster']== 3, ['Latitude', 'Longitude']]\n",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(12,10))\nax.scatter(noise['Latitude'], noise['Longitude'], markers=10, c='gray' )\nax.scatter(core['Latitude'], core['Longitude'], s=100, c='red')\nax.scatter(bp1['Latitude'], bp1['Longitude'], s=50, c='yellow')\nax.scatter(bp2['Latitude'], bp2['Longitude'], s=50, c='green')\nax.scatter(bp3['Latitude'], bp3['Longitude'], s=50, c='blue')\nplt.savefig('cluster_ex1.png');\nplt.show()\n",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(15,12))\nax.scatter(noise['Latitude'], noise['Longitude'],s=1, c='gray' )\nax.scatter(core['Latitude'], core['Longitude'],marker= \"*\", s=10, c='red')\nax.scatter(bp1['Latitude'], bp1['Longitude'], marker = \"v\", s=10, c='yellow')\nax.scatter(bp2['Latitude'], bp2['Longitude'], marker= \"P\", s=10, c='green')\nax.scatter(bp3['Latitude'], bp3['Longitude'], marker= \"d\", s=10, c='blue')\nax.set_xlim(left=50.8, right=51.7)\nax.set_ylim(bottom=-3.5, top=-2.0)\nplt.savefig('cluster_zoomed.png');\nplt.show()\n",
"_____no_output_____"
],
[
"#Creates four polar axes, and accesses them through the returned array\nfig, axes = plt.subplots(2, 2, figsize=(12,10))\naxes[0, 0].scatter(noise['Latitude'], noise['Longitude'],s=0.01, c='gray' )\naxes[0, 0].title.set_text('Noise')\naxes[0, 1].scatter(core['Latitude'], core['Longitude'],marker= \"*\", s=10, c='red')\naxes[0, 1].title.set_text('Core')\naxes[1, 0].scatter(bp1['Latitude'], bp1['Longitude'], marker = \"v\", s=50, c='yellow')\naxes[1, 0].title.set_text('Border Points 1')\naxes[1,1].scatter(bp2['Latitude'], bp2['Longitude'], marker= \"P\", s=50, c='green')\naxes[1, 1].title.set_text('Border Points 2')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Spatial Autocorellation\n\nWe will Polygon data for this section. Let us first get the data from the dropbox URL",
"_____no_output_____"
]
],
[
[
"!wget https://www.dropbox.com/s/k2ynddy79k2r46i/ASC_Beats_2016.zip ",
"_____no_output_____"
],
[
"!unzip ASC_Beats_2016.zip",
"_____no_output_____"
],
[
"boundaries = gpd.read_file('ASC_Beats_2016.shp')\nboundaries.head()",
"_____no_output_____"
],
[
"boundaries.crs, crime_somerset_gdf.crs",
"_____no_output_____"
],
[
"boundaries_4326 = boundaries.to_crs({'init': 'epsg:4326'}) \nfig, ax = plt.subplots(figsize=(12,10))\n\nboundaries_4326.plot(ax=ax)\ncrime_somerset_gdf.plot(ax=ax, markersize=10, color='red')\nplt.savefig('overlayed_map.png')",
"_____no_output_____"
],
[
"# Points in Polygon\ncrimes_with_boundaries = gpd.sjoin(boundaries_4326,crime_somerset_gdf, op='contains' )\ncrimes_with_boundaries.head()",
"_____no_output_____"
],
[
"grouped_crimes = crimes_with_boundaries.groupby('BEAT_CODE').size()\ngrouped_crimes.head()",
"_____no_output_____"
],
[
"df = grouped_crimes.to_frame().reset_index()\ndf.columns = ['BEAT_CODE', 'CrimeCount']\ndf.head()",
"_____no_output_____"
],
[
"final_result = boundaries.merge(df, on='BEAT_CODE')\nfinal_result.head()",
"_____no_output_____"
]
],
[
[
"* Choropleth Map of the Crime Count",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(12,10))\nfinal_result.plot(column='CrimeCount', scheme='Quantiles', k=5, cmap='YlGnBu', legend=True, ax=ax);\nplt.tight_layout()\nax.set_axis_off()\nplt.savefig('choroplethmap.png')\nplt.title('Crimes Choropleth Map ')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### GLobal Spatial Autocorrelation",
"_____no_output_____"
]
],
[
[
"# Create y variable values\ny = final_result['CrimeCount'].values\n\n# Sptial lag\nylag = weights.lag_spatial(wq, y)\nfinal_result['ylag'] = ylag\n\n# Get Weights (Queen)\nwq = Queen.from_dataframe(final_result)\nwq.transform = 'r'",
"_____no_output_____"
],
[
"\nmoran = Moran(y, wq)\nmoran.I",
"_____no_output_____"
],
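[
"# Editor's sketch (not part of the original notebook): recompute Moran's I by hand to show what the\n# statistic measures. It assumes `y`, the row-standardized weights `wq`, and `moran` from the cells\n# above; `weights` comes from the notebook's imports.\nz = y - y.mean()                       # deviations from the mean\nwz = weights.lag_spatial(wq, z)        # spatial lag of the deviations\nI_manual = (len(y) / wq.s0) * (z @ wz) / (z @ z)\nprint(I_manual, moran.I)               # the two values should agree",
"_____no_output_____"
],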
[
"from splot.esda import plot_moran\n\nplot_moran(moran, zstandard=True, figsize=(10,4))\nplt.tight_layout()\nplt.savefig('moronPlot.png')\nplt.show()\n",
"_____no_output_____"
],
[
"moran.p_sim",
"_____no_output_____"
]
],
[
[
"## Visualizing Local Autocorrelation with splot - Hot Spots, Cold Spots and Spatial Outliers",
"_____no_output_____"
]
],
[
[
"# calculate Moran_Local and plot\nmoran_loc = Moran_Local(y, w)\nfig, ax = moran_scatterplot(moran_loc)\nplt.savefig('moron_local.png')\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = moran_scatterplot(moran_loc, p=0.05)\nplt.show()",
"_____no_output_____"
],
[
"lisa_cluster(moran_loc, final_result, p=0.05, figsize = (10,8))\nplt.tight_layout()\nplt.savefig('lisa_clusters.png')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# END",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
cbbce006aa297a08528922ce643eaed4a7aaa8b6
| 144,682 |
ipynb
|
Jupyter Notebook
|
Tests to lection 8. 2018-05-18.ipynb
|
MikhailEpatko/Mashine-Learning
|
108e449deb0199a53b44a4053ae0711b49145b8d
|
[
"Apache-2.0"
] | null | null | null |
Tests to lection 8. 2018-05-18.ipynb
|
MikhailEpatko/Mashine-Learning
|
108e449deb0199a53b44a4053ae0711b49145b8d
|
[
"Apache-2.0"
] | null | null | null |
Tests to lection 8. 2018-05-18.ipynb
|
MikhailEpatko/Mashine-Learning
|
108e449deb0199a53b44a4053ae0711b49145b8d
|
[
"Apache-2.0"
] | null | null | null | 45.785443 | 54,040 | 0.634308 |
[
[
[
"from sklearn.datasets import load_files\nimport os\n",
"_____no_output_____"
],
[
"PATH = '/home/mikhail/Documents/ML/лекции/mlcourse_open-master/data/imdb_reviews'",
"_____no_output_____"
],
[
"!du -hs $PATH",
"246M\t/home/mikhail/Documents/ML/лекции/mlcourse_open-master/data/imdb_reviews\r\n"
],
[
"%%time\ntrain_reviews = load_files(os.path.join(PATH, 'train'))",
"CPU times: user 204 ms, sys: 153 ms, total: 357 ms\nWall time: 360 ms\n"
],
[
"%%time\ntest_reviews = load_files(os.path.join(PATH, 'train'))",
"CPU times: user 202 ms, sys: 124 ms, total: 326 ms\nWall time: 327 ms\n"
],
[
"len(train_reviews.data)",
"_____no_output_____"
],
[
"len(test_reviews.data)",
"_____no_output_____"
],
[
"print(train_reviews.data[2])\nprint('==================================================')\nprint(train_reviews.data[3])",
"b'Everyone plays their part pretty well in this \"little nice movie\". Belushi gets the chance to live part of his life differently, but ends up realizing that what he had was going to be just as good or maybe even better. The movie shows us that we ought to take advantage of the opportunities we have, not the ones we do not or cannot have. If U can get this movie on video for around $10, it\\xc2\\xb4d be an investment!'\n==================================================\nb'There are a lot of highly talented filmmakers/actors in Germany now. None of them are associated with this \"movie\".<br /><br />Why in the world do producers actually invest money in something like this this? You could have made 10 good films with the budget of this garbage! It\\'s not entertaining to have seven grown men running around as dwarfs, pretending to be funny. What IS funny though is that the film\\'s producer (who happens to be the oldest guy of the bunch) is playing the YOUNGEST dwarf.<br /><br />The film is filled with moments that scream for captions saying \"You\\'re supposed to laugh now!\". It\\'s hard to believe that this crap\\'s supposed to be a comedy.<br /><br />Many people actually stood up and left the cinema 30 minutes into the movie. I should have done the same instead of wasting my time...<br /><br />Pain!'\n"
],
[
"# 1 - good\n# 2 - bad\nprint(train_reviews.target[2])\nprint(train_reviews.target[3])",
"1\n0\n"
],
[
"import numpy as np\nimport pandas as pd\nfrom scipy.sparse import csr_matrix",
"_____no_output_____"
],
[
"# создание матрицы, заполненной нулями\na = np.zeros([5,5])",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"# заполняем ненулевые элементы матрицы\na[0,3] = 1\na[4,4] = 6\na[2,2] = 5\na[3,1] = 4\na[3,2] = 2\na[1,1] = 7",
"_____no_output_____"
],
[
"# распечатываем матрицу как дата-фрейм\npd.DataFrame(a)",
"_____no_output_____"
],
[
"# даём названия колонкам\npd.DataFrame(a, columns=['apple', 'land', 'iris', 'shop', 'sun'])",
"_____no_output_____"
],
[
"# создаём разреженную матрицу (без нулей). \n# На практике так не делают, т.к. неразреженная матрица 'а' может не поместиться в памяти\nb = csr_matrix(a)",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"# преобразование разреженной матрицы в обычную/\n# Может не поместиться в памяти\nb.todense()",
"_____no_output_____"
],
[
"# индексы ненулевых элементов (верхнее - индексы строк с нуля, нижнее - столбцов)\nb.nonzero()",
"_____no_output_____"
],
[
"# значения ненулевых элементов\nb.data",
"_____no_output_____"
],
[
"# достаем все слова из текста. Каждое слово - отдельный признак.\nfrom sklearn.feature_extraction.text import CountVectorizer",
"_____no_output_____"
],
[
"# получаем разреженную матрицу слов",
"_____no_output_____"
],
[
"%%time\ncv = CountVectorizer()\nX_train_sparce = cv.fit_transform(train_reviews.data)",
"CPU times: user 3.38 s, sys: 49.2 ms, total: 3.43 s\nWall time: 3.43 s\n"
],
[
"# смотрим получившийся словарь, состоящий из пар 'слово - его индекс'\ncv.vocabulary_",
"_____no_output_____"
],
[
"len(cv.vocabulary_)",
"_____no_output_____"
],
[
"# преобразуем тестовую выборку\n# игнорирует слова, которые появились только в тестовой выборке, а в обучающей их не было\nX_test_sparce = cv.transform(test_reviews.data)",
"_____no_output_____"
],
[
"# количество слов с тренировочной и тестовой выборках одинаковое\nX_train_sparce.shape, X_test_sparce.shape",
"_____no_output_____"
],
[
"# разреженная матрица размером 75000 отзывов на 124255 уникальных слов\n# хранит примерно 10_359_806 элементов (без нулей)\nX_train_sparce",
"_____no_output_____"
],
[
"# количество элементов с нулями - 9_319_125_000. В памяти они не поместятся.\n75000 * 124255",
"_____no_output_____"
],
[
"# Доля ненулевых элементов - 1 промиль\n10359806 / (75000 * 124255)",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"y_train, y_test = train_reviews.target, test_reviews.target",
"_____no_output_____"
],
[
"# смотрим распределение плохих и хороших отзывов в обучающих и тестовых выборках - по 12500. \n# есть также 50000 непомеченных документов для обучения без учителя - упакованы в изначальном архиве\nnp.bincount(y_train), np.bincount(y_test)",
"_____no_output_____"
],
[
"# обучаем логистическую регрессию\nlogit = LogisticRegression(solver='lbfgs', random_state=17, n_jobs=-1)\n# обучаем стохастический градиентный спуск\n# max_iter - максимальное число проходов по всей выборке. Определяется, как 10**6, делённое на размерность выборки\nsgd_logit = SGDClassifier(max_iter=40,random_state=17, n_jobs=-1)",
"_____no_output_____"
],
[
"10**6/X_train_sparce.shape[0]",
"_____no_output_____"
],
[
"%%time\nlogit.fit(X_train_sparce, y_train)\n# Получим:\n# logit.fit(X_train_sparce, y_train)\n# /home/mikhail/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:1228: UserWarning: 'n_jobs' > 1 does not have any effect when 'solver' is set to 'liblinear'. Got 'n_jobs' = -1.\n# \" = {}.\".format(self.n_jobs))\n# Это означает, что оптимизация n_jobs не работает, т.к. по умолчанию стоит solver='liblinear'\n# поменяем на 'lbfgs'\n# Список всех солверов можно посмотреть так: LogisticRegression?\n",
"CPU times: user 147 ms, sys: 68 ms, total: 215 ms\nWall time: 2.53 s\n"
],
[
"%%time\nsgd_logit.fit(X_train_sparce, y_train)",
"CPU times: user 1.04 s, sys: 11.5 ms, total: 1.06 s\nWall time: 461 ms\n"
],
[
"# смотрим аккуратность логистической регрессии\naccuracy_score(y_test, logit.predict(X_test_sparce))",
"_____no_output_____"
],
[
"# смотрим аккуратность стохастического градиентного спуска\naccuracy_score(y_test, sgd_logit.predict(X_test_sparce))",
"_____no_output_____"
]
],
[
[
"#### визуализируем коэффициенты",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"def visualize_coefficients(classifier, feature_names, n_top_features=35):\n # get coefficients with large absolute values \n coef = classifier.coef_.ravel()\n positive_coefficients = np.argsort(coef)[-n_top_features:]\n negative_coefficients = np.argsort(coef)[:n_top_features]\n interesting_coefficients = np.hstack([negative_coefficients, positive_coefficients])\n # plot them\n plt.figure(figsize=(15, 5))\n colors = [\"red\" if c < 0 else \"blue\" for c in coef[interesting_coefficients]]\n plt.bar(np.arange(2 * n_top_features), coef[interesting_coefficients], color=colors)\n feature_names = np.array(feature_names)\n plt.xticks(np.arange(1, 1 + 2 * n_top_features), feature_names[interesting_coefficients], rotation=60, ha=\"right\");",
"_____no_output_____"
],
[
"def plot_grid_scores(grid, param_name):\n plot(grid.param_grid[param_name], grid.cv_results_['mean_train_score'],\n color='green', label='train')\n plot(grid.param_grid[param_name], grid.cv_results_['mean_test_score'],\n color='red', label='test')\n legend();\n ",
"_____no_output_____"
],
[
"visualize_coefficients(logit, cv.get_feature_names())",
"_____no_output_____"
]
],
[
[
"#### для увеличения точности моделей (в т.ч. стохастическо градиентного спуска) нужно добавить биграммы в параметры \nбиграмма - слово, состоящее из двух слов. Пример биграммы: \"хорошая погода\".",
"_____no_output_____"
]
],
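[
[
"# Editor's sketch (not part of the original lecture): a tiny illustration of what unigram + bigram\n# features look like; the two toy sentences are made up.\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ntoy_texts = ['good weather today', 'good movie but bad weather']\ntoy_cv = CountVectorizer(ngram_range=(1, 2))\ntoy_cv.fit(toy_texts)\nprint(toy_cv.get_feature_names())  # unigrams such as 'good' plus bigrams such as 'good weather'",
"_____no_output_____"
]
],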
[
[
"%%time\ncv = CountVectorizer(ngram_range=(1,2))\nX_train_sparce = cv.fit_transform(train_reviews.data)\nX_test_sparce = cv.transform(test_reviews.data)\n",
"CPU times: user 25.3 s, sys: 408 ms, total: 25.7 s\nWall time: 25.7 s\n"
],
[
"len(cv.vocabulary_)",
"_____no_output_____"
],
[
"cv.vocabulary_",
"_____no_output_____"
],
[
"X_train_sparce.shape, X_test_sparce.shape",
"_____no_output_____"
],
[
"X_train_sparce",
"_____no_output_____"
],
[
"sgd_logit_2 = SGDClassifier(max_iter=40,random_state=17, n_jobs=-1)",
"_____no_output_____"
],
[
"%%time\nsgd_logit_2.fit(X_train_sparce, y_train)",
"CPU times: user 3.45 s, sys: 27.9 ms, total: 3.47 s\nWall time: 2.86 s\n"
],
[
"# точность повысилсь с 0,91 до 0,99\naccuracy_score(y_test, sgd_logit_2.predict(X_test_sparce))",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbce032ec2c5891a0300006524df0168b3e5d02
| 189,523 |
ipynb
|
Jupyter Notebook
|
Jason Robinson_DS_213_assignment.ipynb
|
techthumb1/DS-Unit-2-Linear-Models
|
3984ec64894afa380347a19a3e68d011d68cc5f5
|
[
"MIT"
] | null | null | null |
Jason Robinson_DS_213_assignment.ipynb
|
techthumb1/DS-Unit-2-Linear-Models
|
3984ec64894afa380347a19a3e68d011d68cc5f5
|
[
"MIT"
] | null | null | null |
Jason Robinson_DS_213_assignment.ipynb
|
techthumb1/DS-Unit-2-Linear-Models
|
3984ec64894afa380347a19a3e68d011d68cc5f5
|
[
"MIT"
] | null | null | null | 50.71528 | 9,192 | 0.49194 |
[
[
[
"Lambda School Data Science\n\n*Unit 2, Sprint 1, Module 3*\n\n---",
"_____no_output_____"
],
[
"# Ridge Regression\n\n## Assignment\n\nWe're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.\n\nBut not just for condos in Tribeca...\n\n- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.\n- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.\n- [ ] Do one-hot encoding of categorical features.\n- [ ] Do feature selection with `SelectKBest`.\n- [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set)\n- [ ] Get mean absolute error for the test set.\n- [ ] As always, commit your notebook to your fork of the GitHub repo.\n\nThe [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.\n\n\n## Stretch Goals\n\nDon't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.\n\n- [ ] Add your own stretch goal(s) !\n- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥\n- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).\n- [ ] Learn more about feature selection:\n - [\"Permutation importance\"](https://www.kaggle.com/dansbecker/permutation-importance)\n - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)\n - [mlxtend](http://rasbt.github.io/mlxtend/) library\n - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)\n - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.\n- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.\n- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.\n- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).",
"_____no_output_____"
]
],
[
[
"%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'\n \n# Ignore this Numpy warning when using Plotly Express:\n# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\nimport warnings\nwarnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')",
"_____no_output_____"
],
[
"import pandas as pd\nimport pandas_profiling\n\n# Read New York City property sales data\ndf = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')\n\n# Change column names: replace spaces with underscores\ndf.columns = [col.replace(' ', '_') for col in df]\n\n# SALE_PRICE was read as strings.\n# Remove symbols, convert to integer\ndf['SALE_PRICE'] = (\n df['SALE_PRICE']\n .str.replace('$','')\n .str.replace('-','')\n .str.replace(',','')\n .astype(int)\n)",
"_____no_output_____"
],
[
"# BOROUGH is a numeric column, but arguably should be a categorical feature,\n# so convert it from a number to a string\ndf['BOROUGH'] = df['BOROUGH'].astype(str)",
"_____no_output_____"
],
[
"# Reduce cardinality for NEIGHBORHOOD feature\n\n# Get a list of the top 10 neighborhoods\ntop10 = df['NEIGHBORHOOD'].value_counts()[:10].index\n\n# At locations where the neighborhood is NOT in the top 10, \n# replace the neighborhood with 'OTHER'\ndf.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"# Using a subset of the dataset restricting to building class category equivalent \n#to one family dwellings, with sale price between one hundred thousand and two million.\n\ntrained1 = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') & \n (df['SALE_PRICE'] > 100000) & (df['SALE_PRICE'] > 100000)]\ntrained = trained1.drop(['EASE-MENT'], axis=1)\ntrained",
"_____no_output_____"
],
[
"train = trained[trained['SALE_DATE'] < '03/31/2019']\ntrain",
"_____no_output_____"
],
[
"test = trained[trained['SALE_DATE'] > '03/31/2019']\ntest",
"_____no_output_____"
],
[
"train.shape, test.shape",
"_____no_output_____"
],
[
"train['SALE_PRICE'].mean()",
"_____no_output_____"
],
[
"train.describe()",
"_____no_output_____"
],
[
"train.describe(exclude='number')",
"_____no_output_____"
],
[
"train['TAX_CLASS_AT_PRESENT'].value_counts()",
"_____no_output_____"
],
[
"train.groupby('TAX_CLASS_AT_PRESENT')['SALE_PRICE'].mean()",
"_____no_output_____"
],
[
"train['TAX_CLASS_AT_PRESENT'].unique()",
"_____no_output_____"
],
[
"# Remove features with high cardinality.\ntarget = 'SALE_PRICE'\nhigh_cardinality = ['APARTMENT_NUMBER', 'NEIGHBORHOOD', 'ADDRESS', 'LAND_SQUARE_FEET', 'SALE_DATE']\nfeatures = train.columns.drop([target] + high_cardinality)\nfeatures",
"_____no_output_____"
],
[
"# To get a view of the data before encoding.\nX_train = train[features]\ny_train = train[target]\nX_test = test[features]\ny_test = test[target]\nX_train.head()",
"_____no_output_____"
],
[
"# Now we want to use one hot encoding on our categorical data.\n# We will need to import category encoders.\nimport category_encoders as ce\n\nencoder = ce.one_hot.OneHotEncoder(use_cat_names=True)\nX_train_enc = encoder.fit_transform(X_train)\nX_test_enc = encoder.transform(X_test)\nX_test_enc.head()",
"_____no_output_____"
],
[
"X_test_enc.columns",
"_____no_output_____"
],
[
"X_test_enc.isnull().sum()",
"_____no_output_____"
],
[
"def engineer_features(X):\n \n X = X.copy()\n \n feature_cols = ['BOROUGH_3', 'BOROUGH_4', 'BOROUGH_2', 'BOROUGH_5', 'BOROUGH_1',\n 'BUILDING_CLASS_CATEGORY_01 ONE FAMILY DWELLINGS',\n 'TAX_CLASS_AT_PRESENT_1', 'TAX_CLASS_AT_PRESENT_1D', 'BLOCK', 'LOT']\n \n X['FEATURE_COUNT'] = X[feature_cols].sum(axis=1)\n X['TAX_CLASS'] = (X['TAX_CLASS_AT_PRESENT_1']==1) | (X['TAX_CLASS_AT_PRESENT_1D']==1)\n \n return X\n\nX_train_eng = engineer_features(X_train_enc)\nX_test_eng = engineer_features(X_test_enc)",
"_____no_output_____"
],
[
"features = X_train_eng.columns\nn = len(features)\nn",
"_____no_output_____"
],
[
"from math import factorial\n\ndef n_choose_k(n, k):\n return factorial(n)/(factorial(k)*factorial(n-k))\n\ncombinations = sum(n_choose_k(n,k) for k in range(1,n+1))\nprint(f'{combinations:,.0f}')",
"35,184,372,088,831\n"
],
[
"\nfrom sklearn.feature_selection import SelectKBest, f_regression\n\nselector = SelectKBest(score_func = f_regression, k=25)\nX_train_kbest = selector.fit_transform(X_train_eng, y_train)\nX_test_kbest = selector.transform(X_test_eng)",
"C:\\Users\\Aarons\\Anaconda3\\envs\\unit2\\lib\\site-packages\\sklearn\\feature_selection\\_univariate_selection.py:299: RuntimeWarning: divide by zero encountered in true_divide\n corr /= X_norms\nC:\\Users\\Aarons\\Anaconda3\\envs\\unit2\\lib\\site-packages\\sklearn\\feature_selection\\_univariate_selection.py:304: RuntimeWarning: invalid value encountered in true_divide\n F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom\nC:\\Users\\Aarons\\Anaconda3\\envs\\unit2\\lib\\site-packages\\scipy\\stats\\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in greater\n return (a < x) & (x < b)\nC:\\Users\\Aarons\\Anaconda3\\envs\\unit2\\lib\\site-packages\\scipy\\stats\\_distn_infrastructure.py:903: RuntimeWarning: invalid value encountered in less\n return (a < x) & (x < b)\nC:\\Users\\Aarons\\Anaconda3\\envs\\unit2\\lib\\site-packages\\scipy\\stats\\_distn_infrastructure.py:1912: RuntimeWarning: invalid value encountered in less_equal\n cond2 = cond0 & (x <= _a)\n"
],
[
"mask = selector.get_support()\nmask",
"_____no_output_____"
],
[
"X_train_eng.columns[mask]",
"_____no_output_____"
],
[
"X_train_eng.columns[~mask]",
"_____no_output_____"
],
[
"from sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error\nmae_list =[]\nfor k in range(1, X_train_eng.shape[1]+1):\n print(f'{k} features')\n selector = SelectKBest(score_func = f_regression, k=15)\n X_train_kbest = selector.fit_transform(X_train_eng, y_train)\n X_test_kbest = selector.transform(X_test_eng)\n model = LinearRegression()\n model.fit(X_train_kbest, y_train)\n y_pred = model.predict(X_test_kbest)\n mae = mean_absolute_error(y_pred, y_test)\n print(f'MAE on test set: {mae:.2f}')\n mae_list.append(mae)",
"1 features\nMAE on test set: 246494.21\n2 features\nMAE on test set: 246494.21\n3 features\nMAE on test set: 246494.21\n4 features\nMAE on test set: 246494.21\n5 features\nMAE on test set: 246494.21\n6 features\nMAE on test set: 246494.21"
],
[
"import seaborn as sns\n\nsns.scatterplot(range(1, X_train_eng.shape[1]+ 1), mae_list);",
"_____no_output_____"
],
[
"from IPython.display import display, HTML\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Ridge\n%matplotlib inline\n\nfor alpha in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]:\n \n feature = 'TAX_CLASS_AT_PRESENT_1'\n display(HTML(f'Ridge Regression, with alpha={alpha}'))\n model = Ridge(alpha=alpha, normalize=True)\n model.fit(X_train_eng[[features]], y_train)\n \n y_pred = model.predict(X_test_eng[[features]])\n mae = mean_absolute_error(y_test, y_pred)\n display(HTML(f'Test Mean Absolute Error: ${mae:,.0f}'))\n \n train.plot.scatter(features, target, alpha=0.05)\n plt.plot(X_test_eng[features], y_pred)\n plt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbcffd22ded8492921666e45054d750cc98ba27
| 290,627 |
ipynb
|
Jupyter Notebook
|
35_morphological_analysis/backup/classification_for_ashwin.ipynb
|
jingpengw/realneuralnetworks-notebook
|
7364826feea3d71cc6b8e2ec3002f41af5411200
|
[
"Apache-2.0"
] | 1 |
2022-03-14T16:19:16.000Z
|
2022-03-14T16:19:16.000Z
|
35_morphological_analysis/backup/classification_for_ashwin.ipynb
|
jingpengw/realneuralnetworks-notebook
|
7364826feea3d71cc6b8e2ec3002f41af5411200
|
[
"Apache-2.0"
] | null | null | null |
35_morphological_analysis/backup/classification_for_ashwin.ipynb
|
jingpengw/realneuralnetworks-notebook
|
7364826feea3d71cc6b8e2ec3002f41af5411200
|
[
"Apache-2.0"
] | null | null | null | 179.178175 | 82,502 | 0.847877 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
cbbd081888cee72ceed3d147f4904f1851a501ac
| 23,875 |
ipynb
|
Jupyter Notebook
|
examples/04_model_select_and_optimize/nni_ncf.ipynb
|
nixent/recommenders
|
588dafe371762e8b35779c7a608a577730b73001
|
[
"MIT"
] | 3 |
2021-06-22T02:12:38.000Z
|
2021-11-25T02:39:52.000Z
|
src/recommendationservice/examples/04_model_select_and_optimize/nni_ncf.ipynb
|
IMT-Atlantique-FIL-2020-2023/microservices-demo
|
289801d397f6c5544a347af8c48176eee7492e8b
|
[
"Apache-2.0"
] | null | null | null |
src/recommendationservice/examples/04_model_select_and_optimize/nni_ncf.ipynb
|
IMT-Atlantique-FIL-2020-2023/microservices-demo
|
289801d397f6c5544a347af8c48176eee7492e8b
|
[
"Apache-2.0"
] | 2 |
2021-11-14T13:36:48.000Z
|
2022-03-02T18:09:20.000Z
| 33.865248 | 694 | 0.564398 |
[
[
[
"<i>Copyright (c) Microsoft Corporation. All rights reserved.<br>\nLicensed under the MIT License.</i>\n<br>\n# Model Comparison for NCF Using the Neural Network Intelligence Toolkit",
"_____no_output_____"
],
[
"This notebook shows how to use the **[Neural Network Intelligence](https://nni.readthedocs.io/en/latest/) toolkit (NNI)** for tuning hyperparameters for the Neural Collaborative Filtering Model.\n\nTo learn about each tuner NNI offers you can read about it [here](https://nni.readthedocs.io/en/latest/Tuner/BuiltinTuner.html).\n\nNNI is a toolkit to help users design and tune machine learning models (e.g., hyperparameters), neural network architectures, or complex system’s parameters, in an efficient and automatic way. NNI has several appealing properties: ease of use, scalability, flexibility and efficiency. NNI can be executed in a distributed way on a local machine, a remote server, or a large scale training platform such as OpenPAI or Kubernetes. \n\nIn this notebook, we can see how NNI works with two different model types and the differences between their hyperparameter search spaces, yaml config file, and training scripts.\n\n- [NCF Training Script](../../reco_utils/nni/ncf_training.py)\n\nFor this notebook we use a _local machine_ as the training platform (this can be any machine running the `reco_base` conda environment). In this case, NNI uses the available processors of the machine to parallelize the trials, subject to the value of `trialConcurrency` we specify in the configuration. Our runs and the results we report were obtained on a [Standard_D16_v3 virtual machine](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-general#dv3-series-1) with 16 vcpus and 64 GB memory.",
"_____no_output_____"
],
[
"### 1. Global Settings",
"_____no_output_____"
]
],
[
[
"import sys\nimport json\nimport os\nimport surprise\nimport papermill as pm\nimport pandas as pd\nimport shutil\nimport subprocess\nimport yaml\nimport pkg_resources\nfrom tempfile import TemporaryDirectory\nimport tensorflow as tf\ntf.get_logger().setLevel('ERROR') # only show error messages\n\nimport reco_utils\nfrom reco_utils.common.timer import Timer\nfrom reco_utils.dataset import movielens\nfrom reco_utils.dataset.python_splitters import python_chrono_split\nfrom reco_utils.evaluation.python_evaluation import rmse, precision_at_k, ndcg_at_k\nfrom reco_utils.tuning.nni.nni_utils import (\n check_experiment_status, \n check_stopped, \n check_metrics_written, \n get_trials,\n stop_nni, start_nni\n)\nfrom reco_utils.recommender.ncf.dataset import Dataset as NCFDataset\nfrom reco_utils.recommender.ncf.ncf_singlenode import NCF\nfrom reco_utils.tuning.nni.ncf_utils import compute_test_results, combine_metrics_dicts\n\nprint(\"System version: {}\".format(sys.version))\nprint(\"Tensorflow version: {}\".format(tf.__version__))\nprint(\"NNI version: {}\".format(pkg_resources.get_distribution(\"nni\").version))\n\ntmp_dir = TemporaryDirectory()\n\n%load_ext autoreload\n%autoreload 2",
"System version: 3.6.10 |Anaconda, Inc.| (default, Mar 25 2020, 23:51:54) \n[GCC 7.3.0]\nTensorflow version: 1.15.2\nNNI version: 1.5\n"
]
],
[
[
"### 2. Prepare Dataset\n1. Download data and split into training, validation and test sets\n2. Store the data sets to a local directory.",
"_____no_output_____"
]
],
[
[
"# Parameters used by papermill\n# Select Movielens data size: 100k, 1m\nMOVIELENS_DATA_SIZE = '100k'\nSURPRISE_READER = 'ml-100k'\nTMP_DIR = tmp_dir.name\nNUM_EPOCHS = 10\nMAX_TRIAL_NUM = 16\nDEFAULT_SEED = 42\n\n# time (in seconds) to wait for each tuning experiment to complete\nWAITING_TIME = 20\nMAX_RETRIES = MAX_TRIAL_NUM*4 # it is recommended to have MAX_RETRIES>=4*MAX_TRIAL_NUM\n",
"_____no_output_____"
],
[
"# Note: The NCF model can incorporate\ndf = movielens.load_pandas_df(\n size=MOVIELENS_DATA_SIZE,\n header=[\"userID\", \"itemID\", \"rating\", \"timestamp\"]\n)\n\ndf.head()",
"100%|██████████| 4.81k/4.81k [00:00<00:00, 8.54kKB/s]\n"
],
[
"train, validation, test = python_chrono_split(df, [0.7, 0.15, 0.15])\ntrain = train.drop(['timestamp'], axis=1)\nvalidation = validation.drop(['timestamp'], axis=1)\ntest = test.drop(['timestamp'], axis=1)",
"_____no_output_____"
],
[
"LOG_DIR = os.path.join(TMP_DIR, \"experiments\")\nos.makedirs(LOG_DIR, exist_ok=True)\n\nDATA_DIR = os.path.join(TMP_DIR, \"data\") \nos.makedirs(DATA_DIR, exist_ok=True)\n\nTRAIN_FILE_NAME = \"movielens_\" + MOVIELENS_DATA_SIZE + \"_train.pkl\"\ntrain.to_pickle(os.path.join(DATA_DIR, TRAIN_FILE_NAME))\n\nVAL_FILE_NAME = \"movielens_\" + MOVIELENS_DATA_SIZE + \"_val.pkl\"\nvalidation.to_pickle(os.path.join(DATA_DIR, VAL_FILE_NAME))\n\nTEST_FILE_NAME = \"movielens_\" + MOVIELENS_DATA_SIZE + \"_test.pkl\"\ntest.to_pickle(os.path.join(DATA_DIR, TEST_FILE_NAME))",
"_____no_output_____"
]
],
[
[
"### 3. Prepare Hyperparameter Tuning ",
"_____no_output_____"
],
[
"To run an experiment on NNI we require a general training script for our model of choice.\nA general framework for a training script utilizes the following components\n1. Argument Parse for the fixed parameters (dataset location, metrics to use)\n2. Data preprocessing steps specific to the model\n3. Fitting the model on the train set\n4. Evaluating the model on the validation set on each metric (ranking and rating)\n5. Save metrics and model\n\nTo utilize NNI we also require a hypeyparameter search space. Only the hyperparameters we want to tune are required in the dictionary. NNI supports different methods of [hyperparameter sampling](https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).",
"_____no_output_____"
],
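[
"# Editor's sketch (illustrative only -- this is NOT the actual reco_utils/tuning/nni/ncf_training.py\n# used by this notebook). Only nni.get_next_parameter() and nni.report_final_result() are real NNI\n# trial APIs; evaluate_model is a made-up stand-in for steps 1-5 above.\nimport nni\n\ndef evaluate_model(n_factors, learning_rate):\n    # placeholder for: parse fixed args, load data, fit the model, score it on the validation set\n    return 0.0\n\nif __name__ == '__main__':\n    params = nni.get_next_parameter()        # hyperparameters sampled by the tuner\n    metric = evaluate_model(params.get('n_factors', 4), params.get('learning_rate', 1e-3))\n    nni.report_final_result(metric)          # report the primary metric back to the tuner",
"_____no_output_____"
],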
[
"The `script_params` below are the parameters of the training script that are fixed (unlike `hyper_params` which are tuned).",
"_____no_output_____"
]
],
[
[
"PRIMARY_METRIC = \"precision_at_k\"\nRATING_METRICS = [\"rmse\"]\nRANKING_METRICS = [\"precision_at_k\", \"ndcg_at_k\"] \nUSERCOL = \"userID\"\nITEMCOL = \"itemID\"\nREMOVE_SEEN = True\nK = 10\nRANDOM_STATE = 42\nVERBOSE = True\nBIASED = True\n\nscript_params = \" \".join([\n \"--datastore\", DATA_DIR,\n \"--train-datapath\", TRAIN_FILE_NAME,\n \"--validation-datapath\", VAL_FILE_NAME,\n \"--surprise-reader\", SURPRISE_READER,\n \"--rating-metrics\", \" \".join(RATING_METRICS),\n \"--ranking-metrics\", \" \".join(RANKING_METRICS),\n \"--usercol\", USERCOL,\n \"--itemcol\", ITEMCOL,\n \"--k\", str(K),\n \"--random-state\", str(RANDOM_STATE),\n \"--epochs\", str(NUM_EPOCHS),\n \"--primary-metric\", PRIMARY_METRIC\n])\n\nif BIASED:\n script_params += \" --biased\"\nif VERBOSE:\n script_params += \" --verbose\"\nif REMOVE_SEEN:\n script_params += \" --remove-seen\"",
"_____no_output_____"
]
],
[
[
"We specify the search space for the NCF hyperparameters",
"_____no_output_____"
]
],
[
[
"ncf_hyper_params = {\n 'n_factors': {\"_type\": \"choice\", \"_value\": [2, 4, 8, 12]},\n 'learning_rate': {\"_type\": \"uniform\", \"_value\": [1e-3, 1e-2]},\n}",
"_____no_output_____"
],
[
"with open(os.path.join(TMP_DIR, 'search_space_ncf.json'), 'w') as fp:\n json.dump(ncf_hyper_params, fp)",
"_____no_output_____"
]
],
[
[
"This config file follows the guidelines provided in [NNI Experiment Config instructions](https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/ExperimentConfig.md).\n\nThe options to pay attention to are\n- The \"searchSpacePath\" which contains the space of hyperparameters we wanted to tune defined above\n- The \"tuner\" which specifies the hyperparameter tuning algorithm that will sample from our search space and optimize our model",
"_____no_output_____"
]
],
[
[
"config = {\n \"authorName\": \"default\",\n \"experimentName\": \"tensorflow_ncf\",\n \"trialConcurrency\": 8,\n \"maxExecDuration\": \"1h\",\n \"maxTrialNum\": MAX_TRIAL_NUM,\n \"trainingServicePlatform\": \"local\",\n # The path to Search Space\n \"searchSpacePath\": \"search_space_ncf.json\",\n \"useAnnotation\": False,\n \"logDir\": LOG_DIR,\n \"tuner\": {\n \"builtinTunerName\": \"TPE\",\n \"classArgs\": {\n #choice: maximize, minimize\n \"optimize_mode\": \"maximize\"\n }\n },\n # The path and the running command of trial\n \"trial\": {\n \"command\": f\"{sys.executable} ncf_training.py {script_params}\",\n \"codeDir\": os.path.join(os.path.split(os.path.abspath(reco_utils.__file__))[0], \"tuning\", \"nni\"),\n \"gpuNum\": 0\n }\n}\n \nwith open(os.path.join(TMP_DIR, \"config_ncf.yml\"), \"w\") as fp:\n fp.write(yaml.dump(config, default_flow_style=False))",
"_____no_output_____"
]
],
[
[
"### 4. Execute NNI Trials\n\nThe conda environment comes with NNI installed, which includes the command line tool `nnictl` for controlling and getting information about NNI experiments. <br>\nTo start the NNI tuning trials from the command line, execute the following command: <br>\n`nnictl create --config <path of config.yml>` <br>\n\n\nThe `start_nni` function will run the `nnictl create` command. To find the URL for an active experiment you can run `nnictl webui url` on your terminal.\n\nIn this notebook the 16 NCF models are trained concurrently in a single experiment with batches of 8. While NNI can run two separate experiments simultaneously by adding the `--port <port_num>` flag to `nnictl create`, the total training time will probably be the same as running the batches sequentially since these are CPU bound processes.",
"_____no_output_____"
]
],
[
[
"stop_nni()\nconfig_path_ncf = os.path.join(TMP_DIR, 'config_ncf.yml')\nwith Timer() as time_ncf:\n start_nni(config_path_ncf, wait=WAITING_TIME, max_retries=MAX_RETRIES)",
"_____no_output_____"
],
[
"check_metrics_written(wait=WAITING_TIME, max_retries=MAX_RETRIES)\ntrials_ncf, best_metrics_ncf, best_params_ncf, best_trial_path_ncf = get_trials('maximize')",
"_____no_output_____"
],
[
"best_metrics_ncf",
"_____no_output_____"
],
[
"best_params_ncf",
"_____no_output_____"
]
],
[
[
"## 5. Baseline Model\n\nAlthough we hope that the additional effort of utilizing an AutoML framework like NNI for hyperparameter tuning will lead to better results, we should also draw comparisons using our baseline model (our model trained with its default hyperparameters). This allows us to precisely understand what performance benefits NNI is or isn't providing.",
"_____no_output_____"
]
],
[
[
"data = NCFDataset(train, validation, seed=DEFAULT_SEED)\nmodel = NCF(\n n_users=data.n_users, \n n_items=data.n_items,\n model_type=\"NeuMF\",\n n_factors=4,\n layer_sizes=[16,8,4],\n n_epochs=NUM_EPOCHS,\n learning_rate=1e-3, \n verbose=True,\n seed=DEFAULT_SEED\n)\nmodel.fit(data)",
"_____no_output_____"
],
[
"test_results = compute_test_results(model, train, validation, RATING_METRICS, RANKING_METRICS)\ntest_results",
"_____no_output_____"
]
],
[
[
"### 5. Show Results\n\nThe metrics for each model type is reported on the validation set. At this point we can compare the metrics for each model and select the one with the best score on the primary metric(s) of interest.",
"_____no_output_____"
]
],
[
[
"test_results['name'] = 'ncf_baseline'\nbest_metrics_ncf['name'] = 'ncf_tuned'\ncombine_metrics_dicts(test_results, best_metrics_ncf)",
"_____no_output_____"
]
],
[
[
"Based on the above metrics, we determine that NNI has identified a set of hyperparameters that does demonstrate an improvement on our metrics of interest. In this example, it turned out that an `n_factors` of 12 contributed to a better performance than an `n_factors` of 4. While the difference in `precision_at_k` and `ndcg_at_k` is small, NNI has helped us determine that a slightly larger embedding dimension for NCF may be useful for the movielens dataset.",
"_____no_output_____"
]
],
[
[
"# Stop the NNI experiment \nstop_nni()",
"_____no_output_____"
],
[
"tmp_dir.cleanup()",
"_____no_output_____"
]
],
[
[
"### 7. Concluding Remarks\n\nIn this notebook we showed how to use the NNI framework on different models. By inspection of the training scripts, the differences between the two should help you identify what components would need to be modified to run another model with NNI.\n\nIn practice, an AutoML framework like NNI is just a tool to help you explore a large space of hyperparameters quickly with a pre-described level of randomization. It is recommended that in addition to using NNI one trains baseline models using typical hyperparamter choices (learning rate of 0.005, 0.001 or regularization rates of 0.05, 0.01, etc.) to draw more meaningful comparisons between model performances. This may help determine if a model is overfitting from the tuner or if there is a statistically significant improvement.\n\nAnother thing to note is the added computational cost required to train models using an AutoML framework. In this case, it takes about 6 minutes to train each of the models on a [Standard_NC6 VM](https://docs.microsoft.com/en-us/azure/virtual-machines/nc-series). With this in mind, while NNI can easily train hundreds of models over all hyperparameters for a model, in practice it may be beneficial to choose a subset of the hyperparameters that are deemed most important and to tune those. Too small of a hyperparameter search space may restrict our exploration, but too large may also lead to random noise in the data being exploited by a specific combination of hyperparameters. \n\nFor examples of scaling larger tuning workloads on clusters of machines, see [the notebooks](./README.md) that employ the [Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters). ",
"_____no_output_____"
],
[
"### 8. References\n\nRecommenders Repo References\n* [NCF deep-dive notebook](../02_model/ncf_deep_dive.ipynb)\n* [SVD NNI notebook (uses more tuners available)](./nni_surprise_svd.ipynb)\n\nExternal References\n* [NCF Paper](https://arxiv.org/abs/1708.05031) \n* [NNI Docs | Neural Network Intelligence toolkit](https://github.com/Microsoft/nni)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
cbbd1a4bbf6b26ba7171104762480ac2bd87113f
| 5,892 |
ipynb
|
Jupyter Notebook
|
jupyter/load_mxnet_model.ipynb
|
raghav-deepsource/djl
|
8d774578a51b298d2ddeb1a898ddd5a157b7f0bd
|
[
"Apache-2.0"
] | 1 |
2020-11-25T06:01:52.000Z
|
2020-11-25T06:01:52.000Z
|
jupyter/load_mxnet_model.ipynb
|
wulin-challenge/djl
|
5dd343ccc03a75322efcd441b6f5234339bd95f3
|
[
"Apache-2.0"
] | null | null | null |
jupyter/load_mxnet_model.ipynb
|
wulin-challenge/djl
|
5dd343ccc03a75322efcd441b6f5234339bd95f3
|
[
"Apache-2.0"
] | null | null | null | 29.908629 | 207 | 0.596572 |
[
[
[
"# Load MXNet model\n\nIn this tutorial, you learn how to load an existing MXNet model and use it to run a prediction task.\n\n\n## Preparation\n\nThis tutorial requires the installation of Java Kernel. For more information on installing the Java Kernel, see the [README](https://github.com/awslabs/djl/blob/master/jupyter/README.md).",
"_____no_output_____"
]
],
[
[
"// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/\n\n%maven ai.djl:api:0.8.0\n%maven ai.djl:model-zoo:0.8.0\n%maven ai.djl.mxnet:mxnet-engine:0.8.0\n%maven ai.djl.mxnet:mxnet-model-zoo:0.8.0\n%maven org.slf4j:slf4j-api:1.7.26\n%maven org.slf4j:slf4j-simple:1.7.26\n%maven net.java.dev.jna:jna:5.3.0\n \n// See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md\n// for more MXNet library selection options\n%maven ai.djl.mxnet:mxnet-native-auto:1.7.0-backport",
"_____no_output_____"
],
[
"import java.awt.image.*;\nimport java.nio.file.*;\nimport ai.djl.*;\nimport ai.djl.inference.*;\nimport ai.djl.ndarray.*;\nimport ai.djl.modality.*;\nimport ai.djl.modality.cv.*;\nimport ai.djl.modality.cv.util.*;\nimport ai.djl.modality.cv.transform.*;\nimport ai.djl.modality.cv.translator.*;\nimport ai.djl.translate.*;\nimport ai.djl.training.util.*;\nimport ai.djl.util.*;",
"_____no_output_____"
]
],
[
[
"## Step 1: Prepare your MXNet model\n\nThis tutorial assumes that you have a MXNet model trained using Python. A MXNet symbolic model usually contains the following files:\n* Symbol file: {MODEL_NAME}-symbol.json - a json file that contains network information about the model\n* Parameters file: {MODEL_NAME}-{EPOCH}.params - a binary file that stores the parameter weight and bias\n* Synset file: synset.txt - an optional text file that stores classification classes labels\n\nThis tutorial uses a pre-trained MXNet `resnet18_v1` model.",
"_____no_output_____"
],
[
"We use `DownloadUtils` for downloading files from internet.",
"_____no_output_____"
]
],
[
[
"DownloadUtils.download(\"https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-symbol.json\", \"build/resnet/resnet18_v1-symbol.json\", new ProgressBar());\nDownloadUtils.download(\"https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-0000.params.gz\", \"build/resnet/resnet18_v1-0000.params\", new ProgressBar());\nDownloadUtils.download(\"https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/synset.txt\", \"build/resnet/synset.txt\", new ProgressBar());\n",
"_____no_output_____"
]
],
[
[
"## Step 2: Load your model",
"_____no_output_____"
]
],
[
[
"Path modelDir = Paths.get(\"build/resnet\");\nModel model = Model.newInstance(\"resnet\");\nmodel.load(modelDir, \"resnet18_v1\");",
"_____no_output_____"
]
],
[
[
"## Step 3: Create a `Translator`",
"_____no_output_____"
]
],
[
[
"Pipeline pipeline = new Pipeline();\npipeline.add(new CenterCrop()).add(new Resize(224, 224)).add(new ToTensor());\nTranslator<Image, Classifications> translator = ImageClassificationTranslator.builder()\n .setPipeline(pipeline)\n .optSynsetArtifactName(\"synset.txt\")\n .optApplySoftmax(true)\n .build();",
"_____no_output_____"
]
],
[
[
"## Step 4: Load image for classification",
"_____no_output_____"
]
],
[
[
"var img = ImageFactory.getInstance().fromUrl(\"https://resources.djl.ai/images/kitten.jpg\");\nimg.getWrappedImage()",
"_____no_output_____"
]
],
[
[
"## Step 5: Run inference",
"_____no_output_____"
]
],
[
[
"Predictor<Image, Classifications> predictor = model.newPredictor(translator);\nClassifications classifications = predictor.predict(img);\n\nclassifications",
"_____no_output_____"
]
],
[
[
"## Summary\n\nNow, you can load any MXNet symbolic model and run inference.\n\nYou might also want to check out [load_pytorch_model.ipynb](https://github.com/awslabs/djl/blob/master/jupyter/load_pytorch_model.ipynb) which demonstrates loading a local model using the ModelZoo API.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cbbd1c43a4bb4c492aafe72c3585b407b5813ef4
| 421,540 |
ipynb
|
Jupyter Notebook
|
Scikit-learn.ipynb
|
TaehoLi/Python-ML-Second-Edition
|
7b6cf7884f5119de4d4f78c2e8b30be4327ae0da
|
[
"MIT"
] | 1 |
2019-09-20T13:29:28.000Z
|
2019-09-20T13:29:28.000Z
|
Scikit-learn.ipynb
|
TaehoLi/Python-ML-Second-Edition
|
7b6cf7884f5119de4d4f78c2e8b30be4327ae0da
|
[
"MIT"
] | null | null | null |
Scikit-learn.ipynb
|
TaehoLi/Python-ML-Second-Edition
|
7b6cf7884f5119de4d4f78c2e8b30be4327ae0da
|
[
"MIT"
] | null | null | null | 319.348485 | 38,748 | 0.92692 |
[
[
[
"# 3장. 사이킷런을 타고 떠나는 머신 러닝 분류 모델 투어",
"_____no_output_____"
],
[
"**아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.**\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/rickiepark/python-machine-learning-book-2nd-edition/blob/master/code/ch03/ch03.ipynb\"><img src=\"https://jupyter.org/assets/main-logo.svg\" width=\"28\" />주피터 노트북 뷰어로 보기</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/rickiepark/python-machine-learning-book-2nd-edition/blob/master/code/ch03/ch03.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />구글 코랩(Colab)에서 실행하기</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"`watermark`는 주피터 노트북에 사용하는 파이썬 패키지를 출력하기 위한 유틸리티입니다. `watermark` 패키지를 설치하려면 다음 셀의 주석을 제거한 뒤 실행하세요.",
"_____no_output_____"
]
],
[
[
"#!pip install watermark",
"_____no_output_____"
],
[
"%load_ext watermark\n%watermark -u -d -p numpy,pandas,matplotlib,sklearn",
"last updated: 2019-06-06 \n\nnumpy 1.16.4\npandas 0.24.2\nmatplotlib 3.1.0\nsklearn 0.21.2\n"
]
],
[
[
"# 사이킷런 첫걸음",
"_____no_output_____"
],
[
"사이킷런에서 붓꽃 데이터셋을 적재합니다. 세 번째 열은 꽃잎의 길이이고 네 번째 열은 꽃잎의 너비입니다. 클래스는 이미 정수 레이블로 변환되어 있습니다. 0=Iris-Setosa, 1=Iris-Versicolor, 2=Iris-Virginica 입니다.",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets\nimport numpy as np\n\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\n\nprint('클래스 레이블:', np.unique(y))",
"클래스 레이블: [0 1 2]\n"
]
],
[
[
"70%는 훈련 데이터 30%는 테스트 데이터로 분할합니다:",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=1, stratify=y)",
"_____no_output_____"
],
[
"print('y의 레이블 카운트:', np.bincount(y))\nprint('y_train의 레이블 카운트:', np.bincount(y_train))\nprint('y_test의 레이블 카운트:', np.bincount(y_test))",
"y의 레이블 카운트: [50 50 50]\ny_train의 레이블 카운트: [35 35 35]\ny_test의 레이블 카운트: [15 15 15]\n"
]
],
[
[
"특성을 표준화합니다:",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)",
"_____no_output_____"
]
],
[
[
"## 사이킷런으로 퍼셉트론 훈련하기",
"_____no_output_____"
],
[
"2장의 `plot_decision_region` 함수를 다시 사용하겠습니다:",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import Perceptron\n\nppn = Perceptron(max_iter=40, eta0=0.1, tol=1e-3, random_state=1)\nppn.fit(X_train_std, y_train)",
"_____no_output_____"
],
[
"y_pred = ppn.predict(X_test_std)\nprint('잘못 분류된 샘플 개수: %d' % (y_test != y_pred).sum())",
"잘못 분류된 샘플 개수: 1\n"
],
[
"from sklearn.metrics import accuracy_score\n\nprint('정확도: %.2f' % accuracy_score(y_test, y_pred))",
"정확도: 0.98\n"
],
[
"print('정확도: %.2f' % ppn.score(X_test_std, y_test))",
"정확도: 0.98\n"
],
[
"from matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\n\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n\n # 마커와 컬러맵을 설정합니다.\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # 결정 경계를 그립니다.\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], \n y=X[y == cl, 1],\n alpha=0.8, \n c=colors[idx],\n marker=markers[idx], \n label=cl, \n edgecolor='black')\n\n # 테스트 샘플을 부각하여 그립니다.\n if test_idx:\n X_test, y_test = X[test_idx, :], y[test_idx]\n\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolor='black',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=100, \n label='test set')",
"_____no_output_____"
]
],
[
[
"표준화된 훈련 데이터를 사용하여 퍼셉트론 모델을 훈련합니다:",
"_____no_output_____"
]
],
[
[
"X_combined_std = np.vstack((X_train_std, X_test_std))\ny_combined = np.hstack((y_train, y_test))\n\nplot_decision_regions(X=X_combined_std, y=y_combined,\n classifier=ppn, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\n\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 로지스틱 회귀를 사용한 클래스 확률 모델링",
"_____no_output_____"
],
[
"### 로지스틱 회귀의 이해와 조건부 확률",
"_____no_output_____"
]
],
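[
[
"As a brief reference (added here), the odds of the positive class are $\frac{p}{1-p}$ and their logarithm is the log-odds, or logit, $\log\frac{p}{1-p}$. Setting the logit equal to the net input $z = w^Tx$ and solving for $p$ gives the logistic sigmoid that the next cell plots:\n\n$$\phi(z) = \frac{1}{1 + e^{-z}}$$",
"_____no_output_____"
]
],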
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef sigmoid(z):\n return 1.0 / (1.0 + np.exp(-z))\n\nz = np.arange(-7, 7, 0.1)\nphi_z = sigmoid(z)\n\nplt.plot(z, phi_z)\nplt.axvline(0.0, color='k')\nplt.ylim(-0.1, 1.1)\nplt.xlabel('z')\nplt.ylabel('$\\phi (z)$')\n\n# y 축의 눈금과 격자선\nplt.yticks([0.0, 0.5, 1.0])\nax = plt.gca()\nax.yaxis.grid(True)\n\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"### 로지스틱 비용 함수의 가중치 학습하기",
"_____no_output_____"
]
],
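[
[
"For reference, the cost minimized by the gradient-descent classifier below (and plotted for a single sample in the next cell) is the negative log-likelihood:\n\n$$J(w) = \sum_{i} \left[ -y^{(i)} \log(\phi(z^{(i)})) - (1 - y^{(i)}) \log(1 - \phi(z^{(i)})) \right]$$",
"_____no_output_____"
]
],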
[
[
"def cost_1(z):\n return - np.log(sigmoid(z))\n\n\ndef cost_0(z):\n return - np.log(1 - sigmoid(z))\n\nz = np.arange(-10, 10, 0.1)\nphi_z = sigmoid(z)\n\nc1 = [cost_1(x) for x in z]\nplt.plot(phi_z, c1, label='J(w) if y=1')\n\nc0 = [cost_0(x) for x in z]\nplt.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')\n\nplt.ylim(0.0, 5.1)\nplt.xlim([0, 1])\nplt.xlabel('$\\phi$(z)')\nplt.ylabel('J(w)')\nplt.legend(loc='best')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"class LogisticRegressionGD(object):\n \"\"\"경사 하강법을 사용한 로지스틱 회귀 분류기\n\n 매개변수\n ------------\n eta : float\n 학습률 (0.0과 1.0 사이)\n n_iter : int\n 훈련 데이터셋 반복 횟수\n random_state : int\n 가중치 무작위 초기화를 위한 난수 생성기 시드\n\n 속성\n -----------\n w_ : 1d-array\n 학습된 가중치\n cost_ : list\n 에포크마다 누적된 로지스틱 비용 함수 값\n\n \"\"\"\n def __init__(self, eta=0.05, n_iter=100, random_state=1):\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"훈련 데이터 학습\n\n 매개변수\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n n_samples 개의 샘플과 n_features 개의 특성으로 이루어진 훈련 데이터\n y : array-like, shape = [n_samples]\n 타깃값\n\n 반환값\n -------\n self : object\n\n \"\"\"\n rgen = np.random.RandomState(self.random_state)\n self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])\n self.cost_ = []\n\n for i in range(self.n_iter):\n net_input = self.net_input(X)\n output = self.activation(net_input)\n errors = (y - output)\n self.w_[1:] += self.eta * X.T.dot(errors)\n self.w_[0] += self.eta * errors.sum()\n \n # 오차 제곱합 대신 로지스틱 비용을 계산합니다.\n cost = -y.dot(np.log(output)) - ((1 - y).dot(np.log(1 - output)))\n self.cost_.append(cost)\n return self\n \n def net_input(self, X):\n \"\"\"최종 입력 계산\"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def activation(self, z):\n \"\"\"로지스틱 시그모이드 활성화 계산\"\"\"\n return 1. / (1. + np.exp(-np.clip(z, -250, 250)))\n\n def predict(self, X):\n \"\"\"단위 계단 함수를 사용하여 클래스 레이블을 반환합니다\"\"\"\n return np.where(self.net_input(X) >= 0.0, 1, 0)\n # 다음과 동일합니다.\n # return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)",
"_____no_output_____"
],
[
"X_train_01_subset = X_train[(y_train == 0) | (y_train == 1)]\ny_train_01_subset = y_train[(y_train == 0) | (y_train == 1)]\n\nlrgd = LogisticRegressionGD(eta=0.05, n_iter=1000, random_state=1)\nlrgd.fit(X_train_01_subset,\n y_train_01_subset)\n\nplot_decision_regions(X=X_train_01_subset, \n y=y_train_01_subset,\n classifier=lrgd)\n\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\n\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"### 사이킷런을 사용해 로지스틱 회귀 모델 훈련하기",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression(solver='liblinear', multi_class='auto', C=100.0, random_state=1)\nlr.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined,\n classifier=lr, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"lr.predict_proba(X_test_std[:3, :])",
"_____no_output_____"
],
[
"lr.predict_proba(X_test_std[:3, :]).sum(axis=1)",
"_____no_output_____"
],
[
"lr.predict_proba(X_test_std[:3, :]).argmax(axis=1)",
"_____no_output_____"
],
[
"lr.predict(X_test_std[:3, :])",
"_____no_output_____"
],
[
"lr.predict(X_test_std[0, :].reshape(1, -1))",
"_____no_output_____"
]
],
[
[
"### 규제를 사용해 과대적합 피하기",
"_____no_output_____"
]
],
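[
[
"The `C` parameter varied in the next cell comes from scikit-learn's regularized formulation: it is inversely related to the regularization strength $\lambda$ (roughly $C = \frac{1}{\lambda}$), and the L2-penalized cost being minimized is\n\n$$J(w) = C \sum_{i} \left[ -y^{(i)} \log(\phi(z^{(i)})) - (1 - y^{(i)}) \log(1 - \phi(z^{(i)})) \right] + \frac{1}{2} \|w\|^2$$\n\nSmaller values of `C` therefore mean stronger regularization, which is what the shrinking weight coefficients in the plot below show.",
"_____no_output_____"
]
],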
[
[
"weights, params = [], []\nfor c in np.arange(-5, 5):\n lr = LogisticRegression(solver='liblinear', multi_class='auto', C=10.**c, random_state=1)\n lr.fit(X_train_std, y_train)\n weights.append(lr.coef_[1])\n params.append(10.**c)\n\nweights = np.array(weights)\nplt.plot(params, weights[:, 0],\n label='petal length')\nplt.plot(params, weights[:, 1], linestyle='--',\n label='petal width')\nplt.ylabel('weight coefficient')\nplt.xlabel('C')\nplt.legend(loc='upper left')\nplt.xscale('log')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 서포트 벡터 머신을 사용한 최대 마진 분류",
"_____no_output_____"
]
],
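[
[
"As a quick reference (added here, not a full derivation): maximizing the margin $\frac{2}{\|w\|}$ is usually posed as minimizing $\frac{1}{2}\|w\|^2$ with every sample on the correct side of the margin. With slack variables $\xi^{(i)}$ for the soft-margin case, the objective becomes\n\n$$\min_{w,\,b} \; \frac{1}{2}\|w\|^2 + C \sum_{i} \xi^{(i)}$$\n\nwith $y^{(i)}(w^T x^{(i)} + b) \ge 1 - \xi^{(i)}$ for every training sample, where `C` again controls the penalty for margin violations.",
"_____no_output_____"
]
],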
[
[
"from sklearn.svm import SVC\n\nsvm = SVC(kernel='linear', C=1.0, random_state=1)\nsvm.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, \n y_combined,\n classifier=svm, \n test_idx=range(105, 150))\nplt.scatter(svm.dual_coef_[0, :], svm.dual_coef_[1, :])\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"svm.coef_",
"_____no_output_____"
],
[
"svm.dual_coef_, svm.dual_coef_.shape",
"_____no_output_____"
]
],
[
[
"## 사이킷런의 다른 구현",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import SGDClassifier\n\nppn = SGDClassifier(loss='perceptron')\nlr = SGDClassifier(loss='log')\nsvm = SGDClassifier(loss='hinge')",
"_____no_output_____"
]
],
[
[
"# 커널 SVM을 사용하여 비선형 문제 풀기",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nnp.random.seed(1)\nX_xor = np.random.randn(200, 2)\ny_xor = np.logical_xor(X_xor[:, 0] > 0,\n X_xor[:, 1] > 0)\ny_xor = np.where(y_xor, 1, -1)\n\nplt.scatter(X_xor[y_xor == 1, 0],\n X_xor[y_xor == 1, 1],\n c='b', marker='x',\n label='1')\nplt.scatter(X_xor[y_xor == -1, 0],\n X_xor[y_xor == -1, 1],\n c='r',\n marker='s',\n label='-1')\n\nplt.xlim([-3, 3])\nplt.ylim([-3, 3])\nplt.legend(loc='best')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 커널 기법을 사용해 고차원 공간에서 분할 초평면 찾기",
"_____no_output_____"
]
],
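[
[
"For reference, the RBF (Gaussian) kernel used by `SVC(kernel='rbf')` in the next cells measures the similarity between two samples as\n\n$$K(x^{(i)}, x^{(j)}) = \exp\left(-\gamma \, \|x^{(i)} - x^{(j)}\|^2\right)$$\n\nwhere $\gamma$ is the hyperparameter being varied below; larger values of $\gamma$ produce a tighter, more complex decision boundary.",
"_____no_output_____"
]
],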
[
[
"svm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)\nsvm.fit(X_xor, y_xor)\nplot_decision_regions(X_xor, y_xor,\n classifier=svm)\n\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"svm = SVC(kernel='rbf', random_state=1, gamma=0.2, C=1.0)\nsvm.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined,\n classifier=svm, test_idx=range(105, 150))\nplt.scatter(svm.dual_coef_[0,:], svm.dual_coef_[1,:])\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"svm = SVC(kernel='rbf', random_state=1, gamma=100.0, C=1.0)\nsvm.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=svm, test_idx=range(105, 150))\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 결정 트리 학습",
"_____no_output_____"
],
[
"## 정보 이득 최대화-자원을 최대로 활용하기",
"_____no_output_____"
]
],
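[
[
"The three impurity measures plotted in the next cell are, writing $p_i$ for the proportion of class $i$ at a node:\n\n$$I_H = -\sum_{i} p_i \log_2 p_i, \qquad I_G = \sum_{i} p_i (1 - p_i), \qquad I_E = 1 - \max_i p_i$$\n\nFor the binary case used in the code, each reduces to a function of a single probability $p$.",
"_____no_output_____"
]
],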
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef gini(p):\n return p * (1 - p) + (1 - p) * (1 - (1 - p))\n\n\ndef entropy(p):\n return - p * np.log2(p) - (1 - p) * np.log2((1 - p))\n\n\ndef error(p):\n return 1 - np.max([p, 1 - p])\n\nx = np.arange(0.0, 1.0, 0.01)\n\nent = [entropy(p) if p != 0 else None for p in x]\nsc_ent = [e * 0.5 if e else None for e in ent]\nerr = [error(i) for i in x]\n\nfig = plt.figure()\nax = plt.subplot(111)\nfor i, lab, ls, c, in zip([ent, sc_ent, gini(x), err], \n ['Entropy', 'Entropy (scaled)', \n 'Gini Impurity', 'Misclassification Error'],\n ['-', '-', '--', '-.'],\n ['black', 'lightgray', 'red', 'green', 'cyan']):\n line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)\n\nax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),\n ncol=5, fancybox=True, shadow=False)\n\nax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')\nax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')\nplt.ylim([0, 1.1])\nplt.xlabel('p(i=1)')\nplt.ylabel('Impurity Index')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 결정 트리 만들기",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\n\ntree = DecisionTreeClassifier(criterion='gini', \n max_depth=4, \n random_state=1)\ntree.fit(X_train, y_train)\n\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.hstack((y_train, y_test))\nplot_decision_regions(X_combined, y_combined, \n classifier=tree, test_idx=range(105, 150))\n\nplt.xlabel('petal length [cm]')\nplt.ylabel('petal width [cm]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"from pydotplus import graph_from_dot_data\nfrom sklearn.tree import export_graphviz\n\ndot_data = export_graphviz(tree,\n filled=True, \n rounded=True,\n class_names=['Setosa', \n 'Versicolor',\n 'Virginica'],\n feature_names=['petal length', \n 'petal width'],\n out_file=None) \ngraph = graph_from_dot_data(dot_data) \ngraph.write_png('tree.png') ",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"## 랜덤 포레스트로 여러 개의 결정 트리 연결하기",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier(criterion='gini',\n n_estimators=25, \n random_state=1,\n n_jobs=2)\nforest.fit(X_train, y_train)\n\nplot_decision_regions(X_combined, y_combined, \n classifier=forest, test_idx=range(105, 150))\n\nplt.xlabel('petal length [cm]')\nplt.ylabel('petal width [cm]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# K-최근접 이웃: 게으른 학습 알고리즘",
"_____no_output_____"
]
],
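[
[
"The `metric='minkowski'` and `p=2` arguments used below correspond to the Minkowski distance\n\n$$d(x^{(i)}, x^{(j)}) = \left( \sum_{k} \vert x^{(i)}_k - x^{(j)}_k \vert^{p} \right)^{1/p}$$\n\nwhich is the Euclidean distance for $p=2$ and the Manhattan distance for $p=1$.",
"_____no_output_____"
]
],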
[
[
"from sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier(n_neighbors=5, \n p=2, \n metric='minkowski')\nknn.fit(X_train_std, y_train)\n\nplot_decision_regions(X_combined_std, y_combined, \n classifier=knn, test_idx=range(105, 150))\n\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbd288d8839f1150ac78aa268396b873f03e72d
| 669,448 |
ipynb
|
Jupyter Notebook
|
Time Series Forecasting/Energy Demand Time Series Forecast/Part 4 - Dynamic Time Series Model.ipynb
|
niheon/ds-ml-dl
|
16f55411d0eaeb45a952f7889eb580959e769487
|
[
"MIT"
] | 2 |
2022-03-08T19:13:01.000Z
|
2022-03-09T01:19:20.000Z
|
Time Series Forecasting/Energy Demand Time Series Forecast/Part 4 - Dynamic Time Series Model.ipynb
|
niheon/machine-learning
|
16f55411d0eaeb45a952f7889eb580959e769487
|
[
"MIT"
] | null | null | null |
Time Series Forecasting/Energy Demand Time Series Forecast/Part 4 - Dynamic Time Series Model.ipynb
|
niheon/machine-learning
|
16f55411d0eaeb45a952f7889eb580959e769487
|
[
"MIT"
] | null | null | null | 265.864972 | 132,904 | 0.888045 |
[
[
[
"## Importing Necessary Libraries and Functions",
"_____no_output_____"
],
[
"The first thing we need to do is import the necessary functions and libraries that we will be working with throughout the topic. We should also go ahead and upload all the of the necessary data sets here instead of loading them as we go. We will be using energy production data from PJM Interconnection. They are a regional transmission organization that coordinates the movement of wholesale electricity in parts of the United States. Specifically, we will be focused on a region of Pennsylvania. We will also be using temperature data collected from the National Oceanic and Atmospheric Assocation (NOAA).",
"_____no_output_____"
]
],
[
[
"!conda update -n base -c defaults conda\n\n!conda install pandas -y\n!conda install numpy -y\n!conda install matplotlib -y\n!conda install statsmodels -y\n!pip install scipy ",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.graphics import tsaplots\nfrom statsmodels.graphics import tsaplots\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.holtwinters import SimpleExpSmoothing, Holt, ExponentialSmoothing",
"_____no_output_____"
]
],
[
[
"Notice how we added an additional pieces above from the ```statsmodels``` module. We need to build time series models in this milestone and so we will need the above pieces to do so. We will be building exponential smoothing models as well as ARIMA models.\n\nThis milestone builds off the previous ones so we should complete the following steps to the first milestone again to have our data prepped and ready to go. We should also rebuild our last model from milestone 3 since that is our foundational model!",
"_____no_output_____"
],
[
"## Preparing the Energy and Temperature Data##",
"_____no_output_____"
],
[
"First we need to load our weather and energy data sets for cleaning. Let's use the pandas library and the ```read.csv``` function to do this.",
"_____no_output_____"
]
],
[
[
"# Loading the Needed Data Sets \nweather = pd.read_csv('.../hr_temp_20170201-20200131_subset.csv')\nenergy = pd.read_csv('.../hrl_load_metered - 20170201-20200131.csv')\n",
"_____no_output_____"
]
],
[
[
"It is always good practice to take a look at the first few observations of the data set to make sure that everything looks like how we expected it to when we read in our CSV file. Let's use the ```head``` function for this.",
"_____no_output_____"
]
],
[
[
"weather.head()",
"_____no_output_____"
]
],
[
[
"Perfect! We have temperature as well as time. There are some other pieces of information like the station number, source of the reading and reading type, but we don't need those.\n\nLet's take a look at the first few observations of the energy data as well!",
"_____no_output_____"
]
],
[
[
"energy.head()",
"_____no_output_____"
]
],
[
[
"Great! Again, we have the important information of time as well as megawatt (MW) readings per hour. Again, there are some other varibales that we won't end up using in this data set as well.\n\nLet's get rid of the variables we don't need and combine the variables that we do need into one pandas data frame. Dictionaries are an easy way of doing this. Here, we are pulling the MW column from the energy data set as well as the temperature and date columns from the weather data set. These data sets already line up on time which makes this much easier.",
"_____no_output_____"
]
],
[
[
"d = {'MW': energy['mw'], 'Temp': weather['HourlyDryBulbTemperature'], 'Date': weather['DATE']}",
"_____no_output_____"
]
],
[
[
"Now let's create our pandas data frame.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame(d)",
"_____no_output_____"
]
],
[
[
"One of the problems when loading a data set you want to run time series analysis on is the type of object Python sees for the \"date\" variable. Let's look at the pandas data frame data types for each of our variables.",
"_____no_output_____"
]
],
[
[
"print(df.dtypes)",
"MW float64\nTemp float64\nDate object\ndtype: object\n"
]
],
[
[
"Here we can see that the Date variable is a general object and not a \"date\" according to Python. We can change that with the pandas function ```to_datetime``` as we have below.",
"_____no_output_____"
]
],
[
[
"df['Date'] = pd.to_datetime(df['Date'])\nprint(df.dtypes)",
"MW float64\nTemp float64\nDate datetime64[ns]\ndtype: object\n"
]
],
[
[
"Good! Now that we have a ```datetime64``` object in our data set we can easily create other forms of date variables. The hour of day, day of week, month of year, and possibly even the year itself might all impact the energy usage. Let's extract these variables from our date object so that we can use them in our analysis. Pandas has some wonderful functionality to do this with the ```hour```, ```day```, ```dayofweek```, ```month```, and ```year``` functions. Then let's inspect the first few observations to make sure things look correct.",
"_____no_output_____"
]
],
[
[
"df['hour'] = pd.DatetimeIndex(pd.to_datetime(df['Date'])).hour\ndf['day'] = pd.DatetimeIndex(pd.to_datetime(df['Date'])).day\ndf['weekday'] = df['Date'].dt.dayofweek\ndf['month'] = pd.DatetimeIndex(pd.to_datetime(df['Date'])).month\ndf['year'] = pd.DatetimeIndex(pd.to_datetime(df['Date'])).year\n\ndf.head()",
"_____no_output_____"
]
],
[
[
"Everything looks good in the first few observations above. If you still aren't convinced you could pull different pieces of the data frame to make sure that other observations are structured correctly.\n\nNow we should set this Python date object as the index of our data set. This will make it easier for plotting as well as forecasting later. We can use the ```set_index``` function for this.",
"_____no_output_____"
]
],
[
[
"df = df.set_index('Date')",
"_____no_output_____"
]
],
[
[
"Good! Now that we have our data structured as we would like, we can start the cleaning of the data. First, let's check if there are any missing values in the temperature column. The ```is.null``` function will help us here.",
"_____no_output_____"
]
],
[
[
"sum(df['Temp'].isnull())",
"_____no_output_____"
]
],
[
[
"Looks like there are 37 missing values in our temperature data. We shoudl impute those. However, we don't just want to put the average temperature in these spots as the overall average across three years probably isn't a good guess for any one hour. The temperature of the hours on either side of the missing observation would be more helpful. Let's do a linear interpolation across missing values to help with this. This will essentially draw a straight line between the two known points to fill in the missing values. We can use the ```interpolate(method='linear')``` function for this.",
"_____no_output_____"
]
],
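[
[
"As a tiny, self-contained illustration of what linear interpolation does (toy numbers, not from our data): two missing hourly readings between 60 and 66 degrees get filled with the evenly spaced values 62 and 64.\n\n```python\nimport pandas as pd\n\n# toy series with two missing hourly temperatures\ns = pd.Series([60.0, None, None, 66.0])\nprint(s.interpolate(method='linear').tolist())  # [60.0, 62.0, 64.0, 66.0]\n```",
"_____no_output_____"
]
],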
[
[
"df['Temp'] = df['Temp'].interpolate(method='linear')",
"_____no_output_____"
]
],
[
[
"Now let's see if we have any more missing temperature values.",
"_____no_output_____"
]
],
[
[
"sum(df['Temp'].isnull())",
"_____no_output_____"
]
],
[
[
"No more! Time to check if the energy data has any missing values.",
"_____no_output_____"
]
],
[
[
"sum(df['MW'].isnull())",
"_____no_output_____"
]
],
[
[
"No missing values there either! Perfect.\n\nNow it is time to split the data into two pieces - training and testing. The training data set is the data set we will be building our model on, while the testing data set is what we will be reporting results on since the model wouldn't have seen it ahead of time. Using the date index we can easily do this in our data frame.",
"_____no_output_____"
]
],
[
[
"#Training and Validation Split #\ntrain = pd.DataFrame(df['2017-01-01':'2019-12-31'])\ntest = pd.DataFrame(df['2020-01-01':'2020-01-31'])",
"_____no_output_____"
]
],
[
[
"Now let's look at the first few observations for our training data set.",
"_____no_output_____"
]
],
[
[
"train.head()",
"_____no_output_____"
]
],
[
[
"Everything looks good there!\n\nNow let's do the same for our testing data set.",
"_____no_output_____"
]
],
[
[
"test.head()",
"_____no_output_____"
]
],
[
[
"Excellent! We now have our data cleaned and split. By combining and cleaning the data sets, we will make the exploration of these data sets as well as the modeling of these data sets much easier for the upcoming sections!",
"_____no_output_____"
],
[
"## Building Naive Energy Model",
"_____no_output_____"
],
[
"Now that we have recreated the pieces of milestone 1 that clean and split our data we can start the modeling phase of milestone 3.\n\nFirst, let's review some of the findings we have from the first two milestones:\n- Energy usage changes depending on month / season\n- Energy usage changes depending on day of week\n- Energy usage changes depending on hour of day\n- Energy usage changes depending on outside temperature\n- The relationship between temperature and energy usage appears quadratic in nature\n\nLooking at this last bullet point, we need to create a quadratic variable on temperature as temperature in the model by itself won't be enough to model energy usage. It is always good practice to standardize (mean of 0 and standard deviation of 1) any variable you are going to raise to a higher power in a regression to help prevent multicollinearity problems. We can standardize the variable *Temp* by using the ```mean``` and ```std``` functions.",
"_____no_output_____"
]
],
[
[
"train['Temp_Norm'] = (train['Temp']-train['Temp'].mean())/train['Temp'].std()",
"_____no_output_____"
]
],
[
[
"Now that temperature is standardized (or normalized) we can just multiply it by itself to get our quadratic term.",
"_____no_output_____"
]
],
[
[
"train['Temp_Norm2'] = train['Temp_Norm']**2",
"_____no_output_____"
]
],
[
[
"Let's do a brief look at the first few observations in our training data set to make sure that things worked as expected.",
"_____no_output_____"
]
],
[
[
"train.head()",
"_____no_output_____"
],
[
"results = sm.OLS.from_formula('MW ~ Temp_Norm*C(hour) + Temp_Norm2*C(hour) + Temp_Norm*C(month) + Temp_Norm2*C(month) + C(weekday)*C(hour)', \n data=train).fit()\nprint(results.summary())",
" OLS Regression Results \n==============================================================================\nDep. Variable: MW R-squared: 0.924\nModel: OLS Adj. R-squared: 0.924\nMethod: Least Squares F-statistic: 1248.\nDate: Fri, 09 Oct 2020 Prob (F-statistic): 0.00\nTime: 12:43:23 Log-Likelihood: -1.4774e+05\nNo. Observations: 25536 AIC: 2.960e+05\nDf Residuals: 25287 BIC: 2.980e+05\nDf Model: 248 \nCovariance Type: nonrobust \n=================================================================================================\n coef std err t P>|t| [0.025 0.975]\n-------------------------------------------------------------------------------------------------\nIntercept 1229.6515 9.041 136.011 0.000 1211.931 1247.372\nC(hour)[T.1] -21.5265 9.497 -2.267 0.023 -40.141 -2.912\nC(hour)[T.2] -25.0718 9.485 -2.643 0.008 -43.663 -6.480\nC(hour)[T.3] -6.9363 9.475 -0.732 0.464 -25.507 11.635\nC(hour)[T.4] 48.0591 9.474 5.073 0.000 29.489 66.629\nC(hour)[T.5] 136.7171 9.474 14.431 0.000 118.147 155.287\nC(hour)[T.6] 211.4750 9.493 22.276 0.000 192.867 230.083\nC(hour)[T.7] 259.9536 9.525 27.291 0.000 241.283 278.624\nC(hour)[T.8] 291.9217 9.566 30.516 0.000 273.171 310.672\nC(hour)[T.9] 312.8325 9.618 32.525 0.000 293.980 331.685\nC(hour)[T.10] 324.4094 9.647 33.629 0.000 305.501 343.317\nC(hour)[T.11] 326.6089 9.663 33.799 0.000 307.668 345.550\nC(hour)[T.12] 333.2134 9.653 34.520 0.000 314.294 352.133\nC(hour)[T.13] 320.6632 9.675 33.145 0.000 301.700 339.626\nC(hour)[T.14] 309.1144 9.674 31.952 0.000 290.152 328.077\nC(hour)[T.15] 302.4094 9.675 31.257 0.000 283.446 321.373\nC(hour)[T.16] 308.6771 9.664 31.942 0.000 289.736 327.618\nC(hour)[T.17] 328.0391 9.641 34.027 0.000 309.143 346.935\nC(hour)[T.18] 341.0574 9.618 35.462 0.000 322.206 359.908\nC(hour)[T.19] 336.0446 9.594 35.028 0.000 317.241 354.849\nC(hour)[T.20] 297.8209 9.576 31.099 0.000 279.051 316.591\nC(hour)[T.21] 219.9381 9.564 22.997 0.000 201.192 238.684\nC(hour)[T.22] 126.9058 9.548 13.292 0.000 108.192 145.620\nC(hour)[T.23] 50.0603 9.534 5.251 0.000 31.373 68.748\nC(month)[T.2] -10.9536 6.587 -1.663 0.096 -23.864 1.957\nC(month)[T.3] -58.4207 6.602 -8.848 0.000 -71.362 -45.480\nC(month)[T.4] -110.3894 6.439 -17.143 0.000 -123.011 -97.768\nC(month)[T.5] -122.2577 6.548 -18.671 0.000 -135.092 -109.423\nC(month)[T.6] -105.6638 8.055 -13.117 0.000 -121.453 -89.875\nC(month)[T.7] -87.2652 14.169 -6.159 0.000 -115.037 -59.494\nC(month)[T.8] -80.4514 11.193 -7.187 0.000 -102.391 -58.512\nC(month)[T.9] -91.9013 7.370 -12.470 0.000 -106.347 -77.456\nC(month)[T.10] -111.9445 6.423 -17.428 0.000 -124.535 -99.354\nC(month)[T.11] -45.0605 6.751 -6.675 0.000 -58.293 -31.828\nC(month)[T.12] -18.2699 7.454 -2.451 0.014 -32.881 -3.659\nC(weekday)[T.1] 6.1527 9.085 0.677 0.498 -11.654 23.960\nC(weekday)[T.2] 32.7819 9.087 3.608 0.000 14.971 50.593\nC(weekday)[T.3] 37.2304 9.092 4.095 0.000 19.409 55.052\nC(weekday)[T.4] 37.0628 9.087 4.078 0.000 19.251 54.875\nC(weekday)[T.5] 17.1413 9.087 1.886 0.059 -0.670 34.953\nC(weekday)[T.6] -9.1433 9.083 -1.007 0.314 -26.947 8.660\nC(weekday)[T.1]:C(hour)[T.1] 1.2708 12.844 0.099 0.921 -23.905 26.446\nC(weekday)[T.2]:C(hour)[T.1] -7.0511 12.848 -0.549 0.583 -32.233 18.131\nC(weekday)[T.3]:C(hour)[T.1] -1.0763 12.852 -0.084 0.933 -26.268 24.115\nC(weekday)[T.4]:C(hour)[T.1] -0.0966 12.845 -0.008 0.994 -25.273 25.080\nC(weekday)[T.5]:C(hour)[T.1] -5.4920 12.846 -0.428 0.669 -30.671 19.687\nC(weekday)[T.6]:C(hour)[T.1] -2.4908 12.843 -0.194 0.846 -27.664 22.682\nC(weekday)[T.1]:C(hour)[T.2] -4.6528 12.844 
-0.362 0.717 -29.828 20.523\nC(weekday)[T.2]:C(hour)[T.2] -6.1394 12.847 -0.478 0.633 -31.320 19.041\nC(weekday)[T.3]:C(hour)[T.2] -11.4852 12.854 -0.894 0.372 -36.679 13.709\nC(weekday)[T.4]:C(hour)[T.2] -9.3409 12.845 -0.727 0.467 -34.518 15.836\nC(weekday)[T.5]:C(hour)[T.2] -20.8512 12.846 -1.623 0.105 -46.031 4.328\nC(weekday)[T.6]:C(hour)[T.2] -16.2096 12.843 -1.262 0.207 -41.382 8.963\nC(weekday)[T.1]:C(hour)[T.3] -4.5071 12.844 -0.351 0.726 -29.682 20.668\nC(weekday)[T.2]:C(hour)[T.3] -8.5981 12.846 -0.669 0.503 -33.778 16.581\nC(weekday)[T.3]:C(hour)[T.3] -13.8075 12.853 -1.074 0.283 -39.000 11.385\nC(weekday)[T.4]:C(hour)[T.3] -12.3027 12.845 -0.958 0.338 -37.480 12.874\nC(weekday)[T.5]:C(hour)[T.3] -34.6792 12.846 -2.700 0.007 -59.859 -9.499\nC(weekday)[T.6]:C(hour)[T.3] -37.6174 12.843 -2.929 0.003 -62.791 -12.444\nC(weekday)[T.1]:C(hour)[T.4] -5.2068 12.844 -0.405 0.685 -30.383 19.969\nC(weekday)[T.2]:C(hour)[T.4] -6.0556 12.846 -0.471 0.637 -31.234 19.123\nC(weekday)[T.3]:C(hour)[T.4] -9.5735 12.852 -0.745 0.456 -34.765 15.618\nC(weekday)[T.4]:C(hour)[T.4] -12.6740 12.845 -0.987 0.324 -37.851 12.503\nC(weekday)[T.5]:C(hour)[T.4] -67.7274 12.846 -5.272 0.000 -92.907 -42.548\nC(weekday)[T.6]:C(hour)[T.4] -79.0581 12.844 -6.155 0.000 -104.232 -53.884\nC(weekday)[T.1]:C(hour)[T.5] -3.4440 12.845 -0.268 0.789 -28.620 21.732\nC(weekday)[T.2]:C(hour)[T.5] -1.9474 12.846 -0.152 0.880 -27.125 23.231\nC(weekday)[T.3]:C(hour)[T.5] -8.4281 12.852 -0.656 0.512 -33.619 16.763\nC(weekday)[T.4]:C(hour)[T.5] -12.5802 12.845 -0.979 0.327 -37.757 12.596\nC(weekday)[T.5]:C(hour)[T.5] -128.9868 12.846 -10.041 0.000 -154.166 -103.807\nC(weekday)[T.6]:C(hour)[T.5] -149.5590 12.843 -11.645 0.000 -174.733 -124.385\nC(weekday)[T.1]:C(hour)[T.6] 5.4641 12.844 0.425 0.671 -19.711 30.640\nC(weekday)[T.2]:C(hour)[T.6] 3.7926 12.846 0.295 0.768 -21.386 28.972\nC(weekday)[T.3]:C(hour)[T.6] -3.2823 12.852 -0.255 0.798 -28.473 21.908\nC(weekday)[T.4]:C(hour)[T.6] -5.9154 12.845 -0.461 0.645 -31.092 19.261\nC(weekday)[T.5]:C(hour)[T.6] -173.4048 12.847 -13.498 0.000 -198.585 -148.225\nC(weekday)[T.6]:C(hour)[T.6] -203.5208 12.843 -15.847 0.000 -228.694 -178.348\nC(weekday)[T.1]:C(hour)[T.7] 8.3028 12.844 0.646 0.518 -16.873 33.479\nC(weekday)[T.2]:C(hour)[T.7] 4.2140 12.847 0.328 0.743 -20.967 29.395\nC(weekday)[T.3]:C(hour)[T.7] 1.5437 12.851 0.120 0.904 -23.646 26.733\nC(weekday)[T.4]:C(hour)[T.7] -3.0864 12.845 -0.240 0.810 -28.263 22.090\nC(weekday)[T.5]:C(hour)[T.7] -174.5214 12.846 -13.585 0.000 -199.701 -149.342\nC(weekday)[T.6]:C(hour)[T.7] -213.6351 12.843 -16.635 0.000 -238.808 -188.462\nC(weekday)[T.1]:C(hour)[T.8] 6.2398 12.845 0.486 0.627 -18.938 31.418\nC(weekday)[T.2]:C(hour)[T.8] 0.2350 12.848 0.018 0.985 -24.947 25.417\nC(weekday)[T.3]:C(hour)[T.8] -0.9942 12.852 -0.077 0.938 -26.184 24.196\nC(weekday)[T.4]:C(hour)[T.8] -4.2566 12.845 -0.331 0.740 -29.434 20.921\nC(weekday)[T.5]:C(hour)[T.8] -160.6811 12.847 -12.508 0.000 -185.861 -135.501\nC(weekday)[T.6]:C(hour)[T.8] -202.4275 12.843 -15.762 0.000 -227.601 -177.254\nC(weekday)[T.1]:C(hour)[T.9] 7.3545 12.846 0.573 0.567 -17.824 32.533\nC(weekday)[T.2]:C(hour)[T.9] -8.6074 12.849 -0.670 0.503 -33.792 16.577\nC(weekday)[T.3]:C(hour)[T.9] -4.3096 12.852 -0.335 0.737 -29.500 20.880\nC(weekday)[T.4]:C(hour)[T.9] -5.2384 12.846 -0.408 0.683 -30.417 19.940\nC(weekday)[T.5]:C(hour)[T.9] -151.5667 12.848 -11.797 0.000 -176.749 -126.385\nC(weekday)[T.6]:C(hour)[T.9] -187.9846 12.843 -14.637 0.000 -213.158 -162.811\nC(weekday)[T.1]:C(hour)[T.10] 4.3423 
12.847 0.338 0.735 -20.839 29.523\nC(weekday)[T.2]:C(hour)[T.10] -12.2992 12.851 -0.957 0.339 -37.487 12.889\nC(weekday)[T.3]:C(hour)[T.10] -5.4245 12.852 -0.422 0.673 -30.615 19.766\nC(weekday)[T.4]:C(hour)[T.10] -11.1744 12.847 -0.870 0.384 -36.355 14.006\nC(weekday)[T.5]:C(hour)[T.10] -153.6771 12.848 -11.961 0.000 -178.860 -128.495\nC(weekday)[T.6]:C(hour)[T.10] -177.2951 12.844 -13.804 0.000 -202.469 -152.121\nC(weekday)[T.1]:C(hour)[T.11] 3.8983 12.848 0.303 0.762 -21.284 29.081\nC(weekday)[T.2]:C(hour)[T.11] -12.1382 12.852 -0.944 0.345 -37.328 13.052\nC(weekday)[T.3]:C(hour)[T.11] -7.2745 12.853 -0.566 0.571 -32.466 17.917\nC(weekday)[T.4]:C(hour)[T.11] -19.2054 12.848 -1.495 0.135 -44.388 5.977\nC(weekday)[T.5]:C(hour)[T.11] -155.4069 12.849 -12.095 0.000 -180.591 -130.223\nC(weekday)[T.6]:C(hour)[T.11] -174.2920 12.844 -13.570 0.000 -199.467 -149.117\nC(weekday)[T.1]:C(hour)[T.12] -6.0155 12.850 -0.468 0.640 -31.203 19.172\nC(weekday)[T.2]:C(hour)[T.12] -19.4370 12.855 -1.512 0.131 -44.633 5.759\nC(weekday)[T.3]:C(hour)[T.12] -15.7628 12.855 -1.226 0.220 -40.959 9.433\nC(weekday)[T.4]:C(hour)[T.12] -26.9476 12.849 -2.097 0.036 -52.133 -1.762\nC(weekday)[T.5]:C(hour)[T.12] -173.4917 12.849 -13.502 0.000 -198.677 -148.306\nC(weekday)[T.6]:C(hour)[T.12] -174.9069 12.845 -13.617 0.000 -200.083 -149.731\nC(weekday)[T.1]:C(hour)[T.13] 4.2963 12.850 0.334 0.738 -20.890 29.483\nC(weekday)[T.2]:C(hour)[T.13] -8.5240 12.853 -0.663 0.507 -33.717 16.669\nC(weekday)[T.3]:C(hour)[T.13] -8.0834 12.854 -0.629 0.529 -33.278 17.111\nC(weekday)[T.4]:C(hour)[T.13] -19.2660 12.849 -1.499 0.134 -44.451 5.919\nC(weekday)[T.5]:C(hour)[T.13] -168.8350 12.850 -13.139 0.000 -194.021 -143.649\nC(weekday)[T.6]:C(hour)[T.13] -156.0927 12.846 -12.151 0.000 -181.271 -130.914\nC(weekday)[T.1]:C(hour)[T.14] 3.5832 12.853 0.279 0.780 -21.609 28.775\nC(weekday)[T.2]:C(hour)[T.14] -10.7601 12.854 -0.837 0.403 -35.956 14.435\nC(weekday)[T.3]:C(hour)[T.14] -9.4850 12.855 -0.738 0.461 -34.682 15.712\nC(weekday)[T.4]:C(hour)[T.14] -29.6413 12.851 -2.307 0.021 -54.830 -4.453\nC(weekday)[T.5]:C(hour)[T.14] -169.1355 12.850 -13.162 0.000 -194.323 -143.948\nC(weekday)[T.6]:C(hour)[T.14] -146.4668 12.846 -11.402 0.000 -171.646 -121.288\nC(weekday)[T.1]:C(hour)[T.15] 7.6746 12.852 0.597 0.550 -17.515 32.864\nC(weekday)[T.2]:C(hour)[T.15] -7.8948 12.853 -0.614 0.539 -33.087 17.298\nC(weekday)[T.3]:C(hour)[T.15] -4.4390 12.854 -0.345 0.730 -29.633 20.755\nC(weekday)[T.4]:C(hour)[T.15] -36.2094 12.851 -2.818 0.005 -61.398 -11.021\nC(weekday)[T.5]:C(hour)[T.15] -151.9363 12.851 -11.823 0.000 -177.124 -126.748\nC(weekday)[T.6]:C(hour)[T.15] -131.3300 12.846 -10.224 0.000 -156.509 -106.151\nC(weekday)[T.1]:C(hour)[T.16] 10.1146 12.852 0.787 0.431 -15.075 35.304\nC(weekday)[T.2]:C(hour)[T.16] -13.4999 12.855 -1.050 0.294 -38.696 11.696\nC(weekday)[T.3]:C(hour)[T.16] -3.8428 12.853 -0.299 0.765 -29.036 21.351\nC(weekday)[T.4]:C(hour)[T.16] -41.0287 12.850 -3.193 0.001 -66.216 -15.841\nC(weekday)[T.5]:C(hour)[T.16] -150.7744 12.850 -11.733 0.000 -175.961 -125.588\nC(weekday)[T.6]:C(hour)[T.16] -107.2603 12.846 -8.350 0.000 -132.438 -82.082\nC(weekday)[T.1]:C(hour)[T.17] 6.1321 12.851 0.477 0.633 -19.057 31.321\nC(weekday)[T.2]:C(hour)[T.17] -15.8141 12.854 -1.230 0.219 -41.009 9.381\nC(weekday)[T.3]:C(hour)[T.17] -13.7108 12.853 -1.067 0.286 -38.904 11.483\nC(weekday)[T.4]:C(hour)[T.17] -60.7848 12.851 -4.730 0.000 -85.973 -35.597\nC(weekday)[T.5]:C(hour)[T.17] -141.3721 12.850 -11.002 0.000 -166.558 
-116.186\nC(weekday)[T.6]:C(hour)[T.17] -95.5304 12.845 -7.437 0.000 -120.707 -70.354\nC(weekday)[T.1]:C(hour)[T.18] 2.1231 12.851 0.165 0.869 -23.065 27.311\nC(weekday)[T.2]:C(hour)[T.18] -14.7173 12.853 -1.145 0.252 -39.910 10.475\nC(weekday)[T.3]:C(hour)[T.18] -18.6454 12.853 -1.451 0.147 -43.838 6.547\nC(weekday)[T.4]:C(hour)[T.18] -74.4545 12.851 -5.794 0.000 -99.643 -49.266\nC(weekday)[T.5]:C(hour)[T.18] -134.3285 12.850 -10.454 0.000 -159.515 -109.142\nC(weekday)[T.6]:C(hour)[T.18] -89.5318 12.845 -6.970 0.000 -114.708 -64.355\nC(weekday)[T.1]:C(hour)[T.19] 2.9413 12.851 0.229 0.819 -22.247 28.130\nC(weekday)[T.2]:C(hour)[T.19] -6.7866 12.852 -0.528 0.597 -31.977 18.404\nC(weekday)[T.3]:C(hour)[T.19] -16.1142 12.852 -1.254 0.210 -41.305 9.077\nC(weekday)[T.4]:C(hour)[T.19] -72.3864 12.851 -5.633 0.000 -97.574 -47.198\nC(weekday)[T.5]:C(hour)[T.19] -126.0005 12.850 -9.806 0.000 -151.187 -100.814\nC(weekday)[T.6]:C(hour)[T.19] -74.0456 12.845 -5.765 0.000 -99.222 -48.870\nC(weekday)[T.1]:C(hour)[T.20] 4.2810 12.851 0.333 0.739 -20.908 29.470\nC(weekday)[T.2]:C(hour)[T.20] -10.0548 12.852 -0.782 0.434 -35.245 15.136\nC(weekday)[T.3]:C(hour)[T.20] -15.0396 12.852 -1.170 0.242 -40.231 10.151\nC(weekday)[T.4]:C(hour)[T.20] -65.5416 12.850 -5.101 0.000 -90.728 -40.355\nC(weekday)[T.5]:C(hour)[T.20] -106.0449 12.849 -8.253 0.000 -131.231 -80.859\nC(weekday)[T.6]:C(hour)[T.20] -52.1952 12.845 -4.064 0.000 -77.372 -27.019\nC(weekday)[T.1]:C(hour)[T.21] 10.6535 12.851 0.829 0.407 -14.534 35.841\nC(weekday)[T.2]:C(hour)[T.21] -14.8287 12.852 -1.154 0.249 -40.020 10.362\nC(weekday)[T.3]:C(hour)[T.21] -18.6243 12.852 -1.449 0.147 -43.816 6.567\nC(weekday)[T.4]:C(hour)[T.21] -50.5523 12.850 -3.934 0.000 -75.740 -25.365\nC(weekday)[T.5]:C(hour)[T.21] -80.5735 12.850 -6.270 0.000 -105.760 -55.387\nC(weekday)[T.6]:C(hour)[T.21] -35.5806 12.845 -2.770 0.006 -60.757 -10.404\nC(weekday)[T.1]:C(hour)[T.22] 13.5161 12.851 1.052 0.293 -11.672 38.705\nC(weekday)[T.2]:C(hour)[T.22] -11.4437 12.852 -0.890 0.373 -36.635 13.748\nC(weekday)[T.3]:C(hour)[T.22] -12.9284 12.852 -1.006 0.314 -38.120 12.263\nC(weekday)[T.4]:C(hour)[T.22] -35.8049 12.851 -2.786 0.005 -60.993 -10.617\nC(weekday)[T.5]:C(hour)[T.22] -48.8802 12.850 -3.804 0.000 -74.067 -23.693\nC(weekday)[T.6]:C(hour)[T.22] -16.6274 12.845 -1.294 0.196 -41.804 8.549\nC(weekday)[T.1]:C(hour)[T.23] 15.8422 12.852 1.233 0.218 -9.348 41.032\nC(weekday)[T.2]:C(hour)[T.23] -5.2718 12.853 -0.410 0.682 -30.463 19.920\nC(weekday)[T.3]:C(hour)[T.23] -6.3897 12.853 -0.497 0.619 -31.581 18.802\nC(weekday)[T.4]:C(hour)[T.23] -23.8773 12.852 -1.858 0.063 -49.067 1.313\nC(weekday)[T.5]:C(hour)[T.23] -31.3646 12.851 -2.441 0.015 -56.553 -6.176\nC(weekday)[T.6]:C(hour)[T.23] -0.4384 12.846 -0.034 0.973 -25.617 24.740\nTemp_Norm -68.6609 9.453 -7.263 0.000 -87.190 -50.132\nTemp_Norm:C(hour)[T.1] -5.3790 5.038 -1.068 0.286 -15.254 4.496\nTemp_Norm:C(hour)[T.2] -2.1789 5.163 -0.422 0.673 -12.298 7.940\nTemp_Norm:C(hour)[T.3] 3.6797 5.259 0.700 0.484 -6.628 13.988\nTemp_Norm:C(hour)[T.4] 15.8257 5.331 2.969 0.003 5.377 26.275\nTemp_Norm:C(hour)[T.5] 10.0501 5.315 1.891 0.059 -0.367 20.467\nTemp_Norm:C(hour)[T.6] -18.3154 5.090 -3.598 0.000 -28.292 -8.339\nTemp_Norm:C(hour)[T.7] -37.3330 4.765 -7.834 0.000 -46.673 -27.993\nTemp_Norm:C(hour)[T.8] -47.7697 4.505 -10.603 0.000 -56.601 -38.939\nTemp_Norm:C(hour)[T.9] -57.0840 4.347 -13.131 0.000 -65.605 -48.563\nTemp_Norm:C(hour)[T.10] -61.0501 4.286 -14.245 0.000 -69.450 -52.650\nTemp_Norm:C(hour)[T.11] -58.5721 4.277 -13.695 
0.000 -66.955 -50.189\nTemp_Norm:C(hour)[T.12] -54.8722 4.292 -12.785 0.000 -63.285 -46.460\nTemp_Norm:C(hour)[T.13] -48.5805 4.312 -11.268 0.000 -57.031 -40.130\nTemp_Norm:C(hour)[T.14] -37.7095 4.320 -8.729 0.000 -46.177 -29.242\nTemp_Norm:C(hour)[T.15] -23.0896 4.320 -5.345 0.000 -31.556 -14.623\nTemp_Norm:C(hour)[T.16] -16.1147 4.301 -3.747 0.000 -24.545 -7.684\nTemp_Norm:C(hour)[T.17] -27.4735 4.298 -6.392 0.000 -35.899 -19.048\nTemp_Norm:C(hour)[T.18] -23.0480 4.335 -5.317 0.000 -31.544 -14.552\nTemp_Norm:C(hour)[T.19] -1.9596 4.419 -0.443 0.657 -10.620 6.701\nTemp_Norm:C(hour)[T.20] 12.0138 4.500 2.670 0.008 3.194 20.834\nTemp_Norm:C(hour)[T.21] 8.5639 4.606 1.859 0.063 -0.465 17.592\nTemp_Norm:C(hour)[T.22] 4.2954 4.721 0.910 0.363 -4.957 13.548\nTemp_Norm:C(hour)[T.23] 2.4786 4.840 0.512 0.609 -7.007 11.965\nTemp_Norm:C(month)[T.2] 16.2930 10.020 1.626 0.104 -3.347 35.933\nTemp_Norm:C(month)[T.3] 23.3681 10.105 2.312 0.021 3.561 43.175\nTemp_Norm:C(month)[T.4] 65.7583 9.199 7.148 0.000 47.728 83.789\nTemp_Norm:C(month)[T.5] 159.1896 10.874 14.640 0.000 137.876 180.503\nTemp_Norm:C(month)[T.6] 166.9858 15.377 10.860 0.000 136.847 197.125\nTemp_Norm:C(month)[T.7] 264.6119 25.623 10.327 0.000 214.389 314.835\nTemp_Norm:C(month)[T.8] 178.5430 22.036 8.102 0.000 135.352 221.734\nTemp_Norm:C(month)[T.9] 133.1027 13.847 9.612 0.000 105.962 160.244\nTemp_Norm:C(month)[T.10] 127.0785 9.337 13.611 0.000 108.778 145.379\nTemp_Norm:C(month)[T.11] 23.0011 11.289 2.038 0.042 0.875 45.128\nTemp_Norm:C(month)[T.12] -6.1431 11.769 -0.522 0.602 -29.212 16.926\nTemp_Norm2 46.8979 4.051 11.578 0.000 38.958 54.838\nTemp_Norm2:C(hour)[T.1] -6.7922 4.045 -1.679 0.093 -14.721 1.137\nTemp_Norm2:C(hour)[T.2] -7.2007 4.068 -1.770 0.077 -15.173 0.772\nTemp_Norm2:C(hour)[T.3] -8.7551 4.072 -2.150 0.032 -16.737 -0.773\nTemp_Norm2:C(hour)[T.4] -8.3277 4.085 -2.038 0.042 -16.335 -0.320\nTemp_Norm2:C(hour)[T.5] -14.1854 4.062 -3.492 0.000 -22.147 -6.224\nTemp_Norm2:C(hour)[T.6] -23.7351 3.968 -5.982 0.000 -31.513 -15.957\nTemp_Norm2:C(hour)[T.7] -30.2120 3.837 -7.874 0.000 -37.733 -22.691\nTemp_Norm2:C(hour)[T.8] -35.5661 3.770 -9.434 0.000 -42.955 -28.177\nTemp_Norm2:C(hour)[T.9] -37.6081 3.753 -10.020 0.000 -44.964 -30.252\nTemp_Norm2:C(hour)[T.10] -36.8893 3.747 -9.844 0.000 -44.235 -29.544\nTemp_Norm2:C(hour)[T.11] -32.2166 3.764 -8.559 0.000 -39.595 -24.839\nTemp_Norm2:C(hour)[T.12] -29.6368 3.774 -7.853 0.000 -37.033 -22.240\nTemp_Norm2:C(hour)[T.13] -26.8816 3.788 -7.097 0.000 -34.306 -19.457\nTemp_Norm2:C(hour)[T.14] -20.2397 3.789 -5.342 0.000 -27.666 -12.813\nTemp_Norm2:C(hour)[T.15] -14.8185 3.790 -3.909 0.000 -22.248 -7.389\nTemp_Norm2:C(hour)[T.16] -9.0722 3.774 -2.404 0.016 -16.469 -1.676\nTemp_Norm2:C(hour)[T.17] -0.6812 3.770 -0.181 0.857 -8.070 6.708\nTemp_Norm2:C(hour)[T.18] 3.1939 3.811 0.838 0.402 -4.277 10.664\nTemp_Norm2:C(hour)[T.19] 7.9381 3.870 2.051 0.040 0.352 15.524\nTemp_Norm2:C(hour)[T.20] 13.7865 3.905 3.530 0.000 6.132 21.441\nTemp_Norm2:C(hour)[T.21] 16.3843 3.962 4.136 0.000 8.619 24.149\nTemp_Norm2:C(hour)[T.22] 13.2291 4.004 3.304 0.001 5.381 21.077\nTemp_Norm2:C(hour)[T.23] 6.8167 4.039 1.688 0.091 -1.099 14.733\nTemp_Norm2:C(month)[T.2] 9.9211 4.109 2.414 0.016 1.866 17.976\nTemp_Norm2:C(month)[T.3] 22.8510 4.289 5.327 0.000 14.443 31.258\nTemp_Norm2:C(month)[T.4] 90.0379 4.505 19.985 0.000 81.207 98.869\nTemp_Norm2:C(month)[T.5] 187.5059 6.130 30.591 0.000 175.492 199.520\nTemp_Norm2:C(month)[T.6] 269.1734 8.172 32.937 0.000 253.155 285.192\nTemp_Norm2:C(month)[T.7] 224.0601 
11.515 19.458 0.000 201.490 246.631\nTemp_Norm2:C(month)[T.8] 286.2384 11.027 25.958 0.000 264.625 307.852\nTemp_Norm2:C(month)[T.9] 270.5962 7.293 37.105 0.000 256.302 284.890\nTemp_Norm2:C(month)[T.10] 229.8348 4.986 46.094 0.000 220.061 239.608\nTemp_Norm2:C(month)[T.11] 1.0268 5.737 0.179 0.858 -10.218 12.272\nTemp_Norm2:C(month)[T.12] -7.6658 4.506 -1.701 0.089 -16.498 1.166\n==============================================================================\nOmnibus: 3046.293 Durbin-Watson: 0.224\nProb(Omnibus): 0.000 Jarque-Bera (JB): 21208.345\nSkew: 0.357 Prob(JB): 0.00\nKurtosis: 7.407 Cond. No. 331.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n"
]
],
[
[
"All of those terms appeared significant too! Excellent. Now we have our naive energy model. It takes into account the hour of day, day of week, month of year, and the complicated relationship with temperature. \n\nTime to see how good our predictions are. One evaluation of model performance is the mean absolute percentage error (MAPE). This evaluates on average how far off are our predictions in terms of percentages. We need to get our predictions from our training data set. The ```fittedvalues``` function will do that for us. Then we can calculate the MAPE ourselves.",
"_____no_output_____"
]
],
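[
[
"As a quick aside, here is a minimal sketch (our own helper, not part of the course code) of MAPE as a reusable function; it assumes the actual series contains no zeros, which holds for this energy data.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndef mape(actual, predicted):\n    # Mean absolute percentage error, in percent (assumes no zeros in `actual`).\n    actual = np.asarray(actual, dtype=float)\n    predicted = np.asarray(predicted, dtype=float)\n    return np.mean(np.abs((actual - predicted) / actual)) * 100\n\n# Toy check: mape([100, 200], [110, 190]) -> 7.5",
"_____no_output_____"
]
],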
[
[
"train['fitted'] = results.fittedvalues\n\ntrain['APE'] = abs((train['MW']-train['fitted'])/train['MW'])*100\nprint(\"Training Naive Model MAPE is: \", train['APE'].mean())",
"Training Naive Model MAPE is: 3.5119541032055452\n"
]
],
[
[
"On average, our model incorrectly predicted energy usage by a little over 3.5%! That gives us a good baseline to compare our future models with.\n\n",
"_____no_output_____"
]
],
[
[
"test['Temp_Norm'] = (test['Temp']-test['Temp'].mean())/test['Temp'].std()\ntest['Temp_Norm2'] = test['Temp_Norm']**2",
"_____no_output_____"
]
],
[
[
"Let's forecast out our model by scoring the test data set with the linear regression we built. Remember, we don't want to build a model on the test data set, just run the observations through the equation we got from the training model. These are our January 2020 predictions! The ```predict``` function will help us with this. We need to specify which data set we are predicting as you see with the ```predict(test)``` below. Let's look at the first few observations from this prediction!",
"_____no_output_____"
]
],
[
[
"test['pred'] = results.predict(test)\n\ntest.head()",
"_____no_output_____"
]
],
[
[
"Good! Now let's plot our predictions for the test data set against the actual values.",
"_____no_output_____"
]
],
[
[
"test['MW'].plot(color = 'blue', figsize=(9,7))\n\nplt.ylabel('MW Hours')\nplt.xlabel('Date')\n\ntest['pred'].plot(color = 'green', linestyle = 'dashed', figsize=(9,7))\n\nplt.legend(loc=\"best\");\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Those look like rather good predictions! Let's see what the MAPE is on these.",
"_____no_output_____"
]
],
[
[
"test['APE'] = abs((test['MW']-test['pred'])/test['MW'])*100\nprint(\"Naive Model MAPE is: \", test['APE'].mean())",
"Naive Model MAPE is: 4.3947190107463365\n"
]
],
[
[
"Great! Remember, the MAPE is probably going to be higher because our model hasn't seen this data before. This is a great way to truly evaluate how well your model will do when deployed in a real world setting since you won't know energy data before you predict it. Looks like our model is only off by 4.4% on average.\n\nThe foundation is laid in this step. Model building can be complicated and sometimes it is hard to know when to stop. The best plan is to build a foundational model that you can try to build upon and/or outperform with later editions of your model. Without a good baseline, you won't know how good your final model is. These seasonal effects of hours of day, days of week, months of year as well as the temperature effects build a great first attempt at forecasting future energy usage.\n\nThis is a great initial model if your boss needs a check-in to see your progress. This model gets you a long way there since you have incorporated temperature's complicated relationship. In the next milestones you get to build on this great foundation to really show your boss what you can do!",
"_____no_output_____"
],
[
"## Dynamic Time Series Model",
"_____no_output_____"
],
[
"Now that we have recreated the important pieces of milestones 1 and 3, we can move on to milestone 4's objectives. \n\nWe have a great foundational, naive energy model. This model accounts for the energy's relationship with hour of day, day of week, month of year, and the complicated relationship with temperature. However, previous values of energy usage probably play some impact on the prediction of current energy usage. This is the basis for time series modeling!\n\nFirst, we need to get the residuals from the naive energy model. We will use these residuals as inputs to our dynamic time series model. We can use the ```resid``` function to do this.",
"_____no_output_____"
]
],
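[
[
"To write the idea down (notation ours, not from the original course materials): if $y_t$ is the observed load and $\\hat{y}^{reg}_t$ is the naive regression fit, then the residual is $r_t = y_t - \\hat{y}^{reg}_t$. We fit a time series model to $r_t$, and the final dynamic forecast is the sum of the two pieces: $\\hat{y}_t = \\hat{y}^{reg}_t + \\hat{r}_t$.",
"_____no_output_____"
]
],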
[
[
"train['resid'] = results.resid",
"_____no_output_____"
]
],
[
[
"Just like with our original energy data, let's plot the residuals from our model to see what we have.",
"_____no_output_____"
]
],
[
[
"ax1 = train['resid'].plot(color = 'blue', figsize=(9,7))\n\nax1.set_ylabel('Residuals')\nax1.set_xlabel('Date')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Looks like we still see the seasonal effects that we had in our original data. Summer months seem to have bigger residuals (model errors) than the rest of the year. \n\nLet's zoom in on a specific week from December to see what our residuals look like.",
"_____no_output_____"
]
],
[
[
"ax1 = train['2019-12-01':'2019-12-07']['resid'].plot(color = 'blue', figsize=(9,7))\n\nax1.set_ylabel('Residuals')\nax1.set_xlabel('Date')\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"It appears that we still have some daily effects as well. Different hours of the day we do worse at predicting energy than other hours. Let's see if time series models can help us correct this!",
"_____no_output_____"
],
[
"### Exponential Smoothing Models ",
"_____no_output_____"
],
[
"#### Winters Seasonal Exponential Smoothing Model ",
"_____no_output_____"
],
[
"Exponential smoothing models can be used to predict a variety of different types of data. There are different models depending on whether our data is trending and/or contains a seasonal effect as well. The Winters exponential smoothing model accounts for seasonal effects while the Holt exponential smoothing model accounts for trend. Since our residuals don't trend, but still have a seasonal effect we should use the Winter's Seasonal Exponential Smoothing Model. Let's try to forecast our energy residuals with this model!\n\nThe ```ExponentialSmoothing``` function will help us with this. Remember that we don't want a trend. Also, since our data is hourly and appears we have a daily effect, we set the seasonal periods to 24. You can play around with either an additive (```seasonal='add'```) or multiplicative (```seasonal='mult'```) effect. Use the resources provided with the milestone if you are interested in learning the difference between those!",
"_____no_output_____"
]
],
[
[
"mod_tes = ExponentialSmoothing(train['resid'], trend=None, seasonal='add', seasonal_periods=24)\n\nres_tes = mod_tes.fit()\nprint(res_tes.summary())",
"C:\\Users\\adlabarr\\Anaconda3\\lib\\site-packages\\statsmodels\\tsa\\base\\tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n"
]
],
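[
[
"If you want to sanity-check the configuration choice, below is a minimal sketch (our own illustration, and potentially slow to run) that refits a couple of additive candidates and compares their AIC values, where lower is better. Note that multiplicative seasonality generally requires strictly positive data, so it is not a natural fit for residuals, which can be negative.",
"_____no_output_____"
]
],
[
[
"# Hedged sketch: compare candidate additive Winters configurations by AIC (lower is better).\n# The weekly-seasonal fit (seasonal_periods=168) can take a while on hourly data.\ncandidates = {\n    'additive, daily (s=24)': dict(trend=None, seasonal='add', seasonal_periods=24),\n    'additive, weekly (s=168)': dict(trend=None, seasonal='add', seasonal_periods=168),\n}\nfor name, kwargs in candidates.items():\n    fit = ExponentialSmoothing(train['resid'], **kwargs).fit()\n    print(name, 'AIC:', round(fit.aic, 1))",
"_____no_output_____"
]
],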
[
[
"We can then use the ```forecast``` functions to forecast out the month of January which is 744 observations. Careful though. These forecasts are the **residuals**.",
"_____no_output_____"
]
],
[
[
"forecast = pd.DataFrame(res_tes.forecast(744))\nforecast.index = test.index.copy()\n\nax1 = forecast.plot(color = 'blue', figsize=(9,7))\n\nax1.set_ylabel('Forecast')\nax1.set_xlabel('Date')\n\nplt.show()",
"C:\\Users\\adlabarr\\Anaconda3\\lib\\site-packages\\statsmodels\\tsa\\base\\tsa_model.py:583: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.\n ValueWarning)\n"
]
],
[
[
"Let's go ahead and save these model fitted values (from the training data) and forecasts (the test data) to our respective data frames. That way we can evaluate them best.",
"_____no_output_____"
]
],
[
[
"train['fitted_resid'] = res_tes.fittedvalues\ntest['pred_resid'] = forecast",
"_____no_output_____"
]
],
[
[
"Our energy forecast isn't the residual forecast. It is the combination the forecast from the naive model **and** the new exponential smoothing model on the residuals. Add these two forecasts together to get your new dynamic energy model forecasts for each the training and test data sets. ",
"_____no_output_____"
]
],
[
[
"train['fitted_ESM'] = train['fitted'] + train['fitted_resid']\ntest['pred_ESM'] = test['pred'] + test['pred_resid']",
"_____no_output_____"
]
],
[
[
"Now let's view our forecast just like we did with the naive model!",
"_____no_output_____"
]
],
[
[
"test['MW'].plot(color = 'blue', figsize=(9,7))\n\nplt.ylabel('MW Hours')\nplt.xlabel('Date')\n\ntest['pred_ESM'].plot(color = 'green', linestyle = 'dashed', figsize=(9,7))\n\nplt.legend(loc=\"best\");\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Just like with the naive model, let's calculate the MAPE for our new dynamic energy model using exponential smoothing. First let's do this on the training data.",
"_____no_output_____"
]
],
[
[
"train['APE_ESM'] = abs((train['MW']-train['fitted_ESM'])/train['MW'])*100\nprint(\"Training Naive + ESM Model MAPE is: \", train['APE_ESM'].mean())",
"Training Naive + ESM Model MAPE is: 1.4789918216232285\n"
]
],
[
[
"Wow! Our naive model had a training data set of about 3.5%, but this is down to nearly 1.5%! Our model seems to have improved. Let's check the test data set though and calculate a MAPE there.",
"_____no_output_____"
]
],
[
[
"test['APE_ESM'] = abs((test['MW']-test['pred_ESM'])/test['MW'])*100\nprint(\"Naive + ESM Model MAPE is: \", test['APE_ESM'].mean())",
"Naive + ESM Model MAPE is: 5.458113823008118\n"
]
],
[
[
"So we didn't see as much improvement in the test data set, but we still have some promise here based on the training data set improvement. \n\nExponential smoothing models aren't the only time series models we could use. Instead of using ESM's we could try another class of time series model - the ARIMA model.",
"_____no_output_____"
],
[
"### ARIMA Model",
"_____no_output_____"
],
[
"#### Model Selection",
"_____no_output_____"
],
[
"There are many techniques to building ARIMA models. Classical approaches involve looking at correlation functions. More modern approaches use computer algorithms to build grids of models and compare. The nuances of these approaches are discussed in the resources provided. A brief outline is given here.\n\nLooking at the correlation patterns of the data across time can reveal the best underlying model for the data. There are two correlation functions that we need to look at to get the full picture:\n 1. Autocorrelation Function (ACF)\n 2. Partial Autocorrelation Function (PACF)\n\nLet's look at the ACF of our data with the ```plot_acf``` function.",
"_____no_output_____"
]
],
[
[
"fig = tsaplots.plot_acf(train['resid'].diff(24)[25:], lags = 72)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"From this plot we can see an exponentially decreasing pattern. This signals some potential for autoregressive (AR) terms in our model. We also see a random spike at 24. This signals a potential moving average (MA) term as well.\n\nNow let's look at the PACF of the residuals with the ```plot_pacf``` function.",
"_____no_output_____"
]
],
[
[
"fig = tsaplots.plot_pacf(train['resid'].diff(24)[25:], lags = 72)\n\nplt.show()",
"_____no_output_____"
]
],
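[
[
"If you prefer numbers to plots, the same information can be pulled out numerically. A minimal sketch (ours) using statsmodels' ```acf``` and ```pacf``` helpers on the seasonally differenced residuals:",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.stattools import acf, pacf\n\n# Seasonally difference the residuals (period 24) and drop the leading missing values, as in the plots above.\ndiffed = train['resid'].diff(24).dropna()\n\nacf_vals = acf(diffed, nlags=72, fft=True)\npacf_vals = pacf(diffed, nlags=72)\n\nprint('ACF, lags 1-5: ', acf_vals[1:6])\nprint('PACF, lags 1-5:', pacf_vals[1:6])",
"_____no_output_____"
]
],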
[
[
"We have a couple of spikes early on in this plot followed by a lot of nothing. Definitely an AR patterns with 2 as its order (p = 2 in ARIMA terminology). We also see an exponentially decreasing set of spikes every 24 hours. This coincides with the single spike at 24 from the ACF plot. Definitely a moving average (MA) term at that seasonal period (in ARIMA terminology this is Q = 1).\n\nWe also know that our data still has some seasonal effects every 24 hours so we should take a seasonal difference to account for this. ",
"_____no_output_____"
]
],
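[
[
"In the usual $\\mathrm{ARIMA}(p,d,q)\\times(P,D,Q)_s$ notation (our summary of the reasoning above), the model we are about to fit is $\\mathrm{ARIMA}(2,0,0)\\times(0,1,1)_{24}$: two non-seasonal AR terms, no non-seasonal differencing or MA terms, one seasonal difference, and one seasonal MA term at period 24.",
"_____no_output_____"
]
],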
[
[
"mod = SARIMAX(train['resid'], order=(2,0,0), seasonal_order=(0,1,1,24))\nres = mod.fit()\n\nprint(res.summary())",
"C:\\Users\\adlabarr\\Anaconda3\\lib\\site-packages\\statsmodels\\tsa\\base\\tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\nC:\\Users\\adlabarr\\Anaconda3\\lib\\site-packages\\statsmodels\\tsa\\base\\tsa_model.py:218: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n"
]
],
[
[
"Let's take a look at the results that we just got. It appears based on the p-values above that all of our terms are significant which is great. \n\nLet's forecast out the next 744 hours (our test data set) to see what it looks like. Again, we can use the ```forecast``` function to do this. Remember though, this is only a forecast of our residuals!",
"_____no_output_____"
]
],
[
[
"forecast = pd.DataFrame(res.forecast(744))\nforecast.index = test.index.copy()\n\nax1 = forecast.plot(color = 'blue', figsize=(9,7))\n\nax1.set_ylabel('Forecast')\nax1.set_xlabel('Date')\n\nplt.show()",
"C:\\Users\\adlabarr\\Anaconda3\\lib\\site-packages\\statsmodels\\tsa\\base\\tsa_model.py:583: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.\n ValueWarning)\n"
]
],
[
[
"Just with the ESM model, let's go ahead and save the predicted values and forecasts to our respective data frames. This will make it easier to see how well we did.",
"_____no_output_____"
]
],
[
[
"train['fitted_resid2'] = res.predict()\ntest['pred_resid2'] = forecast",
"_____no_output_____"
]
],
[
[
"Now, let's add these ARIMA forecasts of our residuals to the previous forecasts we developed from our naive energy model to form our dynamic energy model using ARIMA techniques. ",
"_____no_output_____"
]
],
[
[
"train['fitted_ARIMA'] = train['fitted'] + train['fitted_resid2']\ntest['pred_ARIMA'] = test['pred'] + test['pred_resid2']",
"_____no_output_____"
]
],
[
[
"Let's plot this forecast to see how well we did in the test data set.",
"_____no_output_____"
]
],
[
[
"test['MW'].plot(color = 'blue', figsize=(9,7))\n\nplt.ylabel('MW Hours')\nplt.xlabel('Date')\n\ntest['pred_ARIMA'].plot(color = 'green', linestyle = 'dashed', figsize=(9,7))\n\nplt.legend(loc=\"best\");\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"It is visually a little hard to determine how well we did in comparison to the other models that we have developed. Let's calculate the MAPE for both our training and testing data sets.",
"_____no_output_____"
]
],
[
[
"train['APE_ARIMA'] = abs((train['MW']-train['fitted_ARIMA'])/train['MW'])*100\nprint(\"Training Naive + ARIMA Model MAPE is: \", train['APE_ARIMA'].mean())",
"Training Naive + ARIMA Model MAPE is: 1.3663869635441892\n"
]
],
[
[
"Wow! Our naive model had a training data set of about 3.5%, and ESM dynamic model had a MAPE of 1.5%, but this is down to nearly 1.4%! Our model seems to have improved. Let's check the test data set though and calculate a MAPE there.",
"_____no_output_____"
]
],
[
[
"test['APE_ARIMA'] = abs((test['MW']-test['pred_ARIMA'])/test['MW'])*100\nprint(\"Naive + ARIMA Model MAPE is: \", test['APE_ARIMA'].mean())",
"Naive + ARIMA Model MAPE is: 5.5553216961887655\n"
]
],
[
[
"Again, we didn't see as much improvement in the test data set, but we still have some promise here based on the training data set improvement. \n\nFeel free to play around with other seasonal ARIMA models to see if you can improve the forecasts! These techniques are memory intensive and time consuming however. Just be prepared for this as you build models. If you are running this in a colab environment, you might need to restart the kernel at each model build because of the memory and time consumption. Local installations might not have this problem. \n\nOne potential improvement to modeling in time series is to ensemble (or average) multiple models' forecasts to make a better forecast. It doesn't always work, but always worth trying since it is rather easy. First, let's take the average of our two residual forecasts and add that to our naive model instead of just picking either the ESM or the ARIMA.",
"_____no_output_____"
]
],
[
[
"train['fitted_Ensemble'] = train['fitted'] + 0.5*train['fitted_resid'] + 0.5*train['fitted_resid2']\ntest['pred_Ensemble'] = test['pred'] + 0.5*test['pred_resid'] + 0.5*test['pred_resid2']",
"_____no_output_____"
]
],
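[
[
"The 50/50 split above is just a starting point. As a hedged sketch (our own illustration), you could search a grid of weights and keep the one with the lowest training MAPE; keep in mind that tuning the weight on the training data risks overfitting, so a held-out validation slice would be safer in practice.",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\n# Hedged sketch: grid-search the ESM/ARIMA blend weight on the training data.\nbest_w, best_mape = 0.5, np.inf\nfor w in np.linspace(0, 1, 21):\n    blend = train['fitted'] + w * train['fitted_resid'] + (1 - w) * train['fitted_resid2']\n    m = (abs((train['MW'] - blend) / train['MW']) * 100).mean()\n    if m < best_mape:\n        best_w, best_mape = w, m\nprint('Best ESM weight:', best_w, ' training MAPE:', round(best_mape, 3))",
"_____no_output_____"
]
],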
[
[
"Now let's check the MAPE of both the training and testing data sets.",
"_____no_output_____"
]
],
[
[
"train['APE_Ensemble'] = abs((train['MW']-train['fitted_Ensemble'])/train['MW'])*100\nprint(\"Training Naive + Ensemble Model MAPE is: \", train['APE_Ensemble'].mean())",
"Training Naive + Ensemble Model MAPE is: 1.3776474681630708\n"
],
[
"test['APE_Ensemble'] = abs((test['MW']-test['pred_Ensemble'])/test['MW'])*100\nprint(\"Naive + Ensemble Model MAPE is: \", test['APE_Ensemble'].mean())",
"Naive + Ensemble Model MAPE is: 5.4762931305278135\n"
]
],
[
[
"Looks like the ensemble didn't do too much to improve our forecasts. If that is the case, it might not be the analytical techniques as much as the variables that go into them. That is what we will be covering in the next milestone!\n\nSo many times forecasters will stop at simple regression techniques or only use time series approaches in isolation. The benefit can really be felt by merging the two together as you will do in this milestone. Gaining the benefit of the external variable relationships as well as the correlations across time can greater improve your forecasts and reduce your prediction errors. Now you can really display your analytical talent for your boss. If they were impressed with your last model, then this one should really help drive home the impact you are making in helping them getting more accurate forecasts to improve their business decisions!",
"_____no_output_____"
],
[
"#### OPTIONAL Additional Code in ARIMA",
"_____no_output_____"
],
[
"Python has some built in functions to try and select ARIMA models automatically. Unfortunately, they use grid search techniques to build many different ARIMA models which as mentioned earlier can be both time and memory intensive. For this reason, we are not going over this function in this course. However, feel free to play around with the code below and investigate more on your own!",
"_____no_output_____"
]
],
[
[
"#!pip install scipy \n#!pip install pmdarima \n\n#from pmdarima import auto_arima\n\n#mod_auto = auto_arima(train['resid'], start_p=0, start_q=0, max_p=3, max_q=3, \n #start_P=2, start_Q=0, max_P=2, max_Q=0, m=24, \n #seaonal=True, trace=True, d=0, D=1, error_action='warn', \n #stepwise=True)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
cbbd32f51d45229209286075e82c6daa8dfa019b
| 5,784 |
ipynb
|
Jupyter Notebook
|
experiments/actor-mimic-experiments-all/train_expert_2.ipynb
|
Tony-Cheng/Active-Reinforcement-Learning
|
50bb65106ae1f957d8cb6cb5706ce1285519e6b4
|
[
"MIT"
] | null | null | null |
experiments/actor-mimic-experiments-all/train_expert_2.ipynb
|
Tony-Cheng/Active-Reinforcement-Learning
|
50bb65106ae1f957d8cb6cb5706ce1285519e6b4
|
[
"MIT"
] | null | null | null |
experiments/actor-mimic-experiments-all/train_expert_2.ipynb
|
Tony-Cheng/Active-Reinforcement-Learning
|
50bb65106ae1f957d8cb6cb5706ce1285519e6b4
|
[
"MIT"
] | null | null | null | 29.065327 | 141 | 0.574862 |
[
[
[
"from tqdm.notebook import tqdm\nimport math\nimport gym\nimport torch\nimport torch.optim as optim \nfrom torch.utils.tensorboard import SummaryWriter\nfrom collections import deque\n\nfrom active_rl.networks.dqn_atari import DQN\nfrom active_rl.utils.memory import ReplayMemory\nfrom active_rl.utils.optimization import standard_optimization\nfrom active_rl.environments.atari_wrappers import make_atari, wrap_deepmind\nfrom active_rl.utils.atari_utils import fp, ActionSelector, evaluate",
"_____no_output_____"
],
[
"env_name = 'Boxing'\nenv_raw = make_atari('{}NoFrameskip-v4'.format(env_name))\nenv = wrap_deepmind(env_raw, frame_stack=False, episode_life=True, clip_rewards=True)\nc,h,w = c,h,w = fp(env.reset()).shape\nn_actions = env.action_space.n",
"_____no_output_____"
],
[
"BATCH_SIZE = 64\nLR = 0.0000625\nGAMMA = 0.99\nEPS_START = 1.\nEPS_END = 0.05\nEPS_DECAY = 1000000 \nNUM_STEPS = 20000000\nPOLICY_UPDATE = 4\nTARGET_UPDATE = 4000\nEVALUATE_FREQ = 10000\nMEMORY_CAPACITY = 100000\nSAVE_FREQ = 1000000\n\nNAME = f'train_expert_{env_name}'\nFILE_NAME = f'expert_{env_name}'",
"_____no_output_____"
],
[
"device = 'cuda:1'\npolicy_net = DQN(n_actions).to(device)\ntarget_net = DQN(n_actions).to(device)\npolicy_net.apply(policy_net.init_weights)\ntarget_net.load_state_dict(policy_net.state_dict())\ntarget_net.eval()\noptimizer = optim.Adam(policy_net.parameters(), lr=LR, eps=1.5e-4)",
"_____no_output_____"
],
[
"memory = ReplayMemory(MEMORY_CAPACITY, [5,h,w], n_actions, device)\naction_selector = ActionSelector(EPS_START, EPS_END, policy_net, EPS_DECAY, n_actions, device)",
"_____no_output_____"
],
[
"steps_done = 0\nwriter = SummaryWriter(f'runs/{NAME}')",
"_____no_output_____"
],
[
"q = deque(maxlen=5)\ndone=True\neps = 0\nepisode_len = 0",
"_____no_output_____"
],
[
"progressive = tqdm(range(NUM_STEPS), total=NUM_STEPS, ncols=400, leave=False, unit='b')\nfor step in progressive:\n if done:\n env.reset()\n sum_reward = 0\n episode_len = 0\n img, _, _, _ = env.step(1) # BREAKOUT specific !!!\n for i in range(10): # no-op\n n_frame, _, _, _ = env.step(0)\n n_frame = fp(n_frame)\n q.append(n_frame)\n \n # Select and perform an action\n state = torch.cat(list(q))[1:].unsqueeze(0)\n action, eps = action_selector.select_action(state)\n n_frame, reward, done, info = env.step(action)\n n_frame = fp(n_frame)\n\n # 5 frame as memory\n q.append(n_frame)\n memory.push(torch.cat(list(q)).unsqueeze(0), action, reward, done) # here the n_frame means next frame from the previous time step\n episode_len += 1\n\n # Perform one step of the optimization (on the target network)\n if step % POLICY_UPDATE == 0 and step > 0:\n loss = standard_optimization(policy_net, target_net, optimizer, memory, batch_size=BATCH_SIZE, device=device)\n if loss is not None:\n writer.add_scalar('training/loss', loss, step)\n \n # Update the target network, copying all weights and biases in DQN\n if step % TARGET_UPDATE == 0 and step > 0:\n target_net.load_state_dict(policy_net.state_dict())\n \n if step % EVALUATE_FREQ == 0 and step > 0:\n evaluated_reward = evaluate(step, policy_net, device, env_raw, n_actions, eps=0.05, num_episode=15)\n writer.add_scalar('Performance/reward_vs_step', evaluated_reward, step)\n \n if step % SAVE_FREQ == 0 and step > 0:\n torch.save(target_net, f'models/{FILE_NAME}_step{step}')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbd48fd93a638abd1cc095fc4064e1502672e99
| 88,276 |
ipynb
|
Jupyter Notebook
|
notebooks/archive/training_level1.ipynb
|
evanmacbride/microscope-autofocus
|
5e88dc8b498cc96c3eee0503111fdf67ef867b54
|
[
"MIT"
] | null | null | null |
notebooks/archive/training_level1.ipynb
|
evanmacbride/microscope-autofocus
|
5e88dc8b498cc96c3eee0503111fdf67ef867b54
|
[
"MIT"
] | null | null | null |
notebooks/archive/training_level1.ipynb
|
evanmacbride/microscope-autofocus
|
5e88dc8b498cc96c3eee0503111fdf67ef867b54
|
[
"MIT"
] | null | null | null | 88,276 | 88,276 | 0.757918 |
[
[
[
"## Initial setup",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
],
[
"import tensorflow as tf\nprint(tf.__version__)\n# tensorflow version used is 2.8.0\nimport torch\nprint(torch.__version__)\n# torch version used is 1.10+cu111",
"2.8.0\n1.10.0+cu111\n"
],
[
"!nvidia-smi",
"Sun Mar 20 16:57:53 2022 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 47C P8 10W / 70W | 0MiB / 15109MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"# Other imports\n! pip install tensorflow_addons\n! pip install tensorflow_io\n! pip install lightgbm\n! pip install xgboost\n! pip install catboost\n\nimport os\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import *\nfrom tensorflow import keras as tfkeras\nimport matplotlib.pyplot as plt\nfrom imutils import paths\nfrom tqdm import tqdm\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport tensorflow_datasets as tfds\nimport tensorflow_io as tfio\nimport tensorflow_hub as hub\nimport numpy as np\nimport cv2\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics.pairwise import cosine_similarity as cos\nfrom sympy.utilities.iterables import multiset_permutations\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix, mean_absolute_error, r2_score\nfrom sklearn.model_selection import *\nfrom IPython.display import Image, display\nfrom scipy import ndimage\n\nfrom xgboost import XGBRegressor as xgb\nfrom sklearn.ensemble import ExtraTreesRegressor as extratree\nfrom sklearn.ensemble import RandomForestRegressor as randomforest\nfrom sklearn.svm import NuSVR as svr\nfrom sklearn.gaussian_process import GaussianProcessRegressor as gpr\nfrom sklearn.ensemble import AdaBoostRegressor as ada\nfrom sklearn.tree import DecisionTreeRegressor as tree\nfrom sklearn.ensemble import GradientBoostingRegressor as gdb\nfrom sklearn.neural_network import MLPRegressor as mlp\nfrom sklearn.experimental import enable_hist_gradient_boosting\nfrom sklearn.ensemble import HistGradientBoostingRegressor as hgb\nfrom lightgbm import LGBMRegressor as lgb\nfrom catboost import CatBoostRegressor as cgb\nfrom sklearn.ensemble import StackingRegressor as stk\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.model_selection import cross_validate, cross_val_predict\nfrom sklearn.ensemble import VotingRegressor as vot\n\nimport zipfile\nimport concurrent.futures\n\n# Random seed fix\nrandom_seed = 42\ntf.random.set_seed(random_seed)\nnp.random.seed(random_seed)",
"Collecting tensorflow_addons\n Downloading tensorflow_addons-0.16.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n\u001b[?25l\r\u001b[K |▎ | 10 kB 26.8 MB/s eta 0:00:01\r\u001b[K |▋ | 20 kB 9.0 MB/s eta 0:00:01\r\u001b[K |▉ | 30 kB 7.8 MB/s eta 0:00:01\r\u001b[K |█▏ | 40 kB 3.6 MB/s eta 0:00:01\r\u001b[K |█▌ | 51 kB 3.6 MB/s eta 0:00:01\r\u001b[K |█▊ | 61 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██ | 71 kB 4.5 MB/s eta 0:00:01\r\u001b[K |██▍ | 81 kB 4.8 MB/s eta 0:00:01\r\u001b[K |██▋ | 92 kB 5.3 MB/s eta 0:00:01\r\u001b[K |███ | 102 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███▏ | 112 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███▌ | 122 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███▉ | 133 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████ | 143 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████▍ | 153 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████▊ | 163 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████ | 174 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████▎ | 184 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████▌ | 194 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████▉ | 204 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████▏ | 215 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████▍ | 225 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████▊ | 235 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████ | 245 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████▎ | 256 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████▋ | 266 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████▉ | 276 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████▏ | 286 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████▌ | 296 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████▊ | 307 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████ | 317 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████▍ | 327 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████▋ | 337 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████ | 348 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████▏ | 358 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████▌ | 368 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████▉ | 378 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████ | 389 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████▍ | 399 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████▊ | 409 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████ | 419 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████▎ | 430 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████▌ | 440 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████▉ | 450 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████▏ | 460 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 471 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 481 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████ | 491 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████▎ | 501 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████▋ | 512 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 522 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 532 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████▌ | 542 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 552 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████ | 563 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████▍ | 573 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████▋ | 583 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████ | 593 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████▏ | 604 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 614 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 624 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████ | 634 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████▍ | 645 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████▊ | 655 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 665 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████▎ 
| 675 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████▌ | 686 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████▉ | 696 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████▏ | 706 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████▍ | 716 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████▊ | 727 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 737 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 747 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████▋ | 757 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████▉ | 768 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████▏ | 778 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████▌ | 788 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████▊ | 798 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 808 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 819 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████▋ | 829 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████ | 839 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████▏ | 849 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████▌ | 860 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 870 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 880 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▍ | 890 kB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▊ | 901 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 911 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▎ | 921 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 931 kB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▉ | 942 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▏ | 952 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 962 kB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▊ | 972 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 983 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▎ | 993 kB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▋ | 1.0 MB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 1.0 MB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▏ | 1.0 MB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▌ | 1.0 MB 4.3 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▊ | 1.0 MB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▍ | 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▋ | 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 1.1 MB 4.3 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 1.1 MB 4.3 MB/s \n\u001b[?25hRequirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.7/dist-packages (from tensorflow_addons) (2.7.1)\nInstalling collected packages: tensorflow-addons\nSuccessfully installed tensorflow-addons-0.16.1\nCollecting tensorflow_io\n Downloading tensorflow_io-0.24.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (23.4 MB)\n\u001b[K |████████████████████████████████| 23.4 MB 1.7 MB/s \n\u001b[?25hRequirement already satisfied: tensorflow-io-gcs-filesystem==0.24.0 in 
/usr/local/lib/python3.7/dist-packages (from tensorflow_io) (0.24.0)\nInstalling collected packages: tensorflow-io\nSuccessfully installed tensorflow-io-0.24.0\nRequirement already satisfied: lightgbm in /usr/local/lib/python3.7/dist-packages (2.2.3)\nRequirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from lightgbm) (1.0.2)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from lightgbm) (1.21.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from lightgbm) (1.4.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->lightgbm) (1.1.0)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->lightgbm) (3.1.0)\nRequirement already satisfied: xgboost in /usr/local/lib/python3.7/dist-packages (0.90)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from xgboost) (1.21.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from xgboost) (1.4.1)\nCollecting catboost\n Downloading catboost-1.0.4-cp37-none-manylinux1_x86_64.whl (76.1 MB)\n\u001b[K |████████████████████████████████| 76.1 MB 1.3 MB/s \n\u001b[?25hRequirement already satisfied: graphviz in /usr/local/lib/python3.7/dist-packages (from catboost) (0.10.1)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from catboost) (3.2.2)\nRequirement already satisfied: pandas>=0.24.0 in /usr/local/lib/python3.7/dist-packages (from catboost) (1.3.5)\nRequirement already satisfied: numpy>=1.16.0 in /usr/local/lib/python3.7/dist-packages (from catboost) (1.21.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from catboost) (1.4.1)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from catboost) (1.15.0)\nRequirement already satisfied: plotly in /usr/local/lib/python3.7/dist-packages (from catboost) (5.5.0)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->catboost) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24.0->catboost) (2.8.2)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->catboost) (0.11.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->catboost) (3.0.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->catboost) (1.4.0)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from kiwisolver>=1.0.1->matplotlib->catboost) (3.10.0.2)\nRequirement already satisfied: tenacity>=6.2.0 in /usr/local/lib/python3.7/dist-packages (from plotly->catboost) (8.0.1)\nInstalling collected packages: catboost\nSuccessfully installed catboost-1.0.4\n"
]
],
[
[
"## Data utility, only need to use once to preprocess tiff images to png format",
"_____no_output_____"
]
],
[
[
"%%time\nzf = zipfile.ZipFile('/content/drive/MyDrive/ML course 2022/rawImages.zip')\ndef unzip(file):\n zf.extract(file)\nwith concurrent.futures.ProcessPoolExecutor() as executor:\n executor.map(unzip, zf.infolist())",
"CPU times: user 14 s, sys: 1.32 s, total: 15.3 s\nWall time: 5min 4s\n"
],
[
"drive.flush_and_unmount()",
"_____no_output_____"
],
[
"for i in range(len(level1_images_train)):\n img = cv2.imread(level1_images_train[i])\n if img is None:\n continue\n img = cv2.imread(level1_images_train[i])\n cv2.imwrite('/content/drive/MyDrive/ML course 2022_Team BNL/level1/%s.jpeg' %level1_images_train[i].split('/')[-1][:-5], img)",
"_____no_output_____"
],
[
"import shutil\nfor i in range(len(level1_images_train)):\n shutil.copy(level1_images_train[i], \"/content/drive/MyDrive/ML course 2022_Team BNL/level1_png/%s\" %level1_images_train[i].split(\"/\")[-1])",
"_____no_output_____"
],
[
"# the snippet that I use to create the jpegs of the high pass filtered images\npixel_std_values = np.zeros((len(train_images_directory_select)))\nfor i in range(train_images_directory_select.shape[0]):\n im = cv2.imread(train_images_directory_select[i]) # high pass filter then grayscale then standard deviation of all pixel values\n high_pass_image = cv2.cvtColor(im - ndimage.gaussian_filter(im, 13), cv2.COLOR_BGR2GRAY)\n # pixel_std_values[i] = np.std(high_pass_image)\n cv2.imwrite('/content/drive/MyDrive/ML course 2022_Team BNL/level1_jpeg/%s.jpeg' %train_images_directory_select[i].split('/')[-1][:-5], high_pass_image)\n# plt.scatter(labels, pixel_std_values)\n# plt.xlabel('true_focal_distance')\n# plt.ylabel('std value')",
"_____no_output_____"
]
],
[
[
"## Dataset gathering and preparation",
"_____no_output_____"
]
],
[
[
"%cd /content/drive/MyDrive/ML\\ course\\ 2022_Team\\ BNL",
"/content/drive/MyDrive/ML course 2022_Team BNL\n"
],
[
"training_batch_size = 1\ntest_batch_size = 1\n\nimageSizeheight=1200\nimageSizewidth=1920\n\n# imageSizeheight=300\n# imageSizewidth=480",
"_____no_output_____"
],
[
"# Image preprocessing utils\[email protected]\ndef parse_images(image_path):\n\n image_string = tf.io.read_file(image_path)\n image = tf.io.decode_jpeg(image_string, channels=3)\n # image = tfio.experimental.image.decode_tiff(image_string, index=0, name=None)[:,:,:-1] # in the doc, it transforms tiff to 4 channels, with additional channel of opacity which is not needed.\n image = tf.image.convert_image_dtype(image, tf.float32)\n image = tf.image.resize(image, size=[imageSizeheight, imageSizewidth])\n\n return image",
"_____no_output_____"
],
[
"level1_images_train = list(paths.list_files(\"/content/drive/MyDrive/ML course 2022_Team BNL/level1_jpeg\", validExts=\"jpeg\"))\n# train_images_directory_select = np.random.choice(level1_images_train, 6000, replace=False)\ntrain_images_directory_select = np.array(level1_images_train)\nprint(len(level1_images_train))\nprint(len(train_images_directory_select))",
"5882\n5882\n"
],
[
"labels = [] # initialize labels\npositions = [] # initialize the positions of the images, needed for record keeping\n\nedge_pixel_intensity_threshold = 0.50\nedge_percentage_threshold = 0.10\ncap = 20\n\nfor i in range(train_images_directory_select.shape[0]):\n label = np.abs(float(train_images_directory_select[i].split(\"_\")[-1][:-5].split(\"(\")[0])) # individual label, can be put into absolute or not \n labels.append(label)\n position = train_images_directory_select[i].split(\"_\")[-3] + '_' + train_images_directory_select[i].split(\"_\")[-2] # individual position\n positions.append(position)\nlabels = np.array(labels)\npositions = np.array(positions)\n\ninfocus_images_coordinate = np.where(labels==0)[0] # locations where the infocus images are\nedge_percentages = []\nvalid = []\nfor i in range(train_images_directory_select[infocus_images_coordinate].shape[0]):\n edge_percentage = len(np.where(parse_images(train_images_directory_select[infocus_images_coordinate][i])[:,:,0]>=edge_pixel_intensity_threshold)[0]) / imageSizeheight / imageSizewidth\n edge_percentages.append(edge_percentage)\n if edge_percentage >= edge_percentage_threshold: # picking out infocus images where the area containing edges is greater than a threshold value\n valid.append(positions[infocus_images_coordinate][i])\n\nfor i in range(len(valid)):\n valid_position = np.where(positions==valid[i])[0]\n if i == 0:\n valid_positions = valid_position\n else:\n valid_positions = np.append(valid_positions, valid_position)\n\nvalid_directories = train_images_directory_select[valid_positions]\nvalid_labels = labels[valid_positions]\n\n\ncapped_valid_labels = valid_labels[np.where(np.abs(valid_labels)<=cap)[0]] # not include the focal distance larger than cap\ncapped_valid_directories = valid_directories[np.where(np.abs(valid_labels)<=cap)[0]]\n\nprint(len(capped_valid_labels))",
"510\n"
]
],
[
[
"## Initiate our cnn model",
"_____no_output_____"
]
],
[
[
"Resnet50_transfer = tf.keras.applications.resnet50.ResNet50(\n include_top=False,\n weights=\"imagenet\",\n # weights=None,\n input_tensor=None,\n input_shape=(imageSizeheight, imageSizewidth, 3), \n pooling=None,\n)\n\nResnet50_transfer.trainable = True",
"Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5\n94773248/94765736 [==============================] - 2s 0us/step\n94781440/94765736 [==============================] - 2s 0us/step\n"
],
[
"Resnet50_transfer.trainable = False\n\nsupervise_model = tf.keras.models.Sequential([\n \n Resnet50_transfer,\n GlobalAveragePooling2D(),\n # BatchNormalization(epsilon=0.1),\n # Dense(64, activation='relu'),\n # Dropout(0.5),\n # BatchNormalization(epsilon=0.1),\n # Dense(32, activation='relu'),\n # Dropout(0.5),\n # BatchNormalization(epsilon=0.1),\n Dense(16, activation='relu'),\n Dropout(0.5),\n Dense(1)\n])",
"_____no_output_____"
],
[
"# adam = tf.keras.optimizers.Adam(learning_rate=0.001)\n# sgd = tf.keras.optimizers.SGD(learning_rate=0.001)\n# metrics = ['mae'\n# ]\n# supervise_model.compile(loss = 'mse', optimizer = adam, metrics = metrics)\n# supervise_model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n resnet50 (Functional) (None, 38, 60, 2048) 23587712 \n \n global_average_pooling2d (G (None, 2048) 0 \n lobalAveragePooling2D) \n \n dense (Dense) (None, 16) 32784 \n \n dropout (Dropout) (None, 16) 0 \n \n dense_1 (Dense) (None, 1) 17 \n \n=================================================================\nTotal params: 23,620,513\nTrainable params: 32,801\nNon-trainable params: 23,587,712\n_________________________________________________________________\n"
],
[
"# cnn feature extraction in segments so as to avoid OOM problem, that we encountered with image set size larger than 1000.\nfeature_extractor = Model(supervise_model.input, supervise_model.layers[-4].output)\nn_segments = len(capped_valid_directories) // 500 + 1\nfor i in range(n_segments):\n if i == 0:\n train_ds = tf.data.Dataset.from_tensor_slices(capped_valid_directories[:(i + 1) * 500])\n train_ds = (\n train_ds\n .map(parse_images, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .batch(training_batch_size\n # , drop_remainder=True\n )\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n features = feature_extractor.predict(train_ds)\n\n else:\n train_ds = tf.data.Dataset.from_tensor_slices(capped_valid_directories[i * 500 :(i + 1) * 500])\n train_ds = (\n train_ds\n .map(parse_images, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n .batch(training_batch_size\n # , drop_remainder=True\n )\n .prefetch(tf.data.experimental.AUTOTUNE)\n )\n\n features = np.append(features, feature_extractor.predict(train_ds), axis=0)",
"_____no_output_____"
]
],
[
[
"## KFolds validation",
"_____no_output_____"
]
],
[
[
"training_data_size = np.array([10, 20, 50, 80, 100, 150, 200])\nn_folds = 5\nn_split_split = 20\nn_validation_images = 50\nrkf = RepeatedKFold(n_splits=n_folds, n_repeats=n_split_split, random_state=42)\n\nxgb_scores = np.zeros((len(training_data_size), n_folds * n_split_split, 2))\nridge_scores = np.zeros((len(training_data_size), n_folds * n_split_split, 2))\nrf_scores = np.zeros((len(training_data_size), n_folds * n_split_split, 2))\nsv_scores = np.zeros((len(training_data_size), n_folds * n_split_split, 2))\ngp_scores = np.zeros((len(training_data_size), n_folds * n_split_split, 2))\nmlp_scores = np.zeros((len(training_data_size), n_folds * n_split_split, 2))\n\nk = 0\nfor train_ix, test_ix in rkf.split(features, capped_valid_labels):\n \n Train_feature, val_feature = features[train_ix], features[test_ix]\n Train_label, val_label = capped_valid_labels[train_ix], capped_valid_labels[test_ix]\n val_feature = val_feature[:n_validation_images]\n val_label = val_label[:n_validation_images]\n \n for i in range(len(training_data_size)):\n\n # initate regressors\n xg_boost_regressor = xgb(max_depth=16, learning_rate=0.1, tree_method='gpu_hist', objective='reg:squarederror') # don't go beyond max_depth of 16, session will crash due to OOM\n ridge_regressor = RidgeCV()\n rf_regressor = randomforest(max_depth=16)\n sv_regressor = svr()\n gp_regressor = gpr()\n mlp_regressor = mlp(hidden_layer_sizes=(100, 100, 100), max_iter=1000)\n\n train_feature = Train_feature[:training_data_size[i]]\n train_label = Train_label[:training_data_size[i]]\n\n ## add or comment out regressors here\n\n\n xg_boost_regressor.fit(train_feature, train_label)\n y_pred_xgb = xg_boost_regressor.predict(val_feature)\n xgb_scores[i, k, 0] = r2_score(y_pred_xgb, val_label)\n xgb_scores[i, k, 1] = mean_absolute_error(y_pred_xgb, val_label)\n\n ridge_regressor.fit(train_feature, train_label)\n y_pred_ridge = ridge_regressor.predict(val_feature)\n ridge_scores[i, k, 0] = r2_score(y_pred_ridge, val_label)\n ridge_scores[i, k, 1] = mean_absolute_error(y_pred_ridge, val_label)\n\n rf_regressor.fit(train_feature, train_label)\n y_pred_rf = rf_regressor.predict(val_feature)\n rf_scores[i, k, 0] = r2_score(y_pred_rf, val_label)\n rf_scores[i, k, 1] = mean_absolute_error(y_pred_rf, val_label)\n\n sv_regressor.fit(train_feature, train_label)\n y_pred_sv = sv_regressor.predict(val_feature)\n sv_scores[i, k, 0] = r2_score(y_pred_sv, val_label)\n sv_scores[i, k, 1] = mean_absolute_error(y_pred_sv, val_label)\n\n gp_regressor.fit(train_feature, train_label)\n y_pred_gp = gp_regressor.predict(val_feature)\n gp_scores[i, k, 0] = r2_score(y_pred_gp, val_label)\n gp_scores[i, k, 1] = mean_absolute_error(y_pred_gp, val_label)\n\n mlp_regressor.fit(train_feature, train_label)\n y_pred_mlp = mlp_regressor.predict(val_feature)\n mlp_scores[i, k, 0] = r2_score(y_pred_mlp, val_label)\n mlp_scores[i, k, 1] = mean_absolute_error(y_pred_mlp, val_label)\n \n k += 1\n\nnp.savez_compressed('performance_log/level1/cap%i_cutoff%1.2f_valid%i.npz' %(cap, edge_percentage_threshold, len(capped_valid_labels)), xgb=xgb_scores, ridge=ridge_scores, rf=rf_scores, sv=sv_scores, gp=gp_scores, mlp=mlp_scores)",
"_____no_output_____"
],
[
"for i in range(len(training_data_size)):\n print('training_data_size: %i' % training_data_size[i])\n print(np.average(xgb_scores[i, :, 0]))\n print(np.std(xgb_scores[i, :, 0]))\n print(np.average(xgb_scores[i, :, 1]))\n print(np.std(xgb_scores[i, :, 1]))\n print(np.average(ridge_scores[i, :, 0]))\n print(np.std(ridge_scores[i, :, 0]))\n print(np.average(ridge_scores[i, :, 1]))\n print(np.std(ridge_scores[i, :, 1]))\n print(np.average(rf_scores[i, :, 0]))\n print(np.std(rf_scores[i, :, 0]))\n print(np.average(rf_scores[i, :, 1]))\n print(np.std(rf_scores[i, :, 1]))\n print(np.average(sv_scores[i, :, 0]))\n print(np.std(sv_scores[i, :, 0]))\n print(np.average(sv_scores[i, :, 1]))\n print(np.std(sv_scores[i, :, 1]))\n print(np.average(gp_scores[i, :, 0]))\n print(np.std(gp_scores[i, :, 0]))\n print(np.average(gp_scores[i, :, 1]))\n print(np.std(gp_scores[i, :, 1]))\n print(np.average(mlp_scores[i, :, 0]))\n print(np.std(mlp_scores[i, :, 0]))\n print(np.average(mlp_scores[i, :, 1]))\n print(np.std(mlp_scores[i, :, 1]))",
"training_data_size: 10\n0.14084248695740414\n2.7596362558658334\n3.072736385648594\n0.8956592728706928\n-0.7766834675071752\n7.595225687628904\n2.919382927749211\n0.8930499778813756\n-4.75159455046763\n21.32244198472842\n3.707794289579082\n0.8381547926925947\n-1376046.7630098618\n2118775.2312446465\n6.652055530528884\n0.7328939987957965\n-20.139719237492674\n114.14783619964996\n4.195719456627305\n1.0904307439505168\n0.31790680908968794\n2.390246555151645\n2.4593776514136305\n1.134806561531438\ntraining_data_size: 20\n0.6127460916004517\n0.14273374618180942\n2.5826030791908217\n0.4023774093347564\n0.5979328567114998\n0.12393640668622741\n2.727254395662266\n0.3551364679070613\n0.4695063402054142\n0.20017859336515761\n2.688883169837599\n0.38192943109347943\n-119141.3728962809\n93425.85060880502\n6.27230394648212\n0.6147711495190518\n0.4227523017135294\n0.2049816135261394\n3.532484646850965\n0.4839886312193726\n0.7459562784933172\n0.12870139680243298\n2.076345256859041\n0.4731978555179052\ntraining_data_size: 50\n0.8203983095044499\n0.0813710079479404\n1.8189238103763918\n0.31835587664078063\n0.8810790948688981\n0.04093764368141865\n1.7370660068909358\n0.24903124740454324\n0.8351375909390757\n0.05075356508560078\n1.7992479860717152\n0.21104235861444043\n-8376.665931761401\n2614.529403387579\n6.002770899687513\n0.5161205594769536\n0.6201355889437847\n0.11851263374356036\n3.004971927340325\n0.36547285417363223\n0.8478852613162248\n0.056011520626132266\n1.782785358318094\n0.2631306362533543\ntraining_data_size: 80\n0.9083950034115618\n0.03745150726943866\n1.3435363704845256\n0.19916348411480972\n0.9252873768560436\n0.02102686881782523\n1.4022435428550668\n0.1414553425484046\n0.9100721924319877\n0.036313377568307346\n1.347547300866414\n0.17261037114927213\n-2651.4731180791555\n475.47371995027567\n5.924137085521008\n0.4525022040498505\n0.6867956625673383\n0.11416176743783346\n2.569695774759313\n0.40497497869661125\n0.8778979080638526\n0.048103982832002734\n1.6297619458434178\n0.2509850740151335\ntraining_data_size: 100\n0.9212289448201575\n0.03647168611797789\n1.2326429367756924\n0.19567887801379003\n0.9293755852615996\n0.022302630504414243\n1.329463549542797\n0.14349953051944603\n0.9235942071371065\n0.033336546478229555\n1.239676189402976\n0.1608974991051767\n-1482.2604119163138\n224.71347800861955\n5.891199374110353\n0.43176999215527656\n0.7125141860700718\n0.10790024622258106\n2.4759274293968003\n0.4044658331879874\n0.8879578247699073\n0.04616443076624449\n1.5710054313252053\n0.2554933738653663\ntraining_data_size: 150\n0.9425109155778957\n0.02772839342797353\n1.0476652797382067\n0.15881349172825485\n0.9510358314580771\n0.017721447215294617\n1.1157259919904492\n0.1260250039176212\n0.9379231184338046\n0.02798137789388804\n1.0923404920115902\n0.15914939271462836\n-711.8816547442243\n110.28370311326765\n5.836516910421523\n0.447310550243297\n0.754891961996632\n0.08783222036969277\n2.211500399392409\n0.3674392918856898\n0.8881859498892823\n0.05260471220163631\n1.5768598692961506\n0.2994298914647158\ntraining_data_size: 
200\n0.9500538039644125\n0.025227156944934555\n0.9685059190571245\n0.14970389300903494\n0.9532059958787177\n0.015245713932625282\n1.0865333214303208\n0.11278059865765575\n0.9441234177950087\n0.025805593948424663\n1.0188601376367352\n0.14328988408151982\n-395.004017721243\n53.94205151407905\n5.773433767720981\n0.4318984522267623\n0.786397781292844\n0.08230859431349802\n1.960874737406081\n0.3394807218700498\n0.8891547044793708\n0.041680979172726715\n1.5479714113376601\n0.23487422010232534\n"
],
[
"# rf_regressor = randomforest(max_depth=16)\nfeatures_train, features_test, train_label, test_label = train_test_split(features, capped_valid_labels, test_size=0.2, random_state=42)\n# rf_regressor.fit(features_train, train_label)\ny_pred_rf = rf_regressor.predict(features_test)",
"_____no_output_____"
],
[
"print('rf prediction r2_score = %f' % r2_score(y_pred_rf, test_label))\nprint('rf prediction mean_absolute_error = %f' % mean_absolute_error(y_pred_rf, test_label))\nplt.scatter(test_label, y_pred_rf)\nplt.plot(np.linspace(0, cap, 150), np.linspace(0, cap, 150), c='r')\nplt.xlabel('true_distance [$\\mu m$]')\nplt.ylabel('predicted_distance [$\\mu m$]')",
"rf prediction r2_score = 0.984096\nrf prediction mean_absolute_error = 0.531279\n"
],
[
"ridge_regressor.fit(extracted_features_train, train_label)\ny_pred_ridge = ridge_regressor.predict(extracted_features_test)",
"_____no_output_____"
],
[
"print('RidgeCV prediction r2_score = %f' % r2_score(y_pred_ridge, test_label))\nprint('RidgeCV prediction mean_absolute_error = %f' % mean_absolute_error(y_pred_ridge, test_label))\nplt.scatter(test_label, y_pred_ridge)\nplt.plot(np.linspace(0, cap, 150), np.linspace(0, cap, 150), c='r')\nplt.xlabel('true_distance')\nplt.ylabel('predicted_distance')",
"RidgeCV prediction r2_score = 0.688922\nRidgeCV prediction mean_absolute_error = 2.739158\n"
],
[
"tf.random.set_seed(42)\nsupervise_history = supervise_model.fit(train_ds,\n validation_data=test_ds, \n batch_size=training_batch_size, \n epochs=10, \n workers=8, \n use_multiprocessing=True, \n verbose=1) \n # callbacks=[MetricsCheckpoint('small_supervise')]) ",
"Epoch 1/10\n21/21 [==============================] - 55s 2s/step - loss: 30.6404 - mae: 4.4466 - val_loss: 25.7785 - val_mae: 4.5040\nEpoch 2/10\n21/21 [==============================] - ETA: 0s - loss: 27.6751 - mae: 4.4045"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbd497497a57be7a338e273e2aeba3d7f223d25
| 228,258 |
ipynb
|
Jupyter Notebook
|
data_processing/ml_varnost_posameznih_obcin.ipynb
|
markloboda/PR21mlfjis
|
0ebf388236daab02ce1efc514284802b8ab07ef9
|
[
"MIT"
] | null | null | null |
data_processing/ml_varnost_posameznih_obcin.ipynb
|
markloboda/PR21mlfjis
|
0ebf388236daab02ce1efc514284802b8ab07ef9
|
[
"MIT"
] | null | null | null |
data_processing/ml_varnost_posameznih_obcin.ipynb
|
markloboda/PR21mlfjis
|
0ebf388236daab02ce1efc514284802b8ab07ef9
|
[
"MIT"
] | null | null | null | 449.326772 | 128,374 | 0.938101 |
[
[
[
"## 1. Importi",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport csv\nimport os.path\n\nimport mplcyberpunk\nplt.style.use(\"cyberpunk\")",
"_____no_output_____"
]
],
[
[
"## 2. Branje podatkov",
"_____no_output_____"
]
],
[
[
"with open('../data/kd2018.csv', 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n head0 = next(reader)\n none_handler = lambda i : i or None\n main_data18 = np.array([none_handler(i) for i in [row for row in reader]])",
"_____no_output_____"
],
[
"with open('../data/kd2019.csv', 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n head1 = next(reader)\n none_handler = lambda i : i or None\n main_data19 = np.array([none_handler(i) for i in [row for row in reader]])",
"_____no_output_____"
],
[
"with open('../data/kd2020.csv', 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n head2 = next(reader)\n none_handler = lambda i : i or None\n main_data20 = np.array([none_handler(i) for i in [row for row in reader]])",
"_____no_output_____"
],
[
"with open('../data/kd2021.csv', 'rt') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n head3 = next(reader)\n none_handler = lambda i : i or None\n main_data21 = np.array([none_handler(i) for i in [row for row in reader]])",
"_____no_output_____"
]
],
[
[
"### 2.1 Priprava podatkov\nV podatkih je nek zločin zapisan večrat. To je ocitno, saj se zaporedna številka kaznivega dejanja ponavlja večrat.",
"_____no_output_____"
]
],
[
[
"found_ids = set()\nrows_to_keep = np.array([])\nfor i in range(len(main_data18)):\n if (main_data18[i][0] not in found_ids):\n rows_to_keep = np.append(rows_to_keep, True)\n found_ids.add(main_data18[i][0])\n else:\n rows_to_keep = np.append(rows_to_keep, False)\n\nmain_data18_filtered = main_data18[np.ma.make_mask(rows_to_keep)]\n\nfound_ids = set()\nrows_to_keep = np.array([])\nfor i in range(len(main_data19)):\n if (main_data19[i][0] not in found_ids):\n rows_to_keep = np.append(rows_to_keep, True)\n found_ids.add(main_data19[i][0])\n else:\n rows_to_keep = np.append(rows_to_keep, False)\n\nmain_data19_filtered = main_data19[np.ma.make_mask(rows_to_keep)]\n\nfound_ids = set()\nrows_to_keep = np.array([])\nfor i in range(len(main_data20)):\n if (main_data20[i][0] not in found_ids):\n rows_to_keep = np.append(rows_to_keep, True)\n found_ids.add(main_data20[i][0])\n else:\n rows_to_keep = np.append(rows_to_keep, False)\n\nmain_data20_filtered = main_data20[np.ma.make_mask(rows_to_keep)]\n\nfound_ids = set()\nrows_to_keep = np.array([])\nfor i in range(len(main_data21)):\n if (main_data21[i][0] not in found_ids):\n rows_to_keep = np.append(rows_to_keep, True)\n found_ids.add(main_data21[i][0])\n else:\n rows_to_keep = np.append(rows_to_keep, False)\n\nmain_data21_filtered = main_data21[np.ma.make_mask(rows_to_keep)]",
"_____no_output_____"
],
[
"main_data = np.concatenate((main_data18_filtered, main_data19_filtered, main_data20_filtered, main_data21_filtered))",
"_____no_output_____"
],
[
"cities, counts = np.unique(main_data[:, 19], return_counts=True)\ndict_id_city = dict((id, str.lower(city)) for id, city in enumerate(cities))\ndict_city_id = dict((str.lower(city), id) for id, city in enumerate(cities))",
"_____no_output_____"
]
],
[
[
"## 3. Štetje zločinov v posamezni občini",
"_____no_output_____"
]
],
[
[
"# count number of occurences of each value in column\ndict_cityId_crimeCount = dict((dict_city_id[str.lower(cities[i])], counts[i]) for i in range(len(cities)))\n\ny_axis = np.array([str.capitalize(dict_id_city[cityId]) for cityId in dict_cityId_crimeCount.keys()])\nx_axis = np.array([dict_cityId_crimeCount[cityId] for cityId in dict_cityId_crimeCount.keys()])\n\ncounts_argsort = np.argsort(x_axis)[::-1]\n\n# make graph of number of occurences of each value in column\nplt.figure(figsize=(20, 10))\nplt.xticks(rotation=90)\nplt.xlabel('Število kriminalnih dejanj')\nplt.ylabel('Mesta')\nplt.bar(y_axis[counts_argsort][:10], x_axis[counts_argsort][:10])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Že takoj na prvi pogled je razvidno, da se največ zločina zgodi v Ljubljani. Sigurno pa na podlagi teh rezultatov ne moremo odgovoriti na vprašanje, \"Katera občina je najbolj varna/nevarna?\". Dodal bom podatke za število prebivalcev v vsaki občini.",
"_____no_output_____"
]
],
[
[
"with open('../data/stevilo_prebivalcev_po_obcinah.csv', 'rt', encoding=\"windows-1250\") as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n head2 = next(reader)\n none_handler = lambda i : i or None\n count_per_city = np.array([none_handler(i) for i in [row for row in reader]])",
"_____no_output_____"
],
[
"# if name contains / split by / and keep first part\ncount_per_city = [(str.split(row[0], '/')[0], row[1]) for row in count_per_city]\ndict_cityId_popCount = dict([(dict_city_id[str.lower(count_per_city[i][0])], count_per_city[i][1]) for i in range(len(count_per_city)) if str.lower(count_per_city[i][0]) in dict_city_id.keys()])\n\n# delete unknown values from cities we are watching\nto_be_deleted = []\nfor id in dict_id_city.keys():\n if id not in dict_cityId_popCount.keys():\n to_be_deleted.append(id)\n\nprint(f\"Izbrisani so bili podatki {[dict_id_city[id] for id in to_be_deleted]}, saj za njih ne poznamo prave občine, ali pa le ne poznamo števila prebivalcev.\")\n\nfor id in to_be_deleted:\n del dict_city_id[dict_id_city[id]]\n del dict_id_city[id]",
"Izbrisani so bili podatki ['ajdovščina', 'kočevje', 'neznana obč', 'neznana ue', 'ni podatka', 'tržič', 'črnomelj', 'šentjur pri celju'], saj za njih ne poznamo prave občine, ali pa le ne poznamo števila prebivalcev.\n"
],
[
"# normalized data of crimes per city by population of city\ncrime_per_city_norm0 = np.array([tuple([cityId, float(dict_cityId_crimeCount[cityId]) / float(dict_cityId_popCount[cityId])]) for cityId in dict_id_city.keys()])\n\ncityId, x_axis = zip(*crime_per_city_norm0)\n\nx_axis_argsort = np.argsort(x_axis)[::-1]\nx_axis = np.array(x_axis)\ny_axis = np.array([str.capitalize(dict_id_city[id]) for id in cityId])\n\n\nplt.figure(figsize=(20, 10))\nplt.xticks(rotation=90)\nplt.xlabel('Število kriminalnih dejanj / število prebivalcev')\nplt.ylabel('Mesta')\nplt.bar(y_axis[x_axis_argsort], x_axis[x_axis_argsort])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Murska sobota vrne največjo vrednost normaliziranega števila kriminala s številom prebivalcev. Zakaj?\nVse primere zločina v Murski Soboti bom zapisal v datoteko.",
"_____no_output_____"
]
],
[
[
"ms_crime = np.array([i for i in main_data if str.lower(i[19]) == 'murska sobota'])\n\nif (not os.path.exists('../data/kd18192021_murska_sobota.csv')):\n open('../data/kd18192021_murska_sobota.csv', 'x')\n\nwith open('../data/kd18192021_murska_sobota.csv', 'w', encoding=\"windows-1250\", newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=';')\n writer.writerow(head1)\n writer.writerows(ms_crime)",
"_____no_output_____"
],
[
"with open('../data/kd18192021_murska_sobota.csv', 'rt', encoding=\"windows-1250\") as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n head0 = next(reader)\n none_handler = lambda i : i or None\n main_data_murska_sobota = np.array([none_handler(i) for i in [row for row in reader]])",
"_____no_output_____"
],
[
"zlocini_gr = np.array([str.split(i, '-')[-1] for i in main_data_murska_sobota[:, 6]])\n\nzlocin, count = np.unique([zlocin for zlocin in zlocini_gr], return_counts=True)\ndict_zlocini_count_gr = dict(zip(zlocin, count))\n\nx_axis = np.array([dict_zlocini_count_gr[zlocin] for zlocin in zlocin])\ny_axis = np.array([str.capitalize(zlocin) for zlocin in zlocin])\n\nx_axis_argsort = np.argsort(x_axis)[::-1]\n\ny_axis = y_axis[x_axis_argsort]\nx_axis = x_axis[x_axis_argsort]\n\n#plot graph\nplt.figure(figsize=(25, 6))\nplt.xticks(rotation=90, fontsize=15)\nplt.xlabel('Kriminalno dejanje')\nplt.ylabel('Število kriminalnih dejanj')\nplt.bar(y_axis[:20], x_axis[:20])\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"Kršitev temeljnih pravic delavcev je največ. Izpišem jih:",
"_____no_output_____"
]
],
[
[
"j = 0\nfor i in main_data_murska_sobota:\n if (str.strip(str.upper(i[6].split('-')[-1])) == 'KRŠITEV TEMELJNIH PRAVIC DELAVCEV'):\n j += 1\n\nprint(f\"Število kršitev temeljnih pravic delavcev v gornji radgoni je {j}.\")",
"_____no_output_____"
],
[
"j = 0\nfor i in main_data:\n if (str.strip(str.upper(i[6].split('-')[-1])) == 'KRŠITEV TEMELJNIH PRAVIC DELAVCEV'):\n j += 1\n\nprint(f\"Število kršitev temeljnih pravic delavcev je {j/len(main_data)}.\")",
"_____no_output_____"
]
],
[
[
"Kar precejšnji delež kršitev je 'KRŠITEV TEMELJNIH PRAVIC DELAVCEV', kateri za nas niso zelo pomembni. Zato bom za vizualizacijo te odstranil.",
"_____no_output_____"
],
[
"Očitno je največ vnosov vedno kršitev delavskih pravic, kar za naš problem ni tako zanimivo, zato za grafični pogled to vrednost spustimo.",
"_____no_output_____"
]
],
[
[
"mask = np.ma.make_mask(np.array([str.strip(str.upper(i[6].split('-')[-1])) != 'KRŠITEV TEMELJNIH PRAVIC DELAVCEV' for i in main_data]))\n\ncities, counts = np.unique(main_data[mask, 19], return_counts=True)\n\n# count number of occurences of each value in column\ndict_cityId_crimeCount_noKDelavcev = dict((dict_city_id[str.lower(cities[i])], counts[i]) for i in range(len(cities)) if str.lower(cities[i]) in dict_city_id.keys())\n\ny_axis = np.array([str.capitalize(dict_id_city[cityId]) for cityId in dict_cityId_crimeCount_noKDelavcev.keys()])\nx_axis = np.array([dict_cityId_crimeCount_noKDelavcev[cityId] for cityId in dict_cityId_crimeCount_noKDelavcev.keys()])\n\ncounts_argsort = np.argsort(x_axis)[::-1]\n\n# make graph of number of occurences of each value in column\nplt.figure(figsize=(20, 10))\nplt.xticks(rotation=90)\nplt.bar(y_axis[counts_argsort], x_axis[counts_argsort])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Sedaj ponovno normaliziram.",
"_____no_output_____"
]
],
[
[
"# normalized data of crimes per city by population of city\ncrime_per_city_norm0_noKDelavcev = np.array([tuple([cityId, float(dict_cityId_crimeCount_noKDelavcev[cityId]) / float(dict_cityId_popCount[cityId])]) for cityId in dict_id_city.keys()])\n\ncityId, x_axis = zip(*crime_per_city_norm0_noKDelavcev)\n\nx_axis_argsort = np.argsort(x_axis)[::-1]\nx_axis = np.array(x_axis)\ny_axis = np.array([str.capitalize(dict_id_city[id]) for id in cityId])\n\n\nplt.figure(figsize=(20, 10))\nplt.xticks(rotation=90)\nplt.xlabel(\"Število kriminalnih dejanj / število prebivalcev\")\nplt.ylabel(\"Mesta\")\nplt.bar(y_axis[x_axis_argsort], x_axis[x_axis_argsort])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Sedaj imajo izbire za najbolj nevarna mesta vel smisla in so bližje pričakovanim vrednostim.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
cbbd5663491ea157c7639d85f35afb883c4a62c8
| 1,308 |
ipynb
|
Jupyter Notebook
|
python-note/python_notebooks-master/itertools_permutations.ipynb
|
ii6uu99/ipynb
|
d924a6926838ca5e563620cd324368a07d2c2521
|
[
"MIT"
] | 1 |
2021-01-27T09:01:33.000Z
|
2021-01-27T09:01:33.000Z
|
python-note/python_notebooks-master/itertools_permutations.ipynb
|
ii6uu99/ipynb
|
d924a6926838ca5e563620cd324368a07d2c2521
|
[
"MIT"
] | null | null | null |
python-note/python_notebooks-master/itertools_permutations.ipynb
|
ii6uu99/ipynb
|
d924a6926838ca5e563620cd324368a07d2c2521
|
[
"MIT"
] | 1 |
2021-01-26T13:22:21.000Z
|
2021-01-26T13:22:21.000Z
| 27.829787 | 538 | 0.373853 |
[
[
[
"from itertools import permutations\n\nprint(list(permutations('ETAS')))",
"[('E', 'T', 'A', 'S'), ('E', 'T', 'S', 'A'), ('E', 'A', 'T', 'S'), ('E', 'A', 'S', 'T'), ('E', 'S', 'T', 'A'), ('E', 'S', 'A', 'T'), ('T', 'E', 'A', 'S'), ('T', 'E', 'S', 'A'), ('T', 'A', 'E', 'S'), ('T', 'A', 'S', 'E'), ('T', 'S', 'E', 'A'), ('T', 'S', 'A', 'E'), ('A', 'E', 'T', 'S'), ('A', 'E', 'S', 'T'), ('A', 'T', 'E', 'S'), ('A', 'T', 'S', 'E'), ('A', 'S', 'E', 'T'), ('A', 'S', 'T', 'E'), ('S', 'E', 'T', 'A'), ('S', 'E', 'A', 'T'), ('S', 'T', 'E', 'A'), ('S', 'T', 'A', 'E'), ('S', 'A', 'E', 'T'), ('S', 'A', 'T', 'E')]\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
cbbd585679b35f7535aab80bd05f969b4da4e7db
| 21,212 |
ipynb
|
Jupyter Notebook
|
exp_vqa/data/collect_vqa_vocabs_answers_glove.ipynb
|
drewhayward/n2nmn
|
14fd04bf9af5b539eeef6e094838b4a496de163e
|
[
"BSD-2-Clause"
] | 299 |
2017-06-18T01:34:18.000Z
|
2022-01-06T15:59:09.000Z
|
exp_vqa/data/collect_vqa_vocabs_answers_glove.ipynb
|
drewhayward/n2nmn
|
14fd04bf9af5b539eeef6e094838b4a496de163e
|
[
"BSD-2-Clause"
] | 16 |
2017-06-23T16:03:15.000Z
|
2020-07-31T05:43:24.000Z
|
exp_vqa/data/collect_vqa_vocabs_answers_glove.ipynb
|
drewhayward/n2nmn
|
14fd04bf9af5b539eeef6e094838b4a496de163e
|
[
"BSD-2-Clause"
] | 57 |
2017-06-18T10:18:58.000Z
|
2022-03-20T23:45:00.000Z
| 106.592965 | 15,944 | 0.855459 |
[
[
[
"import numpy as np\nimport json\nimport re\nfrom collections import defaultdict\nimport spacy\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"annotation_file = '../vqa-dataset/Annotations/mscoco_%s_annotations.json'\nannotation_sets = ['train2014', 'val2014']\nquestion_file = '../vqa-dataset/Questions/OpenEnded_mscoco_%s_questions.json'\nquestion_sets = ['train2014', 'val2014', 'test-dev2015', 'test2015']\n\nvocab_file = './vocabulary_vqa.txt'\nanswer_file = './answers_vqa.txt'\nglove_mat_file = './vocabulary_vqa_glove.npy'\n\nnum_answers = 3000",
"_____no_output_____"
],
[
"answer_counts = defaultdict(lambda: 0)\nfor image_set in annotation_sets:\n with open(annotation_file % image_set) as f:\n annotations = json.load(f)[\"annotations\"]\n for ann in annotations:\n for answer in ann[\"answers\"]:\n# if answer[\"answer_confidence\"] != \"yes\":\n# continue\n word = answer[\"answer\"]\n if re.search(r\"[^\\w\\s]\", word):\n continue\n answer_counts[word] += 1\n\ntop_answers = sorted(answer_counts, key=answer_counts.get, reverse=True)\nprint('total answer num: %d, keeping top %d' % (len(top_answers), num_answers))\n# add a <unk> symbol to represent the unseen answers.\nassert('<unk>' not in top_answers)\nanswer_list = ['<unk>'] + top_answers[:num_answers]",
"total answer num: 121928, keeping top 3000\n"
],
[
"vocab_set = set()\nSENTENCE_SPLIT_REGEX = re.compile(r'(\\W+)')\nquestion_length = []\nfor image_set in question_sets:\n with open(question_file % image_set) as f:\n questions = json.load(f)['questions']\n set_question_length = [None]*len(questions)\n for n_q, q in enumerate(questions):\n words = SENTENCE_SPLIT_REGEX.split(q['question'].lower())\n words = [w.strip() for w in words if len(w.strip()) > 0]\n vocab_set.update(words)\n set_question_length[n_q] = len(words)\n question_length += set_question_length\n\n# although we've collected all words in the dataset,\n# still add a <unk> for future-proof\nvocab_set.add('<unk>')\nprint('total word num: %d, keeping all' % len(vocab_set))\nvocab_list = list(vocab_set)\nvocab_list.sort()",
"total word num: 17742, keeping all\n"
],
[
"with open(vocab_file, 'w') as f:\n f.writelines([w+'\\n' for w in vocab_list])\nwith open(answer_file, 'w') as f:\n f.writelines([w+'\\n' for w in answer_list])",
"_____no_output_____"
],
[
"# Collect glove vectors for the words\nglove_dim = 300\nglove_mat = np.zeros((len(vocab_list), glove_dim), np.float32)\nnlp = spacy.load('en', vectors='en_glove_cc_300_1m_vectors')\nfor n, w in enumerate(vocab_list):\n glove_mat[n] = nlp(w).vector\nnp.save(glove_mat_file, glove_mat)",
"_____no_output_____"
],
[
"_ = plt.hist(question_length, bins=20)\nprint('maximum question length:', np.max(question_length))",
"maximum question length: 26\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbd733c2c5247b83d72f1433f72b8b1f3461760
| 4,430 |
ipynb
|
Jupyter Notebook
|
scikit-learn-official-examples/cluster/plot_segmentation_toy.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
scikit-learn-official-examples/cluster/plot_segmentation_toy.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
scikit-learn-official-examples/cluster/plot_segmentation_toy.ipynb
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:17:28.000Z
|
2019-05-10T09:17:28.000Z
| 82.037037 | 2,320 | 0.646501 |
[
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# Spectral clustering for image segmentation\n\n\nIn this example, an image with connected circles is generated and\nspectral clustering is used to separate the circles.\n\nIn these settings, the `spectral_clustering` approach solves the problem\nknow as 'normalized graph cuts': the image is seen as a graph of\nconnected voxels, and the spectral clustering algorithm amounts to\nchoosing graph cuts defining regions while minimizing the ratio of the\ngradient along the cut, and the volume of the region.\n\nAs the algorithm tries to balance the volume (ie balance the region\nsizes), if we take circles with different sizes, the segmentation fails.\n\nIn addition, as there is no useful information in the intensity of the image,\nor its gradient, we choose to perform the spectral clustering on a graph\nthat is only weakly informed by the gradient. This is close to performing\na Voronoi partition of the graph.\n\nIn addition, we use the mask of the objects to restrict the graph to the\noutline of the objects. In this example, we are interested in\nseparating the objects one from the other, and not from the background.\n\n",
"_____no_output_____"
]
],
[
[
"print(__doc__)\n\n# Authors: Emmanuelle Gouillart <[email protected]>\n# Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.feature_extraction import image\nfrom sklearn.cluster import spectral_clustering\n\nl = 100\nx, y = np.indices((l, l))\n\ncenter1 = (28, 24)\ncenter2 = (40, 50)\ncenter3 = (67, 58)\ncenter4 = (24, 70)\n\nradius1, radius2, radius3, radius4 = 16, 14, 15, 14\n\ncircle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2\ncircle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2\ncircle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2\ncircle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2\n\n# #############################################################################\n# 4 circles\nimg = circle1 + circle2 + circle3 + circle4\n\n# We use a mask that limits to the foreground: the problem that we are\n# interested in here is not separating the objects from the background,\n# but separating them one from the other.\nmask = img.astype(bool)\n\nimg = img.astype(float)\nimg += 1 + 0.2 * np.random.randn(*img.shape)\n\n# Convert the image into a graph with the value of the gradient on the\n# edges.\ngraph = image.img_to_graph(img, mask=mask)\n\n# Take a decreasing function of the gradient: we take it weakly\n# dependent from the gradient the segmentation is close to a voronoi\ngraph.data = np.exp(-graph.data / graph.data.std())\n\n# Force the solver to be arpack, since amg is numerically\n# unstable on this example\nlabels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')\nlabel_im = -np.ones(mask.shape)\nlabel_im[mask] = labels\n\nplt.matshow(img)\nplt.matshow(label_im)\n\n# #############################################################################\n# 2 circles\nimg = circle1 + circle2\nmask = img.astype(bool)\nimg = img.astype(float)\n\nimg += 1 + 0.2 * np.random.randn(*img.shape)\n\ngraph = image.img_to_graph(img, mask=mask)\ngraph.data = np.exp(-graph.data / graph.data.std())\n\nlabels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')\nlabel_im = -np.ones(mask.shape)\nlabel_im[mask] = labels\n\nplt.matshow(img)\nplt.matshow(label_im)\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbd779455af1da76014609bc7daa3a70cc7bb7c
| 86,250 |
ipynb
|
Jupyter Notebook
|
paper/5_optical_flow.ipynb
|
KnottLab/codex
|
f92c0af3116118e56a3feb81c6cc9dbd946f462a
|
[
"MIT"
] | null | null | null |
paper/5_optical_flow.ipynb
|
KnottLab/codex
|
f92c0af3116118e56a3feb81c6cc9dbd946f462a
|
[
"MIT"
] | 3 |
2021-08-15T19:53:07.000Z
|
2021-08-30T16:55:28.000Z
|
paper/5_optical_flow.ipynb
|
KnottLab/codex
|
f92c0af3116118e56a3feb81c6cc9dbd946f462a
|
[
"MIT"
] | null | null | null | 154.017857 | 5,024 | 0.868313 |
[
[
[
"%matplotlib inline\n\nfrom image_registration import chi2_shift\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rcParams\nimport seaborn as sns\nimport numpy as np\nimport cv2",
"_____no_output_____"
],
[
"all_imgs = !ls 210226_Bladder_TMA1_reg35/1_shading_correction/*.tif | grep DAPI\nfor i,z in enumerate(all_imgs):\n print(i, z)\n \nprint()\nafter_imgs = !ls 210226_Bladder_TMA1_reg35/2_cycle_alignment/*.tif | grep DAPI\nfor i,z in enumerate(after_imgs):\n print(i, z)\n \nprint()\nstitched_imgs = !ls 210226_Bladder_TMA1_reg35/4_stitching/*.tif | grep DAPI\nfor i,z in enumerate(stitched_imgs):\n print(i, z)",
"0 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle01_channel00_DAPI_4.tif\n1 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle02_channel00_DAPI_8.tif\n2 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle03_channel00_DAPI_12.tif\n3 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle04_channel00_DAPI_16.tif\n4 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle05_channel00_DAPI_20.tif\n5 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle06_channel00_DAPI_24.tif\n6 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle07_channel00_DAPI_28.tif\n7 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle08_channel00_DAPI_32.tif\n8 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle09_channel00_DAPI_36.tif\n9 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle10_channel00_DAPI_40.tif\n10 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle11_channel00_DAPI_44.tif\n11 210226_Bladder_TMA1_reg35/1_shading_correction/210226_Bladder_TMA1_reg35_cycle12_channel00_DAPI_48.tif\n\n0 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle01_channel00_DAPI_4.tif\n1 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle02_channel00_DAPI_8.tif\n2 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle03_channel00_DAPI_12.tif\n3 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle04_channel00_DAPI_16.tif\n4 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle05_channel00_DAPI_20.tif\n5 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle06_channel00_DAPI_24.tif\n6 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle07_channel00_DAPI_28.tif\n7 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle08_channel00_DAPI_32.tif\n8 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle09_channel00_DAPI_36.tif\n9 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle10_channel00_DAPI_40.tif\n10 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle11_channel00_DAPI_44.tif\n11 210226_Bladder_TMA1_reg35/2_cycle_alignment/210226_Bladder_TMA1_reg35_cycle12_channel00_DAPI_48.tif\n\n0 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle01_channel00_DAPI_4.tif\n1 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle02_channel00_DAPI_8.tif\n2 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle03_channel00_DAPI_12.tif\n3 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle04_channel00_DAPI_16.tif\n4 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle05_channel00_DAPI_20.tif\n5 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle06_channel00_DAPI_24.tif\n6 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle07_channel00_DAPI_28.tif\n7 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle08_channel00_DAPI_32.tif\n8 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle09_channel00_DAPI_36.tif\n9 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle10_channel00_DAPI_40.tif\n10 
210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle11_channel00_DAPI_44.tif\n11 210226_Bladder_TMA1_reg35/4_stitching/210226_Bladder_TMA1_reg35_cycle12_channel00_DAPI_48.tif\n"
],
[
"import itertools\nfrom scipy.stats import pearsonr\ndef get_shifts(ref, query, tilesize=200, overlap=0, min_mean=300, \n border = 50, \n xnorm=np.inf, ynorm=np.inf):\n assert np.all(ref.shape == query.shape)\n h,w = ref.shape\n nh = int(np.ceil((h-border) / tilesize))\n nw = int(np.ceil((w-border) / tilesize))\n nh += int(nh * overlap)\n nw += int(nw * overlap)\n hcoords = np.linspace(border, h-border-tilesize, nh, dtype='int')\n wcoords = np.linspace(border, w-border-tilesize, nw, dtype='int')\n \n shifts = np.zeros((nh,nw,3),dtype='float')\n \n for i,hc in enumerate(hcoords):\n for j,wc in enumerate(wcoords):\n r = ref[hc:hc+tilesize, wc:wc+tilesize]\n q = query[hc:hc+tilesize, wc:wc+tilesize]\n if np.mean(r) < min_mean:\n xoff=0\n yoff=0\n else:\n xoff, yoff, exoff, eyoff = chi2_shift(r, q, return_error=True, upsample_factor='auto')\n #if np.abs(xoff)>xnorm:\n # xoff=0\n #if np.abs(yoff)>ynorm:\n # yoff=0\n # cor = pearsonr(r.ravel(), q.ravel())\n shifts[i,j,:] = xoff, yoff, np.mean(r)\n \n return shifts",
"_____no_output_____"
],
[
"def cart2polar(x,y):\n theta = np.rad2deg(np.arctan2(y,x))\n if (x<0) & (y>0):\n theta = 0+theta\n if (x<0) & (y<0):\n theta = 360+theta\n if (x>0) & (y<0):\n theta = 360+theta\n \n r = np.sqrt(x**2 + y**2)\n return r, theta/360\n\nfrom matplotlib.colors import hsv_to_rgb\ndef color_shifts(shifts, r_norm=None):\n hsv = np.zeros_like(shifts)\n for i,j in itertools.product(range(shifts.shape[0]), range(shifts.shape[1])):\n comp = shifts[i,j,:2]\n r,theta = cart2polar(comp[0],comp[1])\n hsv[i,j,:] = theta, r, 1\n \n if r_norm is None: \n hsv[:,:,1] /= hsv[:,:,1].max()\n else:\n rlayer = hsv[:,:,1].copy()\n rlayer[rlayer>r_norm] = r_norm\n rlayer = rlayer/r_norm\n hsv[:,:,1] = rlayer\n \n rgb = np.zeros_like(hsv)\n for i,j in itertools.product(range(shifts.shape[0]), range(shifts.shape[1])):\n color = hsv_to_rgb(hsv[i,j,:])\n rgb[i,j,:] = color\n \n return rgb\n\nfrom matplotlib.colors import rgb2hex\ndef scatter_shifts(shifts, rgb, lims=None, save=None, ax=None):\n xs = []\n ys = []\n colors = []\n for i,j in itertools.product(range(shifts.shape[0]), range(shifts.shape[1])):\n xs.append(shifts[i,j,0])\n ys.append(shifts[i,j,1])\n colors.append(rgb2hex(rgb[i,j,:]))\n \n if lims is None:\n xtnt = np.max(np.abs(xs))\n ytnt = np.max(np.abs(ys))\n lims = np.max([xtnt, ytnt])\n \n if ax is None:\n \n plt.figure(figsize=(2,2))\n ax = plt.gca()\n ax.set_aspect('equal')\n ax.scatter(xs, ys, color=colors, lw=0.1, ec='k')\n ax.set_xlabel('xoff')\n ax.set_ylabel('yoff')\n ax.axhline(0, color='k', lw=0.5, zorder=0)\n ax.axvline(0, color='k', lw=0.5, zorder=0)\n ax.set_xlim([-lims, lims])\n ax.set_ylim([-lims, lims])\n \n if save is not None:\n plt.savefig(save, bbox_inches='tight', transparent=True)",
"_____no_output_____"
],
[
"from matplotlib import rcParams\nrcParams['svg.fonttype'] = 'none'\n\ncycle_index = 1\nfor cycle_index in range(1,10):\n ref_img = cv2.imread(all_imgs[0],-1)\n before_img = cv2.imread(all_imgs[cycle_index],-1)\n print(ref_img.shape, before_img.shape)\n \n shifts = get_shifts(ref_img, before_img, tilesize=200, overlap=0.0)\n \n stitched_ref = cv2.imread(stitched_imgs[0],-1)\n aligned_img = cv2.imread(stitched_imgs[cycle_index],-1)\n \n shifts_aligned = get_shifts(stitched_ref, aligned_img, tilesize=200, overlap=0.0)\n \n rgb = color_shifts(shifts)\n rgb_aligned = color_shifts(shifts_aligned)\n \n np.save(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_shift_uncorrected.npy', shifts)\n np.save(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_shift_corrected.npy', shifts_aligned)\n \n plt.figure()\n ax = plt.gca()\n ax.imshow(rgb)\n ax.axis('off')\n plt.savefig(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_uncorrected.png',\n bbox_inches='tight', transparent=True)\n\n scatter_shifts(shifts, rgb, lims=6,\n save = f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_uncorrected_plot.svg')\n \n plt.figure()\n ax = plt.gca()\n ax.imshow(rgb_aligned)\n ax.axis('off')\n plt.savefig(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_corrected.png',\n bbox_inches='tight', transparent=True)\n\n scatter_shifts(shifts_aligned, rgb_aligned, lims=6,\n save = f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_corrected_plot.svg')",
"(4096, 4096) (4096, 4096)\n(4096, 4096) (4096, 4096)\n(4096, 4096) (4096, 4096)\n(4096, 4096) (4096, 4096)\n(4096, 4096) (4096, 4096)\n"
],
[
"for cycle_index in range(1,10):\n shifts = np.load(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_shift_uncorrected.npy')\n shifts_aligned = np.load(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_shift_corrected.npy')\n \n# shifts[:,10,:] = 0\n# shifts[10,:,:] = 0\n# shifts_aligned[:,10,:] = 0\n# shifts_aligned[10,:,:] = 0\n \n rgb = color_shifts(shifts, r_norm=5)\n rgb_aligned = color_shifts(shifts_aligned, r_norm=5)\n \n plt.figure()\n ax = plt.gca()\n ax.imshow(rgb)\n ax.axis('off')\n plt.savefig(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_uncorrected.png',\n bbox_inches='tight', transparent=True)\n\n scatter_shifts(shifts, rgb, lims=6,\n save = f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_uncorrected_plot.svg')\n \n plt.figure()\n ax = plt.gca()\n ax.imshow(rgb_aligned)\n ax.axis('off')\n plt.savefig(f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_corrected.png',\n bbox_inches='tight', transparent=True)\n\n scatter_shifts(shifts_aligned, rgb_aligned, lims=6,\n save = f'figures/cycle_alignment/optical_shift/cycle_{cycle_index}_corrected_plot.svg')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbd7a0b12e62943136710af9b5661a6ebfee9a3
| 360,961 |
ipynb
|
Jupyter Notebook
|
code/tutorials-scikit-learn-master/2. Robust and calibrated estimators with Scikit-Learn.ipynb
|
nju-teaching/computational-communication
|
b95bca72bcfbe412fef15df9f3f057e398be7e34
|
[
"MIT"
] | 65 |
2017-04-06T01:00:19.000Z
|
2020-11-16T15:30:30.000Z
|
code/tba/tutorials-scikit-learn-master/2. Robust and calibrated estimators with Scikit-Learn.ipynb
|
XinyueSha/cjc
|
c20b0dbc929f5fb00dbdbc15ce4e1b1c8657a607
|
[
"MIT"
] | 90 |
2017-05-12T10:09:06.000Z
|
2019-09-17T13:13:22.000Z
|
code/tba/tutorials-scikit-learn-master/2. Robust and calibrated estimators with Scikit-Learn.ipynb
|
XinyueSha/cjc
|
c20b0dbc929f5fb00dbdbc15ce4e1b1c8657a607
|
[
"MIT"
] | 48 |
2017-03-22T02:58:34.000Z
|
2020-11-16T03:08:47.000Z
| 396.225027 | 105,060 | 0.927892 |
[
[
[
"<center>\n <img src=\"img/scikit-learn-logo.png\" width=\"40%\" />\n <br />\n <h1>Robust and calibrated estimators with Scikit-Learn</h1>\n <br /><br />\n Gilles Louppe (<a href=\"https://twitter.com/glouppe\">@glouppe</a>)\n <br /><br />\n New York University\n</center>",
"_____no_output_____"
]
],
[
[
"# Global imports and settings\n\n# Matplotlib\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (8, 8)\nplt.rcParams[\"figure.max_open_warning\"] = -1\n\n# Print options\nimport numpy as np\nnp.set_printoptions(precision=3)\n\n# Slideshow\nfrom notebook.services.config import ConfigManager\ncm = ConfigManager()\ncm.update('livereveal', {'width': 1440, 'height': 768, 'scroll': True, 'theme': 'simple'})\n\n# Silence warnings\nimport warnings\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\nwarnings.simplefilter(action=\"ignore\", category=UserWarning)\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\n\n# Utils\nfrom robustness import plot_surface\nfrom robustness import plot_outlier_detector",
"_____no_output_____"
],
[
"%%javascript\nReveal.addEventListener(\"slidechanged\", function(event){ window.location.hash = \"header\"; });",
"_____no_output_____"
]
],
[
[
"# Motivation",
"_____no_output_____"
],
[
"_In theory,_\n- Samples $x$ are drawn from a distribution $P$;\n- As data increases, convergence towards the optimal model is guaranteed. ",
"_____no_output_____"
],
[
"_In practice,_\n- A few samples may be distant from other samples:\n - either because they correspond to rare observations,\n - or because they are due to experimental errors;\n- Because data is finite, outliers might strongly affect the resulting model.",
"_____no_output_____"
],
[
"_Today's goal:_ build models that are robust to outliers!",
"_____no_output_____"
],
[
"# Outline\n\n* Motivation\n* Novelty and anomaly detection\n* Ensembling for robustness \n* From least squares to least absolute deviances\n* Calibration",
"_____no_output_____"
],
[
"# Novelty and anomaly detection",
"_____no_output_____"
],
[
"_Novelty detection:_ \n- Training data is not polluted by outliers, and we are interested in detecting anomalies in new observations.",
"_____no_output_____"
],
[
"_Outlier detection:_\n- Training data contains outliers, and we need to fit the central mode of the training data, ignoring the deviant observations.",
"_____no_output_____"
],
[
"## API",
"_____no_output_____"
]
],
[
[
"# Unsupervised learning \nestimator.fit(X_train) # no \"y_train\"\n\n# Detecting novelty or outliers \ny_pred = estimator.predict(X_test) # inliers == 1, outliers == -1\ny_score = estimator.decision_function(X_test) # outliers == highest scores",
"_____no_output_____"
],
[
"# Generate data\nfrom sklearn.datasets import make_blobs\n\ninliers, _ = make_blobs(n_samples=200, centers=2, random_state=1)\noutliers = np.random.rand(50, 2)\noutliers = np.min(inliers, axis=0) + (np.max(inliers, axis=0) - np.min(inliers, axis=0)) * outliers\n\nX = np.vstack((inliers, outliers))\nground_truth = np.ones(len(X), dtype=np.int)\nground_truth[-len(outliers):] = 0",
"_____no_output_____"
],
[
"from sklearn.svm import OneClassSVM\nfrom sklearn.covariance import EllipticEnvelope\nfrom sklearn.ensemble import IsolationForest\n\n# Unsupervised learning\nestimator = OneClassSVM(nu=0.4, kernel=\"rbf\", gamma=0.1)\n# clf = EllipticEnvelope(contamination=.1)\n# clf = IsolationForest(max_samples=100)\nestimator.fit(X)\n\nplot_outlier_detector(estimator, X, ground_truth)",
"_____no_output_____"
]
],
[
[
"# Ensembling for robustness",
"_____no_output_____"
],
[
"## Bias-variance decomposition",
"_____no_output_____"
],
[
"__Theorem.__ For the _squared error loss_, the bias-variance decomposition of the expected\ngeneralization error at $X=\\mathbf{x}$ is\n\n$$\n\\mathbb{E}_{\\cal L} \\{ Err(\\varphi_{\\cal L}(\\mathbf{x})) \\} = \\text{noise}(\\mathbf{x}) + \\text{bias}^2(\\mathbf{x}) + \\text{var}(\\mathbf{x})\n$$\n\n",
"_____no_output_____"
],
[
"<center>\n <img src=\"img/bv.png\" width=\"50%\" />\n</center>",
"_____no_output_____"
],
[
"## Variance and robustness\n\n- Low variance implies robustness to outliers\n- High variance implies sensitivity to data pecularities\n",
"_____no_output_____"
],
[
"## Ensembling reduces variance ",
"_____no_output_____"
],
[
"__Theorem.__ For the _squared error loss_, the bias-variance decomposition of the expected generalization error at $X=x$ of an ensemble of $M$ randomized models $\\varphi_{{\\cal L},\\theta_m}$ is\n\n$$\n\\mathbb{E}_{\\cal L} \\{ Err(\\psi_{{\\cal L},\\theta_1,\\dots,\\theta_M}(\\mathbf{x})) \\} = \\text{noise}(\\mathbf{x}) + \\text{bias}^2(\\mathbf{x}) + \\text{var}(\\mathbf{x})\n$$",
"_____no_output_____"
],
[
"where\n\\begin{align*}\n\\text{noise}(\\mathbf{x}) &= Err(\\varphi_B(\\mathbf{x})), \\\\\n\\text{bias}^2(\\mathbf{x}) &= (\\varphi_B(\\mathbf{x}) - \\mathbb{E}_{{\\cal L},\\theta} \\{ \\varphi_{{\\cal L},\\theta}(\\mathbf{x}) \\} )^2, \\\\\n\\text{var}(\\mathbf{x}) &= \\rho(\\mathbf{x}) \\sigma^2_{{\\cal L},\\theta}(\\mathbf{x}) + \\frac{1 - \\rho(\\mathbf{x})}{M} \\sigma^2_{{\\cal L},\\theta}(\\mathbf{x}).\n\\end{align*}",
"_____no_output_____"
]
],
[
[
"# Load data\nfrom sklearn.datasets import load_iris\n\niris = load_iris()\nX = iris.data[:, [0, 1]]\ny = iris.target",
"_____no_output_____"
],
[
"from sklearn.tree import DecisionTreeClassifier\nclf = DecisionTreeClassifier().fit(X, y)\nplot_surface(clf, X, y)\n\nfrom sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=100).fit(X, y)\nplot_surface(clf, X, y)",
"_____no_output_____"
]
],
[
[
"# From least squares to least absolute deviances",
"_____no_output_____"
],
[
"## Robust learning",
"_____no_output_____"
],
[
"- Most methods minimize the mean squared error $\\frac{1}{N} \\sum_i (y_i - \\varphi(x_i))^2$\n- By definition, squaring residuals gives emphasis to large residuals.\n- Outliers are thus very likely to have a significant effect.",
"_____no_output_____"
],
[
"- A robust alternative is to minimize instead the mean absolute deviation $\\frac{1}{N} \\sum_i |y_i - \\varphi(x_i)|$\n- Large residuals are therefore given much less emphasis. ",
"_____no_output_____"
]
],
[
[
"# Generate data\nfrom sklearn.datasets import make_regression\n\nn_outliers = 3\nX, y, coef = make_regression(n_samples=100, n_features=1, n_informative=1, noise=10,\n coef=True, random_state=0)\n\nnp.random.seed(1)\nX[-n_outliers:] = 1 + 0.25 * np.random.normal(size=(n_outliers, 1))\ny[-n_outliers:] = -100 + 10 * np.random.normal(size=n_outliers)\n\nplt.scatter(X[:-n_outliers], y[:-n_outliers], color=\"b\")\nplt.scatter(X[-n_outliers:], y[-n_outliers:], color=\"r\")\nplt.xlim(-3, 3)\nplt.ylim(-150, 120)\nplt.show()",
"_____no_output_____"
],
[
"# Fit with least squares vs. least absolute deviances\nfrom sklearn.ensemble import GradientBoostingRegressor\n\nclf_ls = GradientBoostingRegressor(loss=\"ls\")\nclf_lad = GradientBoostingRegressor(loss=\"lad\")\nclf_ls.fit(X, y)\nclf_lad.fit(X, y)\n\n# Plot\nX_test = np.linspace(-5, 5).reshape(-1, 1)\nplt.scatter(X[:-n_outliers], y[:-n_outliers], color=\"b\")\nplt.scatter(X[-n_outliers:], y[-n_outliers:], color=\"r\")\nplt.plot(X_test, clf_ls.predict(X_test), \"g\", label=\"Least squares\")\nplt.plot(X_test, clf_lad.predict(X_test), \"y\", label=\"Lead absolute deviances\")\nplt.xlim(-3, 3)\nplt.ylim(-150, 120)\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Robust scaling\n\n- Standardization of a dataset is a common requirement for many machine learning estimators. \n- Typically this is done by removing the mean and scaling to unit variance. ",
"_____no_output_____"
],
[
"- For similar reasons as before, outliers can influence the sample mean / variance in a negative way. \n- In such cases, the median and the interquartile range often give better results.",
"_____no_output_____"
]
],
[
[
"# Generate data\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\n\nX, y = make_blobs(n_samples=100, centers=[(0, 0), (-1, 0)], random_state=0)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)\nX_train[0, 0] = -1000 # a fairly large outlier",
"_____no_output_____"
],
[
"# Scale data\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import RobustScaler\n\nstandard_scaler = StandardScaler()\nXtr_s = standard_scaler.fit_transform(X_train)\nXte_s = standard_scaler.transform(X_test)\n\nrobust_scaler = RobustScaler()\nXtr_r = robust_scaler.fit_transform(X_train)\nXte_r = robust_scaler.transform(X_test)",
"_____no_output_____"
],
[
"# Plot data\nfig, ax = plt.subplots(1, 3, figsize=(12, 4))\nax[0].scatter(X_train[:, 0], X_train[:, 1], color=np.where(y_train == 0, 'r', 'b'))\nax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(y_train == 0, 'r', 'b'))\nax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(y_train == 0, 'r', 'b'))\nax[0].set_title(\"Unscaled data\")\nax[1].set_title(\"After standard scaling (zoomed in)\")\nax[2].set_title(\"After robust scaling (zoomed in)\")\n\n# for the scaled data, we zoom in to the data center (outlier can't be seen!)\nfor a in ax[1:]:\n a.set_xlim(-3, 3)\n a.set_ylim(-3, 3)\n \nplt.show()",
"_____no_output_____"
],
[
"# Classify using kNN\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier()\nknn.fit(Xtr_s, y_train)\nacc_s = knn.score(Xte_s, y_test)\nprint(\"Test set accuracy using standard scaler: %.3f\" % acc_s)\n\nknn.fit(Xtr_r, y_train)\nacc_r = knn.score(Xte_r, y_test)\nprint(\"Test set accuracy using robust scaler: %.3f\" % acc_r)",
"Test set accuracy using standard scaler: 0.460\nTest set accuracy using robust scaler: 0.680\n"
]
],
[
[
"# Calibration",
"_____no_output_____"
],
[
"- In classification, you often want to predict not only the class label, but also the associated probability.\n- However, not all classifiers provide well-calibrated probabilities.\n- Thus, a separate calibration of predicted probabilities is often desirable as a postprocessing",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\n\n# Generate 3 blobs with 2 classes where the second blob contains\n# half positive samples and half negative samples. Probability in this\n# blob is therefore 0.5.\nX, y = make_blobs(n_samples=10000, n_features=2, cluster_std=1.0, \n centers=[(-5, -5), (0, 0), (5, 5)], shuffle=False)\ny[:len(X) // 2] = 0\ny[len(X) // 2:] = 1\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)",
"_____no_output_____"
],
[
"# Plot\nfor this_y, color in zip([0, 1], [\"r\", \"b\"]):\n this_X = X_train[y_train == this_y]\n plt.scatter(this_X[:, 0], this_X[:, 1], c=color, alpha=0.2, label=\"Class %s\" % this_y)\nplt.legend(loc=\"best\")\nplt.title(\"Data\")\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.naive_bayes import GaussianNB\nfrom sklearn.calibration import CalibratedClassifierCV\n\n# Without calibration\nclf = GaussianNB()\nclf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights\nprob_pos_clf = clf.predict_proba(X_test)[:, 1]\n\n# With isotonic calibration\nclf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')\nclf_isotonic.fit(X_train, y_train)\nprob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]",
"_____no_output_____"
],
[
"# Plot\norder = np.lexsort((prob_pos_clf, ))\nplt.plot(prob_pos_clf[order], 'r', label='No calibration')\nplt.plot(prob_pos_isotonic[order], 'b', label='Isotonic calibration')\nplt.plot(np.linspace(0, y_test.size, 51)[1::2], y_test[order].reshape(25, -1).mean(1), 'k--', label=r'Empirical')\n\nplt.xlabel(\"Instances sorted according to predicted probability \"\n \"(uncalibrated GNB)\")\nplt.ylabel(\"P(y=1)\")\nplt.legend(loc=\"upper left\")\nplt.title(\"Gaussian naive Bayes probabilities\")\nplt.ylim([-0.05, 1.05])\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Summary\n\nFor robust and calibrated estimators:\n- remove outliers before training;\n- reduce variance by ensembling estimators;\n- drive your analysis with loss functions that are robust to outliers;\n - avoid the squared error loss!\n- calibrate the output of your classifier if probabilities are important for your problem.",
"_____no_output_____"
]
],
[
[
"questions?",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
cbbd9fe3f4b02e973d721efb097a5ec5e11c23c0
| 13,555 |
ipynb
|
Jupyter Notebook
|
06_CNN/CNN_03_MNIST.ipynb
|
JAICHANGPARK/Tensorflow_Win10
|
9b56e4dc1f00a0e0bc3a31f3d06782751c9adc47
|
[
"MIT",
"Unlicense"
] | null | null | null |
06_CNN/CNN_03_MNIST.ipynb
|
JAICHANGPARK/Tensorflow_Win10
|
9b56e4dc1f00a0e0bc3a31f3d06782751c9adc47
|
[
"MIT",
"Unlicense"
] | null | null | null |
06_CNN/CNN_03_MNIST.ipynb
|
JAICHANGPARK/Tensorflow_Win10
|
9b56e4dc1f00a0e0bc3a31f3d06782751c9adc47
|
[
"MIT",
"Unlicense"
] | null | null | null | 40.342262 | 5,376 | 0.695979 |
[
[
[
"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport random\nimport time",
"_____no_output_____"
],
[
"from tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)",
"Extracting MNIST_data/train-images-idx3-ubyte.gz\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
],
[
"learning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100",
"_____no_output_____"
],
[
"input_x = tf.placeholder(tf.float32, [None,784])\n# 이미지 크기 28x28 흑/백\ninput_x_img = tf.reshape(input_x, [-1,28,28,1])\ny = tf.placeholder(tf.float32, [None,10])",
"_____no_output_____"
],
[
"weight_01 = tf.Variable(tf.random_normal([3,3,1,32], stddev=0.01))\nlayer_01 = tf.nn.conv2d(input_x_img, weight_01, strides=[1,1,1,1],padding='SAME')\nprint(layer_01.shape)\nlayer_01 = tf.nn.relu(layer_01)\nprint(layer_01.shape)\nlayer_01 = tf.nn.max_pool(layer_01, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\nprint(layer_01.shape)",
"(?, 28, 28, 32)\n(?, 28, 28, 32)\n(?, 14, 14, 32)\n"
],
[
"weight_02 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))\nlayer_02 = tf.nn.conv2d(layer_01,weight_02,strides=[1,1,1,1],padding='SAME')\nlayer_02 = tf.nn.relu(layer_02)\nlayer_02 = tf.nn.max_pool(layer_02, ksize=[1, 2, 2, 1], strides=[1,2,2,1], padding='SAME')\nlayer_02_flat = tf.reshape(layer_02, [-1, 7*7*64])\nprint(layer_02_flat.shape)",
"(?, 3136)\n"
],
[
"weight_03 = tf.get_variable(\"weight_03\", shape=[7*7*64, 10], initializer=tf.contrib.layers.xavier_initializer())\nbias = tf.Variable(tf.random_normal([10]))\nlogits = tf.matmul(layer_02_flat, weight_03) + bias",
"_____no_output_____"
],
[
"cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels = y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)",
"_____no_output_____"
],
[
"session = tf.InteractiveSession()",
"_____no_output_____"
],
[
"session.run(tf.global_variables_initializer())",
"_____no_output_____"
],
[
"start = time.time()\nprint(\"train start\")\nfor epoch in range(training_epochs):\n average_cost = 0\n total_batch = int(mnist.train.num_examples / batch_size)\n \n for i in range(total_batch):\n batch_x, batch_y = mnist.train.next_batch(batch_size) # batch size = 100\n feed_dict = {input_x:batch_x, y: batch_y}\n c, _ = session.run([cost, optimizer], feed_dict=feed_dict)\n average_cost += c / total_batch # c / int(mnist.train.num_examples / batch_size)\n print('Epoch:', '%04d' % ( epoch + 1), 'cost =', '{:.9f}'.format(average_cost))\nprint('training finish')\nend = time.time() - start\nprint(end)",
"train start\nEpoch: 0001 cost = 0.417886820\nEpoch: 0002 cost = 0.100923526\nEpoch: 0003 cost = 0.070302976\nEpoch: 0004 cost = 0.056870009\nEpoch: 0005 cost = 0.047615337\nEpoch: 0006 cost = 0.040887784\nEpoch: 0007 cost = 0.035696651\nEpoch: 0008 cost = 0.031369645\nEpoch: 0009 cost = 0.027907827\nEpoch: 0010 cost = 0.024304986\nEpoch: 0011 cost = 0.021419153\nEpoch: 0012 cost = 0.020057813\nEpoch: 0013 cost = 0.017052165\nEpoch: 0014 cost = 0.015299189\nEpoch: 0015 cost = 0.012961761\ntraining finish\n71.74727749824524\n"
],
[
"correct_prdic = tf.equal(tf.argmax(logits,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prdic, tf.float32))\ncalc = session.run(accuracy, feed_dict={input_x: mnist.test.images, y: mnist.test.labels})\nprint('정확도 :', calc * 100 , '%')",
"정확도 : 98.9400148392 %\n"
],
[
"rand = random.randint(0, mnist.test.num_examples - 1)\nprint(mnist.test.num_examples)\nprint(rand)",
"10000\n7114\n"
],
[
"labels = session.run(tf.argmax(mnist.test.labels[rand : rand + 1], 1))\nprint(mnist.test.labels[rand : rand + 1])\nprint(labels)\nplt.imshow(mnist.test.images[rand : rand + 1].reshape(28,28), cmap='Greys', interpolation='nearest')\nplt.show()",
"[[ 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]]\n[8]\n"
],
[
"prediction = tf.argmax(logits, 1)\nprint(prediction)\nsess_prdic = session.run(prediction, feed_dict={input_x: mnist.test.images[rand: rand + 1]})\nprint(sess_prdic)",
"Tensor(\"ArgMax_1:0\", shape=(?,), dtype=int64)\n[8]\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbda4bd38a02773ae88e6c5e65261b9a3069e2d
| 194,937 |
ipynb
|
Jupyter Notebook
|
create_test_image.ipynb
|
RTANC/keras-yolo3
|
3448fcee8a2e26a8248d979aa12bffeaa3fc5a2d
|
[
"MIT"
] | null | null | null |
create_test_image.ipynb
|
RTANC/keras-yolo3
|
3448fcee8a2e26a8248d979aa12bffeaa3fc5a2d
|
[
"MIT"
] | null | null | null |
create_test_image.ipynb
|
RTANC/keras-yolo3
|
3448fcee8a2e26a8248d979aa12bffeaa3fc5a2d
|
[
"MIT"
] | null | null | null | 1,188.640244 | 190,404 | 0.957966 |
[
[
[
"import json\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageDraw, ImageEnhance\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom skimage.filters import threshold_otsu\nfrom skimage.io import imread\nfrom skimage.color import rgb2gray\nfrom skimage.measure import label, regionprops\nimport json\nimport os\n# import uuid\nimport random",
"_____no_output_____"
],
[
"labels = os.path.join('training_data','test.json')\ncount = 1\nwith open(labels,'r') as json_file:\n datas = json.load(json_file)\n for data in datas:\n img_name = data['image']['pathname']\n names = img_name.split('/')\n result = os.path.join('training_data',names[1],names[2])\n cmd = \"copy \" + result + \" \" + str(count) + \".jpg\"\n os.system(cmd)\n objects = data['objects']\n count += 1\n print(result)",
"_____no_output_____"
],
[
"labels = os.path.join('training_data','test.json')\ncount = 1\ncountP = 0\nwith open(labels,'r') as json_file:\n datas = json.load(json_file)\n for data in datas:\n img_name = data['image']['pathname']\n names = img_name.split('/')\n result = os.path.join('training_data',names[1],names[2])\n img = Image.open(result)\n objects = data['objects']\n for obj in objects:\n minr = obj['bounding_box']['minimum']['r']\n minc = obj['bounding_box']['minimum']['c']\n maxr = obj['bounding_box']['maximum']['r']\n maxc = obj['bounding_box']['maximum']['c']\n category = obj['category']\n if category == \"ring\" or category == \"trophozoite\" or category == \"schizont\" or category == \"gametocyte\":\n draw = ImageDraw.Draw(img)\n draw.rectangle((minc, minr,maxc,maxr), fill=None,outline=\"red\",width=5)\n countP = countP + 1\n save_path = os.path.join('test','labeled',str(count)+ \"-label\" + \".jpg\")\n img.save(save_path)\n count += 1\n# break",
"_____no_output_____"
],
[
"print(countP)",
"303\n"
],
[
"img = Image.open(\"training_data\\\\images\\\\225\\\\c7ec51d5-9f45-4209-8357-39e9a984a733.png\")\nbbs = \"337,783,487,933,0 914,822,1051,959,0 1448,765,1600,930,1 233,0,508,240,1\"\nobjects = bbs.split()\nfor obj in objects:\n bb = obj.split(',')\n draw = ImageDraw.Draw(img)\n draw.rectangle((int(bb[0]), int(bb[1]),int(bb[2]),int(bb[3])), fill=None,outline=\"red\",width=5)\nplt.imshow(img)\niw, ih = img.size\nprint(\"W: %d, H: %d\" % (iw,ih))\nimg.save(\"simple.png\")",
"W: 1600, H: 1200\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
cbbdaa49052c3f52a42710b231884a675601c342
| 8,892 |
ipynb
|
Jupyter Notebook
|
004list.ipynb
|
Roc-J/Python_data_science
|
a8299a8ca58334013cb582d4397e069e09103880
|
[
"Apache-2.0"
] | null | null | null |
004list.ipynb
|
Roc-J/Python_data_science
|
a8299a8ca58334013cb582d4397e069e09103880
|
[
"Apache-2.0"
] | null | null | null |
004list.ipynb
|
Roc-J/Python_data_science
|
a8299a8ca58334013cb582d4397e069e09103880
|
[
"Apache-2.0"
] | null | null | null | 16.937143 | 60 | 0.402497 |
[
[
[
"### list",
"_____no_output_____"
]
],
[
[
"# create a list\na = range(1,10)\nprint a\nb = [\"a\",\"b\",\"c\",\"d\"]\nprint b",
"[1, 2, 3, 4, 5, 6, 7, 8, 9]\n['a', 'b', 'c', 'd']\n"
],
[
"# access element by index\nprint a[0]\nprint b[-3]",
"1\nb\n"
],
[
"# list can split\nprint a[1:6]\nprint a[1:]\nprint a[:5]\nprint a[-1:]\nprint a[:-1]\nprint b[1:3]\n",
"[2, 3, 4, 5, 6]\n[2, 3, 4, 5, 6, 7, 8, 9]\n[1, 2, 3, 4, 5]\n[9]\n[1, 2, 3, 4, 5, 6, 7, 8]\n['b', 'c']\n"
],
[
"# list + operator\na = [1,4]\nb = [5,6]\nprint a+b",
"[1, 4, 5, 6]\n"
],
[
"# the min and max of list\nprint max(a)\nprint min(a)",
"4\n1\n"
],
[
"# in and not in \nif 2 in a:\n print 'the element 2 is in a'\nelse:\n print 'the element 2 is not in a'",
"the element 2 is not in a\n"
],
[
"# append and extend\na = range(10)\nprint a \na.append(10)",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"print a",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n"
],
[
"# stack\na_stack = []\na_stack.append(1)\na_stack.append('Roc')\na_stack.append(\"hello\")\n\nprint a_stack",
"[1, 'Roc', 'hello']\n"
],
[
"print a_stack.pop()\nprint a_stack.pop()\nprint a_stack.pop()",
"hello\nRoc\n1\n"
],
[
"# queue\na_queue = []\na_queue.append(\"who\")\na_queue.append(\"are\")\na_queue.append(\"you\")\nprint a_queue",
"['who', 'are', 'you']\n"
],
[
"print a_queue.pop(0)\nprint a_queue.pop(0)\nprint a_queue.pop(0)",
"who\nare\nyou\n"
],
[
"# shuffle , sort and reverse\nfrom random import shuffle\na = range(15)\nshuffle(a)\nprint a",
"[2, 3, 12, 7, 6, 9, 4, 10, 13, 1, 14, 11, 5, 8, 0]\n"
],
[
"a.sort()\nprint a",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\n"
],
[
"a.reverse()\nprint a",
"[14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n"
]
],
[
[
"#### 从一个列表创建一个列表\n",
"_____no_output_____"
]
],
[
[
"# generate list from a list\n\na = [1,3,5,0,-5,7,-8]\n\nb = [pow(x,2) for x in a if x<0]\nprint b",
"[25, 64]\n"
],
[
"# generate dict from dict\na = {'a':1,'b':4,'c':5}\nb = {x:pow(y,2) for x,y in a.items()}\nprint b",
"{'a': 1, 'c': 25, 'b': 16}\n"
],
[
"# tuple\ndef process(x):\n if isinstance(x,str):\n return x.lower()\n elif isinstance(x,int):\n return x*x\n else:\n return -1\na = (1,2,-1,-2,'D','R',3,4,-3,'E')\nb = tuple(process(x) for x in a)\nprint b",
"(1, 4, 1, 4, 'd', 'r', 9, 16, 9, 'e')\n"
]
],
[
[
"### 列表排序",
"_____no_output_____"
]
],
[
[
"# sort 和sorted不同\nfrom random import shuffle\na = range(10)\nshuffle(a)\nprint a",
"[0, 6, 4, 3, 2, 5, 8, 9, 1, 7]\n"
],
[
"# sort\na.sort()\nprint a",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"b = range(10)\nshuffle(b)\nprint b",
"[5, 0, 2, 7, 9, 6, 4, 8, 1, 3]\n"
],
[
"print sorted(b)",
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
],
[
"print b",
"[5, 0, 2, 7, 9, 6, 4, 8, 1, 3]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
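The 004list.ipynb record above is written for Python 2: `print` is a statement, `range()` returns a list, and the recorded outputs reflect that. For readers trying the same operations on a current interpreter, here is a minimal Python 3 rendering of the core examples; it is an illustrative sketch added here, not part of the original record, and shuffled orderings will naturally differ from the captured outputs.

```python
# Python 3 sketch of the list operations demonstrated in 004list.ipynb.
# Not part of the original record; print() is a function and range() is lazy here.
from random import shuffle

a = list(range(1, 10))           # materialize the range into a list
b = ["a", "b", "c", "d"]
print(a[1:6], b[-3])             # slicing and negative indexing behave the same

a_stack = []                     # stack: append to and pop from the end
a_stack.append("hello")
print(a_stack.pop())

a_queue = ["who", "are", "you"]  # queue: pop(0) removes from the front
print(a_queue.pop(0))

c = list(range(15))
shuffle(c)                       # shuffle needs a mutable sequence, hence list()
c.sort()                         # in-place sort
print(sorted(c, reverse=True))   # sorted() returns a new list

squares = {k: v ** 2 for k, v in {"a": 1, "b": 4, "c": 5}.items()}  # dict comprehension
print(squares)
```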
cbbdb03394a689ef5dba39c8ea52e0cc82e99fda
| 19,985 |
ipynb
|
Jupyter Notebook
|
gQuant/plugins/cusignal_plugin/notebooks/windows_examples.ipynb
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/cusignal_plugin/notebooks/windows_examples.ipynb
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/cusignal_plugin/notebooks/windows_examples.ipynb
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null | 43.445652 | 6,812 | 0.657743 |
[
[
[
"## Benchmarking Scipy Signal vs cuSignal Time to Create Windows in Greenflow",
"_____no_output_____"
],
[
"The windows examples were taken from the example [cusignal windows notebook](https://github.com/rapidsai/cusignal/blob/branch-21.08/notebooks/api_guide/windows_examples.ipynb).",
"_____no_output_____"
],
[
"### General Parameters",
"_____no_output_____"
]
],
[
[
"import cupy.testing as cptest",
"_____no_output_____"
],
[
"from greenflow.dataframe_flow import (TaskGraph, TaskSpecSchema)",
"_____no_output_____"
],
[
"# Num Points in Array - Reduce if getting out of memory errors\nM = int(1e7)",
"_____no_output_____"
],
[
"tgraph_win = TaskGraph.load_taskgraph('./taskgraphs/window_signal.gq.yaml')\ntgraph_win.build()\ntgraph_win.draw(show='ipynb', show_ports=True, pydot_options={'rankdir': 'LR'})",
"_____no_output_____"
]
],
[
[
"### General Cosine",
"_____no_output_____"
]
],
[
[
"HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097]\nwin_params = {\n 'window_type': 'general_cosine',\n 'M': M,\n 'a': HFT90D,\n 'sym': False\n}\n\nreplace_gpu = {\n 'winsig': {\n TaskSpecSchema.conf: win_params\n }\n}\n\nreplace_cpu = {\n 'winsig': {\n TaskSpecSchema.conf: {\n **win_params,\n 'use_cpu': True\n }\n }\n}\n\n(gwin,) = tgraph_win.run(['winsig.window'], replace=replace_gpu)\n(cwin,) = tgraph_win.run(['winsig.window'], replace=replace_cpu)\ncptest.assert_array_almost_equal(gwin, cwin)",
"_____no_output_____"
],
[
"gpu_time = %timeit -o (gwin,) = tgraph_win.run(['winsig.window'], replace=replace_gpu)",
"3.57 ms ± 25.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
],
[
"cpu_time = %timeit -o (cwin,) = tgraph_win.run(['winsig.window'], replace=replace_cpu)",
"910 ms ± 1.71 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
],
[
"print('SPEEDUP: {}x'.format(round(cpu_time.average / gpu_time.average, 1)))",
"SPEEDUP: 255.2x\n"
]
],
[
[
"### Timing CuSignal Windows",
"_____no_output_____"
],
[
"Using the base taskgraph for generating a window we can use the programmatic API to profile and compare generating windows via cusignal on GPU and scipy on CPU.",
"_____no_output_____"
]
],
[
[
"from copy import deepcopy\n\ndefault_params = {'M': M} # M is defined above\nwins_map = {\n 'boxcar': {},\n 'triang': {},\n 'bohman': {},\n 'blackman': {},\n 'nuttall': {},\n 'blackmanharris': {},\n 'flattop': {},\n 'bartlett': {},\n 'hann': {},\n 'tukey': {\n 'params': {\n 'alpha': 0.5,\n 'sym': True\n }\n },\n 'barthann': {},\n 'general_hamming': {\n 'params': {\n 'alpha': 0.5,\n 'sym': True\n }\n },\n 'hamming': {},\n 'kaiser': {\n 'params': {\n 'beta': 0.5\n }\n },\n 'gaussian': {\n 'params': {\n 'std': 7\n }\n },\n 'general_gaussian': {\n 'params': {\n 'p': 1.5,\n 'sig': 7, # for older API\n }\n },\n 'cosine': {},\n 'exponential': {\n 'params': {\n 'tau': 3.0\n }\n }\n}\n\nfor wintype, winconf in wins_map.items():\n win_params = default_params.copy()\n win_params['window_type'] = wintype\n win_params.update(winconf.get('params', {}))\n replace_gpu = {\n 'winsig': {\n TaskSpecSchema.conf: win_params,\n }\n }\n replace_cpu = deepcopy(replace_gpu)\n replace_cpu['winsig'][TaskSpecSchema.conf]['use_cpu'] = True\n print('WINDOW TYPE: {}'.format(wintype))\n print('GPU TIMING')\n tgraph_win.build(replace=replace_gpu)\n gpu_time = %timeit -o (gwin,) = tgraph_win.run(['winsig.window'], build=False)\n print('CPU TIMING')\n tgraph_win.build(replace=replace_cpu)\n cpu_time = %timeit -o (cwin,) = tgraph_win.run(['winsig.window'], build=False)\n print('SPEEDUP: {}x'.format(round(cpu_time.average / gpu_time.average, 1)))\n print('\\n')\n\n# reset tgraph build\ntgraph_win.build()",
"WINDOW TYPE: boxcar\nGPU TIMING\n2.07 ms ± 52.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n21.4 ms ± 61 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\nSPEEDUP: 10.3x\n\n\nWINDOW TYPE: triang\nGPU TIMING\n2.05 ms ± 8.82 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n70.4 ms ± 182 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\nSPEEDUP: 34.4x\n\n\nWINDOW TYPE: bohman\nGPU TIMING\n2.05 ms ± 8.12 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n446 ms ± 1.16 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 218.0x\n\n\nWINDOW TYPE: blackman\nGPU TIMING\n2.98 ms ± 8.52 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n525 ms ± 1.32 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 175.9x\n\n\nWINDOW TYPE: nuttall\nGPU TIMING\n3.03 ms ± 16.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n719 ms ± 920 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 237.3x\n\n\nWINDOW TYPE: blackmanharris\nGPU TIMING\n3.02 ms ± 15.3 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n718 ms ± 1.03 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 237.6x\n\n\nWINDOW TYPE: flattop\nGPU TIMING\n3.09 ms ± 26.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n936 ms ± 833 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 302.9x\n\n\nWINDOW TYPE: bartlett\nGPU TIMING\n2.08 ms ± 22.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n181 ms ± 216 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\nSPEEDUP: 86.9x\n\n\nWINDOW TYPE: hann\nGPU TIMING\n2.93 ms ± 20.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n338 ms ± 143 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 115.4x\n\n\nWINDOW TYPE: tukey\nGPU TIMING\n2.05 ms ± 8.33 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n138 ms ± 640 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\nSPEEDUP: 67.3x\n\n\nWINDOW TYPE: barthann\nGPU TIMING\n2.05 ms ± 15.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n317 ms ± 859 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 154.8x\n\n\nWINDOW TYPE: general_hamming\nGPU TIMING\n2.93 ms ± 22.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n336 ms ± 1.32 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 114.7x\n\n\nWINDOW TYPE: hamming\nGPU TIMING\n2.1 ms ± 12.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n338 ms ± 1.21 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 161.1x\n\n\nWINDOW TYPE: kaiser\nGPU TIMING\n2.05 ms ± 10.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n742 ms ± 654 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 361.9x\n\n\nWINDOW TYPE: gaussian\nGPU TIMING\n2.05 ms ± 16.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n208 ms ± 734 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 101.5x\n\n\nWINDOW TYPE: general_gaussian\nGPU TIMING\n2.08 ms ± 48.3 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n433 ms ± 1.37 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 207.9x\n\n\nWINDOW TYPE: cosine\nGPU TIMING\n2.05 ms ± 15.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n175 ms ± 379 µs per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\nSPEEDUP: 85.3x\n\n\nWINDOW TYPE: exponential\nGPU TIMING\n2.05 ms ± 5.35 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\nCPU TIMING\n208 ms ± 622 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\nSPEEDUP: 101.6x\n\n\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
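The windows_examples.ipynb record above measures window-creation time through greenflow task graphs. As a point of comparison, the same CPU-versus-GPU contrast can be sketched without any task-graph machinery by timing scipy and cusignal directly. The snippet below is an illustrative sketch only: it assumes a CUDA-capable machine with cupy and cusignal installed, and that cusignal exposes window functions under the same names as scipy.signal.windows, as the notebook's usage implies; the timings it prints will not match the notebook's figures.

```python
# Direct CPU-vs-GPU window timing sketch (assumes cupy and cusignal are available;
# this is not part of the original record and the numbers will differ by hardware).
import time

import cupy as cp
import cusignal
from scipy.signal import windows as cpu_windows

M = int(1e7)  # same window length used in the notebook

t0 = time.perf_counter()
cpu_win = cpu_windows.hann(M)        # scipy builds the window on the CPU
cpu_s = time.perf_counter() - t0

t0 = time.perf_counter()
gpu_win = cusignal.hann(M)           # cusignal builds it on the GPU (assumed mirror of scipy's API)
cp.cuda.runtime.deviceSynchronize()  # wait for the kernel to finish before stopping the clock
gpu_s = time.perf_counter() - t0

print(f"CPU: {cpu_s:.4f} s  GPU: {gpu_s:.4f} s  speedup ~{cpu_s / max(gpu_s, 1e-9):.1f}x")
```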
cbbdc2353983073a6f2f4f838475e926116d8936
| 5,665 |
ipynb
|
Jupyter Notebook
|
notebooks/templates/due_diligence.ipynb
|
HETorresjr/StaffTerminal
|
b6abcb68a77b3ff23185d7532b6a40f599237b22
|
[
"MIT"
] | 3 |
2021-04-13T06:26:46.000Z
|
2022-01-26T05:11:22.000Z
|
notebooks/templates/due_diligence.ipynb
|
HETorresjr/StaffTerminal
|
b6abcb68a77b3ff23185d7532b6a40f599237b22
|
[
"MIT"
] | null | null | null |
notebooks/templates/due_diligence.ipynb
|
HETorresjr/StaffTerminal
|
b6abcb68a77b3ff23185d7532b6a40f599237b22
|
[
"MIT"
] | null | null | null | 25.986239 | 112 | 0.54722 |
[
[
[
"import importlib\nimport pathlib\nimport os\nimport sys\nfrom datetime import datetime, timedelta\nimport pandas as pd\nmodule_path = os.path.abspath(os.path.join('../..'))\nif module_path not in sys.path:\n sys.path.append(module_path)",
"_____no_output_____"
],
[
"datetime.now()",
"_____no_output_____"
],
[
"ticker=\"GME\"\nreport_name=f\"{ticker}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_due_diligence\"\nbase_path=os.path.abspath(os.path.join('.'))",
"_____no_output_____"
],
[
"report_cache_dir = pathlib.Path(base_path, \"notebooks\", \"reports\", report_name)\nif not os.path.isdir(report_cache_dir):\n print(f\"Reports data directory not found. Creating {report_cache_dir}\")\n os.mkdir(report_cache_dir)\nelse:\n print(f\"Found reports directory {report_cache_dir}\")\n print(os.listdir(report_cache_dir))",
"_____no_output_____"
],
[
"from gamestonk_terminal.technical_analysis import trendline_api as trend\nfrom gamestonk_terminal.due_diligence import finviz_api as finviz",
"_____no_output_____"
],
[
"df_stock_cache = pathlib.Path(report_cache_dir, f\"{ticker}_stock_data.pkl\")\nif os.path.isfile(df_stock_cache):\n print(f\"Found a cache file. Loading {df_stock_cache}\")\n df_stock = pd.read_pickle(df_stock_cache)\nelse:\n print(\"Cache file not found. Getting data\")\n df_stock = trend.load_ticker(ticker, (datetime.now() - timedelta(days=180)).strftime(\"%Y-%m-%d\"))\n df_stock = trend.find_trendline(df_stock, \"OC_High\", \"high\")\n df_stock = trend.find_trendline(df_stock, \"OC_Low\", \"how\")\n print(\"Savind cache file\")\n df_stock.to_pickle(df_stock_cache)\n print(os.listdir(report_cache_dir))",
"_____no_output_____"
],
[
"import mplfinance as mpf\nmc = mpf.make_marketcolors(up='green',down='red',\n edge='black',\n wick='black',\n volume='in',\n ohlc='i')\ns = mpf.make_mpf_style(marketcolors=mc, gridstyle=\":\", y_on_right=True)\n\nap0 = []\n\nif \"OC_High_trend\" in df_stock.columns:\n ap0.append(\n mpf.make_addplot(df_stock[\"OC_High_trend\"], color=\"g\"),\n )\n\nif \"OC_Low_trend\" in df_stock.columns:\n ap0.append(\n mpf.make_addplot(df_stock[\"OC_Low_trend\"], color=\"b\"),\n )",
"_____no_output_____"
],
[
"mpf.plot(df_stock,type='candle',mav=(20,50,200),volume=True, addplot=ap0,\n xrotation=0, style=s, figratio=(10,7), figscale=2.00, \n update_width_config=dict(candle_linewidth=1.0,candle_width=0.8, volume_linewidth=1.0))",
"_____no_output_____"
],
[
"df_fa = finviz.analyst_df(ticker)\ndf_fa",
"_____no_output_____"
],
[
"from gamestonk_terminal.fundamental_analysis import market_watch_api as mw",
"_____no_output_____"
],
[
"mw.prepare_df_financials(ticker, \"income\")",
"_____no_output_____"
],
[
"mw.prepare_df_financials(ticker, \"income\", quarter=True)",
"_____no_output_____"
],
[
"mw.prepare_df_financials(ticker, \"balance\")",
"_____no_output_____"
],
[
"mw.prepare_df_financials(ticker, \"cashflow\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
cbbdc23a3cf4fa686cca403a8edf2feb599f4a1f
| 5,166 |
ipynb
|
Jupyter Notebook
|
pipeline.ipynb
|
Nyarish/CarND-Advanced-Lane-Lines
|
76114d437f817a949b3b3e66a7b159018f6bda78
|
[
"MIT"
] | null | null | null |
pipeline.ipynb
|
Nyarish/CarND-Advanced-Lane-Lines
|
76114d437f817a949b3b3e66a7b159018f6bda78
|
[
"MIT"
] | null | null | null |
pipeline.ipynb
|
Nyarish/CarND-Advanced-Lane-Lines
|
76114d437f817a949b3b3e66a7b159018f6bda78
|
[
"MIT"
] | null | null | null | 27.924324 | 129 | 0.573171 |
[
[
[
"# Get imports\nimport pickle\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n%matplotlib qt",
"_____no_output_____"
],
[
"# import helper functions\nimport camera_calibrator\nimport distortion_correction\nimport image_binary_gradient\nimport perspective_transform\nimport detect_lane_pixels\nimport measure_curvature",
"_____no_output_____"
],
[
"# Files to undistort\ncalibration1 = ('./camera_cal/calibration1.jpg')\nstraight_lines1 = ('./test_images/straight_lines1.jpg')\nstraight_lines2 = ('./test_images/straight_lines2.jpg')\ntest1 = ('./test_images/test1.jpg')\ntest2 = ('./test_images/test2.jpg')\ntest3 = ('./test_images/test3.jpg')\ntest4 = ('./test_images/test4.jpg')\ntest5 = ('./test_images/test5.jpg')\ntest6 = ('./test_images/test6.jpg')\n",
"_____no_output_____"
],
[
"def pipeline(image):\n \n # Get matrix from calibration\n mtx, dist = camera_calibrator.get_calibration_matrix()\n \n \n # Smoothen the lanes using clahe\n LUV = image\n lab = cv2.cvtColor(LUV, cv2.COLOR_RGB2LUV)\n lab_planes = cv2.split(lab)\n gridsize = 3\n clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(gridsize,gridsize))\n lab_planes[0] = clahe.apply(lab_planes[0])\n lab = cv2.merge(lab_planes)\n image_original = cv2.cvtColor(lab, cv2.COLOR_LUV2RGB)\n\n\n \n # Undistoretd image \n undist_img = distortion_correction.get_undistorted_image(image_original, mtx, dist, gray_scale=False)#image_original\n \n # Binary Image \n \n color_binary, binary_image = image_binary_gradient.get_binary_image(undist_img)\n\n \n # Perspective trasform\n top_down = perspective_transform.get_transformed_perspective(binary_image) \n \n # Warp the color zone back to original image space using inverse perspective matrix (Minv)\n color_zone_warp = measure_curvature.get_color_zone_warp(top_down)\n newwarp = perspective_transform.get_original_perspective(color_zone_warp)\n\n original_img = cv2.addWeighted(image_original, 1, newwarp, 0.3, 0)#image_original\n result = measure_curvature.add_text(original_img, top_down)\n \n # If gray scale convert to triple channel format\n \n if len(result.shape) == 2:\n result = np.dstack((original_img,)*3)\n\n # If binary image, scale to full 8-bit values\n if np.max(result) <= 1:\n result *= 255\n \n return result",
"_____no_output_____"
],
[
"img = lambda fname: mpimg.imread(fname)\nresult = pipeline(img(test4))\nplt.imshow(result)",
"_____no_output_____"
]
],
[
[
"#### Test pipeline on videos",
"_____no_output_____"
]
],
[
[
"# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML",
"_____no_output_____"
],
[
"# fname = 'project_video.mp4'\n# output = 'videos_out/project_video_processed.mp4'\n\n# clip1 = VideoFileClip(fname)\n# clip = clip1.fl_image(pipeline)\n# %time clip.write_videofile(output, audio=False)",
"_____no_output_____"
],
[
"# # Challange video\n\n# clip2 = VideoFileClip('challenge_video.mp4')\n# clip_challenge = clip2.fl_image(pipeline)\n# %time clip_challenge.write_videofile('videos_out/challenge_video_processed.mp4', audio=False)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
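The video cells at the end of the pipeline.ipynb record above are commented out, presumably to keep the notebook quick to re-run. For reference, this is roughly what executing that step would look like; the sketch simply reactivates the calls from those comments (the paths and the `pipeline` function are the notebook's own) and assumes moviepy is installed.

```python
# Sketch of the commented-out video step from pipeline.ipynb; assumes moviepy is
# installed and that the notebook's pipeline() function is defined in this scope.
from moviepy.editor import VideoFileClip

input_path = 'project_video.mp4'                        # path named in the commented cell
output_path = 'videos_out/project_video_processed.mp4'

clip = VideoFileClip(input_path)
processed = clip.fl_image(pipeline)                     # run the lane-finding pipeline frame by frame
processed.write_videofile(output_path, audio=False)     # write the annotated video to disk
```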
cbbddd8418a671baafb032bbcabc0355935855e6
| 10,445 |
ipynb
|
Jupyter Notebook
|
data_analysis/rnn_embeddings/rnn_embeddings.ipynb
|
ScottishFold007/recsys_telenor
|
cc32382910da46a3f02a290fb11efcbf9d9f756d
|
[
"MIT"
] | 1 |
2021-11-17T02:31:42.000Z
|
2021-11-17T02:31:42.000Z
|
data_analysis/rnn_embeddings/rnn_embeddings.ipynb
|
ScottishFold007/recsys_telenor
|
cc32382910da46a3f02a290fb11efcbf9d9f756d
|
[
"MIT"
] | null | null | null |
data_analysis/rnn_embeddings/rnn_embeddings.ipynb
|
ScottishFold007/recsys_telenor
|
cc32382910da46a3f02a290fb11efcbf9d9f756d
|
[
"MIT"
] | null | null | null | 32.039877 | 81 | 0.545716 |
[
[
[
"import pickle\nimport pandas as pd",
"_____no_output_____"
],
[
"embeddings, labels = pickle.load( open( \"embeddings.p\", \"rb\" ) )",
"_____no_output_____"
],
[
"emb = []\nlab = []\nfor i, label in enumerate(labels):\n emb.append(embeddings[i].tolist())\n lab.append(label)\ntokens_df = pd.DataFrame(emb)\nlabels_df = pd.DataFrame(lab)\ntokens_df.to_csv('rnn_emb.csv', sep='\\t', index=False, header=None)\nlabels_df.to_csv('rnn_lab.csv',sep='\\t', index=False, header=None)",
"_____no_output_____"
],
[
"labels",
"_____no_output_____"
],
[
"print(embeddings)",
"tensor([[-2.4702, -0.5111, 0.0394, ..., -0.1261, 1.1983, 2.8913],\n [ 0.3123, -1.9285, -0.7931, ..., -0.4298, 0.0516, -0.4627],\n [-0.8981, 0.5944, -0.8680, ..., 1.0738, -1.1459, 0.5132],\n ...,\n [-1.1513, 2.7297, -0.2117, ..., -1.4909, 0.2409, -0.6725],\n [ 0.7842, -1.6716, -1.0766, ..., -1.4737, 0.3162, 0.7037],\n [ 0.6026, -1.4020, 1.3053, ..., 0.9034, -0.6024, -0.7758]],\n device='cuda:0')\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
cbbde042140957aabd4b670c40f78417e2b8a06d
| 29,928 |
ipynb
|
Jupyter Notebook
|
exercises/minimal_regression/surface_iris.ipynb
|
kolibril13/data-science-and-big-data-analytics
|
68abb8c8aed3ecd73ad3de62d6f41894893f37a6
|
[
"Apache-2.0"
] | 1 |
2019-12-15T19:42:06.000Z
|
2019-12-15T19:42:06.000Z
|
exercises/minimal_regression/surface_iris.ipynb
|
kolibril13/data-science-and-big-data-analytics
|
68abb8c8aed3ecd73ad3de62d6f41894893f37a6
|
[
"Apache-2.0"
] | null | null | null |
exercises/minimal_regression/surface_iris.ipynb
|
kolibril13/data-science-and-big-data-analytics
|
68abb8c8aed3ecd73ad3de62d6f41894893f37a6
|
[
"Apache-2.0"
] | null | null | null | 48.505673 | 438 | 0.484396 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |